-rw-r--r--.clang-format39
-rw-r--r--Documentation/ABI/stable/sysfs-bus-nvmem2
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-acm7
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uac18
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uac211
-rw-r--r--Documentation/ABI/testing/debugfs-iio-ad946739
-rw-r--r--Documentation/ABI/testing/debugfs-iio-backend20
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio76
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-max961117
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp4014
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac61
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac-ltc268831
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-filter-admv88182
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc9
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs56
-rw-r--r--Documentation/admin-guide/device-mapper/delay.rst41
-rw-r--r--Documentation/admin-guide/device-mapper/dm-crypt.rst4
-rw-r--r--Documentation/admin-guide/device-mapper/vdo.rst7
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt17
-rw-r--r--Documentation/arch/loongarch/irq-chip-model.rst32
-rw-r--r--Documentation/arch/s390/vfio-ap.rst30
-rw-r--r--Documentation/core-api/cleanup.rst8
-rw-r--r--Documentation/core-api/folio_queue.rst212
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml38
-rw-r--r--Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml42
-rw-r--r--Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml84
-rw-r--r--Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml144
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml11
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt21
-rw-r--r--Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml37
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml9
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml92
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml254
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml33
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml13
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml123
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml71
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml25
-rw-r--r--Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml83
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml122
-rw-r--r--Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml181
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml160
-rw-r--r--Documentation/devicetree/bindings/iio/dac/dac.yaml50
-rw-r--r--Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml10
-rw-r--r--Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml55
-rw-r--r--Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml4
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml53
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml (renamed from Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml)11
-rw-r--r--Documentation/devicetree/bindings/iio/light/stk33xx.yaml13
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml46
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml61
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml93
-rw-r--r--Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml87
-rw-r--r--Documentation/devicetree/bindings/input/goodix,gt7986u.yaml71
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml25
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml3
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml2
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml5
-rw-r--r--Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml12
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml3
-rw-r--r--Documentation/devicetree/bindings/mtd/technologic,nand.yaml45
-rw-r--r--Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml59
-rw-r--r--Documentation/devicetree/bindings/net/ti,cc1352p7.yaml7
-rw-r--r--Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml3
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml1
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml (renamed from Documentation/devicetree/bindings/nvmem/u-boot,env.yaml)39
-rw-r--r--Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml17
-rw-r--r--Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml53
-rw-r--r--Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml3
-rw-r--r--Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml49
-rw-r--r--Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt26
-rw-r--r--Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml28
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml9
-rw-r--r--Documentation/devicetree/bindings/serial/8250_omap.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml9
-rw-r--r--Documentation/devicetree/bindings/serial/mediatek,uart.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.yaml70
-rw-r--r--Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml94
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml16
-rw-r--r--Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml70
-rw-r--r--Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml52
-rw-r--r--Documentation/devicetree/bindings/usb/msm-hsusb.txt110
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml18
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml4
-rw-r--r--Documentation/driver-api/cxl/access-coordinates.rst91
-rw-r--r--Documentation/driver-api/cxl/index.rst1
-rw-r--r--Documentation/filesystems/9p.rst58
-rw-r--r--Documentation/filesystems/nfs/index.rst1
-rw-r--r--Documentation/filesystems/nfs/localio.rst357
-rw-r--r--Documentation/iio/ad4000.rst131
-rw-r--r--Documentation/iio/ad4695.rst167
-rw-r--r--Documentation/iio/ad7380.rst130
-rw-r--r--Documentation/iio/adxl380.rst233
-rw-r--r--Documentation/iio/index.rst4
-rw-r--r--Documentation/networking/tproxy.rst2
-rw-r--r--Documentation/rust/general-information.rst27
-rw-r--r--Documentation/rust/index.rst18
-rw-r--r--Documentation/rust/quick-start.rst4
-rw-r--r--Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst32
-rw-r--r--Documentation/usb/functionfs-desc.rst39
-rw-r--r--Documentation/usb/functionfs.rst2
-rw-r--r--Documentation/usb/gadget-testing.rst19
-rw-r--r--Documentation/usb/index.rst1
-rw-r--r--Documentation/virt/kvm/api.rst31
-rw-r--r--Documentation/virt/kvm/locking.rst32
-rw-r--r--Documentation/virt/uml/user_mode_linux_howto_v2.rst37
-rw-r--r--Documentation/watchdog/convert_drivers_to_kernel_api.rst1
-rw-r--r--MAINTAINERS92
-rw-r--r--Makefile23
-rw-r--r--arch/Kconfig16
-rw-r--r--arch/alpha/include/asm/cmpxchg.h239
-rw-r--r--arch/alpha/include/asm/xchg.h246
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/dts/cirrus/Makefile4
-rw-r--r--arch/arm/boot/dts/cirrus/ep93xx-bk3.dts125
-rw-r--r--arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts181
-rw-r--r--arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts145
-rw-r--r--arch/arm/boot/dts/cirrus/ep93xx.dtsi444
-rw-r--r--arch/arm/mach-ep93xx/Kconfig20
-rw-r--r--arch/arm/mach-ep93xx/Makefile11
-rw-r--r--arch/arm/mach-ep93xx/clock.c733
-rw-r--r--arch/arm/mach-ep93xx/core.c1018
-rw-r--r--arch/arm/mach-ep93xx/dma.c114
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c368
-rw-r--r--arch/arm/mach-ep93xx/ep93xx-regs.h38
-rw-r--r--arch/arm/mach-ep93xx/gpio-ep93xx.h111
-rw-r--r--arch/arm/mach-ep93xx/hardware.h25
-rw-r--r--arch/arm/mach-ep93xx/irqs.h76
-rw-r--r--arch/arm/mach-ep93xx/platform.h42
-rw-r--r--arch/arm/mach-ep93xx/soc.h212
-rw-r--r--arch/arm/mach-ep93xx/timer-ep93xx.c143
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c422
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.h94
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c319
-rw-r--r--arch/arm64/Kconfig14
-rw-r--r--arch/arm64/Makefile3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7981b.dtsi33
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts3
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm64/kvm/arm.c6
-rw-r--r--arch/loongarch/Kconfig7
-rw-r--r--arch/loongarch/include/asm/atomic.h2
-rw-r--r--arch/loongarch/include/asm/cpu-features.h2
-rw-r--r--arch/loongarch/include/asm/cpu.h30
-rw-r--r--arch/loongarch/include/asm/loongarch.h1
-rw-r--r--arch/loongarch/include/asm/mmu_context.h35
-rw-r--r--arch/loongarch/include/asm/percpu.h124
-rw-r--r--arch/loongarch/include/asm/pgtable.h32
-rw-r--r--arch/loongarch/include/asm/set_memory.h21
-rw-r--r--arch/loongarch/include/uapi/asm/hwcap.h1
-rw-r--r--arch/loongarch/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/loongarch/kernel/acpi.c4
-rw-r--r--arch/loongarch/kernel/cpu-probe.c120
-rw-r--r--arch/loongarch/kernel/proc.c10
-rw-r--r--arch/loongarch/kernel/syscall.c4
-rw-r--r--arch/loongarch/kvm/main.c4
-rw-r--r--arch/loongarch/mm/Makefile3
-rw-r--r--arch/loongarch/mm/fault.c41
-rw-r--r--arch/loongarch/mm/pageattr.c218
-rw-r--r--arch/loongarch/pci/acpi.c1
-rw-r--r--arch/loongarch/vdso/vgetrandom-chacha.S92
-rw-r--r--arch/mips/include/asm/kvm_host.h4
-rw-r--r--arch/mips/kvm/mips.c8
-rw-r--r--arch/mips/kvm/vz.c8
-rw-r--r--arch/parisc/kernel/perf.c1
-rw-r--r--arch/powerpc/kernel/eeh.c198
-rw-r--r--arch/powerpc/lib/crtsavres.S2
-rw-r--r--arch/riscv/Kconfig9
-rw-r--r--arch/riscv/kvm/main.c4
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c1
-rw-r--r--arch/s390/hypfs/inode.c1
-rw-r--r--arch/s390/kernel/debug.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c1
-rw-r--r--arch/s390/kernel/sysinfo.c1
-rw-r--r--arch/s390/kernel/vdso64/vdso_user_wrapper.S14
-rw-r--r--arch/s390/kernel/vdso64/vgetrandom-chacha.S76
-rw-r--r--arch/s390/kernel/vmlinux.lds.S3
-rw-r--r--arch/s390/kvm/kvm-s390.c27
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/pci/pci_clp.c1
-rw-r--r--arch/sh/include/asm/irq.h6
-rw-r--r--arch/sparc/mm/leon_mm.c8
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/harddog_kern.c1
-rw-r--r--arch/um/drivers/hostaudio_kern.c2
-rw-r--r--arch/um/drivers/vector_kern.c212
-rw-r--r--arch/um/drivers/vector_kern.h4
-rw-r--r--arch/um/drivers/vector_user.c83
-rw-r--r--arch/um/include/asm/pgtable.h7
-rw-r--r--arch/um/include/asm/processor-generic.h20
-rw-r--r--arch/um/include/asm/sysrq.h8
-rw-r--r--arch/um/include/shared/skas/mm_id.h5
-rw-r--r--arch/um/include/shared/skas/skas.h2
-rw-r--r--arch/um/kernel/exec.c3
-rw-r--r--arch/um/kernel/process.c8
-rw-r--r--arch/um/kernel/reboot.c2
-rw-r--r--arch/um/kernel/skas/mmu.c12
-rw-r--r--arch/um/kernel/skas/process.c4
-rw-r--r--arch/um/kernel/skas/syscall.c34
-rw-r--r--arch/um/kernel/sysrq.c1
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/um/kernel/tlb.c16
-rw-r--r--arch/um/os-Linux/file.c8
-rw-r--r--arch/um/os-Linux/skas/mem.c2
-rw-r--r--arch/um/os-Linux/skas/process.c2
-rw-r--r--arch/x86/Makefile11
-rw-r--r--arch/x86/coco/tdx/tdx.c6
-rw-r--r--arch/x86/include/asm/atomic64_32.h6
-rw-r--r--arch/x86/include/asm/cpuid.h1
-rw-r--r--arch/x86/include/asm/intel-family.h5
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h32
-rw-r--r--arch/x86/include/asm/msr-index.h34
-rw-r--r--arch/x86/include/asm/pgtable_64.h23
-rw-r--r--arch/x86/include/asm/reboot.h4
-rw-r--r--arch/x86/include/asm/svm.h20
-rw-r--r--arch/x86/include/asm/vmx.h40
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/kernel/cpu/mce/dev-mcelog.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.c6
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c1
-rw-r--r--arch/x86/kernel/head_64.S20
-rw-r--r--arch/x86/kernel/vmlinux.lds.S3
-rw-r--r--arch/x86/kvm/cpuid.c30
-rw-r--r--arch/x86/kvm/irq.c10
-rw-r--r--arch/x86/kvm/lapic.c84
-rw-r--r--arch/x86/kvm/lapic.h3
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c556
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h5
-rw-r--r--arch/x86/kvm/mmu/mmutrace.h1
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h63
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c6
-rw-r--r--arch/x86/kvm/reverse_cpuid.h8
-rw-r--r--arch/x86/kvm/smm.c24
-rw-r--r--arch/x86/kvm/svm/nested.c4
-rw-r--r--arch/x86/kvm/svm/svm.c87
-rw-r--r--arch/x86/kvm/svm/svm.h18
-rw-r--r--arch/x86/kvm/svm/vmenter.S8
-rw-r--r--arch/x86/kvm/vmx/capabilities.h10
-rw-r--r--arch/x86/kvm/vmx/main.c10
-rw-r--r--arch/x86/kvm/vmx/nested.c134
-rw-r--r--arch/x86/kvm/vmx/nested.h8
-rw-r--r--arch/x86/kvm/vmx/sgx.c2
-rw-r--r--arch/x86/kvm/vmx/vmx.c67
-rw-r--r--arch/x86/kvm/vmx/vmx.h9
-rw-r--r--arch/x86/kvm/vmx/vmx_onhyperv.h8
-rw-r--r--arch/x86/kvm/vmx/vmx_ops.h2
-rw-r--r--arch/x86/kvm/vmx/x86_ops.h7
-rw-r--r--arch/x86/kvm/x86.c1002
-rw-r--r--arch/x86/kvm/x86.h31
-rw-r--r--arch/x86/lib/atomic64_cx8_32.S9
-rw-r--r--arch/x86/mm/pat/memtype.c36
-rw-r--r--arch/x86/platform/pvh/head.S161
-rw-r--r--arch/x86/um/sysrq_32.c1
-rw-r--r--arch/x86/um/sysrq_64.c1
-rw-r--r--arch/x86/xen/enlighten_pvh.c23
-rw-r--r--block/bdev.c4
-rw-r--r--block/bio-integrity.c1
-rw-r--r--block/blk-integrity.c36
-rw-r--r--block/blk-merge.c4
-rw-r--r--block/blk-mq.c5
-rw-r--r--block/blk-settings.c42
-rw-r--r--block/elevator.c4
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/apei/einj-cxl.c2
-rw-r--r--drivers/acpi/apei/erst-dbg.c1
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/android/binder.c288
-rw-r--r--drivers/android/binder_internal.h21
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/libata-scsi.c9
-rw-r--r--drivers/ata/pata_ep93xx.c107
-rw-r--r--drivers/auxdisplay/charlcd.c1
-rw-r--r--drivers/base/attribute_container.c48
-rw-r--r--drivers/base/auxiliary.c2
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/bus.c19
-rw-r--r--drivers/base/cacheinfo.c41
-rw-r--r--drivers/base/class.c14
-rw-r--r--drivers/base/core.c168
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/devres.c2
-rw-r--r--drivers/base/driver.c2
-rw-r--r--drivers/base/firmware_loader/main.c30
-rw-r--r--drivers/base/module.c14
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/ublk_drv.c1
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/hci_vhci.c1
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c2
-rw-r--r--drivers/bus/mhi/host/init.c2
-rw-r--r--drivers/bus/mhi/host/internal.h2
-rw-r--r--drivers/bus/mhi/host/pci_generic.c64
-rw-r--r--drivers/bus/moxtet.c2
-rw-r--r--drivers/cdx/controller/mcdi.c4
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/ds1620.c1
-rw-r--r--drivers/char/dtlk.c1
-rw-r--r--drivers/char/hpet.c7
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c1
-rw-r--r--drivers/char/pc8736x_gpio.c1
-rw-r--r--drivers/char/ppdev.c1
-rw-r--r--drivers/char/scx200_gpio.c1
-rw-r--r--drivers/char/sonypi.c1
-rw-r--r--drivers/char/tpm/tpm-dev.c1
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c1
-rw-r--r--drivers/char/tpm/tpmrm-dev.c1
-rw-r--r--drivers/char/virtio_console.c1
-rw-r--r--drivers/clk/Kconfig8
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/clk-ep93xx.c850
-rw-r--r--drivers/comedi/drivers/ni_atmio.c9
-rw-r--r--drivers/comedi/drivers/ni_mio_common.c9
-rw-r--r--drivers/comedi/drivers/ni_pcimio.c9
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c5
-rw-r--r--drivers/comedi/drivers/ni_stc.h2
-rw-r--r--drivers/counter/counter-chrdev.c1
-rw-r--r--drivers/cxl/core/cdat.c508
-rw-r--r--drivers/cxl/core/core.h4
-rw-r--r--drivers/cxl/core/mbox.c96
-rw-r--r--drivers/cxl/core/memdev.c41
-rw-r--r--drivers/cxl/core/pci.c164
-rw-r--r--drivers/cxl/core/port.c206
-rw-r--r--drivers/cxl/core/region.c81
-rw-r--r--drivers/cxl/cxl.h9
-rw-r--r--drivers/cxl/cxlmem.h27
-rw-r--r--drivers/cxl/mem.c29
-rw-r--r--drivers/cxl/pci.c91
-rw-r--r--drivers/cxl/pmem.c26
-rw-r--r--drivers/cxl/port.c2
-rw-r--r--drivers/cxl/security.c23
-rw-r--r--drivers/dma/ep93xx_dma.c287
-rw-r--r--drivers/extcon/Kconfig11
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-lc824206xa.c495
-rw-r--r--drivers/firewire/core-cdev.c1
-rw-r--r--drivers/firmware/arm_scmi/driver.c1
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c5
-rw-r--r--drivers/firmware/efi/capsule-loader.c1
-rw-r--r--drivers/firmware/efi/cper.c11
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/tpm.c2
-rw-r--r--drivers/firmware/efi/test/efi_test.c1
-rw-r--r--drivers/firmware/qemu_fw_cfg.c2
-rw-r--r--drivers/firmware/turris-mox-rwtm.c1
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/tests/fpga-bridge-test.c25
-rw-r--r--drivers/fpga/tests/fpga-mgr-test.c28
-rw-r--r--drivers/fpga/tests/fpga-region-test.c41
-rw-r--r--drivers/fpga/zynq-fpga.c8
-rw-r--r--drivers/gnss/core.c1
-rw-r--r--drivers/gpio/gpio-ep93xx.c345
-rw-r--r--drivers/gpio/gpio-mockup.c1
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c1
-rw-r--r--drivers/gpio/gpiolib-cdev.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c165
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c86
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h25
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c6
-rw-r--r--drivers/gpu/drm/drm_file.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c1
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c1
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c3
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c14
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h6
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h6
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c1
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c1
-rw-r--r--drivers/greybus/Kconfig2
-rw-r--r--drivers/greybus/gb-beagleplay.c658
-rw-r--r--drivers/hid/hid-goodix-spi.c9
-rw-r--r--drivers/hid/uhid.c1
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/fschmd.c1
-rw-r--r--drivers/hwmon/w83793.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h18
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c138
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h70
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c1
-rw-r--r--drivers/hwtracing/intel_th/msu.c1
-rw-r--r--drivers/hwtracing/stm/core.c1
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c38
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c5
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/i2c-dev.c1
-rw-r--r--drivers/idle/intel_idle.c37
-rw-r--r--drivers/iio/accel/Kconfig27
-rw-r--r--drivers/iio/accel/Makefile3
-rw-r--r--drivers/iio/accel/adxl367.c2
-rw-r--r--drivers/iio/accel/adxl367_spi.c2
-rw-r--r--drivers/iio/accel/adxl372.c2
-rw-r--r--drivers/iio/accel/adxl380.c1905
-rw-r--r--drivers/iio/accel/adxl380.h26
-rw-r--r--drivers/iio/accel/adxl380_i2c.c64
-rw-r--r--drivers/iio/accel/adxl380_spi.c66
-rw-r--r--drivers/iio/accel/bma180.c3
-rw-r--r--drivers/iio/accel/bma400_core.c11
-rw-r--r--drivers/iio/accel/bma400_spi.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c13
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c8
-rw-r--r--drivers/iio/accel/msa311.c3
-rw-r--r--drivers/iio/accel/sca3300.c3
-rw-r--r--drivers/iio/accel/stk8312.c3
-rw-r--r--drivers/iio/accel/stk8ba50.c3
-rw-r--r--drivers/iio/adc/Kconfig48
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad4000.c722
-rw-r--r--drivers/iio/adc/ad4695.c1185
-rw-r--r--drivers/iio/adc/ad7091r5.c6
-rw-r--r--drivers/iio/adc/ad7091r8.c2
-rw-r--r--drivers/iio/adc/ad7124.c38
-rw-r--r--drivers/iio/adc/ad7192.c189
-rw-r--r--drivers/iio/adc/ad7266.c7
-rw-r--r--drivers/iio/adc/ad7280a.c14
-rw-r--r--drivers/iio/adc/ad7291.c4
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/ad7298.c7
-rw-r--r--drivers/iio/adc/ad7380.c525
-rw-r--r--drivers/iio/adc/ad7476.c58
-rw-r--r--drivers/iio/adc/ad7606.c47
-rw-r--r--drivers/iio/adc/ad7606_par.c2
-rw-r--r--drivers/iio/adc/ad7606_spi.c9
-rw-r--r--drivers/iio/adc/ad7766.c14
-rw-r--r--drivers/iio/adc/ad7768-1.c7
-rw-r--r--drivers/iio/adc/ad7780.c10
-rw-r--r--drivers/iio/adc/ad7793.c20
-rw-r--r--drivers/iio/adc/ad7887.c4
-rw-r--r--drivers/iio/adc/ad7923.c18
-rw-r--r--drivers/iio/adc/ad799x.c3
-rw-r--r--drivers/iio/adc/ad9467.c491
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c6
-rw-r--r--drivers/iio/adc/adi-axi-adc.c71
-rw-r--r--drivers/iio/adc/aspeed_adc.c5
-rw-r--r--drivers/iio/adc/at91_adc.c19
-rw-r--r--drivers/iio/adc/axp20x_adc.c182
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c2
-rw-r--r--drivers/iio/adc/berlin2-adc.c2
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/dln2-adc.c8
-rw-r--r--drivers/iio/adc/ep93xx_adc.c2
-rw-r--r--drivers/iio/adc/exynos_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/hx711.c7
-rw-r--r--drivers/iio/adc/ina2xx-adc.c17
-rw-r--r--drivers/iio/adc/ingenic-adc.c2
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c2
-rw-r--r--drivers/iio/adc/ltc2496.c2
-rw-r--r--drivers/iio/adc/ltc2497.c2
-rw-r--r--drivers/iio/adc/max1027.c16
-rw-r--r--drivers/iio/adc/max11100.c4
-rw-r--r--drivers/iio/adc/max1118.c7
-rw-r--r--drivers/iio/adc/max1241.c4
-rw-r--r--drivers/iio/adc/max1363.c34
-rw-r--r--drivers/iio/adc/max34408.c4
-rw-r--r--drivers/iio/adc/max9611.c6
-rw-r--r--drivers/iio/adc/mcp320x.c10
-rw-r--r--drivers/iio/adc/mcp3564.c54
-rw-r--r--drivers/iio/adc/mcp3911.c61
-rw-r--r--drivers/iio/adc/mp2629_adc.c4
-rw-r--r--drivers/iio/adc/mt6360-adc.c4
-rw-r--r--drivers/iio/adc/nau7802.c2
-rw-r--r--drivers/iio/adc/pac1921.c1261
-rw-r--r--drivers/iio/adc/pac1934.c6
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/rtq6056.c4
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c97
-rw-r--r--drivers/iio/adc/sophgo-cv1800b-adc.c227
-rw-r--r--drivers/iio/adc/stm32-adc.c6
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c297
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c2
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/ti-adc0832.c7
-rw-r--r--drivers/iio/adc/ti-adc084s021.c7
-rw-r--r--drivers/iio/adc/ti-adc12138.c7
-rw-r--r--drivers/iio/adc/ti-adc161s626.c8
-rw-r--r--drivers/iio/adc/ti-ads1015.c6
-rw-r--r--drivers/iio/adc/ti-ads1119.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c5
-rw-r--r--drivers/iio/adc/ti-ads1298.c3
-rw-r--r--drivers/iio/adc/ti-ads131e08.c6
-rw-r--r--drivers/iio/adc/ti-ads7924.c4
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/adc/ti-ads8344.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c10
-rw-r--r--drivers/iio/adc/ti-lmp92064.c2
-rw-r--r--drivers/iio/adc/ti-tlc4541.c8
-rw-r--r--drivers/iio/adc/ti-tsc2046.c83
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/adc/xilinx-ams.c15
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c5
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c36
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c1
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c4
-rw-r--r--drivers/iio/chemical/bme680.h41
-rw-r--r--drivers/iio/chemical/bme680_core.c633
-rw-r--r--drivers/iio/chemical/bme680_spi.c2
-rw-r--r--drivers/iio/chemical/sgp40.c11
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c8
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c2
-rw-r--r--drivers/iio/dac/Kconfig11
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5449.c15
-rw-r--r--drivers/iio/dac/ad9739a.c13
-rw-r--r--drivers/iio/dac/adi-axi-dac.c21
-rw-r--r--drivers/iio/dac/ltc2664.c735
-rw-r--r--drivers/iio/dac/ltc2688.c2
-rw-r--r--drivers/iio/dac/mcp4728.c45
-rw-r--r--drivers/iio/dac/mcp4922.c47
-rw-r--r--drivers/iio/dac/ti-dac7311.c4
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/iio/frequency/adf4377.c35
-rw-r--r--drivers/iio/health/afe4403.c3
-rw-r--r--drivers/iio/health/afe4404.c3
-rw-r--r--drivers/iio/health/max30102.c2
-rw-r--r--drivers/iio/humidity/Kconfig11
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c3
-rw-r--r--drivers/iio/humidity/ens210.c339
-rw-r--r--drivers/iio/imu/adis16400.c18
-rw-r--r--drivers/iio/imu/adis16460.c18
-rw-r--r--drivers/iio/imu/adis16475.c12
-rw-r--r--drivers/iio/imu/adis16480.c20
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/bmi323/bmi323.h1
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c182
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c3
-rw-r--r--drivers/iio/imu/bmi323/bmi323_spi.c3
-rw-r--r--drivers/iio/imu/bno055/bno055.c2
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c2
-rw-r--r--drivers/iio/imu/kmx61.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c93
-rw-r--r--drivers/iio/industrialio-backend.c264
-rw-r--r--drivers/iio/industrialio-buffer.c52
-rw-r--r--drivers/iio/industrialio-core.c46
-rw-r--r--drivers/iio/industrialio-trigger.c27
-rw-r--r--drivers/iio/light/Kconfig13
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c3
-rw-r--r--drivers/iio/light/apds9960.c55
-rw-r--r--drivers/iio/light/bh1745.c906
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c9
-rw-r--r--drivers/iio/light/isl29125.c3
-rw-r--r--drivers/iio/light/ltr390.c241
-rw-r--r--drivers/iio/light/ltrf216a.c53
-rw-r--r--drivers/iio/light/noa1305.c169
-rw-r--r--drivers/iio/light/rohm-bu27034.c337
-rw-r--r--drivers/iio/light/si1145.c7
-rw-r--r--drivers/iio/light/stk3310.c7
-rw-r--r--drivers/iio/light/tcs3414.c3
-rw-r--r--drivers/iio/light/tcs3472.c3
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/ak8975.c80
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c2
-rw-r--r--drivers/iio/pressure/Kconfig11
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280-core.c654
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c2
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c45
-rw-r--r--drivers/iio/pressure/bmp280-spi.c18
-rw-r--r--drivers/iio/pressure/bmp280.h37
-rw-r--r--drivers/iio/pressure/dlhl60d.c3
-rw-r--r--drivers/iio/pressure/sdp500.c156
-rw-r--r--drivers/iio/proximity/Kconfig25
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/aw96103.c846
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c2
-rw-r--r--drivers/iio/proximity/hx9023s.c1144
-rw-r--r--drivers/iio/proximity/sx9500.c3
-rw-r--r--drivers/iio/proximity/sx_common.c6
-rw-r--r--drivers/infiniband/core/ucma.c1
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c4
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c1
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/keyboard/applespi.c1
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c74
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/serio/userio.c1
-rw-r--r--drivers/interconnect/icc-clk.c3
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/msm8937.c1350
-rw-r--r--drivers/interconnect/qcom/msm8953.c2
-rw-r--r--drivers/interconnect/qcom/msm8976.c1440
-rw-r--r--drivers/interconnect/qcom/qcs404.c127
-rw-r--r--drivers/interconnect/qcom/sm8350.c155
-rw-r--r--drivers/interconnect/qcom/sm8350.h10
-rw-r--r--drivers/iommu/iommufd/fault.c1
-rw-r--r--drivers/isdn/capi/capi.c1
-rw-r--r--drivers/isdn/mISDN/timerdev.c1
-rw-r--r--drivers/leds/uleds.c1
-rw-r--r--drivers/macintosh/adb.c1
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/mailbox/Kconfig3
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c3
-rw-r--r--drivers/mailbox/imx-mailbox.c6
-rw-r--r--drivers/mailbox/mailbox.c22
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/rockchip-mailbox.c2
-rw-r--r--drivers/mailbox/sprd-mailbox.c25
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-cache-target.c6
-rw-r--r--drivers/md/dm-clone-metadata.c5
-rw-r--r--drivers/md/dm-crypt.c47
-rw-r--r--drivers/md/dm-integrity.c326
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-vdo/data-vio.c15
-rw-r--r--drivers/md/dm-vdo/dedupe.c3
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c29
-rw-r--r--drivers/md/dm-vdo/indexer/chapter-index.c2
-rw-r--r--drivers/md/dm-vdo/io-submitter.c1
-rw-r--r--drivers/md/dm-vdo/message-stats.c48
-rw-r--r--drivers/md/dm-vdo/message-stats.h1
-rw-r--r--drivers/md/dm-vdo/repair.c41
-rw-r--r--drivers/md/dm-vdo/status-codes.c2
-rw-r--r--drivers/md/dm-vdo/status-codes.h2
-rw-r--r--drivers/md/dm-verity-target.c23
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/dm.h5
-rw-r--r--drivers/media/cec/core/cec-api.c1
-rw-r--r--drivers/media/mc/mc-devnode.c1
-rw-r--r--drivers/media/rc/lirc_dev.c1
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1
-rw-r--r--drivers/message/fusion/mptctl.c3
-rw-r--r--drivers/misc/cxl/of.c207
-rw-r--r--drivers/misc/cxl/pci.c32
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/misc/kgdbts.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c5
-rw-r--r--drivers/misc/mei/main.c1
-rw-r--r--drivers/misc/ntsync.c2
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/phantom.c1
-rw-r--r--drivers/misc/tsl2550.c8
-rw-r--r--drivers/mmc/core/block.c1
-rw-r--r--drivers/mtd/nand/raw/Kconfig6
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/technologic-nand-controller.c222
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.c1
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c21
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c28
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c18
-rw-r--r--drivers/net/ethernet/seeq/ether3.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c37
-rw-r--r--drivers/net/hamradio/6pack.c60
-rw-r--r--drivers/net/mctp/mctp-serial.c23
-rw-r--r--drivers/net/netdevsim/fib.c1
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c42
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c3
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c24
-rw-r--r--drivers/net/tap.c1
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/usbnet.c37
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c1
-rw-r--r--drivers/nvdimm/nd_virtio.c9
-rw-r--r--drivers/nvme/host/core.c5
-rw-r--r--drivers/nvme/host/ioctl.c6
-rw-r--r--drivers/nvme/host/multipath.c14
-rw-r--r--drivers/nvme/host/rdma.c6
-rw-r--r--drivers/nvme/host/sysfs.c1
-rw-r--r--drivers/nvmem/Kconfig3
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c32
-rw-r--r--drivers/nvmem/layouts/Kconfig11
-rw-r--r--drivers/nvmem/layouts/Makefile1
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c211
-rw-r--r--drivers/nvmem/layouts/u-boot-env.h15
-rw-r--r--drivers/nvmem/sunplus-ocotp.c7
-rw-r--r--drivers/nvmem/u-boot-env.c165
-rw-r--r--drivers/pinctrl/Kconfig7
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-ep93xx.c1434
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c1
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c1
-rw-r--r--drivers/platform/surface/surface_dtx.c1
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/ep93xx-restart.c84
-rw-r--r--drivers/pps/clients/pps_parport.c8
-rw-r--r--drivers/pps/pps.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c26
-rw-r--r--drivers/remoteproc/Kconfig6
-rw-r--r--drivers/rtc/Kconfig16
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/dev.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-m41t80.c1
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-rc5t619.c13
-rw-r--r--drivers/rtc/rtc-s35390a.c1
-rw-r--r--drivers/rtc/rtc-sd2405al.c227
-rw-r--r--drivers/rtc/rtc-stm32.c281
-rw-r--r--drivers/rtc/rtc-sun6i.c1
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/sclp_ctl.c1
-rw-r--r--drivers/s390/char/tape_char.c1
-rw-r--r--drivers/s390/char/uvdevice.c1
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/char/vmlogrdr.c1
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/css.c1
-rw-r--r--drivers/s390/crypto/pkey_api.c1
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c13
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1
-rw-r--r--drivers/sbus/char/openprom.c1
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c21
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c132
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c52
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c43
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h35
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h13
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h4
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c79
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c6
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c2
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/sd.c32
-rw-r--r--drivers/scsi/sg.c1
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/sh/intc/userimask.c5
-rw-r--r--drivers/slimbus/messaging.c9
-rw-r--r--drivers/slimbus/qcom-ctrl.c7
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c29
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/cirrus/Kconfig17
-rw-r--r--drivers/soc/cirrus/Makefile2
-rw-r--r--drivers/soc/cirrus/soc-ep93xx.c252
-rw-r--r--drivers/spi/atmel-quadspi.c15
-rw-r--r--drivers/spi/spi-airoha-snfi.c43
-rw-r--r--drivers/spi/spi-ep93xx.c68
-rw-r--r--drivers/spi/spi-fsl-lpspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c2
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c7
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c9
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c5
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c3
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c9
-rw-r--r--drivers/staging/fbtft/fbtft-core.c13
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c4
-rw-r--r--drivers/staging/fbtft/fbtft.h2
-rw-r--r--drivers/staging/greybus/gb-camera.h4
-rw-r--r--drivers/staging/greybus/spilib.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c5
-rw-r--r--drivers/staging/ks7010/Kconfig14
-rw-r--r--drivers/staging/ks7010/Makefile4
-rw-r--r--drivers/staging/ks7010/TODO36
-rw-r--r--drivers/staging/ks7010/eap_packet.h70
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c1143
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2312
-rw-r--r--drivers/staging/ks7010/ks_hostif.h617
-rw-r--r--drivers/staging/ks7010/ks_wlan.h567
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h61
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2676
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c3
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/nvec/nvec.c17
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c89
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c67
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c15
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c40
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib.h141
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c140
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c4
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c68
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c60
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c34
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_sdio.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c1
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h7
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h2
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h72
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h127
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c20
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c323
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h16
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c15
-rw-r--r--drivers/staging/vme_user/vme.c10
-rw-r--r--drivers/staging/vme_user/vme.h17
-rw-r--r--drivers/staging/vme_user/vme_fake.c10
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c155
-rw-r--r--drivers/staging/vt6655/TODO2
-rw-r--r--drivers/staging/vt6655/card.c12
-rw-r--r--drivers/staging/vt6655/card.h4
-rw-r--r--drivers/staging/vt6655/device.h12
-rw-r--r--drivers/staging/vt6655/device_main.c34
-rw-r--r--drivers/staging/vt6655/mac.h4
-rw-r--r--drivers/staging/vt6655/rxtx.c14
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c1
-rw-r--r--drivers/thunderbolt/acpi.c40
-rw-r--r--drivers/thunderbolt/debugfs.c382
-rw-r--r--drivers/thunderbolt/sb_regs.h18
-rw-r--r--drivers/thunderbolt/tb.h42
-rw-r--r--drivers/thunderbolt/usb4.c62
-rw-r--r--drivers/tty/hvc/hvsi_lib.c2
-rw-r--r--drivers/tty/mxser.c7
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c1
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c47
-rw-r--r--drivers/tty/serial/8250/8250_dma.c19
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c11
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c10
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c122
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c16
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c137
-rw-r--r--drivers/tty/serial/rp2.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c51
-rw-r--r--drivers/tty/serial/sc16is7xx.c183
-rw-r--r--drivers/tty/serial/serial_core.c142
-rw-r--r--drivers/tty/serial/st-asc.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c14
-rw-r--r--drivers/ufs/host/ufs-qcom.c2
-rw-r--r--drivers/uio/uio.c4
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c29
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c6
-rw-r--r--drivers/usb/cdns3/host.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c4
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/common/common.c22
-rw-r--r--drivers/usb/core/usb-acpi.c53
-rw-r--r--drivers/usb/dwc2/debugfs.c1
-rw-r--r--drivers/usb/dwc2/drd.c9
-rw-r--r--drivers/usb/dwc2/params.c2
-rw-r--r--drivers/usb/dwc2/platform.c26
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c113
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c19
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c16
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c52
-rw-r--r--drivers/usb/dwc3/dwc3-st.c38
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c7
-rw-r--r--drivers/usb/gadget/configfs.c12
-rw-r--r--drivers/usb/gadget/function/f_acm.c52
-rw-r--r--drivers/usb/gadget/function/f_fs.c16
-rw-r--r--drivers/usb/gadget/function/f_hid.c275
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/function/f_midi.c2
-rw-r--r--drivers/usb/gadget/function/f_midi2.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c63
-rw-r--r--drivers/usb/gadget/function/f_uac2.c80
-rw-r--r--drivers/usb/gadget/function/u_audio.c10
-rw-r--r--drivers/usb/gadget/function/u_serial.c22
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_uac1.h12
-rw-r--r--drivers/usb/gadget/function/u_uac2.h15
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c12
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c1
-rw-r--r--drivers/usb/gadget/u_f.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c4
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c7
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c14
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c67
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-brcm.c1
-rw-r--r--drivers/usb/host/ehci-exynos.c9
-rw-r--r--drivers/usb/host/ohci-exynos.c9
-rw-r--r--drivers/usb/host/ohci-nxp.c18
-rw-r--r--drivers/usb/host/ohci-ppc-of.c4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/xhci-dbgcap.c133
-rw-r--r--drivers/usb/host/xhci-dbgcap.h3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c32
-rw-r--r--drivers/usb/host/xhci-ext-caps.h5
-rw-r--r--drivers/usb/host/xhci-hub.c36
-rw-r--r--drivers/usb/host/xhci-mem.c8
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c48
-rw-r--r--drivers/usb/host/xhci-pci.c96
-rw-r--r--drivers/usb/host/xhci-pci.h19
-rw-r--r--drivers/usb/host/xhci-plat.c6
-rw-r--r--drivers/usb/host/xhci-ring.c72
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/host/xhci.h20
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/brcmstb-usb-pinmap.c1
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c4
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c78
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h2
-rw-r--r--drivers/usb/misc/qcom_eud.c2
-rw-r--r--drivers/usb/misc/yurex.c10
-rw-r--r--drivers/usb/mon/mon_bin.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/mediatek.c27
-rw-r--r--drivers/usb/musb/mpfs.c160
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c1
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c132
-rw-r--r--drivers/usb/roles/class.c7
-rw-r--r--drivers/usb/serial/aircable.c1
-rw-r--r--drivers/usb/serial/ark3116.c1
-rw-r--r--drivers/usb/serial/belkin_sa.c1
-rw-r--r--drivers/usb/serial/ch341.c1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cyberjack.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c3
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/empeg.c1
-rw-r--r--drivers/usb/serial/f81232.c2
-rw-r--r--drivers/usb/serial/f81534.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/generic.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c1
-rw-r--r--drivers/usb/serial/ir-usb.c1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c1
-rw-r--r--drivers/usb/serial/kobil_sct.c4
-rw-r--r--drivers/usb/serial/mct_u232.c1
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/usb/serial/mos7720.c1
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/mxuport.c1
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/omninet.c1
-rw-r--r--drivers/usb/serial/opticon.c1
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcaux.c1
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c1
-rw-r--r--drivers/usb/serial/safe_serial.c1
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/spcp8x5.c1
-rw-r--r--drivers/usb/serial/ssu100.c1
-rw-r--r--drivers/usb/serial/symbolserial.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/upd78f0730.c1
-rw-r--r--drivers/usb/serial/usb-serial-simple.c1
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/serial/visor.c3
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/serial/wishbone-serial.c1
-rw-r--r--drivers/usb/serial/xr_serial.c1
-rw-r--r--drivers/usb/serial/xsens_mt.c1
-rw-r--r--drivers/usb/storage/alauda.c4
-rw-r--r--drivers/usb/storage/cypress_atacb.c4
-rw-r--r--drivers/usb/storage/datafab.c4
-rw-r--r--drivers/usb/storage/ene_ub6250.c6
-rw-r--r--drivers/usb/storage/freecom.c4
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/jumpshot.c4
-rw-r--r--drivers/usb/storage/karma.c4
-rw-r--r--drivers/usb/storage/onetouch.c4
-rw-r--r--drivers/usb/storage/sddr09.c4
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/storage/shuttle_usbat.c4
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/typec/anx7411.c6
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c53
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c134
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h34
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c82
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c27
-rw-r--r--drivers/usb/typec/tipd/core.c3
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c158
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h46
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c36
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c3
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h47
-rw-r--r--drivers/vdpa/mlx5/core/mr.c291
-rw-r--r--drivers/vdpa/mlx5/core/resources.c76
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c465
-rw-r--r--drivers/vdpa/pds/cmds.h1
-rw-r--r--drivers/vdpa/vdpa.c79
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c21
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c2
-rw-r--r--drivers/vfio/pci/mlx5/main.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c2
-rw-r--r--drivers/vfio/pci/qat/main.c2
-rw-r--r--drivers/vhost/vdpa.c16
-rw-r--r--drivers/video/fbdev/core/fbcon.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c7
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c1
-rw-r--r--drivers/virtio/virtio_balloon.c18
-rw-r--r--drivers/w1/masters/ds2482.c4
-rw-r--r--drivers/watchdog/acquirewdt.c1
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c1
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/pc87413_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/pika_wdt.c1
-rw-r--r--drivers/watchdog/rc32434_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c1
-rw-r--r--drivers/watchdog/sbc8360.c1
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c1
-rw-r--r--drivers/watchdog/ts72xx_wdt.c8
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/acpi.c50
-rw-r--r--drivers/xen/evtchn.c1
-rw-r--r--drivers/xen/mcelog.c1
-rw-r--r--drivers/xen/pci.c13
-rw-r--r--drivers/xen/privcmd.c32
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c78
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c1
-rw-r--r--fs/Kconfig23
-rw-r--r--fs/afs/afs_vl.h9
-rw-r--r--fs/afs/file.c1
-rw-r--r--fs/afs/fs_operation.c2
-rw-r--r--fs/afs/fs_probe.c4
-rw-r--r--fs/afs/rotate.c11
-rw-r--r--fs/bcachefs/backpointers.c2
-rw-r--r--fs/bcachefs/bcachefs.h3
-rw-r--r--fs/bcachefs/bcachefs_format.h8
-rw-r--r--fs/bcachefs/bkey.h8
-rw-r--r--fs/bcachefs/bkey_methods.c2
-rw-r--r--fs/bcachefs/bkey_methods.h2
-rw-r--r--fs/bcachefs/btree_gc.c8
-rw-r--r--fs/bcachefs/btree_io.c6
-rw-r--r--fs/bcachefs/btree_node_scan.c2
-rw-r--r--fs/bcachefs/btree_trans_commit.c108
-rw-r--r--fs/bcachefs/btree_update.h3
-rw-r--r--fs/bcachefs/chardev.c1
-rw-r--r--fs/bcachefs/data_update.c2
-rw-r--r--fs/bcachefs/disk_accounting.c82
-rw-r--r--fs/bcachefs/disk_accounting.h29
-rw-r--r--fs/bcachefs/disk_accounting_types.h2
-rw-r--r--fs/bcachefs/error.c14
-rw-r--r--fs/bcachefs/error.h2
-rw-r--r--fs/bcachefs/fsck.c295
-rw-r--r--fs/bcachefs/inode.c12
-rw-r--r--fs/bcachefs/inode.h1
-rw-r--r--fs/bcachefs/io_read.c4
-rw-r--r--fs/bcachefs/io_write.c4
-rw-r--r--fs/bcachefs/journal_io.c2
-rw-r--r--fs/bcachefs/logged_ops.c13
-rw-r--r--fs/bcachefs/recovery.c7
-rw-r--r--fs/bcachefs/recovery_passes_types.h2
-rw-r--r--fs/bcachefs/reflink.c2
-rw-r--r--fs/bcachefs/replicas.c18
-rw-r--r--fs/bcachefs/replicas.h2
-rw-r--r--fs/bcachefs/sb-clean.c1
-rw-r--r--fs/bcachefs/sb-downgrade.c9
-rw-r--r--fs/bcachefs/sb-errors.c6
-rw-r--r--fs/bcachefs/sb-errors.h2
-rw-r--r--fs/bcachefs/sb-errors_format.h39
-rw-r--r--fs/bcachefs/six.c12
-rw-r--r--fs/bcachefs/snapshot.c3
-rw-r--r--fs/bcachefs/subvolume.c54
-rw-r--r--fs/bcachefs/super-io.c7
-rw-r--r--fs/bcachefs/tests.c2
-rw-r--r--fs/bcachefs/thread_with_file.c2
-rw-r--r--fs/binfmt_elf.c48
-rw-r--r--fs/cachefiles/namei.c7
-rw-r--r--fs/ceph/addr.c1
-rw-r--r--fs/ceph/caps.c29
-rw-r--r--fs/ceph/dir.c2
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c25
-rw-r--r--fs/ceph/mds_client.h7
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/coredump.c107
-rw-r--r--fs/debugfs/file.c1
-rw-r--r--fs/dlm/debug_fs.c1
-rw-r--r--fs/efivarfs/file.c1
-rw-r--r--fs/exfat/balloc.c10
-rw-r--r--fs/exfat/exfat_fs.h24
-rw-r--r--fs/exfat/file.c110
-rw-r--r--fs/exfat/inode.c94
-rw-r--r--fs/exfat/namei.c17
-rw-r--r--fs/exfat/nls.c5
-rw-r--r--fs/exfat/super.c41
-rw-r--r--fs/f2fs/checkpoint.c17
-rw-r--r--fs/f2fs/compress.c63
-rw-r--r--fs/f2fs/data.c164
-rw-r--r--fs/f2fs/debug.c2
-rw-r--r--fs/f2fs/dir.c8
-rw-r--r--fs/f2fs/extent_cache.c4
-rw-r--r--fs/f2fs/f2fs.h148
-rw-r--r--fs/f2fs/file.c199
-rw-r--r--fs/f2fs/gc.c113
-rw-r--r--fs/f2fs/gc.h29
-rw-r--r--fs/f2fs/inline.c31
-rw-r--r--fs/f2fs/inode.c9
-rw-r--r--fs/f2fs/namei.c68
-rw-r--r--fs/f2fs/node.c46
-rw-r--r--fs/f2fs/segment.c72
-rw-r--r--fs/f2fs/segment.h5
-rw-r--r--fs/f2fs/super.c119
-rw-r--r--fs/f2fs/sysfs.c82
-rw-r--r--fs/f2fs/verity.c5
-rw-r--r--fs/f2fs/xattr.c14
-rw-r--r--fs/fsopen.c1
-rw-r--r--fs/fuse/Makefile3
-rw-r--r--fs/fuse/acl.c10
-rw-r--r--fs/fuse/control.c4
-rw-r--r--fs/fuse/dev.c215
-rw-r--r--fs/fuse/dir.c152
-rw-r--r--fs/fuse/file.c184
-rw-r--r--fs/fuse/fuse_i.h42
-rw-r--r--fs/fuse/fuse_trace.h132
-rw-r--r--fs/fuse/inode.c13
-rw-r--r--fs/fuse/passthrough.c7
-rw-r--r--fs/fuse/virtio_fs.c206
-rw-r--r--fs/mnt_idmapping.c22
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/netfs/buffered_write.c1
-rw-r--r--fs/netfs/internal.h1
-rw-r--r--fs/netfs/misc.c74
-rw-r--r--fs/netfs/write_issue.c24
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/client.c21
-rw-r--r--fs/nfs/dir.c6
-rw-r--r--fs/nfs/filelayout/filelayout.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c56
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c6
-rw-r--r--fs/nfs/fs_context.c8
-rw-r--r--fs/nfs/getroot.c2
-rw-r--r--fs/nfs/inode.c53
-rw-r--r--fs/nfs/internal.h54
-rw-r--r--fs/nfs/localio.c757
-rw-r--r--fs/nfs/nfs2xdr.c70
-rw-r--r--fs/nfs/nfs3xdr.c108
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c16
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c90
-rw-r--r--fs/nfs/nfstrace.h61
-rw-r--r--fs/nfs/pagelist.c16
-rw-r--r--fs/nfs/pnfs_nfs.c2
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/super.c3
-rw-r--r--fs/nfs/write.c21
-rw-r--r--fs/nfs_common/Makefile5
-rw-r--r--fs/nfs_common/common.c134
-rw-r--r--fs/nfs_common/nfslocalio.c172
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/Makefile1
-rw-r--r--fs/nfsd/export.c30
-rw-r--r--fs/nfsd/filecache.c101
-rw-r--r--fs/nfsd/filecache.h5
-rw-r--r--fs/nfsd/localio.c169
-rw-r--r--fs/nfsd/netns.h12
-rw-r--r--fs/nfsd/nfsctl.c27
-rw-r--r--fs/nfsd/nfsd.h6
-rw-r--r--fs/nfsd/nfsfh.c137
-rw-r--r--fs/nfsd/nfsfh.h2
-rw-r--r--fs/nfsd/nfssvc.c105
-rw-r--r--fs/nfsd/trace.h21
-rw-r--r--fs/nfsd/vfs.h2
-rw-r--r--fs/nsfs.c1
-rw-r--r--fs/ocfs2/aops.c5
-rw-r--r--fs/ocfs2/extent_map.c8
-rw-r--r--fs/ocfs2/refcounttree.c26
-rw-r--r--fs/ocfs2/xattr.c11
-rw-r--r--fs/overlayfs/file.c2
-rw-r--r--fs/pidfs.c5
-rw-r--r--fs/pipe.c1
-rw-r--r--fs/smb/client/cifsencrypt.c151
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifsglob.h5
-rw-r--r--fs/smb/client/cifsproto.h12
-rw-r--r--fs/smb/client/connect.c66
-rw-r--r--fs/smb/client/dfs.c73
-rw-r--r--fs/smb/client/dfs.h42
-rw-r--r--fs/smb/client/dfs_cache.c218
-rw-r--r--fs/smb/client/fs_context.h1
-rw-r--r--fs/smb/client/inode.c19
-rw-r--r--fs/smb/client/misc.c6
-rw-r--r--fs/smb/client/namespace.c2
-rw-r--r--fs/smb/client/reparse.c10
-rw-r--r--fs/smb/client/reparse.h9
-rw-r--r--fs/smb/client/sess.c2
-rw-r--r--fs/smb/client/smb2misc.c28
-rw-r--r--fs/smb/client/smb2ops.c56
-rw-r--r--fs/smb/client/smb2pdu.c32
-rw-r--r--fs/smb/client/smb2proto.h2
-rw-r--r--fs/smb/client/smb2transport.c32
-rw-r--r--fs/smb/client/trace.h6
-rw-r--r--fs/smb/client/transport.c3
-rw-r--r--fs/smb/common/smb2pdu.h6
-rw-r--r--fs/smb/server/connection.c2
-rw-r--r--fs/smb/server/ksmbd_netlink.h2
-rw-r--r--fs/smb/server/oplock.c4
-rw-r--r--fs/smb/server/server.c2
-rw-r--r--fs/smb/server/smb2pdu.c35
-rw-r--r--fs/smb/server/smb2pdu.h4
-rw-r--r--fs/smb/server/smb_common.c2
-rw-r--r--fs/smb/server/vfs_cache.h4
-rw-r--r--fs/smb/server/xattr.h2
-rw-r--r--fs/ubifs/debug.c2
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/cxl/einj.h (renamed from include/linux/einj-cxl.h)0
-rw-r--r--include/cxl/event.h (renamed from include/linux/cxl-event.h)0
-rw-r--r--include/cxl/mailbox.h28
-rw-r--r--include/dt-bindings/clock/cirrus,ep9301-syscon.h46
-rw-r--r--include/dt-bindings/iio/adi,ad4695.h9
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8937.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8976.h97
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h10
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/attribute_container.h6
-rw-r--r--include/linux/auxiliary_bus.h2
-rw-r--r--include/linux/bitmap.h140
-rw-r--r--include/linux/bits.h15
-rw-r--r--include/linux/blk-integrity.h15
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bpf.h11
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cleanup.h136
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/coredump.h8
-rw-r--r--include/linux/coresight-pmu.h17
-rw-r--r--include/linux/coresight.h21
-rw-r--r--include/linux/cpumask.h212
-rw-r--r--include/linux/debugfs.h1
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/device/bus.h6
-rw-r--r--include/linux/device/class.h2
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/find.h50
-rw-r--r--include/linux/folio_queue.h168
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/hugetlb.h10
-rw-r--r--include/linux/iio/backend.h62
-rw-r--r--include/linux/iio/iio.h39
-rw-r--r--include/linux/kprobes.h9
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/mnt_idmapping.h1
-rw-r--r--include/linux/mutex.h19
-rw-r--r--include/linux/netfilter.h4
-rw-r--r--include/linux/nfs.h9
-rw-r--r--include/linux/nfs_common.h17
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_xdr.h22
-rw-r--r--include/linux/nfslocalio.h74
-rw-r--r--include/linux/nodemask.h86
-rw-r--r--include/linux/platform_data/ad5449.h39
-rw-r--r--include/linux/platform_data/dma-ep93xx.h94
-rw-r--r--include/linux/platform_data/eth-ep93xx.h10
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h32
-rw-r--r--include/linux/platform_data/spi-ep93xx.h15
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_s3c.h24
-rw-r--r--include/linux/soc/cirrus/ep93xx.h47
-rw-r--r--include/linux/soc/qcom/geni-se.h9
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h7
-rw-r--r--include/linux/sunrpc/svcauth.h5
-rw-r--r--include/linux/tracepoint.h20
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/func_utils.h (renamed from drivers/usb/gadget/u_f.h)8
-rw-r--r--include/linux/usb/gadget_configfs.h7
-rw-r--r--include/linux/usb/serial.h7
-rw-r--r--include/linux/usb/tcpci.h31
-rw-r--r--include/linux/usb/usbnet.h15
-rw-r--r--include/linux/vdpa.h9
-rw-r--r--include/net/tcp.h21
-rw-r--r--include/trace/events/dma.h37
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/netfs.h3
-rw-r--r--include/uapi/linux/android/binder.h36
-rw-r--r--include/uapi/linux/bits.h3
-rw-r--r--include/uapi/linux/const.h17
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/usb/ch9.h8
-rw-r--r--include/uapi/linux/usb/functionfs.h97
-rw-r--r--include/uapi/linux/usb/g_hid.h40
-rw-r--r--include/uapi/linux/usb/gadgetfs.h2
-rw-r--r--include/uapi/linux/vdpa.h1
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/xen/privcmd.h7
-rw-r--r--include/xen/acpi.h27
-rw-r--r--include/xen/interface/elfnote.h93
-rw-r--r--include/xen/interface/physdev.h17
-rw-r--r--include/xen/pci.h6
-rw-r--r--init/Kconfig19
-rw-r--r--kernel/bpf/bpf_inode_storage.c24
-rw-r--r--kernel/bpf/bpf_iter.c1
-rw-r--r--kernel/bpf/btf.c11
-rw-r--r--kernel/bpf/map_in_map.c38
-rw-r--r--kernel/bpf/syscall.c181
-rw-r--r--kernel/bpf/token.c74
-rw-r--r--kernel/bpf/verifier.c110
-rw-r--r--kernel/events/core.c1
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/jump_label.c34
-rw-r--r--kernel/locking/lockdep.c53
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/rwsem.c22
-rw-r--r--kernel/module/Kconfig78
-rw-r--r--kernel/module/debug_kmemleak.c18
-rw-r--r--kernel/module/sysfs.c63
-rw-r--r--kernel/power/user.c1
-rw-r--r--kernel/relay.c1
-rw-r--r--kernel/signal.c21
-rw-r--r--kernel/static_call_inline.c13
-rw-r--r--kernel/time/posix-clock.c1
-rw-r--r--kernel/trace/rv/rv.c3
-rw-r--r--kernel/trace/rv/rv_reactors.c1
-rw-r--r--kernel/trace/trace.c3
-rw-r--r--kernel/trace/trace_fprobe.c179
-rw-r--r--kernel/trace/trace_uprobe.c24
-rw-r--r--kernel/tracepoint.c42
-rw-r--r--lib/list-test.c6
-rw-r--r--lib/sbitmap.c4
-rw-r--r--lib/test_bits.c34
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/damon/Kconfig2
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/gup.c1
-rw-r--r--mm/huge_memory.c1
-rw-r--r--mm/hugetlb.c17
-rw-r--r--mm/kasan/Makefile8
-rw-r--r--mm/kasan/kasan.h6
-rw-r--r--mm/kasan/kasan_test_c.c (renamed from mm/kasan/kasan_test.c)11
-rw-r--r--mm/kasan/kasan_test_rust.rs21
-rw-r--r--mm/kfence/report.c2
-rw-r--r--mm/memblock.c17
-rw-r--r--mm/memfd.c18
-rw-r--r--mm/memory-tiers.c6
-rw-r--r--mm/migrate.c2
-rw-r--r--net/9p/Kconfig6
-rw-r--r--net/9p/Makefile4
-rw-r--r--net/9p/trans_usbg.c956
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/core/sock_map.c23
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c10
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c19
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c141
-rw-r--r--net/netfilter/nf_conntrack_netlink.c9
-rw-r--r--net/netfilter/nf_nat_core.c121
-rw-r--r--net/netfilter/nf_tables_api.c6
-rw-r--r--net/netfilter/nft_compat.c6
-rw-r--r--net/netfilter/nft_log.c2
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/nft_numgen.c2
-rw-r--r--net/netfilter/nft_set_pipapo.c13
-rw-r--r--net/netfilter/nft_tunnel.c5
-rw-r--r--net/qrtr/af_qrtr.c2
-rw-r--r--net/rfkill/core.c1
-rw-r--r--net/socket.c1
-rw-r--r--net/sunrpc/cache.c14
-rw-r--r--net/sunrpc/clnt.c13
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/svc.c68
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/svcauth.c28
-rw-r--r--net/sunrpc/svcauth_unix.c3
-rw-r--r--net/vmw_vsock/virtio_transport.c144
-rw-r--r--rust/Makefile56
-rw-r--r--rust/bindings/bindings_helper.h2
-rw-r--r--rust/exports.c1
-rw-r--r--rust/helpers.c239
-rw-r--r--rust/helpers/blk.c14
-rw-r--r--rust/helpers/bug.c8
-rw-r--r--rust/helpers/build_assert.c25
-rw-r--r--rust/helpers/build_bug.c9
-rw-r--r--rust/helpers/err.c19
-rw-r--r--rust/helpers/helpers.c26
-rw-r--r--rust/helpers/kunit.c9
-rw-r--r--rust/helpers/mutex.c9
-rw-r--r--rust/helpers/page.c19
-rw-r--r--rust/helpers/rbtree.c9
-rw-r--r--rust/helpers/refcount.c19
-rw-r--r--rust/helpers/signal.c9
-rw-r--r--rust/helpers/slab.c9
-rw-r--r--rust/helpers/spinlock.c24
-rw-r--r--rust/helpers/task.c19
-rw-r--r--rust/helpers/uaccess.c15
-rw-r--r--rust/helpers/wait.c9
-rw-r--r--rust/helpers/workqueue.c15
-rw-r--r--rust/kernel/alloc/box_ext.rs33
-rw-r--r--rust/kernel/error.rs5
-rw-r--r--rust/kernel/init.rs191
-rw-r--r--rust/kernel/init/__internal.rs29
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--rust/kernel/list.rs686
-rw-r--r--rust/kernel/list/arc.rs521
-rw-r--r--rust/kernel/list/arc_field.rs96
-rw-r--r--rust/kernel/list/impl_list_item_mod.rs274
-rw-r--r--rust/kernel/prelude.rs2
-rw-r--r--rust/kernel/print.rs20
-rw-r--r--rust/kernel/rbtree.rs1278
-rw-r--r--rust/kernel/std_vendor.rs2
-rw-r--r--rust/kernel/sync/arc.rs25
-rw-r--r--rust/kernel/types.rs63
-rw-r--r--rust/macros/lib.rs4
-rw-r--r--rust/macros/module.rs12
-rw-r--r--samples/vfio-mdev/mtty.c2
-rw-r--r--scripts/Kconfig.include8
-rw-r--r--scripts/Makefile.build9
-rw-r--r--scripts/Makefile.compiler15
-rw-r--r--scripts/Makefile.kasan57
-rw-r--r--scripts/Makefile.lib3
-rw-r--r--scripts/Makefile.modinst2
-rw-r--r--scripts/coccinelle/api/stream_open.cocci1
-rw-r--r--scripts/coccinelle/api/string_choices.cocci259
-rw-r--r--scripts/generate_rust_target.rs98
-rw-r--r--scripts/mod/devicetable-offsets.c4
-rw-r--r--scripts/mod/file2alias.c11
-rwxr-xr-xscripts/rustc-version.sh26
-rw-r--r--security/security.c2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/tomoyo/Kconfig15
-rw-r--r--security/tomoyo/Makefile8
-rw-r--r--security/tomoyo/common.c14
-rw-r--r--security/tomoyo/common.h72
-rw-r--r--security/tomoyo/domain.c9
-rw-r--r--security/tomoyo/gc.c3
-rw-r--r--security/tomoyo/hooks.h (renamed from security/tomoyo/tomoyo.c)110
-rw-r--r--security/tomoyo/init.c366
-rw-r--r--security/tomoyo/load_policy.c12
-rw-r--r--security/tomoyo/proxy.c82
-rw-r--r--security/tomoyo/securityfs_if.c10
-rw-r--r--security/tomoyo/util.c3
-rw-r--r--sound/core/control.c1
-rw-r--r--sound/core/oss/mixer_oss.c1
-rw-r--r--sound/core/oss/pcm_oss.c1
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/rawmidi.c1
-rw-r--r--sound/core/seq/seq_clientmgr.c1
-rw-r--r--sound/core/timer.c1
-rw-r--r--sound/oss/dmasound/dmasound_core.c3
-rw-r--r--sound/soc/cirrus/Kconfig9
-rw-r--r--sound/soc/cirrus/Makefile4
-rw-r--r--sound/soc/cirrus/edb93xx.c116
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c19
-rw-r--r--sound/soc/cirrus/ep93xx-pcm.c19
-rw-r--r--sound/soc/intel/avs/debugfs.c3
-rw-r--r--tools/iio/Makefile2
-rw-r--r--tools/iio/iio_generic_buffer.c4
-rw-r--r--tools/include/linux/compiler.h4
-rw-r--r--tools/include/linux/init.h (renamed from tools/testing/memblock/linux/init.h)19
-rw-r--r--tools/include/linux/linkage.h6
-rw-r--r--tools/include/linux/mm.h6
-rw-r--r--tools/include/linux/pfn.h1
-rw-r--r--tools/include/linux/string.h3
-rw-r--r--tools/lib/cmdline.c53
-rw-r--r--tools/objtool/arch/loongarch/decode.c11
-rw-r--r--tools/objtool/check.c75
-rw-r--r--tools/objtool/include/objtool/elf.h1
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/testing/cxl/Kbuild2
-rw-r--r--tools/testing/cxl/mock_acpi.c2
-rw-r--r--tools/testing/cxl/test/mem.c44
-rw-r--r--tools/testing/cxl/test/mock.c10
-rw-r--r--tools/testing/memblock/Makefile2
-rw-r--r--tools/testing/memblock/linux/kernel.h2
-rw-r--r--tools/testing/memblock/linux/mmzone.h1
-rw-r--r--tools/testing/radix-tree/maple.c2
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c1
-rw-r--r--tools/testing/selftests/ftrace/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc61
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc1
-rw-r--r--tools/testing/selftests/kvm/.gitignore4
-rw-r--r--tools/testing/selftests/kvm/Makefile4
-rw-r--r--tools/testing/selftests/kvm/coalesced_io_test.c236
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c19
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h28
-rw-r--r--tools/testing/selftests/kvm/include/s390x/debug_print.h69
-rw-r--r--tools/testing/selftests/kvm/include/s390x/processor.h5
-rw-r--r--tools/testing/selftests/kvm/include/s390x/sie.h240
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/apic.h21
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/hyperv.h18
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h7
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c85
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c10
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/hyperv.c67
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c69
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c19
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c12
-rw-r--r--tools/testing/selftests/kvm/s390x/cmma_test.c7
-rw-r--r--tools/testing/selftests/kvm/s390x/config2
-rw-r--r--tools/testing/selftests/kvm/s390x/debug_test.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/tprot.c5
-rw-r--r--tools/testing/selftests/kvm/s390x/ucontrol_test.c332
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c29
-rw-r--r--tools/testing/selftests/kvm/x86_64/debug_regs.c11
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_smoke_test.c32
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_state_test.c54
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c1
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c2
-rw-r--r--tools/testing/selftests/net/netfilter/Makefile4
-rw-r--r--tools/testing/selftests/net/netfilter/config1
-rw-r--r--tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c125
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh51
-rwxr-xr-xtools/testing/selftests/net/netfilter/ipvs.sh2
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh92
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh358
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_tproxy_udp.sh262
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh9
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c2
-rw-r--r--tools/testing/shared/linux/init.h2
-rw-r--r--tools/testing/shared/maple-shared.h4
-rw-r--r--tools/testing/shared/shared.h4
-rw-r--r--tools/testing/shared/shared.mk4
-rw-r--r--tools/testing/shared/xarray-shared.h4
-rwxr-xr-xtools/usb/p9_fwd.py243
-rw-r--r--tools/virtio/ringtest/main.c2
-rw-r--r--virt/kvm/coalesced_mmio.c31
-rw-r--r--virt/kvm/kvm_main.c282
1836 files changed, 55148 insertions, 26689 deletions
diff --git a/.clang-format b/.clang-format
index 252820d9c80a..fe1aa1a30d40 100644
--- a/.clang-format
+++ b/.clang-format
@@ -141,11 +141,13 @@ ForEachMacros:
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
+ - 'damos_for_each_quota_goal'
+ - 'damos_for_each_quota_goal_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
- 'device_for_each_child_node'
- - 'displayid_iter_for_each'
+ - 'device_for_each_child_node_scoped'
- 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
- 'dma_fence_unwrap_for_each'
@@ -172,11 +174,14 @@ ForEachMacros:
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- - 'drm_gem_for_each_gpuva'
- - 'drm_gem_for_each_gpuva_safe'
+ - 'drm_gem_for_each_gpuvm_bo'
+ - 'drm_gem_for_each_gpuvm_bo_safe'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
+ - 'drm_gpuva_for_each_op_reverse'
- 'drm_gpuva_for_each_op_safe'
+ - 'drm_gpuvm_bo_for_each_va'
+ - 'drm_gpuvm_bo_for_each_va_safe'
- 'drm_gpuvm_for_each_va'
- 'drm_gpuvm_for_each_va_range'
- 'drm_gpuvm_for_each_va_range_safe'
@@ -192,11 +197,11 @@ ForEachMacros:
- 'dsa_switch_for_each_port_continue_reverse'
- 'dsa_switch_for_each_port_safe'
- 'dsa_switch_for_each_user_port'
+ - 'dsa_switch_for_each_user_port_continue_reverse'
- 'dsa_tree_for_each_cpu_port'
- 'dsa_tree_for_each_user_port'
- 'dsa_tree_for_each_user_port_continue_reverse'
- 'dso__for_each_symbol'
- - 'dsos__for_each_with_build_id'
- 'elf_hash_for_each_possible'
- 'elf_symtab__for_each_symbol'
- 'evlist__for_each_cpu'
@@ -216,6 +221,7 @@ ForEachMacros:
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
+ - 'for_each_available_child_of_node_scoped'
- 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
@@ -234,6 +240,7 @@ ForEachMacros:
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
+ - 'for_each_child_of_node_scoped'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
@@ -251,6 +258,7 @@ ForEachMacros:
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_andnot'
+ - 'for_each_cpu_from'
- 'for_each_cpu_or'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
@@ -269,13 +277,14 @@ ForEachMacros:
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
+ - 'for_each_enabled_cpu'
- 'for_each_endpoint_of_node'
- 'for_each_event'
- 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_file_lock'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
@@ -286,15 +295,18 @@ ForEachMacros:
- 'for_each_group_member'
- 'for_each_group_member_head'
- 'for_each_hstate'
+ - 'for_each_hwgpio'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
+ - 'for_each_insn_op_loc'
- 'for_each_insn_prefix'
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_lang'
+ - 'for_each_link_ch_maps'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
@@ -332,6 +344,9 @@ ForEachMacros:
- 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
- 'for_each_new_reg'
+ - 'for_each_nhlt_endpoint'
+ - 'for_each_nhlt_endpoint_fmtcfg'
+ - 'for_each_nhlt_fmtcfg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
@@ -387,12 +402,15 @@ ForEachMacros:
- 'for_each_reloc_from'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
+ - 'for_each_reserved_child_of_node'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
+ - 'for_each_rtd_ch_maps'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
+ - 'for_each_rtd_dais_reverse'
- 'for_each_sband_iftype_data'
- 'for_each_script'
- 'for_each_sec'
@@ -533,8 +551,6 @@ ForEachMacros:
- 'lwq_for_each_safe'
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- - 'maps__for_each_entry'
- - 'maps__for_each_entry_safe'
- 'mas_for_each'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
@@ -560,7 +576,9 @@ ForEachMacros:
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
+ - 'nla_for_each_attr_type'
- 'nla_for_each_nested'
+ - 'nla_for_each_nested_type'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
@@ -579,6 +597,7 @@ ForEachMacros:
- 'perf_config_sections__for_each_entry'
- 'perf_config_set__for_each_entry'
- 'perf_cpu_map__for_each_cpu'
+ - 'perf_cpu_map__for_each_cpu_skip_any'
- 'perf_cpu_map__for_each_idx'
- 'perf_evlist__for_each_entry'
- 'perf_evlist__for_each_entry_reverse'
@@ -639,7 +658,6 @@ ForEachMacros:
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- - 'sk_for_each_bound_bhash2'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
@@ -653,6 +671,7 @@ ForEachMacros:
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
+ - 'sparsebit_for_each_set_range'
- 'strlist__for_each_entry'
- 'strlist__for_each_entry_safe'
- 'sym_for_each_insn'
@@ -662,7 +681,6 @@ ForEachMacros:
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'ttm_resource_manager_for_each_res'
- - 'twsk_for_each_bound_bhash2'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -686,6 +704,9 @@ ForEachMacros:
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'xbc_node_for_each_subkey'
+ - 'ynl_attr_for_each'
+ - 'ynl_attr_for_each_nested'
+ - 'ynl_attr_for_each_payload'
- 'zorro_for_each_dev'
IncludeBlocks: Preserve
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index aa89adf18bc5..0ae8cb074acf 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -11,7 +11,7 @@ Description:
Read returns '0' or '1' for read-write or read-only modes
respectively.
Write parses one of 'YyTt1NnFf0', or [oO][NnFf] for "on"
- and "off", i.e. what kstrbool() supports.
+ and "off", i.e. what kstrtobool() supports.
Note: This file is only present if CONFIG_NVMEM_SYSFS
is enabled.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-acm b/Documentation/ABI/testing/configfs-usb-gadget-acm
index d21092d75a05..25e68be9eb66 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-acm
+++ b/Documentation/ABI/testing/configfs-usb-gadget-acm
@@ -6,3 +6,10 @@ Description:
This item contains just one readonly attribute: port_num.
It contains the port number of the /dev/ttyGS<n> device
associated with acm function's instance "name".
+
+What: /config/usb-gadget/gadget/functions/acm.name/protocol
+Date: Aug 2024
+KernelVersion: 6.13
+Description:
+ Reported bInterfaceProtocol for the ACM device. For legacy
+ reasons, this defaults to 1 (USB_CDC_ACM_PROTO_AT_V25TER).
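As an illustrative sketch (not part of the patch), these attributes could be exercised from the shell roughly as follows; the configfs mount point, gadget name "g1" and function instance "acm.usb0" are hypothetical, and the protocol value shown is only an example:

	# read the port number of the /dev/ttyGS<n> device bound to this instance
	cat /sys/kernel/config/usb_gadget/g1/functions/acm.usb0/port_num
	# override the reported bInterfaceProtocol (default is 1)
	echo 0 > /sys/kernel/config/usb_gadget/g1/functions/acm.usb0/protocol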
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1 b/Documentation/ABI/testing/configfs-usb-gadget-uac1
index c4ba92f004c3..64188a85592b 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac1
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1
@@ -30,4 +30,12 @@ Description:
req_number the number of pre-allocated requests
for both capture and playback
function_name name of the interface
+ p_it_name playback input terminal name
+ p_it_ch_name playback channels name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture channels name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
===================== =======================================
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2
index a2bf4fd82a5b..133e995c3e92 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2
@@ -35,6 +35,17 @@ Description:
req_number the number of pre-allocated requests
for both capture and playback
function_name name of the interface
+ if_ctrl_name topology control name
+ clksrc_in_name input clock name
+ clksrc_out_name output clock name
+ p_it_name playback input terminal name
+ p_it_ch_name playback input first channel name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback mute/volume function unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture input first channel name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
c_terminal_type code of the capture terminal type
p_terminal_type code of the playback terminal type
===================== =======================================
diff --git a/Documentation/ABI/testing/debugfs-iio-ad9467 b/Documentation/ABI/testing/debugfs-iio-ad9467
new file mode 100644
index 000000000000..0352fca1f7f2
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-iio-ad9467
@@ -0,0 +1,39 @@
+What: /sys/kernel/debug/iio/iio:deviceX/calibration_table_dump
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ This dumps the calibration table that was filled during the
+ digital interface tuning process.
+
+What: /sys/kernel/debug/iio/iio:deviceX/in_voltage_test_mode_available
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ List all the available test tones:
+ - off
+ - midscale_short
+ - pos_fullscale
+ - neg_fullscale
+ - checkerboard
+ - prbs23
+ - prbs9
+ - one_zero_toggle
+ - user
+ - bit_toggle
+ - sync
+ - one_bit_high
+ - mixed_bit_frequency
+ - ramp
+
+ Note that depending on the actual device being used, some of the
+ above might not be available (and they won't be listed when
+ reading the file).
+
+What: /sys/kernel/debug/iio/iio:deviceX/in_voltageY_test_mode
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+	Writing to this file will initiate one of the available test tones on
+	channel Y. Reading it shows which test is running. In cases
+ where an IIO backend is available and supports the test tone,
+ additional information about the data correctness is given.
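For illustration only (not part of the patch), one plausible way to drive these debugfs files from the shell; the device index and channel number are hypothetical:

	# list the test tones this particular device supports
	cat /sys/kernel/debug/iio/iio:device0/in_voltage_test_mode_available
	# start a PRBS-9 tone on channel 0, then check which test is running
	echo prbs9 > /sys/kernel/debug/iio/iio:device0/in_voltage0_test_mode
	cat /sys/kernel/debug/iio/iio:device0/in_voltage0_test_mode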
diff --git a/Documentation/ABI/testing/debugfs-iio-backend b/Documentation/ABI/testing/debugfs-iio-backend
new file mode 100644
index 000000000000..01ab94469432
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-iio-backend
@@ -0,0 +1,20 @@
+What: /sys/kernel/debug/iio/iio:deviceX/backendY/name
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Name of Backend Y connected to device X.
+
+What: /sys/kernel/debug/iio/iio:deviceX/backendY/direct_reg_access
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Directly access the registers of backend Y. Typical usage is:
+
+ Reading address 0x50
+ echo 0x50 > direct_reg_access
+ cat direct_reg_access
+
+ Writing address 0x50
+ echo 0x50 0x3 > direct_reg_access
+ //readback address 0x50
+ cat direct_reg_access
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 7cee78ad4108..89943c2d54e8 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -523,13 +523,27 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_i_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_q_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -541,6 +555,10 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_accel_calibbias_available
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias_available
KernelVersion: 5.8
Contact: linux-iio@vger.kernel.org
Description:
@@ -549,25 +567,34 @@ Description:
- a small discrete set of values like "0 2 4 6 8"
- a range specified as "[min step max]"
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_both_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ir_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibscale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -575,6 +602,20 @@ Description:
production inaccuracies). If shared across all channels,
<type>_calibscale is used.
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminanceY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_proximityY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale_available
+KernelVersion: 4.8
+Contact: linux-iio@vger.kernel.org
+Description:
+	Available values of calibscale. May be expressed as either of:
+
+ - a small discrete set of values like "1 8 16"
+ - a range specified as "[min step max]"
+
+ If shared across all channels, <type>_calibscale_available is used.
+
What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibgender
What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibgender
What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibgender
@@ -708,6 +749,7 @@ Description:
2.5kohm_to_gnd: connected to ground via a 2.5kOhm resistor,
6kohm_to_gnd: connected to ground via a 6kOhm resistor,
20kohm_to_gnd: connected to ground via a 20kOhm resistor,
+ 42kohm_to_gnd: connected to ground via a 42kOhm resistor,
90kohm_to_gnd: connected to ground via a 90kOhm resistor,
100kohm_to_gnd: connected to ground via an 100kOhm resistor,
125kohm_to_gnd: connected to ground via an 125kOhm resistor,
@@ -2289,3 +2331,11 @@ KernelVersion: 6.7
Contact: linux-iio@vger.kernel.org
Description:
List of available timeout value for tap gesture confirmation.
+
+What: /sys/.../iio:deviceX/in_shunt_resistor
+What: /sys/.../iio:deviceX/in_current_shunt_resistor
+What: /sys/.../iio:deviceX/in_power_shunt_resistor
+KernelVersion: 6.10
+Contact: linux-iio@vger.kernel.org
+Description:
+	The value of the current sense resistor, in Ohms.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 b/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
deleted file mode 100644
index 6d2d2b094941..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
+++ /dev/null
@@ -1,17 +0,0 @@
-What: /sys/bus/iio/devices/iio:deviceX/in_power_shunt_resistor
-Date: March 2017
-KernelVersion: 4.12
-Contact: linux-iio@vger.kernel.org
-Description: The value of the shunt resistor used to compute power drain on
- common input voltage pin (RS+). In Ohms.
-
-What: /sys/bus/iio/devices/iio:deviceX/in_current_shunt_resistor
-Date: March 2017
-KernelVersion: 4.12
-Contact: linux-iio@vger.kernel.org
-Description: The value of the shunt resistor used to compute current flowing
- between RS+ and RS- voltage sense inputs. In Ohms.
-
-These attributes describe a single physical component, exposed as two distinct
-attributes as it is used to calculate two different values: power load and
-current flowing between RS+ and RS- inputs.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
index 469a7c00fad4..a95547e874f1 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
+++ b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
@@ -15,17 +15,3 @@ Description:
Set the relative humidity. This value is sent to the sensor for
humidity compensation.
Default value: 50000 (50 % relative humidity)
-
-What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias
-Date: August 2021
-KernelVersion: 5.15
-Contact: Andreas Klinger <ak@it-klinger.de>
-Description:
- Set the bias value for the resistance which is used for
- calculation of in_concentration_input as follows:
-
- x = (in_resistance_raw - in_resistance_calibbias) * 0.65
-
- in_concentration_input = 500 / (1 + e^x)
-
- Default value: 30000
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac
new file mode 100644
index 000000000000..810eaac5533c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac
@@ -0,0 +1,61 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_toggle_en
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Toggle enable. Write 1 to enable toggle or 0 to disable it. This
+ is useful when one wants to change the DAC output codes. For
+ autonomous toggling, the way it should be done is:
+
+ - disable toggle operation;
+ - change out_currentY_rawN, where N is the integer value of the symbol;
+ - enable toggle operation.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_rawN
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute has the same meaning as out_currentY_raw. It is
+ specific to toggle enabled channels and refers to the DAC output
+ code in INPUT_N (_rawN), where N is the integer value of the symbol.
+ The same scale and offset as in out_currentY_raw applies.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_symbol
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Performs a SW switch to a predefined output symbol. This attribute
+ is specific to toggle enabled channels and allows switching between
+ multiple predefined symbols. Each symbol corresponds to a different
+ output, denoted as out_currentY_rawN, where N is the integer value
+ of the symbol. Writing an integer value N will select out_currentY_rawN.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Toggle enable. Write 1 to enable toggle or 0 to disable it. This
+ is useful when one wants to change the DAC output codes. For
+ autonomous toggling, the way it should be done is:
+
+ - disable toggle operation;
+ - change out_voltageY_rawN, where N is the integer value of the symbol;
+ - enable toggle operation.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_rawN
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+	This attribute has the same meaning as out_voltageY_raw. It is
+ specific to toggle enabled channels and refers to the DAC output
+ code in INPUT_N (_rawN), where N is the integer value of the symbol.
+	The same scale and offset as in out_voltageY_raw applies.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Performs a SW switch to a predefined output symbol. This attribute
+ is specific to toggle enabled channels and allows switching between
+ multiple predefined symbols. Each symbol corresponds to a different
+ output, denoted as out_voltageY_rawN, where N is the integer value
+ of the symbol. Writing an integer value N will select out_voltageY_rawN.
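To make the toggle workflow above concrete, a hedged shell sketch (not part of the patch): it assumes the current directory is /sys/bus/iio/devices/iio:deviceX, that channel 0 is toggle capable, and the raw codes are illustrative:

	# reprogram the per-symbol output codes with toggling disabled
	echo 0 > out_voltage0_toggle_en
	echo 2048 > out_voltage0_raw0
	echo 3072 > out_voltage0_raw1
	echo 1 > out_voltage0_toggle_en
	# alternatively, switch the output to symbol 1 (i.e. out_voltage0_raw1) by hand
	echo 1 > out_voltage0_symbol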
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
index 1c35971277ba..ae95a5477382 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
@@ -53,34 +53,3 @@ KernelVersion: 5.18
Contact: linux-iio@vger.kernel.org
Description:
Returns the available values for the dither phase.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- Toggle enable. Write 1 to enable toggle or 0 to disable it. This is
- useful when one wants to change the DAC output codes. The way it should
- be done is:
-
- - disable toggle operation;
- - change out_voltageY_raw0 and out_voltageY_raw1;
- - enable toggle operation.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw0
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw1
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- It has the same meaning as out_voltageY_raw. This attribute is
- specific to toggle enabled channels and refers to the DAC output
- code in INPUT_A (_raw0) and INPUT_B (_raw1). The same scale and offset
- as in out_voltageY_raw applies.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- Performs a SW toggle. This attribute is specific to toggle
- enabled channels and allows to toggle between out_voltageY_raw0
- and out_voltageY_raw1 through software. Writing 0 will select
- out_voltageY_raw0 while 1 selects out_voltageY_raw1.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
index 31dbb390573f..c431f0a13cf5 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
+++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
@@ -3,7 +3,7 @@ KernelVersion:
Contact: linux-iio@vger.kernel.org
Description:
Reading this returns the valid values that can be written to the
- on_altvoltage0_mode attribute:
+ filter_mode attribute:
- auto -> Adjust bandpass filter to track changes in input clock rate.
- manual -> disable/unregister the clock rate notifier / input clock tracking.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
index 8916f7ec6507..8dbca113112d 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
+++ b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
@@ -13,12 +13,3 @@ Description:
available for reading data. However, samples can be occasionally skipped
or repeated, depending on the beat between the capture and conversion
rates.
-
-What: /sys/bus/iio/devices/iio:deviceX/in_shunt_resistor
-Date: December 2015
-KernelVersion: 4.4
-Contact: linux-iio@vger.kernel.org
-Description:
- The value of the shunt resistor may be known only at runtime fom an
- eeprom content read by a client application. This attribute allows to
- set its value in ohms.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index cad6c3dc1f9c..fdedf1ea944b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -579,6 +579,12 @@ Description: When ATGC is on, it controls age threshold to bypass GCing young
candidates whose age is not beyond the threshold, by default it was
initialized as 604800 seconds (equals to 7 days).
+What: /sys/fs/f2fs/<disk>/atgc_enabled
+Date: Feb 2024
+Contact: "Jinbao Liu" <liujinbao1@xiaomi.com>
+Description: It represents whether ATGC is on or off. The value is 1 which
+ indicates that ATGC is on, and 0 indicates that it is off.
+
What: /sys/fs/f2fs/<disk>/gc_reclaimed_segments
Date: July 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
@@ -763,3 +769,53 @@ Date: November 2023
Contact: "Chao Yu" <chao@kernel.org>
Description: It controls to enable/disable IO aware feature for background discard.
By default, the value is 1 which indicates IO aware is on.
+
+What: /sys/fs/f2fs/<disk>/blkzone_alloc_policy
+Date: July 2024
+Contact: "Yuanhong Liao" <liaoyuanhong@vivo.com>
+Description: A zoned UFS device consists of two parts: conventional zones and
+		sequential zones. This attribute controls which part to prioritize
+		for writes; the default value is 0.
+
+ ======================== =========================================
+ value description
+ blkzone_alloc_policy = 0 Prioritize writing to sequential zones
+ blkzone_alloc_policy = 1 Only allow writing to sequential zones
+ blkzone_alloc_policy = 2 Prioritize writing to conventional zones
+ ======================== =========================================
+
+What: /sys/fs/f2fs/<disk>/migration_window_granularity
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Controls migration window granularity of garbage collection on large
+		section. It controls the scanning window granularity for GC migration
+		in units of segments, while migration_granularity controls the number
+		of segments which can be migrated in the same turn.
+
+What: /sys/fs/f2fs/<disk>/reserved_segments
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: To fine-tune GC behavior, this attribute controls the number of
+		reserved segments.
+
+What: /sys/fs/f2fs/<disk>/gc_no_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is above this
+		number, F2FS does not perform garbage collection for zoned devices
+		through the background GC thread. The default number is "60".
+
+What: /sys/fs/f2fs/<disk>/gc_boost_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is under this
+ number, F2FS boosts garbage collection for zoned devices through the
+		background GC thread. The default number is "25".
+
+What: /sys/fs/f2fs/<disk>/gc_valid_thresh_ratio
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: It controls the valid block ratio threshold not to trigger excessive GC
+		for zoned devices. Its initial value is 95 (%). F2FS will stop the
+		background GC thread from initiating GC for sections having valid blocks
+ exceeding the ratio.
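For illustration only (not part of the patch), the new tunables are plain sysfs files; "<disk>" below is a placeholder and the values are examples, not recommendations:

	# check whether ATGC is currently on
	cat /sys/fs/f2fs/<disk>/atgc_enabled
	# only allow writes to sequential zones on a zoned device
	echo 1 > /sys/fs/f2fs/<disk>/blkzone_alloc_policy
	# stop zoned background GC once 70% of sections are free
	echo 70 > /sys/fs/f2fs/<disk>/gc_no_zoned_gc_percent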
diff --git a/Documentation/admin-guide/device-mapper/delay.rst b/Documentation/admin-guide/device-mapper/delay.rst
index 917ba8c33359..4d667228e744 100644
--- a/Documentation/admin-guide/device-mapper/delay.rst
+++ b/Documentation/admin-guide/device-mapper/delay.rst
@@ -3,29 +3,52 @@ dm-delay
========
Device-Mapper's "delay" target delays reads and/or writes
-and maps them to different devices.
+and/or flushes and optionally maps them to different devices.
-Parameters::
+Arguments::
<device> <offset> <delay> [<write_device> <write_offset> <write_delay>
[<flush_device> <flush_offset> <flush_delay>]]
-With separate write parameters, the first set is only used for reads.
+The table line has to have either 3, 6 or 9 arguments:
+
+3: apply offset and delay to read, write and flush operations on the device
+
+6: apply offset and delay to the device; also apply write_offset and write_delay
+   to write and flush operations on an optionally different write_device with an
+   optionally different sector offset
+
+9: same as 6 arguments plus define flush_offset and flush_delay explicitly
+   on/with an optionally different flush_device/flush_offset.
+
Offsets are specified in sectors.
+
Delays are specified in milliseconds.
+
Example scripts
===============
::
-
#!/bin/sh
- # Create device delaying rw operation for 500ms
- echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
+ #
+ # Create mapped device named "delayed" delaying read, write and flush operations for 500ms.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 500"
::
+ #!/bin/sh
+ #
+ # Create mapped device delaying write and flush operations for 400ms and
+	# splitting reads to device $1 but writes and flushes to different device $2
+ # to different offsets of 2048 and 4096 sectors respectively.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 2048 0 $2 4096 400"
+::
#!/bin/sh
- # Create device delaying only write operation for 500ms and
- # splitting reads and writes to different devices $1 $2
- echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+ #
+	# Create mapped device delaying reads for 50ms, writes for 100ms and flushes for 333ms,
+	# with reads and flushes on device $1 and writes on device $2, all at offset 0 sectors.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 50 $2 0 100 $1 0 333"
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 48a48bd09372..9f8139ff97d6 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -160,6 +160,10 @@ iv_large_sectors
The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
if this flag is specified.
+integrity_key_size:<bytes>
+ Use an integrity key of <bytes> size instead of using an integrity key size
+ of the digest size of the used HMAC algorithm.
+
Module parameters::
max_read_size
diff --git a/Documentation/admin-guide/device-mapper/vdo.rst b/Documentation/admin-guide/device-mapper/vdo.rst
index c69ac186863a..a14e6d3e787c 100644
--- a/Documentation/admin-guide/device-mapper/vdo.rst
+++ b/Documentation/admin-guide/device-mapper/vdo.rst
@@ -251,7 +251,12 @@ The messages are:
by the vdostats userspace program to interpret the output
buffer.
- dump:
+ config:
+ Outputs useful vdo configuration information. Mostly used
+ by users who want to recreate a similar VDO volume and
+ want to know the creation configuration used.
+
+ dump:
Dumps many internal structures to the system log. This is
not always safe to run, so it should only be used to debug
a hung vdo. Optional parameters to specify structures to
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index bb48ae24ae69..1518343bbe22 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2677,6 +2677,23 @@
Default is Y (on).
+ kvm.enable_virt_at_load=[KVM,ARM64,LOONGARCH,MIPS,RISCV,X86]
+ If enabled, KVM will enable virtualization in hardware
+ when KVM is loaded, and disable virtualization when KVM
+ is unloaded (if KVM is built as a module).
+
+ If disabled, KVM will dynamically enable and disable
+ virtualization on-demand when creating and destroying
+ VMs, i.e. on the 0=>1 and 1=>0 transitions of the
+ number of VMs.
+
+			Enabling virtualization at module load avoids potential
+			latency when creating the first (0=>1) VM, as KVM
+			serializes virtualization enabling across all online
+			CPUs.  The "cost" of enabling virtualization when KVM
+			is loaded is that doing so may interfere with
+			out-of-tree hypervisors that want to "own"
+			virtualization hardware.
+
kvm.enable_vmware_backdoor=[KVM] Support VMware backdoor PV interface.
Default is false (don't support).
diff --git a/Documentation/arch/loongarch/irq-chip-model.rst b/Documentation/arch/loongarch/irq-chip-model.rst
index 7988f4192363..6dd48256e39f 100644
--- a/Documentation/arch/loongarch/irq-chip-model.rst
+++ b/Documentation/arch/loongarch/irq-chip-model.rst
@@ -85,6 +85,38 @@ to CPUINTC directly::
| Devices |
+---------+
+Advanced Extended IRQ model
+===========================
+
+In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupts go
+to CPUINTC directly, CPU UART interrupts go to LIOINTC, and PCH-MSI interrupts go
+to AVECINTC and then to CPUINTC directly, while all other device interrupts go to
+PCH-PIC/PCH-LPC, are gathered by EIOINTC, and then go to CPUINTC directly::
+
+ +-----+ +-----------------------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +-----------------------+ +-------+
+ ^ ^ ^
+ | | |
+ +---------+ +----------+ +---------+ +-------+
+ | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +----------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | Devices | | PCH-LPC | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
ACPI-related definitions
========================
diff --git a/Documentation/arch/s390/vfio-ap.rst b/Documentation/arch/s390/vfio-ap.rst
index ea744cbc8687..eba1991fbdba 100644
--- a/Documentation/arch/s390/vfio-ap.rst
+++ b/Documentation/arch/s390/vfio-ap.rst
@@ -999,6 +999,36 @@ the vfio_ap mediated device to which it is assigned as long as each new APQN
resulting from plugging it in references a queue device bound to the vfio_ap
device driver.
+Driver Features
+===============
+The vfio_ap driver exposes a sysfs file containing supported features.
+This exists so third party tools (like Libvirt and mdevctl) can query the
+availability of specific features.
+
+The features list can be found here: /sys/bus/matrix/devices/matrix/features
+
+Entries are space delimited. Each entry consists of a combination of
+alphanumeric and underscore characters.
+
+Example::
+
+  cat /sys/bus/matrix/devices/matrix/features
+  guest_matrix dyn ap_config
+
+The following features are advertised:
+
++--------------+---------------------------------------------------------------+
+| Flag         | Description                                                   |
++==============+===============================================================+
+| guest_matrix | guest_matrix attribute exists. It reports the matrix of       |
+|              | adapters and domains that are or will be passed through to a  |
+|              | guest when the mdev is attached to it.                        |
++--------------+---------------------------------------------------------------+
+| dyn          | Indicates hot plug/unplug of AP adapters, domains and control |
+|              | domains for a guest to which the mdev is attached.            |
++--------------+---------------------------------------------------------------+
+| ap_config    | ap_config interface for one-shot modifications to mdev config |
++--------------+---------------------------------------------------------------+
+
Limitations
===========
Live guest migration is not supported for guests using AP devices without
diff --git a/Documentation/core-api/cleanup.rst b/Documentation/core-api/cleanup.rst
new file mode 100644
index 000000000000..527eb2f8ec6e
--- /dev/null
+++ b/Documentation/core-api/cleanup.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Scope-based Cleanup Helpers
+===========================
+
+.. kernel-doc:: include/linux/cleanup.h
+ :doc: scope-based cleanup helpers
diff --git a/Documentation/core-api/folio_queue.rst b/Documentation/core-api/folio_queue.rst
new file mode 100644
index 000000000000..1fe7a9bc4b8d
--- /dev/null
+++ b/Documentation/core-api/folio_queue.rst
@@ -0,0 +1,212 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========
+Folio Queue
+===========
+
+:Author: David Howells <dhowells@redhat.com>
+
+.. Contents:
+
+ * Overview
+ * Initialisation
+ * Adding and removing folios
+ * Querying information about a folio
+ * Querying information about a folio_queue
+ * Folio queue iteration
+ * Folio marks
+ * Lockless simultaneous production/consumption issues
+
+
+Overview
+========
+
+The folio_queue struct forms a single segment in a segmented list of folios
+that can be used to form an I/O buffer. As such, the list can be iterated over
+using the ITER_FOLIOQ iov_iter type.
+
+The publicly accessible members of the structure are::
+
+ struct folio_queue {
+ struct folio_queue *next;
+ struct folio_queue *prev;
+ ...
+ };
+
+A pair of pointers are provided, ``next`` and ``prev``, that point to the
+segments on either side of the segment being accessed. Whilst this is a
+doubly-linked list, it is intentionally not a circular list; the outward
+sibling pointers in terminal segments should be NULL.
+
+Each segment in the list also stores:
+
+ * an ordered sequence of folio pointers,
+ * the size of each folio and
+ * three 1-bit marks per folio,
+
+but these should not be accessed directly as the underlying data structure may
+change; rather, the access functions outlined below should be used.
+
+The facility can be made accessible by::
+
+ #include <linux/folio_queue.h>
+
+and to use the iterator::
+
+ #include <linux/uio.h>
+
+
+Initialisation
+==============
+
+A segment should be initialised by calling::
+
+ void folioq_init(struct folio_queue *folioq);
+
+with a pointer to the segment to be initialised. Note that this will not
+necessarily initialise all the folio pointers, so care must be taken to check
+the number of folios added.
+
+
+Adding and removing folios
+==========================
+
+Folios can be set in the next unused slot in a segment struct by calling one
+of::
+
+ unsigned int folioq_append(struct folio_queue *folioq,
+ struct folio *folio);
+
+ unsigned int folioq_append_mark(struct folio_queue *folioq,
+ struct folio *folio);
+
+Both functions update the stored folio count, store the folio and note its
+size. The second function also sets the first mark for the folio added. Both
+functions return the number of the slot used. [!] Note that no attempt is made
+to check that the capacity wasn't overrun and the list will not be extended
+automatically.
+
+A folio can be excised by calling::
+
+ void folioq_clear(struct folio_queue *folioq, unsigned int slot);
+
+This clears the slot in the array and also clears all the marks for that folio,
+but doesn't change the folio count - so future accesses of that slot must check
+if the slot is occupied.
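+
+As an illustration, a minimal producer sketch might look like the following
+(the allocation strategy, the error handling and the get_next_folio() helper
+are assumptions made for the example, not part of the API)::
+
+	struct folio_queue *folioq;
+	struct folio *folio;
+
+	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+	if (!folioq)
+		return -ENOMEM;
+	folioq_init(folioq);
+
+	/* The list is not extended automatically, so stop appending when the
+	 * segment is full; a further segment would have to be chained on by
+	 * hand through the next/prev pointers.
+	 */
+	while (!folioq_full(folioq) && (folio = get_next_folio()) != NULL)
+		folioq_append(folioq, folio);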
+
+
+Querying information about a folio
+==================================
+
+Information about the folio in a particular slot may be queried by the
+following function::
+
+ struct folio *folioq_folio(const struct folio_queue *folioq,
+ unsigned int slot);
+
+If a folio has not yet been set in that slot, this may yield an undefined
+pointer. The size of the folio in a slot may be queried with either of::
+
+ unsigned int folioq_folio_order(const struct folio_queue *folioq,
+ unsigned int slot);
+
+ size_t folioq_folio_size(const struct folio_queue *folioq,
+ unsigned int slot);
+
+The first function returns the size as an order and the second as a number of
+bytes.
+
+
+Querying information about a folio_queue
+========================================
+
+Information may be retrieved about a particular segment with the following
+functions::
+
+ unsigned int folioq_nr_slots(const struct folio_queue *folioq);
+
+ unsigned int folioq_count(struct folio_queue *folioq);
+
+ bool folioq_full(struct folio_queue *folioq);
+
+The first function returns the maximum capacity of a segment. It must not be
+assumed that this won't vary between segments. The second returns the number
+of folios added to a segment and the third is a shorthand to indicate if the
+segment has been filled to capacity.
+
+Note that the count and fullness are not affected by clearing folios from the
+segment. These are more about indicating how many slots in the array have been
+initialised; it is assumed that slots won't get reused, but rather that the
+segment will be discarded as the queue is consumed.
+
+
+Folio marks
+===========
+
+Folios within a queue can also have marks assigned to them. These marks can be
+used to note information such as if a folio needs folio_put() calling upon it.
+There are three marks available to be set for each folio.
+
+The marks can be set by::
+
+ void folioq_mark(struct folio_queue *folioq, unsigned int slot);
+ void folioq_mark2(struct folio_queue *folioq, unsigned int slot);
+ void folioq_mark3(struct folio_queue *folioq, unsigned int slot);
+
+Cleared by::
+
+ void folioq_unmark(struct folio_queue *folioq, unsigned int slot);
+ void folioq_unmark2(struct folio_queue *folioq, unsigned int slot);
+ void folioq_unmark3(struct folio_queue *folioq, unsigned int slot);
+
+And the marks can be queried by::
+
+ bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot);
+ bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot);
+ bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot);
+
+The marks can be used for any purpose and are not interpreted by this API.
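+
+As an example, the first mark could be used to record that a reference is held
+on the folio and that folio_put() must be called when the segment is retired.
+A consumer-side sketch (illustrative only, not part of the API) might then be::
+
+	unsigned int slot;
+
+	for (slot = 0; slot < folioq_count(folioq); slot++) {
+		struct folio *folio = folioq_folio(folioq, slot);
+
+		if (folio && folioq_is_marked(folioq, slot))
+			folio_put(folio);
+		folioq_clear(folioq, slot);
+	}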
+
+
+Folio queue iteration
+=====================
+
+A list of segments may be iterated over using the I/O iterator facility using
+an ``iov_iter`` iterator of ``ITER_FOLIOQ`` type. The iterator may be
+initialised with::
+
+ void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq,
+ unsigned int first_slot, unsigned int offset,
+ size_t count);
+
+This may be told to start at a particular segment, slot and offset within a
+queue. The iov iterator functions will follow the next pointers when advancing
+and the prev pointers when reverting, as needed.
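+
+For example, to describe the entire contents of a queue as the source buffer
+for a write-type operation (where ``len`` is assumed to have been computed by
+the caller), one might do::
+
+	struct iov_iter iter;
+
+	iov_iter_folio_queue(&iter, ITER_SOURCE, folioq, 0, 0, len);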
+
+
+Lockless simultaneous production/consumption issues
+===================================================
+
+If properly managed, the list can be extended by the producer at the head end
+and shortened by the consumer at the tail end simultaneously without the need
+to take locks. The ITER_FOLIOQ iterator inserts appropriate barriers to aid
+with this.
+
+Care must be taken when simultaneously producing and consuming a list. If the
+last segment is reached and the folios it refers to are entirely consumed by
+the IOV iterators, an iov_iter struct will be left pointing to the last segment
+with a slot number equal to the capacity of that segment. The iterator will
+try to continue on from this if there's another segment available when it is
+used again, but care must be taken lest the segment gets removed and freed by
+the consumer before the iterator is advanced.
+
+It is recommended that the queue always contain at least one segment, even if
+that segment has never been filled or is entirely spent. This prevents the
+head and tail pointers from collapsing.
+
+
+API Function Reference
+======================
+
+.. kernel-doc:: include/linux/folio_queue.h
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index e18a2ffe0787..a331d2c814f5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -35,6 +35,7 @@ Library functionality that is used throughout the kernel.
kobject
kref
+ cleanup
assoc_array
xarray
maple_tree
diff --git a/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml
new file mode 100644
index 000000000000..170aad5dd7ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/cirrus/cirrus,ep9301.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx platforms
+
+description:
+  The EP93xx is an ARMv4T-based SoC with a 200 MHz ARM9 CPU.
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: The TS-7250 is a compact, full-featured Single Board
+ Computer (SBC) based upon the Cirrus EP9302 ARM9 CPU
+ items:
+ - const: technologic,ts7250
+ - const: cirrus,ep9301
+
+      - description: The Liebherr BK3 is a derivative of the TS-7250 board
+ items:
+ - const: liebherr,bk3
+ - const: cirrus,ep9301
+
+      - description: The EDB9302 is an evaluation board by Cirrus Logic,
+          based on the Cirrus Logic EP9302 CPU
+ items:
+ - const: cirrus,edb9302
+ - const: cirrus,ep9301
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml
new file mode 100644
index 000000000000..8130923fdc72
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/cirrus,ep9312-pata.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP9312 PATA controller
+
+maintainers:
+ - Damien Le Moal <dlemoal@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9312-pata
+ - items:
+ - const: cirrus,ep9315-pata
+ - const: cirrus,ep9312-pata
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ide@800a0000 {
+ compatible = "cirrus,ep9312-pata";
+ reg = <0x800a0000 0x38>;
+ interrupt-parent = <&vic1>;
+ interrupts = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ide_default_pins>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml
new file mode 100644
index 000000000000..871b76ddf90f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2m.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx SoC DMA controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-dma-m2m
+ - items:
+ - enum:
+ - cirrus,ep9302-dma-m2m
+ - cirrus,ep9307-dma-m2m
+ - cirrus,ep9312-dma-m2m
+ - cirrus,ep9315-dma-m2m
+ - const: cirrus,ep9301-dma-m2m
+
+ reg:
+ items:
+ - description: m2m0 channel registers
+ - description: m2m1 channel registers
+
+ clocks:
+ items:
+ - description: m2m0 channel gate clock
+ - description: m2m1 channel gate clock
+
+ clock-names:
+ items:
+ - const: m2m0
+ - const: m2m1
+
+ interrupts:
+ items:
+ - description: m2m0 channel interrupt
+ - description: m2m1 channel interrupt
+
+ '#dma-cells':
+ const: 2
+ description: |
+ The first cell is the unique device channel number as indicated by this
+ table for ep93xx:
+
+ 10: SPI controller
+ 11: IDE controller
+
+ The second cell is the DMA direction line number:
+
+ 1: Memory to device
+ 2: Device to memory
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ dma-controller@80000100 {
+ compatible = "cirrus,ep9301-dma-m2m";
+ reg = <0x80000100 0x0040>,
+ <0x80000140 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2M0>,
+ <&syscon EP93XX_CLK_M2M1>;
+ clock-names = "m2m0", "m2m1";
+ interrupt-parent = <&vic0>;
+ interrupts = <17>, <18>;
+ #dma-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml
new file mode 100644
index 000000000000..d14c31553543
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2p.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx SoC M2P DMA controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-dma-m2p
+ - items:
+ - enum:
+ - cirrus,ep9302-dma-m2p
+ - cirrus,ep9307-dma-m2p
+ - cirrus,ep9312-dma-m2p
+ - cirrus,ep9315-dma-m2p
+ - const: cirrus,ep9301-dma-m2p
+
+ reg:
+ items:
+ - description: m2p0 channel registers
+ - description: m2p1 channel registers
+ - description: m2p2 channel registers
+ - description: m2p3 channel registers
+ - description: m2p4 channel registers
+ - description: m2p5 channel registers
+ - description: m2p6 channel registers
+ - description: m2p7 channel registers
+ - description: m2p8 channel registers
+ - description: m2p9 channel registers
+
+ clocks:
+ items:
+ - description: m2p0 channel gate clock
+ - description: m2p1 channel gate clock
+ - description: m2p2 channel gate clock
+ - description: m2p3 channel gate clock
+ - description: m2p4 channel gate clock
+ - description: m2p5 channel gate clock
+ - description: m2p6 channel gate clock
+ - description: m2p7 channel gate clock
+ - description: m2p8 channel gate clock
+ - description: m2p9 channel gate clock
+
+ clock-names:
+ items:
+ - const: m2p0
+ - const: m2p1
+ - const: m2p2
+ - const: m2p3
+ - const: m2p4
+ - const: m2p5
+ - const: m2p6
+ - const: m2p7
+ - const: m2p8
+ - const: m2p9
+
+ interrupts:
+ items:
+ - description: m2p0 channel interrupt
+ - description: m2p1 channel interrupt
+ - description: m2p2 channel interrupt
+ - description: m2p3 channel interrupt
+ - description: m2p4 channel interrupt
+ - description: m2p5 channel interrupt
+ - description: m2p6 channel interrupt
+ - description: m2p7 channel interrupt
+ - description: m2p8 channel interrupt
+ - description: m2p9 channel interrupt
+
+ '#dma-cells':
+ const: 2
+ description: |
+ The first cell is the unique device channel number as indicated by this
+ table for ep93xx:
+
+ 0: I2S channel 1
+ 1: I2S channel 2 (unused)
+ 2: AC97 channel 1 (unused)
+ 3: AC97 channel 2 (unused)
+ 4: AC97 channel 3 (unused)
+ 5: I2S channel 3 (unused)
+ 6: UART1 (unused)
+ 7: UART2 (unused)
+ 8: UART3 (unused)
+ 9: IRDA (unused)
+
+ The second cell is the DMA direction line number:
+
+ 1: Memory to device
+ 2: Device to memory
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ dma-controller@80000000 {
+ compatible = "cirrus,ep9301-dma-m2p";
+ reg = <0x80000000 0x0040>,
+ <0x80000040 0x0040>,
+ <0x80000080 0x0040>,
+ <0x800000c0 0x0040>,
+ <0x80000240 0x0040>,
+ <0x80000200 0x0040>,
+ <0x800002c0 0x0040>,
+ <0x80000280 0x0040>,
+ <0x80000340 0x0040>,
+ <0x80000300 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2P0>,
+ <&syscon EP93XX_CLK_M2P1>,
+ <&syscon EP93XX_CLK_M2P2>,
+ <&syscon EP93XX_CLK_M2P3>,
+ <&syscon EP93XX_CLK_M2P4>,
+ <&syscon EP93XX_CLK_M2P5>,
+ <&syscon EP93XX_CLK_M2P6>,
+ <&syscon EP93XX_CLK_M2P7>,
+ <&syscon EP93XX_CLK_M2P8>,
+ <&syscon EP93XX_CLK_M2P9>;
+ clock-names = "m2p0", "m2p1",
+ "m2p2", "m2p3",
+ "m2p4", "m2p5",
+ "m2p6", "m2p7",
+ "m2p8", "m2p9";
+ interrupt-parent = <&vic0>;
+ interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>, <16>;
+ #dma-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
index d5cfa32ea52d..072b3c0c5fd0 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -37,6 +37,11 @@ properties:
GPIO pin (output) used to control VBUS. If skipped, no such control
takes place.
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+    A port node to link to the USB controller for the dual role switch.
+
required:
- compatible
- interrupts
@@ -58,5 +63,11 @@ examples:
interrupt-parent = <&msmgpio>;
interrupts = <78 IRQ_TYPE_LEVEL_HIGH>;
vbus-gpios = <&msmgpio 148 GPIO_ACTIVE_HIGH>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
deleted file mode 100644
index dfc14f71e81f..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-USB GPIO Extcon device
-
-This is a virtual device used to generate USB cable states from the USB ID pin
-connected to a GPIO pin.
-
-Required properties:
-- compatible: Should be "linux,extcon-usb-gpio"
-
-Either one of id-gpio or vbus-gpio must be present. Both can be present as well.
-- id-gpio: gpio for USB ID pin. See gpio binding.
-- vbus-gpio: gpio for USB VBUS pin.
-
-Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
- extcon_usb1 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>;
- }
-
- &omap_dwc3_1 {
- extcon = <&extcon_usb1>;
- };
diff --git a/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
new file mode 100644
index 000000000000..8856107bdd33
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/linux,extcon-usb-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB GPIO Extcon device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ This is a virtual device used to generate USB cable states from the USB ID pin
+ connected to a GPIO pin.
+
+properties:
+ compatible:
+ const: linux,extcon-usb-gpio
+
+ id-gpios:
+    description: GPIO for the USB ID pin. See gpio binding.
+ vbus-gpios:
+    description: GPIO for the USB VBUS pin.
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
index daadfb4926c3..3a1079d6ee20 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
@@ -73,9 +73,10 @@ examples:
reg-names = "data", "dir", "intr";
gpio-controller;
#gpio-cells = <2>;
- interrupt-controller;
- interrupt-parent = <&vic1>;
- interrupts = <27>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
};
gpio@80840004 {
@@ -87,6 +88,7 @@ examples:
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&vic1>;
interrupts = <27>;
};
@@ -127,6 +129,7 @@ examples:
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts-extended = <&vic0 19>, <&vic0 20>,
<&vic0 21>, <&vic0 22>,
<&vic1 15>, <&vic1 16>,
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
new file mode 100644
index 000000000000..f1ff5ff4f478
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/adi,adxl380.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADXL380/382 3-Axis Digital Accelerometer
+
+maintainers:
+ - Ramona Gradinariu <ramona.gradinariu@analog.com>
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ The ADXL380/ADXL382 is a low noise density, low power, 3-axis
+ accelerometer with selectable measurement ranges. The ADXL380
+ supports the ±4 g, ±8 g, and ±16 g ranges, and the ADXL382 supports
+ ±15 g, ±30 g, and ±60 g ranges.
+
+ https://www.analog.com/en/products/adxl380.html
+
+properties:
+ compatible:
+ enum:
+ - adi,adxl380
+ - adi,adxl382
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - enum: [INT0, INT1]
+ - const: INT1
+
+ vddio-supply: true
+
+ vsupply-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - vddio-supply
+ - vsupply-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@54 {
+ compatible = "adi,adxl380";
+ reg = <0x54>;
+ vddio-supply = <&vddio>;
+ vsupply-supply = <&vsupply>;
+ interrupt-parent = <&gpio>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT0";
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@0 {
+ compatible = "adi,adxl380";
+ reg = <0>;
+ spi-max-frequency = <8000000>;
+ vddio-supply = <&vddio>;
+ vsupply-supply = <&vsupply>;
+ interrupt-parent = <&gpio>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT0";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
index 6ddb03f61bd9..951a3a2ba8fc 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
@@ -16,6 +16,7 @@ properties:
- kionix,kxcj91008
- kionix,kxtj21009
- kionix,kxtf9
+ - kionix,kx022-1020
- kionix,kx023-1025
reg:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
new file mode 100644
index 000000000000..310f046e139f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
@@ -0,0 +1,254 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad4695.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices Easy Drive Multiplexed SAR Analog to Digital Converters
+
+maintainers:
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ A family of similar multi-channel analog to digital converters with SPI bus.
+
+ * https://www.analog.com/en/products/ad4695.html
+ * https://www.analog.com/en/products/ad4696.html
+ * https://www.analog.com/en/products/ad4697.html
+ * https://www.analog.com/en/products/ad4698.html
+
+$ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,ad4695
+ - adi,ad4696
+ - adi,ad4697
+ - adi,ad4698
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 80000000
+
+ spi-cpol: true
+ spi-cpha: true
+
+ spi-rx-bus-width:
+ minimum: 1
+ maximum: 4
+
+ avdd-supply:
+ description: Analog power supply.
+
+ vio-supply:
+ description: I/O pin power supply.
+
+ ldo-in-supply:
+ description: Internal LDO Input. Mutually exclusive with vdd-supply.
+
+ vdd-supply:
+ description: Core power supply. Mutually exclusive with ldo-in-supply.
+
+ ref-supply:
+ description:
+ External reference voltage. Mutually exclusive with refin-supply.
+
+ refin-supply:
+ description:
+ Internal reference buffer input. Mutually exclusive with ref-supply.
+
+ com-supply:
+ description: Common voltage supply for pseudo-differential analog inputs.
+
+ adi,no-ref-current-limit:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ When this flag is present, the REF Overvoltage Reduced Current protection
+ is disabled.
+
+ adi,no-ref-high-z:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable this flag if the ref-supply requires Reference Input High-Z Mode
+ to be disabled for proper operation.
+
+ cnv-gpios:
+ description: The Convert Input (CNV). If omitted, CNV is tied to SPI CS.
+ maxItems: 1
+
+ reset-gpios:
+ description: The Reset Input (RESET). Should be configured GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: Signal coming from the BSY_ALT_GP0 pin (ALERT or BUSY).
+ - description: Signal coming from the GP2 pin (ALERT).
+ - description: Signal coming from the GP3 pin (BUSY).
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: gp0
+ - const: gp2
+ - const: gp3
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+ description: |
+ The first cell is the GPn number: 0 to 3.
+ The second cell takes standard GPIO flags.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^in(?:[13579]|1[135])-supply$":
+ description:
+ Optional voltage supply for odd numbered channels when they are used as
+ the negative input for a pseudo-differential channel.
+
+ "^channel@[0-9a-f]$":
+ type: object
+ $ref: adc.yaml
+ unevaluatedProperties: false
+ description:
+      Describes each individual channel. In addition to the properties defined
+      below, bipolar from adc.yaml is also supported.
+
+ properties:
+ reg:
+ maximum: 15
+
+ common-mode-channel:
+ description:
+ Describes the common mode channel for single channels. 0xFF is REFGND
+          and 0xFE is COM. Macros are available for these values in
+ dt-bindings/iio/adi,ad4695.h. Values 1 to 15 correspond to INx inputs.
+ Only odd numbered INx inputs can be used as common mode channels.
+ enum: [1, 3, 5, 7, 9, 11, 13, 15, 0xFE, 0xFF]
+ default: 0xFF
+
+ adi,no-high-z:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable this flag if the input pin requires the Analog Input High-Z
+ Mode to be disabled for proper operation.
+
+ required:
+ - reg
+
+ allOf:
+ # bipolar mode can't be used with REFGND
+ - if:
+ properties:
+ common-mode-channel:
+ const: 0xFF
+ then:
+ properties:
+ bipolar: false
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vio-supply
+
+allOf:
+ - oneOf:
+ - required:
+ - ldo-in-supply
+ - required:
+ - vdd-supply
+
+ - oneOf:
+ - required:
+ - ref-supply
+ - required:
+ - refin-supply
+
+ # the internal reference buffer always requires high-z mode
+ - if:
+ required:
+ - refin-supply
+ then:
+ properties:
+ adi,no-ref-high-z: false
+
+ # limit channels for 8-channel chips
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad4697
+ - adi,ad4698
+ then:
+ patternProperties:
+ "^in(?:9|1[135])-supply$": false
+ "^channel@[0-7]$":
+ properties:
+ reg:
+ maximum: 7
+ common-mode-channel:
+ enum: [1, 3, 5, 7, 0xFE, 0xFF]
+ "^channel@[8-9a-f]$": false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/iio/adi,ad4695.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad4695";
+ reg = <0>;
+ spi-cpol;
+ spi-cpha;
+ spi-max-frequency = <80000000>;
+ avdd-supply = <&power_supply>;
+ ldo-in-supply = <&power_supply>;
+ vio-supply = <&io_supply>;
+ refin-supply = <&supply_5V>;
+ com-supply = <&supply_2V5>;
+ in3-supply = <&supply_2V5>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Pseudo-differential channel between IN0 and REFGND. */
+ channel@0 {
+ reg = <0>;
+ };
+
+ /* Pseudo-differential channel between IN1 and COM. */
+ channel@1 {
+ reg = <1>;
+ common-mode-channel = <AD4695_COMMON_MODE_COM>;
+ bipolar;
+ };
+
+ /* Pseudo-differential channel between IN2 and IN3. */
+ channel@2 {
+ reg = <2>;
+ common-mode-channel = <3>;
+ bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 190889c7b62a..66dd1c549bd3 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -39,11 +39,21 @@ properties:
clocks:
maxItems: 1
- description: phandle to the master clock (mclk)
+ description:
+ Optionally, either a crystal can be attached externally between MCLK1 and
+      MCLK2 pins, or an external CMOS-compatible clock can drive the MCLK2
+      pin. If absent, the internal 4.92 MHz clock is used, which can be made
+      available on the MCLK2 pin.
clock-names:
- items:
- - const: mclk
+ enum:
+ - xtal
+ - mclk
+
+ "#clock-cells":
+ const: 0
+ description:
+      May only be present when the internal clock is used; the device is then
+      configured as a clock provider.
interrupts:
maxItems: 1
@@ -134,8 +144,6 @@ patternProperties:
required:
- compatible
- reg
- - clocks
- - clock-names
- interrupts
- dvdd-supply
- avdd-supply
@@ -156,6 +164,18 @@ allOf:
then:
patternProperties:
"^channel@[0-9a-f]+$": false
+ - if:
+ anyOf:
+ - required:
+ - clocks
+ - required:
+ - clock-names
+ then:
+ properties:
+ "#clock-cells": false
+ required:
+ - clocks
+ - clock-names
unevaluatedProperties: false
@@ -201,8 +221,7 @@ examples:
spi-max-frequency = <1000000>;
spi-cpol;
spi-cpha;
- clocks = <&ad7192_mclk>;
- clock-names = "mclk";
+ #clock-cells = <0>;
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
aincom-supply = <&aincom>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
index 899b777017ce..bd19abb867d9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
@@ -15,10 +15,17 @@ description: |
* https://www.analog.com/en/products/ad7381.html
* https://www.analog.com/en/products/ad7383.html
* https://www.analog.com/en/products/ad7384.html
+ * https://www.analog.com/en/products/ad7386.html
+ * https://www.analog.com/en/products/ad7387.html
+ * https://www.analog.com/en/products/ad7388.html
* https://www.analog.com/en/products/ad7380-4.html
* https://www.analog.com/en/products/ad7381-4.html
* https://www.analog.com/en/products/ad7383-4.html
* https://www.analog.com/en/products/ad7384-4.html
+ * https://www.analog.com/en/products/ad7386-4.html
+ * https://www.analog.com/en/products/ad7387-4.html
+ * https://www.analog.com/en/products/ad7388-4.html
+
$ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -29,10 +36,16 @@ properties:
- adi,ad7381
- adi,ad7383
- adi,ad7384
+ - adi,ad7386
+ - adi,ad7387
+ - adi,ad7388
- adi,ad7380-4
- adi,ad7381-4
- adi,ad7383-4
- adi,ad7384-4
+ - adi,ad7386-4
+ - adi,ad7387-4
+ - adi,ad7388-4
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 00fdaed11cbd..69408cae3db9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -35,65 +35,83 @@ properties:
avcc-supply: true
+ vdrive-supply:
+ description:
+ Determines the voltage level at which the interface logic pins will
+ operate.
+
+ refin-supply:
+ description:
+ The voltage supply for optional external reference voltage.
+
interrupts:
+ description:
+ The BUSY pin falling edge indicates that the conversion is over, and thus
+ new data is available.
maxItems: 1
adi,conversion-start-gpios:
description:
- Must be the device tree identifier of the CONVST pin.
- This logic input is used to initiate conversions on the analog
- input channels. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
- maxItems: 1
+ Must be the device tree identifier of the CONVST pin(s). This logic input
+ is used to initiate conversions on the analog input channels. As the line
+ is active high, it should be marked GPIO_ACTIVE_HIGH.
+ minItems: 1
+ maxItems: 2
reset-gpios:
description:
- Must be the device tree identifier of the RESET pin. If specified,
- it will be asserted during driver probe. As the line is active high,
- it should be marked GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the RESET pin. If specified, it will
+ be asserted during driver probe. On the AD7606x, as the line is active
+ high, it should be marked GPIO_ACTIVE_HIGH. On the AD7616, as the line is
+ active low, it should be marked GPIO_ACTIVE_LOW.
maxItems: 1
standby-gpios:
description:
- Must be the device tree identifier of the STBY pin. This pin is used
- to place the AD7606 into one of two power-down modes, Standby mode or
+ Must be the device tree identifier of the STBY pin. This pin is used to
+ place the AD7606 into one of two power-down modes, Standby mode or
Shutdown mode. As the line is active low, it should be marked
GPIO_ACTIVE_LOW.
maxItems: 1
adi,first-data-gpios:
description:
- Must be the device tree identifier of the FRSTDATA pin.
- The FRSTDATA output indicates when the first channel, V1, is
- being read back on either the parallel, byte or serial interface.
- As the line is active high, it should be marked GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the FRSTDATA pin. The FRSTDATA
+ output indicates when the first channel, V1, is being read back on either
+ the parallel, byte or serial interface. As the line is active high, it
+ should be marked GPIO_ACTIVE_HIGH.
maxItems: 1
adi,range-gpios:
description:
- Must be the device tree identifier of the RANGE pin. The polarity on
- this pin determines the input range of the analog input channels. If
- this pin is tied to a logic high, the analog input range is ±10V for
- all channels. If this pin is tied to a logic low, the analog input range
+ Must be the device tree identifier of the RANGE pin. The state on this
+ pin determines the input range of the analog input channels. If this pin
+ is tied to a logic high, the analog input range is ±10V for all channels.
+ On the AD760X, if this pin is tied to a logic low, the analog input range
is ±5V for all channels. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
- maxItems: 1
+ GPIO_ACTIVE_HIGH. On the AD7616, there are 2 pins, and if the 2 pins are
+ tied to a logic high, software mode is enabled, otherwise one of the 3
+ possible range values is selected.
+ minItems: 1
+ maxItems: 2
adi,oversampling-ratio-gpios:
description:
- Must be the device tree identifier of the over-sampling
- mode pins. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the over-sampling mode pins. As the
+ line is active high, it should be marked GPIO_ACTIVE_HIGH. On the AD7606X
+ parts that support it, if all 3 pins are tied to a logic high, software
+ mode is enabled.
maxItems: 3
adi,sw-mode:
description:
- Software mode of operation, so far available only for ad7616 and ad7606b.
- It is enabled when all three oversampling mode pins are connected to
- high level. The device is configured by the corresponding registers. If the
- adi,oversampling-ratio-gpios property is defined, then the driver will set the
- oversampling gpios to high. Otherwise, it is assumed that the pins are hardwired
- to VDD.
+ Software mode of operation, so far available only for AD7616 and AD7606B.
+ It is enabled when all three oversampling mode pins are connected to high
+ level for the AD7606B, or both the range selection are connected to high
+ level for the AD7616. The device is configured by the corresponding
+ registers. If the adi,oversampling-ratio-gpios property is defined, then
+ the driver will set the oversampling gpios to high. Otherwise, it is
+ assumed that the pins are hardwired to VDD.
type: boolean
required:
@@ -101,12 +119,57 @@ required:
- reg
- spi-cpha
- avcc-supply
+ - vdrive-supply
- interrupts
- adi,conversion-start-gpios
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: adi,ad7616
+ then:
+ properties:
+ adi,first-data-gpios: false
+ standby-gpios: false
+ adi,range-gpios:
+ maxItems: 2
+ else:
+ properties:
+ adi,range-gpios:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7605-4
+ - adi,ad7616
+ then:
+ properties:
+ adi,oversampling-ratio-gpios: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7605-4
+ - adi,ad7606-4
+ - adi,ad7606-6
+ - adi,ad7606-8
+ then:
+ properties:
+ adi,sw-mode: false
+ else:
+ properties:
+ adi,conversion-start-gpios:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
@@ -125,6 +188,7 @@ examples:
spi-cpha;
avcc-supply = <&adc_vref>;
+ vdrive-supply = <&vdd_supply>;
interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
interrupt-parent = <&gpio>;
@@ -136,7 +200,6 @@ examples:
<&gpio 23 GPIO_ACTIVE_HIGH>,
<&gpio 26 GPIO_ACTIVE_HIGH>;
standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
- adi,sw-mode;
};
};
...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
index eecd5fbab695..2606c0c5dfc6 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
@@ -28,6 +28,9 @@ properties:
- adi,ad9265
- adi,ad9434
- adi,ad9467
+ - adi,ad9643
+ - adi,ad9649
+ - adi,ad9652
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
new file mode 100644
index 000000000000..12e56b1b3d3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/microchip,pac1921.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PAC1921 High-Side Power/Current Monitor with Analog Output
+
+maintainers:
+ - Matteo Martelli <matteomartelli3@gmail.com>
+
+description: |
+ The PAC1921 is a power/current monitoring device with an analog output
+ and I2C/SMBus interface.
+
+ Datasheet can be found here:
+ https://ww1.microchip.com/downloads/en/DeviceDoc/PAC1921-Data-Sheet-DS20005293E.pdf
+
+properties:
+ compatible:
+ const: microchip,pac1921
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+ "#io-channel-cells":
+ const: 1
+
+ shunt-resistor-micro-ohms:
+ description:
+ Value in micro Ohms of the shunt resistor connected between
+ the SENSE+ and SENSE- inputs, across which the current is measured.
+ Value is needed to compute the scaling of the measured current.
+
+ label:
+ description: Unique name to identify which device this is.
+
+ read-integrate-gpios:
+ description:
+ READ/INT input pin to control the current state of the device, either in
+ the INTEGRATE state when driven high, or in the READ state when driven low.
+ When not connected the pin is floating and it can be overridden by the
+ INT_EN register bit after asserting the READ/INT_OVR register bit.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - shunt-resistor-micro-ohms
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@4c {
+ compatible = "microchip,pac1921";
+ reg = <0x4c>;
+ vdd-supply = <&vdd>;
+ #io-channel-cells = <1>;
+ label = "vbat";
+ shunt-resistor-micro-ohms = <10000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
index aa24b841393c..fd93ed3991e0 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
@@ -17,6 +17,9 @@ properties:
- const: rockchip,rk3399-saradc
- const: rockchip,rk3588-saradc
- items:
+ - const: rockchip,rk3576-saradc
+ - const: rockchip,rk3588-saradc
+ - items:
- enum:
- rockchip,px30-saradc
- rockchip,rk3308-saradc
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
index cab0d425eaa4..c3a116427dc3 100644
--- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
@@ -18,18 +18,39 @@ properties:
- sd-modulator
- ads1201
+ '#io-backend-cells':
+ const: 0
+
'#io-channel-cells':
const: 0
+ vref-supply:
+ description: Phandle to the vref input analog reference voltage.
+
+dependencies:
+ vref-supply: [ '#io-backend-cells' ]
+
required:
- compatible
- - '#io-channel-cells'
+
+anyOf:
+ - required: ['#io-backend-cells']
+ - required: ['#io-channel-cells']
additionalProperties: false
examples:
- |
- ads1202: adc {
+ // Backend binding example. SD modulator configured as an IIO backend device
+ ads1201_0: adc {
+ compatible = "sd-modulator";
+ vref-supply = <&vdd_adc>;
+ #io-backend-cells = <0>;
+ };
+
+ - |
+ // Legacy binding example. SD modulator configured as an IIO channel provider
+ ads1201_1: adc {
compatible = "sd-modulator";
#io-channel-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml
new file mode 100644
index 000000000000..f652b98615f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/sophgo,cv1800b-saradc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+ Sophgo CV1800B SoC 3 channels Successive Approximation Analog to
+ Digital Converters
+
+maintainers:
+ - Thomas Bonnefille <thomas.bonnefille@bootlin.com>
+
+description:
+ Datasheet at https://github.com/sophgo/sophgo-doc/releases
+
+properties:
+ compatible:
+ const: sophgo,cv1800b-saradc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-2]$":
+ $ref: adc.yaml
+
+ properties:
+ reg:
+ items:
+ - minimum: 0
+ maximum: 2
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sophgo,cv1800.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ adc@30f0000 {
+ compatible = "sophgo,cv1800b-saradc";
+ reg = <0x030f0000 0x1000>;
+ clocks = <&clk CLK_SARADC>;
+ interrupts = <100 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ };
+
+ channel@2 {
+ reg = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index ec34c48d4878..ef9dcc365eab 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -54,7 +54,9 @@ properties:
It's not present on stm32f4.
It's required on stm32h7 and stm32mp1.
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
st,max-clk-rate-hz:
description:
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 2722edab1d9a..c24ac98bbb3d 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -102,9 +102,11 @@ patternProperties:
items:
minimum: 0
maximum: 7
+ deprecated: true
st,adc-channel-names:
description: List of single-ended channel names.
+ deprecated: true
st,filter-order:
description: |
@@ -118,6 +120,12 @@ patternProperties:
"#io-channel-cells":
const: 1
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
st,adc-channel-types:
description: |
Single-ended channel input type.
@@ -128,6 +136,7 @@ patternProperties:
items:
enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
$ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ deprecated: true
st,adc-channel-clk-src:
description: |
@@ -139,6 +148,7 @@ patternProperties:
items:
enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
$ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ deprecated: true
st,adc-alt-channel:
description:
@@ -147,6 +157,7 @@ patternProperties:
If not set, channel n is connected to SPI input n.
If set, channel n is connected to SPI input n + 1.
type: boolean
+ deprecated: true
st,filter0-sync:
description:
@@ -165,11 +176,60 @@ patternProperties:
- compatible
- reg
- interrupts
- - st,adc-channels
- - st,adc-channel-names
- st,filter-order
- "#io-channel-cells"
+ patternProperties:
+ "^channel@[0-7]$":
+ type: object
+ $ref: adc.yaml
+ unevaluatedProperties: false
+ description: Represents the external channels which are connected to the DFSDM.
+
+ properties:
+ reg:
+ maximum: 7
+
+ label:
+ description:
+ Unique name to identify which channel this is.
+
+ st,adc-channel-type:
+ description: |
+ Single-ended channel input type.
+ - "SPI_R": SPI with data on rising edge (default)
+ - "SPI_F": SPI with data on falling edge
+ - "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1
+ - "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
+
+ st,adc-channel-clk-src:
+ description: |
+ Conversion clock source.
+ - "CLKIN": external SPI clock (CLKIN x)
+ - "CLKOUT": internal SPI clock (CLKOUT) (default)
+ - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+ - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
+
+ st,adc-alt-channel:
+ description:
+ Must be defined if two sigma delta modulators are
+ connected on same SPI input.
+            connected on the same SPI input.
+ If set, channel n is connected to SPI input n + 1.
+ type: boolean
+
+ io-backends:
+ description:
+ Used to pipe external sigma delta modulator or internal ADC backend to DFSDM channel.
+ maxItems: 1
+
+ required:
+ - reg
+
allOf:
- if:
properties:
@@ -199,9 +259,19 @@ patternProperties:
description:
From common IIO binding. Used to pipe external sigma delta
modulator or internal ADC output to DFSDM channel.
+ deprecated: true
- required:
- - io-channels
+ if:
+ required:
+ - st,adc-channels
+ then:
+ required:
+ - io-channels
+
+ patternProperties:
+ "^channel@[0-7]$":
+ required:
+ - io-backends
- if:
properties:
@@ -298,6 +368,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
+ // Example 1: Audio use case with generic binding
dfsdm0: filter@0 {
compatible = "st,stm32-dfsdm-dmic";
reg = <0>;
@@ -305,12 +376,18 @@ examples:
dmas = <&dmamux1 101 0x400 0x01>;
dma-names = "rx";
#io-channel-cells = <1>;
- st,adc-channels = <1>;
- st,adc-channel-names = "dmic0";
- st,adc-channel-types = "SPI_R";
- st,adc-channel-clk-src = "CLKOUT";
+ #address-cells = <1>;
+ #size-cells = <0>;
st,filter-order = <5>;
+ channel@1 {
+ reg = <1>;
+ label = "dmic0";
+ st,adc-channel-type = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,adc-alt-channel;
+ };
+
asoc_pdm0: dfsdm-dai {
compatible = "st,stm32h7-dfsdm-dai";
#sound-dai-cells = <0>;
@@ -318,19 +395,34 @@ examples:
};
};
- dfsdm_pdm1: filter@1 {
+ // Example 2: Analog use case with generic binding
+ dfsdm1: filter@1 {
compatible = "st,stm32-dfsdm-adc";
reg = <1>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&dmamux1 102 0x400 0x01>;
dma-names = "rx";
- #io-channel-cells = <1>;
- st,adc-channels = <2 3>;
- st,adc-channel-names = "in2", "in3";
- st,adc-channel-types = "SPI_R", "SPI_R";
- st,adc-channel-clk-src = "CLKOUT_F", "CLKOUT_F";
- io-channels = <&sd_adc2 &sd_adc3>;
st,filter-order = <1>;
+ #io-channel-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@2 {
+ reg = <2>;
+ label = "in2";
+ st,adc-channel-type = "SPI_F";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,adc-alt-channel;
+ io-backends = <&sd_adc2>;
+ };
+
+ channel@3 {
+ reg = <3>;
+ label = "in3";
+ st,adc-channel-type = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ io-backends = <&sd_adc3>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
index d40689f233f2..1caa896fce82 100644
--- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
@@ -37,6 +37,17 @@ description: |
3 | batt_dischrg_i
4 | ts_v
+ AXP717
+ ------
+ 0 | batt_v
+ 1 | ts_v
+ 2 | vbus_v
+ 3 | vsys_v
+ 4 | pmic_temp
+ 5 | batt_chrg_i
+ 6 | vmid_v
+ 7 | bkup_batt_v
+
AXP813
------
0 | pmic_temp
@@ -52,6 +63,7 @@ properties:
oneOf:
- const: x-powers,axp209-adc
- const: x-powers,axp221-adc
+ - const: x-powers,axp717-adc
- const: x-powers,axp813-adc
- items:
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
new file mode 100644
index 000000000000..33490853497b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
@@ -0,0 +1,181 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ltc2664.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2664 DAC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Kim Seer Paller <kimseer.paller@analog.com>
+
+description: |
+ Analog Devices LTC2664 4 channel, 12-/16-Bit, +-10V DAC
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2664fa.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2664
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 50000000
+
+ vcc-supply:
+ description: Analog Supply Voltage Input.
+
+ v-pos-supply:
+ description: Positive Supply Voltage Input.
+
+ v-neg-supply:
+ description: Negative Supply Voltage Input.
+
+ iovcc-supply:
+ description: Digital Input/Output Supply Voltage.
+
+ ref-supply:
+ description:
+ Reference Input/Output. The voltage at the REF pin sets the full-scale
+ range of all channels. If not provided the internal reference is used and
+ also provided on the VREF pin.
+
+ reset-gpios:
+ description:
+ Active-low Asynchronous Clear Input. A logic low at this level-triggered
+ input clears the part to the reset code and range determined by the
+ hardwired option chosen using the MSPAN pins. The control registers are
+ cleared to zero.
+ maxItems: 1
+
+ adi,manual-span-operation-config:
+ description:
+ This property must mimic the MSPAN pin configurations. By tying the MSPAN
+ pins (MSP2, MSP1 and MSP0) to GND and/or VCC, any output range can be
+ hardware-configured with different mid-scale or zero-scale reset options.
+ The hardware configuration is latched during power on reset for proper
+ operation.
+      0 - MSP2=GND, MSP1=GND, MSP0=GND (+-10V, reset to 0V)
+      1 - MSP2=GND, MSP1=GND, MSP0=VCC (+-5V, reset to 0V)
+      2 - MSP2=GND, MSP1=VCC, MSP0=GND (+-2.5V, reset to 0V)
+      3 - MSP2=GND, MSP1=VCC, MSP0=VCC (0V to 10V, reset to 0V)
+      4 - MSP2=VCC, MSP1=GND, MSP0=GND (0V to 10V, reset to 5V)
+      5 - MSP2=VCC, MSP1=GND, MSP0=VCC (0V to 5V, reset to 0V)
+      6 - MSP2=VCC, MSP1=VCC, MSP0=GND (0V to 5V, reset to 2.5V)
+      7 - MSP2=VCC, MSP1=VCC, MSP0=VCC (0V to 5V, reset to 0V, enables SoftSpan)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5, 6, 7]
+ default: 7
+
+ io-channels:
+ description:
+ ADC channel to monitor voltages and temperature at the MUXOUT pin.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-3]$":
+ $ref: dac.yaml
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: The channel number representing the DAC output channel.
+ maximum: 3
+
+ adi,toggle-mode:
+ description:
+ Set the channel as a toggle enabled channel. Toggle operation enables
+ fast switching of a DAC output between two different DAC codes without
+ any SPI transaction.
+ type: boolean
+
+ output-range-microvolt:
+ description:
+ This property is only allowed when SoftSpan is enabled. If not present,
+ [0, 5000000] is the default output range.
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
+ - items:
+ - const: -2500000
+ - const: 2500000
+
+ required:
+ - reg
+
+ allOf:
+ - if:
+ not:
+ properties:
+ adi,manual-span-operation-config:
+ const: 7
+ then:
+ patternProperties:
+ "^channel@[0-3]$":
+ properties:
+ output-range-microvolt: false
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - vcc-supply
+ - iovcc-supply
+ - v-pos-supply
+ - v-neg-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dac@0 {
+ compatible = "adi,ltc2664";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ vcc-supply = <&vcc>;
+ iovcc-supply = <&vcc>;
+ ref-supply = <&vref>;
+ v-pos-supply = <&vpos>;
+ v-neg-supply = <&vneg>;
+
+ io-channels = <&adc 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ adi,toggle-mode;
+ output-range-microvolt = <(-10000000) 10000000>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ output-range-microvolt = <0 10000000>;
+ };
+ };
+ };
+...
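
The allOf clause above forbids per-channel output-range-microvolt unless adi,manual-span-operation-config is 7 (SoftSpan). A hedged sketch of the hardware-strapped alternative, assuming the MSPAN pins are wired for option 0 (+-10V, reset to 0V) and using illustrative supply phandles:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        dac@0 {
            compatible = "adi,ltc2664";
            reg = <0>;
            spi-max-frequency = <10000000>;

            vcc-supply = <&vcc>;
            iovcc-supply = <&vcc>;
            v-pos-supply = <&vpos>;
            v-neg-supply = <&vneg>;

            /* MSP2=MSP1=MSP0=GND: fixed +-10V range, so SoftSpan is disabled */
            adi,manual-span-operation-config = <0>;

            #address-cells = <1>;
            #size-cells = <0>;

            channel@0 {
                reg = <0>;
                /* no output-range-microvolt here: the range is set by the MSPAN pins */
            };
        };
    };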
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
new file mode 100644
index 000000000000..c8c434c10643
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ltc2672.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2672 DAC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Kim Seer Paller <kimseer.paller@analog.com>
+
+description: |
+ Analog Devices LTC2672 5 channel, 12-/16-Bit, 300mA DAC
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ltc2672.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2672
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 50000000
+
+ vcc-supply:
+ description: Analog Supply Voltage Input.
+
+ v-neg-supply:
+ description: Negative Supply Voltage Input.
+
+ vdd0-supply:
+ description: Positive Supply Voltage Input for DAC OUT0.
+
+ vdd1-supply:
+ description: Positive Supply Voltage Input for DAC OUT1.
+
+ vdd2-supply:
+ description: Positive Supply Voltage Input for DAC OUT2.
+
+ vdd3-supply:
+ description: Positive Supply Voltage Input for DAC OUT3.
+
+ vdd4-supply:
+ description: Positive Supply Voltage Input for DAC OUT4.
+
+ iovcc-supply:
+ description: Digital Input/Output Supply Voltage.
+
+ ref-supply:
+ description:
+ Reference Input/Output. The voltage at the REF pin sets the full-scale
+ range of all channels. If not provided, the internal reference is used and
+ is also available on the VREF pin.
+
+ reset-gpios:
+ description:
+ Active-low Asynchronous Clear Input. A logic low at this level-triggered
+ input clears the device to the default reset code and output range, which
+ is zero-scale with the outputs off. The control registers are cleared to
+ zero.
+ maxItems: 1
+
+ adi,rfsadj-ohms:
+ description:
+ If FSADJ is tied to VCC, an internal RFSADJ (20 kΩ) is selected, which
+ results in nominal output ranges. An external resistor of 19 kΩ to
+ 41 kΩ can be used instead by connecting it between FSADJ and GND; it then
+ controls the scaling of the ranges, and the internal resistor is
+ automatically disconnected.
+ minimum: 19000
+ maximum: 41000
+ default: 20000
+
+ io-channels:
+ description:
+ ADC channel to monitor voltages and currents at the MUX pin.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-4]$":
+ $ref: dac.yaml
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: The channel number representing the DAC output channel.
+ maximum: 4
+
+ adi,toggle-mode:
+ description:
+ Set the channel as a toggle enabled channel. Toggle operation enables
+ fast switching of a DAC output between two different DAC codes without
+ any SPI transaction.
+ type: boolean
+
+ output-range-microamp:
+ items:
+ - const: 0
+ - enum: [3125000, 6250000, 12500000, 25000000, 50000000, 100000000,
+ 200000000, 300000000]
+
+ required:
+ - reg
+ - output-range-microamp
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - vcc-supply
+ - iovcc-supply
+ - v-neg-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dac@0 {
+ compatible = "adi,ltc2672";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ vcc-supply = <&vcc>;
+ iovcc-supply = <&vcc>;
+ ref-supply = <&vref>;
+ v-neg-supply = <&vneg>;
+
+ io-channels = <&adc 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ adi,toggle-mode;
+ output-range-microamp = <0 3125000>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ output-range-microamp = <0 6250000>;
+ };
+ };
+ };
+...
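
Every LTC2672 channel must state its full-scale current via output-range-microamp, and the ranges can be rescaled with an external FSADJ resistor. A hedged sketch combining both; the 25 kOhm value and the supply phandles are illustrative only:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        dac@0 {
            compatible = "adi,ltc2672";
            reg = <0>;
            spi-max-frequency = <10000000>;

            vcc-supply = <&vcc>;
            iovcc-supply = <&vcc>;
            v-neg-supply = <&vneg>;

            /* external 25 kOhm resistor between FSADJ and GND rescales the ranges */
            adi,rfsadj-ohms = <25000>;

            #address-cells = <1>;
            #size-cells = <0>;

            channel@0 {
                reg = <0>;
                output-range-microamp = <0 12500000>;
            };
        };
    };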
diff --git a/Documentation/devicetree/bindings/iio/dac/dac.yaml b/Documentation/devicetree/bindings/iio/dac/dac.yaml
new file mode 100644
index 000000000000..daa40724e1cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dac.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/dac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IIO Common Properties for DAC Channels
+
+maintainers:
+ - Jonathan Cameron <jic23@kernel.org>
+
+description:
+ A few properties are defined in a common way for DAC channels.
+
+properties:
+ $nodename:
+ pattern: "^channel(@[0-9a-f]+)?$"
+ description:
+ A channel index should match reg.
+
+ reg:
+ maxItems: 1
+
+ label:
+ description: Unique name to identify which channel this is.
+
+ output-range-microamp:
+ maxItems: 2
+ minItems: 2
+ description:
+ Specify the channel output full scale range in microamperes.
+
+ output-range-microvolt:
+ maxItems: 2
+ minItems: 2
+ description:
+ Specify the channel output full scale range in microvolts.
+
+anyOf:
+ - oneOf:
+ - required:
+ - reg
+ - output-range-microamp
+ - required:
+ - reg
+ - output-range-microvolt
+ - required:
+ - reg
+
+additionalProperties: true
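
Both DAC bindings above reference this shared channel schema. A minimal channel node that would validate against it looks like the following sketch; the label and range values are illustrative:

    channel@2 {
        reg = <2>;
        label = "vout-bias";
        output-range-microvolt = <0 5000000>;
    };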
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
index aa6a3193b4e0..5f950ee9aec7 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
@@ -17,6 +17,7 @@ description: |
applications.
https://www.analog.com/en/products/adf4377.html
+ https://www.analog.com/en/products/adf4378.html
properties:
compatible:
@@ -73,6 +74,15 @@ required:
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,adf4378
+ then:
+ properties:
+ clk2-enable-gpios: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
new file mode 100644
index 000000000000..ed0ea938f7f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/humidity/sciosense,ens210.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ScioSense ENS210 temperature and humidity sensor
+
+maintainers:
+ - Joshua Felmeden <jfelmeden@thegoodpenguin.co.uk>
+
+description: |
+ Temperature and Humidity sensor.
+
+ Datasheet:
+ https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf
+ https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - sciosense,ens210a
+ - sciosense,ens211
+ - sciosense,ens212
+ - sciosense,ens213a
+ - sciosense,ens215
+ - const: sciosense,ens210
+ - const: sciosense,ens210
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ temperature-sensor@43 {
+ compatible = "sciosense,ens210";
+ reg = <0x43>;
+ };
+ };
+...
+
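The compatible list lets the newer ENS21x parts fall back to the ENS210 programming model. A hedged sketch of an ENS213A node using that fallback; the I2C address and supply phandle are illustrative:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        temperature-sensor@43 {
            compatible = "sciosense,ens213a", "sciosense,ens210";
            reg = <0x43>;
            vdd-supply = <&vdd_sensor>;
        };
    };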
diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
index 7de1b0e721ca..877e955d4ebd 100644
--- a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
+++ b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
@@ -14,7 +14,9 @@ description:
properties:
compatible:
- const: liteon,ltrf216a
+ enum:
+ - liteon,ltr308
+ - liteon,ltrf216a
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml
new file mode 100644
index 000000000000..44896795c67e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/rohm,bh1745.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BH1745 colour sensor
+
+maintainers:
+ - Mudit Sharma <muditsharma.info@gmail.com>
+
+description:
+ BH1745 is an I2C colour sensor with red, green, blue and clear
+ channels. It has a programmable active low interrupt pin.
+ An interrupt occurs when the signal from the selected interrupt
+ source channel crosses the configured high or low threshold level.
+
+properties:
+ compatible:
+ const: rohm,bh1745
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ colour-sensor@38 {
+ compatible = "rohm,bh1745";
+ reg = <0x38>;
+ interrupt-parent = <&gpio>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&vdd>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml
index 30a109a1bf3b..29c90ca5b258 100644
--- a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml
@@ -1,23 +1,22 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/iio/light/rohm,bu27034.yaml#
+$id: http://devicetree.org/schemas/iio/light/rohm,bu27034anuc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ROHM BU27034 ambient light sensor
+title: ROHM BU27034ANUC ambient light sensor
maintainers:
- Matti Vaittinen <mazziesaccount@gmail.com>
description: |
- ROHM BU27034 is an ambient light sesnor with 3 channels and 3 photo diodes
+ ROHM BU27034ANUC is an ambient light sensor with 2 channels and 2 photo diodes
capable of detecting a very wide range of illuminance. Typical application
is adjusting LCD and backlight power of TVs and mobile phones.
- https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf
properties:
compatible:
- const: rohm,bu27034
+ const: rohm,bu27034anuc
reg:
maxItems: 1
@@ -37,7 +36,7 @@ examples:
#size-cells = <0>;
light-sensor@38 {
- compatible = "rohm,bu27034";
+ compatible = "rohm,bu27034anuc";
reg = <0x38>;
vdd-supply = <&vdd>;
};
diff --git a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
index f6e22dc9814a..e4341fdced98 100644
--- a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
+++ b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
@@ -18,10 +18,15 @@ allOf:
properties:
compatible:
- enum:
- - sensortek,stk3310
- - sensortek,stk3311
- - sensortek,stk3335
+ oneOf:
+ - enum:
+ - sensortek,stk3310
+ - sensortek,stk3311
+ - sensortek,stk3335
+ - items:
+ - enum:
+ - sensortek,stk3013
+ - const: sensortek,stk3310
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
index 9790f75fc669..e8ca9a234027 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
@@ -18,12 +18,15 @@ properties:
- asahi-kasei,ak09911
- asahi-kasei,ak09912
- asahi-kasei,ak09916
+ - items:
+ # ak09918 is register compatible with ak09912.
+ - const: asahi-kasei,ak09918
+ - const: asahi-kasei,ak09912
- enum:
- ak8975
- ak8963
- ak09911
- ak09912
- - ak09916
deprecated: true
reg:
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
index 2867ab6bf9b0..a3838ab0c524 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
@@ -36,6 +36,9 @@ properties:
interrupts:
maxItems: 1
+ mount-matrix:
+ description: an optional 3x3 mounting rotation matrix.
+
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
new file mode 100644
index 000000000000..813239f6879a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/sensirion,sdp500.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: sdp500/sdp510 pressure sensor with I2C bus interface
+
+maintainers:
+ - Petar Stoykov <petar.stoykov@prodrive-technologies.com>
+
+description: |
+ Pressure sensor from Sensirion with I2C bus interface.
+ There is no software difference between sdp500 and sdp510.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: sensirion,sdp510
+ - const: sensirion,sdp500
+ - const: sensirion,sdp500
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pressure@40 {
+ compatible = "sensirion,sdp500";
+ reg = <0x40>;
+ vdd-supply = <&foo>;
+ };
+ };
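
Since the SDP510 is software-compatible with the SDP500, its node lists both compatibles, most specific first. A hedged sketch; the supply phandle is illustrative:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        pressure@40 {
            compatible = "sensirion,sdp510", "sensirion,sdp500";
            reg = <0x40>;
            vdd-supply = <&vdd_3v3>;
        };
    };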
diff --git a/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml
new file mode 100644
index 000000000000..7a83ceced11c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/awinic,aw96103.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Awinic's AW96103 capacitive proximity sensor and similar
+
+maintainers:
+ - Wang Shuaijie <wangshuaijie@awinic.com>
+
+description: |
+ Awinic's AW96103/AW96105 proximity sensor.
+ The specific absorption rate (SAR) is a metric that measures
+ the degree of absorption of electromagnetic radiation emitted by
+ wireless devices, such as mobile phones and tablets, by human tissue.
+ In mobile phone applications, the proximity sensor is primarily
+ used to detect the proximity of the human body to the phone. When the
+ phone approaches the human body, it will actively reduce the transmit
+ power of the antenna to keep the SAR within a safe range. Therefore,
+ we also refer to the proximity sensor as a SAR sensor.
+
+properties:
+ compatible:
+ enum:
+ - awinic,aw96103
+ - awinic,aw96105
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Generated by the device to announce that a close/far
+ proximity event has happened.
+ maxItems: 1
+
+ vcc-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - vcc-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ proximity@12 {
+ compatible = "awinic,aw96103";
+ reg = <0x12>;
+ interrupt-parent = <&gpio>;
+ interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
+ vcc-supply = <&pp1800_prox>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml
new file mode 100644
index 000000000000..64ce8bc8bd36
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/tyhx,hx9023s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TYHX HX9023S capacitive proximity sensor
+
+maintainers:
+ - Yasin Lee <yasin.lee.x@gmail.com>
+
+description: |
+ TYHX HX9023S proximity sensor. Datasheet can be found here:
+ http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf
+
+properties:
+ compatible:
+ const: tyhx,hx9023s
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Generated by the device to announce that a preceding read request has
+ finished and data is available, or that a close/far proximity event has happened.
+ maxItems: 1
+
+ vdd-supply: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^channel@[0-4]$":
+ $ref: /schemas/iio/adc/adc.yaml
+ type: object
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 4
+ description: The channel number.
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ proximity@2a {
+ compatible = "tyhx,hx9023s";
+ reg = <0x2a>;
+ interrupt-parent = <&pio>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&pp1800_prox>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ single-channel = <0>;
+ };
+ channel@1 {
+ reg = <1>;
+ single-channel = <1>;
+ };
+ channel@2 {
+ reg = <2>;
+ single-channel = <2>;
+ };
+ channel@3 {
+ reg = <3>;
+ diff-channels = <1 0>;
+ };
+ channel@4 {
+ reg = <4>;
+ diff-channels = <2 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml
new file mode 100644
index 000000000000..a0d2460c55ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/cirrus,ep9307-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus ep93xx keypad
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+ - $ref: /schemas/input/matrix-keymap.yaml#
+
+description:
+ The KPP interfaces with a keypad matrix using 2-point or 3-point contact
+ keys and simplifies the software task of scanning such a matrix. It is
+ capable of detecting, debouncing, and decoding one or multiple keys
+ pressed simultaneously on the keypad.
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9307-keypad
+ - items:
+ - enum:
+ - cirrus,ep9312-keypad
+ - cirrus,ep9315-keypad
+ - const: cirrus,ep9307-keypad
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ debounce-delay-ms:
+ description: |
+ Time in milliseconds that a key must be pressed or
+ released for the state change interrupt to trigger.
+
+ cirrus,prescale:
+ description: row/column counter pre-scaler load value
+ $ref: /schemas/types.yaml#/definitions/uint16
+ maximum: 1023
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - linux,keymap
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ keypad@800f0000 {
+ compatible = "cirrus,ep9307-keypad";
+ reg = <0x800f0000 0x0c>;
+ interrupt-parent = <&vic0>;
+ interrupts = <29>;
+ clocks = <&eclk EP93XX_CLK_KEYPAD>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&keypad_default_pins>;
+ linux,keymap = <KEY_UP>,
+ <KEY_DOWN>,
+ <KEY_VOLUMEDOWN>,
+ <KEY_HOME>,
+ <KEY_RIGHT>,
+ <KEY_LEFT>,
+ <KEY_ENTER>,
+ <KEY_VOLUMEUP>,
+ <KEY_F6>,
+ <KEY_F8>,
+ <KEY_F9>,
+ <KEY_F10>,
+ <KEY_F1>,
+ <KEY_F2>,
+ <KEY_F3>,
+ <KEY_POWER>;
+ };
diff --git a/Documentation/devicetree/bindings/input/goodix,gt7986u.yaml b/Documentation/devicetree/bindings/input/goodix,gt7986u.yaml
deleted file mode 100644
index a7d42a5d6128..000000000000
--- a/Documentation/devicetree/bindings/input/goodix,gt7986u.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/input/goodix,gt7986u.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: GOODIX GT7986U SPI HID Touchscreen
-
-maintainers:
- - Charles Wang <charles.goodix@gmail.com>
-
-description: Supports the Goodix GT7986U touchscreen.
- This touch controller reports data packaged according to the HID protocol,
- but is incompatible with Microsoft's HID-over-SPI protocol.
-
-allOf:
- - $ref: /schemas/spi/spi-peripheral-props.yaml#
-
-properties:
- compatible:
- enum:
- - goodix,gt7986u
-
- reg:
- maxItems: 1
-
- interrupts:
- maxItems: 1
-
- reset-gpios:
- maxItems: 1
-
- goodix,hid-report-addr:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- The register address for retrieving HID report data.
- This address is related to the device firmware and may
- change after a firmware update.
-
- spi-max-frequency: true
-
-additionalProperties: false
-
-required:
- - compatible
- - reg
- - interrupts
- - reset-gpios
- - goodix,hid-report-addr
-
-examples:
- - |
- #include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/gpio/gpio.h>
-
- spi {
- #address-cells = <1>;
- #size-cells = <0>;
-
- touchscreen@0 {
- compatible = "goodix,gt7986u";
- reg = <0>;
- interrupt-parent = <&gpio>;
- interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
- reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
- spi-max-frequency = <10000000>;
- goodix,hid-report-addr = <0x22c8c>;
- };
- };
-
-...
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
index fd15ab5014fb..4b08be72bbd7 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
@@ -4,14 +4,14 @@
$id: http://devicetree.org/schemas/interconnect/qcom,msm8939.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Qualcomm MSM8939 Network-On-Chip interconnect
+title: Qualcomm MSM8937/MSM8939/MSM8976 Network-On-Chip interconnect
maintainers:
- Konrad Dybcio <konradybcio@kernel.org>
-description: |
- The Qualcomm MSM8939 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
+description:
+ The Qualcomm MSM8937/MSM8939/MSM8976 interconnect providers support
+ adjusting the bandwidth requirements between the various NoC fabrics.
allOf:
- $ref: qcom,rpm-common.yaml#
@@ -19,9 +19,15 @@ allOf:
properties:
compatible:
enum:
+ - qcom,msm8937-bimc
+ - qcom,msm8937-pcnoc
+ - qcom,msm8937-snoc
- qcom,msm8939-bimc
- qcom,msm8939-pcnoc
- qcom,msm8939-snoc
+ - qcom,msm8976-bimc
+ - qcom,msm8976-pcnoc
+ - qcom,msm8976-snoc
reg:
maxItems: 1
@@ -39,7 +45,10 @@ patternProperties:
properties:
compatible:
- const: qcom,msm8939-snoc-mm
+ enum:
+ - qcom,msm8937-snoc-mm
+ - qcom,msm8939-snoc-mm
+ - qcom,msm8976-snoc-mm
required:
- compatible
@@ -60,12 +69,6 @@ examples:
compatible = "qcom,msm8939-snoc";
reg = <0x00580000 0x14000>;
#interconnect-cells = <1>;
- };
-
- bimc: interconnect@400000 {
- compatible = "qcom,msm8939-bimc";
- reg = <0x00400000 0x62000>;
- #interconnect-cells = <1>;
snoc_mm: interconnect-snoc {
compatible = "qcom,msm8939-snoc-mm";
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
index 732e9fa001a4..343ff62d7b65 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
@@ -13,8 +13,7 @@ description: |
The Qualcomm MSM8953 interconnect providers support adjusting the
bandwidth requirements between the various NoC fabrics.
- See also:
- - dt-bindings/interconnect/qcom,msm8953.h
+ See also: include/dt-bindings/interconnect/qcom,msm8953.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 2cd1f5590fd9..189f5900ee50 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
- qcom,qcm2290-cpu-bwmon
+ - qcom,sa8775p-cpu-bwmon
- qcom,sc7180-cpu-bwmon
- qcom,sc7280-cpu-bwmon
- qcom,sc8280xp-cpu-bwmon
@@ -39,6 +40,7 @@ properties:
- const: qcom,sdm845-bwmon # BWMON v4, unified register space
- items:
- enum:
+ - qcom,sa8775p-llcc-bwmon
- qcom,sc7180-llcc-bwmon
- qcom,sc8280xp-llcc-bwmon
- qcom,sm6350-cpu-bwmon
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 9318b845ec35..1b9164dc162f 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -71,7 +71,7 @@ properties:
- qcom,sdx65-system-noc
- qcom,sm8150-aggre1-noc
- qcom,sm8150-aggre2-noc
- - qcom,sm8150-camnoc-noc
+ - qcom,sm8150-camnoc-virt
- qcom,sm8150-compute-noc
- qcom,sm8150-config-noc
- qcom,sm8150-dc-noc
@@ -113,6 +113,9 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sc8180x-camnoc-virt
+ - qcom,sc8180x-mc-virt
+ - qcom,sc8180x-qup-virt
- qcom,sdx65-mc-virt
- qcom,sm8250-qup-virt
then:
diff --git a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
index 72c1d9e82c89..8a1369df4ecb 100644
--- a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
@@ -17,9 +17,15 @@ description: |
properties:
compatible:
- enum:
- - mediatek,mt8195-adsp-mbox
- - mediatek,mt8186-adsp-mbox
+ oneOf:
+ - enum:
+ - mediatek,mt8186-adsp-mbox
+ - mediatek,mt8195-adsp-mbox
+ - items:
+ - enum:
+ - mediatek,mt8188-adsp-mbox
+ - const: mediatek,mt8186-adsp-mbox
+
"#mbox-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index 05e4e1d51713..2d66770ed361 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -24,7 +24,9 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs8300-ipcc
- qcom,qdu1000-ipcc
+ - qcom,sa8255p-ipcc
- qcom,sa8775p-ipcc
- qcom,sc7280-ipcc
- qcom,sc8280xp-ipcc
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
index c27a8f33d8d7..0840a3d92513 100644
--- a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
@@ -26,6 +26,7 @@ properties:
- mdsp
- sdsp
- cdsp
+ - cdsp1
memory-region:
maxItems: 1
@@ -81,7 +82,7 @@ patternProperties:
iommus:
minItems: 1
- maxItems: 3
+ maxItems: 10
qcom,nsessions:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/mtd/technologic,nand.yaml b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml
new file mode 100644
index 000000000000..f9d87c46094b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/technologic,nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Technologic Systems NAND controller
+
+maintainers:
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: nand-controller.yaml
+
+properties:
+ compatible:
+ oneOf:
+ - const: technologic,ts7200-nand
+ - items:
+ - enum:
+ - technologic,ts7300-nand
+ - technologic,ts7260-nand
+ - technologic,ts7250-nand
+ - const: technologic,ts7200-nand
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ nand@0 {
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml
new file mode 100644
index 000000000000..ad0915307095
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/cirrus,ep9301-eth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EP93xx SoC Ethernet Controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-eth
+ - items:
+ - enum:
+ - cirrus,ep9302-eth
+ - cirrus,ep9307-eth
+ - cirrus,ep9312-eth
+ - cirrus,ep9315-eth
+ - const: cirrus,ep9301-eth
+
+ reg:
+ items:
+ - description: The physical base address and size of IO range
+
+ interrupts:
+ items:
+ - description: Combined signal for various interrupt events
+
+ phy-handle: true
+
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+ description: optional node for embedded MDIO controller
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phy-handle
+
+additionalProperties: false
+
+examples:
+ - |
+ ethernet@80010000 {
+ compatible = "cirrus,ep9301-eth";
+ reg = <0x80010000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <7>;
+ phy-handle = <&phy0>;
+ };
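
The example in the binding omits the optional embedded MDIO controller. A hedged sketch of how the mdio subnode and phy-handle tie together; the PHY address is illustrative:

    ethernet@80010000 {
        compatible = "cirrus,ep9301-eth";
        reg = <0x80010000 0x10000>;
        interrupt-parent = <&vic1>;
        interrupts = <7>;
        phy-handle = <&phy0>;

        mdio {
            #address-cells = <1>;
            #size-cells = <0>;

            phy0: ethernet-phy@1 {
                reg = <1>;
            };
        };
    };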
diff --git a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
index 3dde10de4630..4f4253441547 100644
--- a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
@@ -29,6 +29,12 @@ properties:
reset-gpios:
maxItems: 1
+ bootloader-backdoor-gpios:
+ maxItems: 1
+ description: |
+ GPIO used to enable the bootloader backdoor in the cc1352p7 bootloader
+ so that new firmware can be flashed.
+
vdds-supply: true
required:
@@ -46,6 +52,7 @@ examples:
clocks = <&sclk_hf 0>, <&sclk_lf 25>;
clock-names = "sclk_hf", "sclk_lf";
reset-gpios = <&pio 35 GPIO_ACTIVE_LOW>;
+ bootloader-backdoor-gpios = <&pio 36 GPIO_ACTIVE_LOW>;
vdds-supply = <&vdds>;
};
};
diff --git a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
index 70fb2ad25103..1b20b49eee79 100644
--- a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
@@ -15,6 +15,7 @@ description: |
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
index e21c06e9a741..b2cb76cf9053 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
@@ -14,7 +14,7 @@ maintainers:
description: |
This binding represents the on-chip eFuse OTP controller found on
i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
- i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN i.MX8MP and i.MX93 SoCs.
+ i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN, i.MX8MP and i.MX93/95 SoCs.
allOf:
- $ref: nvmem.yaml#
@@ -36,6 +36,7 @@ properties:
- fsl,imx8mq-ocotp
- fsl,imx8mm-ocotp
- fsl,imx93-ocotp
+ - fsl,imx95-ocotp
- const: syscon
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
index 3b40f7880774..382507060651 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
@@ -21,6 +21,7 @@ oneOf:
- $ref: fixed-layout.yaml
- $ref: kontron,sl28-vpd.yaml
- $ref: onie,tlv-layout.yaml
+ - $ref: u-boot,env.yaml
properties:
compatible: true
diff --git a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
index 9c36afc7084b..56a8f55d4a09 100644
--- a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
-$id: http://devicetree.org/schemas/nvmem/u-boot,env.yaml#
+$id: http://devicetree.org/schemas/nvmem/layouts/u-boot,env.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: U-Boot environment variables
+title: U-Boot environment variables layout
description: |
U-Boot uses environment variables to store device parameters and
@@ -21,9 +21,6 @@ description: |
This binding allows marking storage device (as containing env data) and
specifying used format.
- Right now only flash partition case is covered but it may be extended to e.g.
- UBI volumes in the future.
-
Variables can be defined as NVMEM device subnodes.
maintainers:
@@ -42,6 +39,7 @@ properties:
const: brcm,env
reg:
+ description: Partition offset and size for env on top of MTD
maxItems: 1
bootcmd:
@@ -58,6 +56,17 @@ properties:
description: The first argument is a MAC address offset.
const: 1
+allOf:
+ - if:
+ properties:
+ $nodename:
+ not:
+ contains:
+ pattern: "^partition@[0-9a-f]+$"
+ then:
+ properties:
+ reg: false
+
additionalProperties: false
examples:
@@ -101,3 +110,23 @@ examples:
};
};
};
+ - |
+ partition@0 {
+ reg = <0x0 0x100000>;
+ label = "ubi";
+ compatible = "linux,ubi";
+
+ volumes {
+ ubi-volume-u-boot-env {
+ volname = "env";
+
+ nvmem-layout {
+ compatible = "u-boot,env";
+
+ ethaddr {
+ #nvmem-cell-cells = <1>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
index 92bfe25f0571..3b2aa605a551 100644
--- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
@@ -17,6 +17,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
@@ -32,6 +33,8 @@ properties:
patternProperties:
"^.*@[0-9a-f]+$":
type: object
+ $ref: layouts/fixed-cell.yaml
+ unevaluatedProperties: false
properties:
st,non-secure-otp:
diff --git a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
index f4b1ca2fb562..ce665a2779b7 100644
--- a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
@@ -87,6 +87,12 @@ properties:
maximum: 119
default: 100
+ nxp,sim:
+ description:
+ The system integration module (SIM) provides system control and chip
+ configuration registers.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
required:
- compatible
- reg
@@ -110,6 +116,17 @@ allOf:
required:
- fsl,anatop
+ - if:
+ properties:
+ compatible:
+ const: fsl,imx7ulp-usbphy
+ then:
+ required:
+ - nxp,sim
+ else:
+ properties:
+ nxp,sim: false
+
additionalProperties: false
examples:
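
The new if/then clause makes nxp,sim mandatory only for the i.MX7ULP PHY and forbids it elsewhere. A hedged sketch of such a node; the unit address, interrupt number and the &sim phandle are illustrative, and other properties the full binding may require (clocks, anatop, etc.) are omitted for brevity:

    usbphy@40350000 {
        compatible = "fsl,imx7ulp-usbphy";
        reg = <0x40350000 0x1000>;
        interrupts = <39>;
        #phy-cells = <0>;
        nxp,sim = <&sim>;
    };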
diff --git a/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml
new file mode 100644
index 000000000000..903210ef9c31
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/cirrus,ep9301-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx PWM controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-pwm
+ - items:
+ - enum:
+ - cirrus,ep9302-pwm
+ - cirrus,ep9307-pwm
+ - cirrus,ep9312-pwm
+ - cirrus,ep9315-pwm
+ - const: cirrus,ep9301-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SoC PWM clock
+
+ "#pwm-cells":
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ pwm@80910000 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910000 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
index 5ade5dfad048..cda8ad7c1203 100644
--- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
+++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
@@ -22,6 +22,9 @@ properties:
interrupts:
maxItems: 1
+ "#clock-cells":
+ const: 0
+
trickle-resistor-ohms:
enum:
- 3000
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
new file mode 100644
index 000000000000..f3d20e976965
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/sprd,sc2731-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 Real Time Clock
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,sc2731-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: rtc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@280 {
+ compatible = "sprd,sc2731-rtc";
+ reg = <0x280>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
deleted file mode 100644
index 1f5754299d31..000000000000
--- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Spreadtrum SC27xx Real Time Clock
-
-Required properties:
-- compatible: should be "sprd,sc2731-rtc".
-- reg: address offset of rtc register.
-- interrupts: rtc alarm interrupt.
-
-Example:
-
- sc2731_pmic: pmic@0 {
- compatible = "sprd,sc2731";
- reg = <0>;
- spi-max-frequency = <26000000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- rtc@280 {
- compatible = "sprd,sc2731-rtc";
- reg = <0x280>;
- interrupt-parent = <&sc2731_pmic>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 7a0fab721cf1..aae06e570c22 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -53,6 +53,28 @@ properties:
override default rtc_ck parent clock phandle of the new parent clock of rtc_ck
maxItems: 1
+patternProperties:
+ "^rtc-[a-z]+-[0-9]+$":
+ type: object
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+ description: |
+ Configuration of the STM32 RTC pins. The STM32 RTC is able to output
+ some signals on specific pins:
+ - LSCO (Low Speed Clock Output), which allows outputting the LSE clock
+ on a pin.
+ - Alarm out, which allows sending a pulse on a pin when alarm A of the
+ RTC expires.
+ additionalProperties: false
+ properties:
+ function:
+ enum:
+ - lsco
+ - alarm-a
+ pins:
+ enum:
+ - out1
+ - out2
+ - out2_rmp
+
allOf:
- if:
properties:
@@ -68,6 +90,9 @@ allOf:
clock-names: false
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- st,syscfg
@@ -83,6 +108,9 @@ allOf:
minItems: 2
maxItems: 2
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- clock-names
- st,syscfg
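
The new rtc-* child nodes follow the pinmux-node form, pairing one function with one pin. A hedged fragment showing the LSE clock routed to OUT2; the node name is illustrative and the RTC node's other required properties are omitted:

    rtc {
        /* matches the "^rtc-[a-z]+-[0-9]+$" pattern above */
        rtc-lsco-0 {
            function = "lsco";
            pins = "out2";
        };
    };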
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index fffd759c603f..7330a7200831 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -38,12 +38,13 @@ properties:
- dallas,ds1672
# Extremely Accurate I²C RTC with Integrated Crystal and SRAM
- dallas,ds3232
+ # SD2405AL Real-Time Clock
+ - dfrobot,sd2405al
# EM Microelectronic EM3027 RTC
- emmicro,em3027
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- epson,rx8010
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- - epson,rx8025
- epson,rx8035
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM
- epson,rx8111
@@ -52,10 +53,6 @@ properties:
- epson,rx8581
# Android Goldfish Real-time Clock
- google,goldfish-rtc
- # Intersil ISL1208 Low Power RTC with Battery Backed SRAM
- - isil,isl1208
- # Intersil ISL1218 Low Power RTC with Battery Backed SRAM
- - isil,isl1218
# Mvebu Real-time Clock
- marvell,orion-rtc
# Maxim DS1742/DS1743 Real-time Clock
@@ -68,8 +65,6 @@ properties:
- microcrystal,rv8523
# NXP LPC32xx SoC Real-time Clock
- nxp,lpc3220-rtc
- # Real-time Clock Module
- - pericom,pt7c4338
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
- ricoh,r2025sd
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml
index 6a7be42da523..4b78de6b46a2 100644
--- a/Documentation/devicetree/bindings/serial/8250_omap.yaml
+++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml
@@ -76,6 +76,7 @@ properties:
clock-frequency: true
current-speed: true
overrun-throttle-ms: true
+ wakeup-source: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
index eb2992a447d7..f466c38518c4 100644
--- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
+++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
@@ -23,13 +23,20 @@ properties:
- const: atmel,at91sam9260-dbgu
- const: atmel,at91sam9260-usart
- items:
- - const: microchip,sam9x60-usart
+ - enum:
+ - microchip,sam9x60-usart
+ - microchip,sam9x7-usart
- const: atmel,at91sam9260-usart
- items:
- const: microchip,sam9x60-dbgu
- const: microchip,sam9x60-usart
- const: atmel,at91sam9260-dbgu
- const: atmel,at91sam9260-usart
+ - items:
+ - const: microchip,sam9x7-dbgu
+ - const: atmel,at91sam9260-dbgu
+ - const: microchip,sam9x7-usart
+ - const: atmel,at91sam9260-usart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
index ff61ffdcad1d..1b02f0b197ff 100644
--- a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
+++ b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
@@ -36,6 +36,7 @@ properties:
- mediatek,mt7622-uart
- mediatek,mt7623-uart
- mediatek,mt7629-uart
+ - mediatek,mt7981-uart
- mediatek,mt7986-uart
- mediatek,mt7988-uart
- mediatek,mt8127-uart
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index afc7c05898a1..51d9fb0f4763 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -46,6 +46,7 @@ properties:
- items:
- enum:
- renesas,scif-r8a774a1 # RZ/G2M
+ - renesas,scif-r8a774a3 # RZ/G2M v3.0
- renesas,scif-r8a774b1 # RZ/G2N
- renesas,scif-r8a774c0 # RZ/G2E
- renesas,scif-r8a774e1 # RZ/G2H
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 0f0131026911..788c80e47831 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -56,14 +56,8 @@ properties:
maxItems: 5
clock-names:
- description: N = 0 is allowed for SoCs without internal baud clock mux.
minItems: 2
- items:
- - const: uart
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
+ maxItems: 5
dmas:
items:
@@ -103,18 +97,45 @@ allOf:
compatible:
contains:
enum:
- - samsung,s5pv210-uart
+ - samsung,s3c6400-uart
then:
properties:
clocks:
- minItems: 2
+ minItems: 3
maxItems: 3
+
+ clock-names:
+ items:
+ - const: uart
+ - const: clk_uart_baud2
+ - const: clk_uart_baud3
+
+ else:
+ properties:
clock-names:
minItems: 2
items:
- const: uart
- - pattern: '^clk_uart_baud[0-1]$'
- - pattern: '^clk_uart_baud[0-1]$'
+ - const: clk_uart_baud0
+ - const: clk_uart_baud1
+ - const: clk_uart_baud2
+ - const: clk_uart_baud3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,s5pv210-uart
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+
+ clock-names:
+ minItems: 3
+ maxItems: 3
- if:
properties:
@@ -129,10 +150,9 @@ allOf:
properties:
clocks:
maxItems: 2
+
clock-names:
- items:
- - const: uart
- - const: clk_uart_baud0
+ maxItems: 2
- if:
properties:
@@ -146,6 +166,12 @@ allOf:
properties:
reg-io-width: false
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
unevaluatedProperties: false
examples:
@@ -163,3 +189,19 @@ examples:
<&clocks SCLK_UART>;
samsung,uart-fifosize = <16>;
};
+ - |
+ #include <dt-bindings/clock/google,gs101.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ serial_0: serial@10a00000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10a00000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 634 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart0_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <256>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml
new file mode 100644
index 000000000000..7cb1b4114985
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/cirrus/cirrus,ep9301-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx Platforms System Controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+description: |
+ Central resources are controlled by a set of software-locked registers,
+ which can be used to prevent accidental accesses. Syscon generates
+ the various bus and peripheral clocks and controls the system startup
+ configuration.
+
+ The System Controller (Syscon) provides:
+ - Clock control
+ - Power management
+ - System configuration management
+
+ Syscon registers are common to all EP93xx SoCs, though some peripherals
+ may be missing depending on the actual SoC model.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - cirrus,ep9302-syscon
+ - cirrus,ep9307-syscon
+ - cirrus,ep9312-syscon
+ - cirrus,ep9315-syscon
+ - const: cirrus,ep9301-syscon
+ - const: syscon
+ - items:
+ - const: cirrus,ep9301-syscon
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ items:
+ - description: reference clock
+
+patternProperties:
+ '^pins-':
+ type: object
+ description: pin node
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+
+ properties:
+ function:
+ enum: [ spi, ac97, i2s, pwm, keypad, pata, lcd, gpio ]
+
+ groups:
+ enum: [ ssp, ac97, i2s_on_ssp, i2s_on_ac97, pwm1, gpio1agrp,
+ gpio2agrp, gpio3agrp, gpio4agrp, gpio6agrp, gpio7agrp,
+ rasteronsdram0grp, rasteronsdram3grp, keypadgrp, idegrp ]
+
+ required:
+ - function
+ - groups
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@80930000 {
+ compatible = "cirrus,ep9301-syscon", "syscon";
+ reg = <0x80930000 0x1000>;
+
+ #clock-cells = <1>;
+ clocks = <&xtali>;
+
+ spi_default_pins: pins-spi {
+ function = "spi";
+ groups = "ssp";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
index 453d493c941f..4693e85aed37 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
@@ -40,6 +40,20 @@ properties:
- const: sclk
- const: lrclk
+ dmas:
+ items:
+ - description: out DMA channel
+ - description: in DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- '#sound-dai-cells'
@@ -61,6 +75,8 @@ examples:
<&syscon 30>,
<&syscon 31>;
clock-names = "mclk", "sclk", "lrclk";
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
};
...
diff --git a/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml
new file mode 100644
index 000000000000..73980a27dc00
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/cirrus,ep9301-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EP93xx SoC SPI controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-spi
+ - items:
+ - enum:
+ - cirrus,ep9302-spi
+ - cirrus,ep9307-spi
+ - cirrus,ep9312-spi
+ - cirrus,ep9315-spi
+ - const: cirrus,ep9301-spi
+
+ reg:
+ items:
+ - description: SPI registers region
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SPI Controller reference clock source
+
+ dmas:
+ items:
+ - description: rx DMA channel
+ - description: tx DMA channel
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ spi@808a0000 {
+ compatible = "cirrus,ep9301-spi";
+ reg = <0x808a0000 0x18>;
+ interrupt-parent = <&vic1>;
+ interrupts = <21>;
+ clocks = <&syscon EP93XX_CLK_SPI>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
new file mode 100644
index 000000000000..a44bdf391887
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/fsl,ls1028a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Layerscape SuperSpeed DWC3 USB SoC controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,ls1028a-dwc3
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,ls1028a-dwc3
+ - const: snps,dwc3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: snps,dwc3.yaml#
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ usb@fe800000 {
+ compatible = "fsl,ls1028a-dwc3", "snps,dwc3";
+ reg = <0xfe800000 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
deleted file mode 100644
index afc30e98b123..000000000000
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-MSM SoC HSUSB controllers
-
-EHCI
-
-Required properties:
-- compatible: Should contain "qcom,ehci-host"
-- regs: offset and length of the register set in the memory map
-- usb-phy: phandle for the PHY device
-
-Example EHCI controller device node:
-
- ehci: ehci@f9a55000 {
- compatible = "qcom,ehci-host";
- reg = <0xf9a55000 0x400>;
- usb-phy = <&usb_otg>;
- };
-
-USB PHY with optional OTG:
-
-Required properties:
-- compatible: Should contain:
- "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY
- "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY
-
-- regs: Offset and length of the register set in the memory map
-- interrupts: interrupt-specifier for the OTG interrupt.
-
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "phy" USB PHY reference clock
- "core" Protocol engine clock
- "iface" Interface bus clock
- "alt_core" Protocol engine clock for targets with asynchronous
- reset methodology. (optional)
-
-- vdccx-supply: phandle to the regulator for the vdd supply for
- digital circuit operation.
-- v1p8-supply: phandle to the regulator for the 1.8V supply
-- v3p3-supply: phandle to the regulator for the 3.3V supply
-
-- resets: A list of phandle + reset-specifier pairs for the
- resets listed in reset-names
-- reset-names: Should contain the following:
- "phy" USB PHY controller reset
- "link" USB LINK controller reset
-
-- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of
- 1 - PHY control
- 2 - PMIC control
-
-Optional properties:
-- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg"
-
-- switch-gpio: A phandle + gpio-specifier pair. Some boards are using Dual
- SPDT USB Switch, witch is controlled by GPIO to de/multiplex
- D+/D- USB lines between connectors.
-
-- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
- Mode Eye Diagram test. Start address at which these values will be
- written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
- "do not overwrite default value at this address".
- For example: qcom,phy-init-sequence = < -1 0x63 >;
- Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1.
-
-- qcom,phy-num: Select number of pyco-phy to use, can be one of
- 0 - PHY one, default
- 1 - Second PHY
- Some platforms may have configuration to allow USB
- controller work with any of the two HSPHYs present.
-
-- qcom,vdd-levels: This property must be a list of three integer values
- (no, min, max) where each value represents either a voltage
- in microvolts or a value corresponding to voltage corner.
-
-- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy
- and controller driver therefore enables pull-up explicitly
- before starting controller using usbcmd run/stop bit.
-
-- extcon: phandles to external connector devices. First phandle
- should point to external connector, which provide "USB"
- cable events, the second should point to external connector
- device, which provide "USB-HOST" cable events. If one of
- the external connector devices is not required empty <0>
- phandle should be specified.
-
-Example HSUSB OTG controller device node:
-
- usb@f9a55000 {
- compatible = "qcom,usb-otg-snps";
- reg = <0xf9a55000 0x400>;
- interrupts = <0 134 0>;
- dr_mode = "peripheral";
-
- clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB_HS_AHB_CLK>;
-
- clock-names = "phy", "core", "iface";
-
- vddcx-supply = <&pm8841_s2_corner>;
- v1p8-supply = <&pm8941_l6>;
- v3p3-supply = <&pm8941_l24>;
-
- resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
-
- qcom,otg-control = <1>;
- qcom,phy-init-sequence = < -1 0x63 >;
- qcom,vdd-levels = <1 5 7>;
- };
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index aef42dacc202..18758efb8d29 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -52,6 +52,7 @@ properties:
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
- const: qcom,dwc3
reg:
@@ -289,6 +290,7 @@ allOf:
- qcom,sc8280xp-dwc3
- qcom,sc8280xp-dwc3-mp
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
then:
properties:
clocks:
@@ -428,6 +430,21 @@ allOf:
contains:
enum:
- qcom,ipq5332-dwc3
+ then:
+ properties:
+ interrupts:
+ maxItems: 3
+ interrupt-names:
+ items:
+ - const: pwr_event
+ - const: dp_hs_phy_irq
+ - const: dm_hs_phy_irq
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,x1e80100-dwc3
then:
properties:
@@ -486,6 +503,7 @@ allOf:
contains:
enum:
- qcom,sc8180x-dwc3-mp
+ - qcom,x1e80100-dwc3-mp
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 95ff9791baea..653a89586f4e 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -13,10 +13,9 @@ properties:
compatible:
oneOf:
- const: ti,j721e-usb
- - const: ti,am64-usb
- items:
- - const: ti,j721e-usb
- const: ti,am64-usb
+ - const: ti,j721e-usb
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 56d180f9c1cc..b320a39de7fe 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -368,6 +368,8 @@ patternProperties:
description: Devantech, Ltd.
"^dfi,.*":
description: DFI Inc.
+ "^dfrobot,.*":
+ description: DFRobot Corporation
"^dh,.*":
description: DH electronics GmbH
"^difrnce,.*":
@@ -1539,6 +1541,8 @@ patternProperties:
description: Turing Machines, Inc.
"^tyan,.*":
description: Tyan Computer Corporation
+ "^tyhx,.*":
+ description: NanjingTianyihexin Electronics Ltd.
"^u-blox,.*":
description: u-blox
"^u-boot,.*":
diff --git a/Documentation/driver-api/cxl/access-coordinates.rst b/Documentation/driver-api/cxl/access-coordinates.rst
new file mode 100644
index 000000000000..b07950ea30c9
--- /dev/null
+++ b/Documentation/driver-api/cxl/access-coordinates.rst
@@ -0,0 +1,91 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==================================
+CXL Access Coordinates Computation
+==================================
+
+Shared Upstream Link Calculation
+================================
+For certain CXL region constructions with endpoints behind CXL switches (SW) or
+Root Ports (RP), the total bandwidth of all the endpoints behind a switch can
+exceed the bandwidth of the switch's upstream link. A similar situation can
+occur within the host, upstream of the root ports. The CXL driver therefore
+performs an additional pass after all the targets for a region have arrived, in
+order to recalculate the bandwidths with the possibility of an upstream link
+being the limiting factor in mind.
+
+The algorithm assumes the configuration is a symmetric topology, as that
+maximizes performance. When an asymmetric topology is detected, the calculation
+is aborted. An asymmetric topology is detected during the topology walk when the
+number of RPs detected as a grandparent is not equal to the number of devices
+iterated in the same iteration loop. The assumption is made that subtle
+asymmetry in properties does not happen and all paths to EPs are equal.
+
+There can be multiple switches under an RP. There can be multiple RPs under
+a CXL Host Bridge (HB). There can be multiple HBs under a CXL Fixed Memory
+Window Structure (CFMWS).
+
+An example hierarchy:
+
+> CFMWS 0
+> |
+> _________|_________
+> | |
+> ACPI0017-0 ACPI0017-1
+> GP0/HB0/ACPI0016-0 GP1/HB1/ACPI0016-1
+> | | | |
+> RP0 RP1 RP2 RP3
+> | | | |
+> SW 0 SW 1 SW 2 SW 3
+> | | | | | | | |
+> EP0 EP1 EP2 EP3 EP4 EP5 EP6 EP7
+
+Computation for the example hierarchy:
+
+Min (GP0 to CPU BW,
+ Min(SW 0 Upstream Link to RP0 BW,
+ Min(SW0SSLBIS for SW0DSP0 (EP0), EP0 DSLBIS, EP0 Upstream Link) +
+ Min(SW0SSLBIS for SW0DSP1 (EP1), EP1 DSLBIS, EP1 Upstream link)) +
+ Min(SW 1 Upstream Link to RP1 BW,
+ Min(SW1SSLBIS for SW1DSP0 (EP2), EP2 DSLBIS, EP2 Upstream Link) +
+ Min(SW1SSLBIS for SW1DSP1 (EP3), EP3 DSLBIS, EP3 Upstream link))) +
+Min (GP1 to CPU BW,
+ Min(SW 2 Upstream Link to RP2 BW,
+ Min(SW2SSLBIS for SW2DSP0 (EP4), EP4 DSLBIS, EP4 Upstream Link) +
+ Min(SW2SSLBIS for SW2DSP1 (EP5), EP5 DSLBIS, EP5 Upstream link)) +
+ Min(SW 3 Upstream Link to RP3 BW,
+ Min(SW3SSLBIS for SW3DSP0 (EP6), EP6 DSLBIS, EP6 Upstream Link) +
+ Min(SW3SSLBIS for SW3DSP1 (EP7), EP7 DSLBIS, EP7 Upstream link))))
+
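+For illustration, assume every EP DSLBIS and EP upstream link is 8 GB/s, every
+SSLBIS entry is 8 GB/s, every switch upstream link to its RP is 12 GB/s, and
+each GP-to-CPU bandwidth is 20 GB/s (all values hypothetical). Each switch then
+contributes Min(12, 8 + 8) = 12 GB/s, each GP contributes Min(20, 12 + 12) =
+20 GB/s, and the region total is 20 + 20 = 40 GB/s.
+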
+The calculation starts at cxl_region_shared_upstream_perf_update(). An xarray
+is created to collect all the endpoint bandwidths via the
+cxl_endpoint_gather_bandwidth() function. The min() of the bandwidth from the
+endpoint CDAT and the upstream link bandwidth is calculated. If the endpoint
+has a CXL switch as a parent, then the min() of the calculated bandwidth and the
+bandwidth from the SSLBIS for the switch downstream port that is associated
+with the endpoint is calculated. The final bandwidth is stored in a
+'struct cxl_perf_ctx' in the xarray, indexed by a device pointer. If the
+endpoint is directly attached to a root port (RP), the device pointer is the
+RP device. If the endpoint is behind a switch, the device pointer is the
+upstream device of the parent switch.
+
+At the next stage, the code walks through one or more switches if they exist
+in the topology. For endpoints directly attached to RPs, this step is skipped.
+If there is another switch upstream, the code takes the min() of the currently
+gathered bandwidth, the upstream link bandwidth, and the bandwidth from the
+SSLBIS of that upstream switch.
+
+Once the topology walk reaches the RP, whether via directly attached endpoints
+or by walking through the switch(es), cxl_rp_gather_bandwidth() is called. At
+this point all the bandwidths are aggregated per host bridge, which is
+also the index for the resulting xarray.
+
+The next step is to take the min() of the per-host-bridge bandwidth and the
+bandwidth from the Generic Port (GP). The bandwidth for the GP is retrieved
+via the ACPI SRAT/HMAT tables. The resulting min bandwidths are aggregated
+under the same ACPI0017 device to form a new xarray.
+
+Finally, cxl_region_update_bandwidth() is called and the aggregated
+bandwidth from all the members of the last xarray is used to update the
+access coordinates residing in the cxl region (cxlr) context.
diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst
index 12b82725d322..965ba90e8fb7 100644
--- a/Documentation/driver-api/cxl/index.rst
+++ b/Documentation/driver-api/cxl/index.rst
@@ -8,6 +8,7 @@ Compute Express Link
:maxdepth: 1
memory-devices
+ access-coordinates
maturity-map
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index d270a0aa8d55..2bbf68b56b0d 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -48,11 +48,66 @@ For server running on QEMU host with virtio transport::
mount -t 9p -o trans=virtio <mount_tag> /mnt/9
-where mount_tag is the tag associated by the server to each of the exported
+where mount_tag is the tag generated by the server for each of the exported
mount points. Each 9P export is seen by the client as a virtio device with an
associated "mount_tag" property. Available mount tags can be
seen by reading /sys/bus/virtio/drivers/9pnet_virtio/virtio<n>/mount_tag files.
+USBG Usage
+==========
+
+To mount a 9p FS on a USB Host accessible via the gadget at runtime::
+
+ mount -t 9p -o trans=usbg,aname=/path/to/fs <device> /mnt/9
+
+To mount a 9p FS on a USB Host accessible via the gadget as root filesystem::
+
+ root=<device> rootfstype=9p rootflags=trans=usbg,cache=loose,uname=root,access=0,dfltuid=0,dfltgid=0,aname=/path/to/rootfs
+
+where <device> is the tag associated with the usb gadget transport.
+It is defined by the configfs instance name.
+
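+For illustration, a gadget exposing such a channel could be composed through
+configfs roughly as follows; the function name ``usb9pfs`` and the instance
+name ``usb9p`` (which becomes the mount tag / <device>) are assumptions here::
+
+    $ cd /sys/kernel/config/usb_gadget
+    $ mkdir g1 && cd g1
+    $ echo 0x1d6b > idVendor && echo 0x0109 > idProduct
+    $ mkdir configs/c.1
+    $ mkdir functions/usb9pfs.usb9p
+    $ ln -s functions/usb9pfs.usb9p configs/c.1/
+    $ echo <udc name> > UDC
+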
+USBG Example
+============
+
+The USB host exports a filesystem, while the gadget on the USB device
+side makes it mountable.
+
+Diod (9pfs server) and the forwarder are on the development host, where
+the root filesystem is actually stored. The gadget is initialized during
+boot (or later) on the embedded board. Then the forwarder will find it
+on the USB bus and start forwarding requests.
+
+In this case the 9p requests come from the device and are handled by the
+host. The reason is that USB device ports are normally not available on
+PCs, so a connection in the other direction would not work.
+
+When using the usbg transport, there is currently no native usb host
+service capable of handling the requests from the gadget driver. For
+this we have to use the extra python tool p9_fwd.py from tools/usb.
+
+Just start a 9pfs-capable network server like diod or nfs-ganesha, e.g.::
+
+ $ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD
+
+Optionally scan your bus if there is more than one usbg gadget to find their paths::
+
+ $ python $kernel_dir/tools/usb/p9_fwd.py list
+
+ Bus | Addr | Manufacturer | Product | ID | Path
+ --- | ---- | ---------------- | ---------------- | --------- | ----
+ 2 | 67 | unknown | unknown | 1d6b:0109 | 2-1.1.2
+ 2 | 68 | unknown | unknown | 1d6b:0109 | 2-1.1.3
+
+Then start the python transport::
+
+ $ python $kernel_dir/tools/usb/p9_fwd.py --path 2-1.1.2 connect -p 9999
+
+After that the gadget driver can be used as described above.
+
+One use-case is to use it as an alternative to NFS root booting during
+the development of embedded Linux devices.
+
Options
=======
@@ -68,6 +123,7 @@ Options
virtio connect to the next virtio channel available
(from QEMU with trans_virtio module)
rdma connect to a specified RDMA channel
+ usbg connect to a specified usb gadget channel
======== ============================================
uname=name user name to attempt mount as on the remote server. The
diff --git a/Documentation/filesystems/nfs/index.rst b/Documentation/filesystems/nfs/index.rst
index 8536134f31fd..95c2c009874c 100644
--- a/Documentation/filesystems/nfs/index.rst
+++ b/Documentation/filesystems/nfs/index.rst
@@ -8,6 +8,7 @@ NFS
client-identifier
exporting
+ localio
pnfs
rpc-cache
rpc-server-gss
diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst
new file mode 100644
index 000000000000..bd1967e2eab3
--- /dev/null
+++ b/Documentation/filesystems/nfs/localio.rst
@@ -0,0 +1,357 @@
+===========
+NFS LOCALIO
+===========
+
+Overview
+========
+
+The LOCALIO auxiliary RPC protocol allows the Linux NFS client and
+server to reliably handshake to determine if they are on the same
+host. Select "NFS client and server support for LOCALIO auxiliary
+protocol" in menuconfig to enable CONFIG_NFS_LOCALIO in the kernel
+config (both CONFIG_NFS_FS and CONFIG_NFSD must also be enabled).
+
+Once an NFS client and server handshake as "local", the client will
+bypass the network RPC protocol for read, write and commit operations.
+Due to this XDR and RPC bypass, these operations complete faster.
+
+The LOCALIO auxiliary protocol's implementation, which uses the same
+connection as NFS traffic, follows the pattern established by the NFS
+ACL protocol extension.
+
+The LOCALIO auxiliary protocol is needed to allow robust discovery of
+clients local to their servers. In a private implementation that
+preceded use of this LOCALIO protocol, a fragile sockaddr network
+address based match against all local network interfaces was attempted.
+But unlike the LOCALIO protocol, the sockaddr-based matching didn't
+handle use of iptables or containers.
+
+The robust handshake between local client and server is just the
+beginning; the ultimate use case this locality makes possible is that the
+client is able to open files and issue reads, writes and commits
+directly to the server without having to go over the network. The
+requirement is to perform these loopback NFS operations as efficiently
+as possible; this is particularly useful for container use cases
+(e.g. kubernetes) where it is possible to run an IO job local to the
+server.
+
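+As a minimal sketch, a loopback configuration that exercises LOCALIO can be as
+simple as exporting a directory and mounting it over NFS on the same host (the
+export path, mount point and NFS version below are only examples)::
+
+    # exportfs -o rw,no_root_squash localhost:/srv/export
+    # mount -t nfs -o vers=3 localhost:/srv/export /mnt/nfs
+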
+The performance advantage realized from LOCALIO's ability to bypass
+using XDR and RPC for reads, writes and commits can be extreme, e.g.:
+
+fio for 20 secs with directio, qd of 8, 16 libaio threads:
+ - With LOCALIO:
+ 4K read: IOPS=979k, BW=3825MiB/s (4011MB/s)(74.7GiB/20002msec)
+ 4K write: IOPS=165k, BW=646MiB/s (678MB/s)(12.6GiB/20002msec)
+ 128K read: IOPS=402k, BW=49.1GiB/s (52.7GB/s)(982GiB/20002msec)
+ 128K write: IOPS=11.5k, BW=1433MiB/s (1503MB/s)(28.0GiB/20004msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=79.2k, BW=309MiB/s (324MB/s)(6188MiB/20003msec)
+ 4K write: IOPS=59.8k, BW=234MiB/s (245MB/s)(4671MiB/20002msec)
+ 128K read: IOPS=33.9k, BW=4234MiB/s (4440MB/s)(82.7GiB/20004msec)
+ 128K write: IOPS=11.5k, BW=1434MiB/s (1504MB/s)(28.0GiB/20011msec)
+
+fio for 20 secs with directio, qd of 8, 1 libaio thread:
+ - With LOCALIO:
+ 4K read: IOPS=230k, BW=898MiB/s (941MB/s)(17.5GiB/20001msec)
+ 4K write: IOPS=22.6k, BW=88.3MiB/s (92.6MB/s)(1766MiB/20001msec)
+ 128K read: IOPS=38.8k, BW=4855MiB/s (5091MB/s)(94.8GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1428MiB/s (1497MB/s)(27.9GiB/20001msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=77.1k, BW=301MiB/s (316MB/s)(6022MiB/20001msec)
+ 4K write: IOPS=32.8k, BW=128MiB/s (135MB/s)(2566MiB/20001msec)
+ 128K read: IOPS=24.4k, BW=3050MiB/s (3198MB/s)(59.6GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1430MiB/s (1500MB/s)(27.9GiB/20001msec)
+
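+For illustration, an fio invocation matching the parameters quoted above (the
+target directory and read/write pattern are assumptions) could look like::
+
+    # fio --name=localio --directory=/mnt/nfs --ioengine=libaio --direct=1 \
+          --time_based --runtime=20 --iodepth=8 --numjobs=16 --rw=randread --bs=4k
+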
+FAQ
+===
+
+1. What are the use cases for LOCALIO?
+
+ a. Workloads where the NFS client and server are on the same host
+ realize improved IO performance. In particular, it is common when
+ running containerised workloads for jobs to find themselves
+ running on the same host as the knfsd server being used for
+ storage.
+
+2. What are the requirements for LOCALIO?
+
+ a. Bypass use of the network RPC protocol as much as possible. This
+ includes bypassing XDR and RPC for open, read, write and commit
+ operations.
+ b. Allow client and server to autonomously discover if they are
+ running local to each other without making any assumptions about
+ the local network topology.
+ c. Support the use of containers by being compatible with relevant
+ namespaces (e.g. network, user, mount).
+ d. Support all versions of NFS. NFSv3 is of particular importance
+ because it has wide enterprise usage and pNFS flexfiles makes use
+ of it for the data path.
+
+3. Why doesn’t LOCALIO just compare IP addresses or hostnames when
+ deciding if the NFS client and server are co-located on the same
+ host?
+
+ Since one of the main use cases is containerised workloads, we cannot
+ assume that IP addresses will be shared between the client and
+ server. This sets up a requirement for a handshake protocol that
+ needs to go over the same connection as the NFS traffic in order to
+ identify that the client and the server really are running on the
+ same host. The handshake uses a secret that is sent over the wire,
+ and can be verified by both parties by comparing with a value stored
+ in shared kernel memory if they are truly co-located.
+
+4. Does LOCALIO improve pNFS flexfiles?
+
+ Yes, LOCALIO complements pNFS flexfiles by allowing it to take
+ advantage of NFS client and server locality. Policy that initiates
+ client IO as closely to the server where the data is stored naturally
+ benefits from the data path optimization LOCALIO provides.
+
+5. Why not develop a new pNFS layout to enable LOCALIO?
+
+ A new pNFS layout could be developed, but doing so would put the
+ onus on the server to somehow discover that the client is co-located
+ when deciding to hand out the layout.
+ There is value in a simpler approach (as provided by LOCALIO) that
+ allows the NFS client to negotiate and leverage locality without
+ requiring more elaborate modeling and discovery of such locality in a
+ more centralized manner.
+
+6. Why is having the client perform a server-side file OPEN, without
+ using RPC, beneficial? Is the benefit pNFS specific?
+
+ Avoiding the use of XDR and RPC for file opens is beneficial to
+ performance regardless of whether pNFS is used. Especially when
+ dealing with small files, it is best to avoid going over the wire
+ whenever possible; otherwise it could reduce or even negate the
+ benefits of avoiding the wire for doing the small file I/O itself.
+ Given LOCALIO's requirements the current approach of having the
+ client perform a server-side file open, without using RPC, is ideal.
+ If in the future requirements change then we can adapt accordingly.
+
+7. Why is LOCALIO only supported with UNIX Authentication (AUTH_UNIX)?
+
+ Strong authentication is usually tied to the connection itself. It
+ works by establishing a context that is cached by the server, and
+ that acts as the key for discovering the authorisation token, which
+ can then be passed to rpc.mountd to complete the authentication
+ process. On the other hand, in the case of AUTH_UNIX, the credential
+ that was passed over the wire is used directly as the key in the
+ upcall to rpc.mountd. This simplifies the authentication process, and
+ so makes AUTH_UNIX easier to support.
+
+8. How do export options that translate RPC user IDs behave for LOCALIO
+ operations (e.g. root_squash, all_squash)?
+
+ Export options that translate user IDs are managed by nfsd_setuser()
+ which is called by nfsd_setuser_and_check_port() which is called by
+ __fh_verify(). So they get handled exactly the same way for LOCALIO
+ as they do for non-LOCALIO.
+
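+ For example, a server-side export created with root_squash (the path is
+ illustrative) squashes root exactly the same way whether the client ends
+ up using LOCALIO or the regular wire protocol::
+
+   # exportfs -o rw,root_squash '*:/srv/export'
+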
+9. How does LOCALIO make certain that object lifetimes are managed
+ properly given NFSD and NFS operate in different contexts?
+
+ See the detailed "NFS Client and Server Interlock" section below.
+
+RPC
+===
+
+The LOCALIO auxiliary RPC protocol consists of a single "UUID_IS_LOCAL"
+RPC method that allows the Linux NFS client to verify the local Linux
+NFS server can see the nonce (single-use UUID) the client generated and
+made available in nfs_common. This protocol isn't part of an IETF
+standard, nor does it need to be, considering it is a Linux-to-Linux
+auxiliary RPC protocol that amounts to an implementation detail.
+
+The UUID_IS_LOCAL method encodes the client generated uuid_t in terms of
+the fixed UUID_SIZE (16 bytes). The fixed size opaque encode and decode
+XDR methods are used instead of the less efficient variable sized
+methods.
+
+The RPC program number for the NFS_LOCALIO_PROGRAM is 400122 (as assigned
+by IANA, see https://www.iana.org/assignments/rpc-program-numbers/ ):
+Linux Kernel Organization 400122 nfslocalio
+
+The LOCALIO protocol spec in rpcgen syntax is::
+
+ /* raw RFC 9562 UUID */
+ #define UUID_SIZE 16
+ typedef u8 uuid_t<UUID_SIZE>;
+
+ program NFS_LOCALIO_PROGRAM {
+ version LOCALIO_V1 {
+ void
+ NULL(void) = 0;
+
+ void
+ UUID_IS_LOCAL(uuid_t) = 1;
+ } = 1;
+ } = 400122;
+
+LOCALIO uses the same transport connection as NFS traffic. As such,
+LOCALIO is not registered with rpcbind.
+
+NFS Common and Client/Server Handshake
+======================================
+
+fs/nfs_common/nfslocalio.c provides interfaces that enable an NFS client
+to generate a nonce (single-use UUID) and associated short-lived
+nfs_uuid_t struct, register it with nfs_common for subsequent lookup and
+verification by the NFS server; if matched, the NFS server populates
+members in the nfs_uuid_t struct. The NFS client then uses nfs_common to
+transfer the nfs_uuid_t from nfs_common's uuids_list to the nn->nfsd_serv
+clients_list. See:
+fs/nfs/localio.c:nfs_local_probe()
+
+nfs_common's nfs_uuids list is the basis for LOCALIO enablement; as such,
+it has members that point to nfsd memory for direct use by the client
+(e.g. 'net' is the server's network namespace; through it the client can
+access nn->nfsd_serv with proper rcu read access). It is this client
+and server synchronization that enables advanced usage and allows the
+lifetime of objects to span from the host kernel's nfsd to per-container
+knfsd instances that are connected to nfs clients running on the same
+local host.
+
+NFS Client and Server Interlock
+===============================
+
+LOCALIO provides the nfs_uuid_t object and associated interfaces to
+allow proper network namespace (net-ns) and NFSD object refcounting:
+
+ We don't want to keep a long-term counted reference on each NFSD's
+ net-ns in the client because that prevents a server container from
+ completely shutting down.
+
+ So we avoid taking a reference at all and rely on the per-cpu
+ reference to the server (detailed below) being sufficient to keep
+ the net-ns active. This involves allowing the NFSD's net-ns exit
+ code to iterate all active clients and clear their ->net pointers
+ (which are needed to find the per-cpu-refcount for the nfsd_serv).
+
+ Details:
+
+ - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head
+ that can be used to find the client. It does add the 16-byte
+ uuid_t to nfs_client so it is bigger than needed (given that
+ uuid_t is only used during the initial NFS client and server
+ LOCALIO handshake to determine if they are local to each other).
+ If that is really a problem we can find a fix.
+
+ - When the nfs server confirms that the uuid_t is local, it moves
+ the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net.
+
+ - When each server's net-ns is shutting down - in a "pre_exit"
+ handler, all these nfs_uuid_t have their ->net cleared. There is
+ an rcu_synchronize() call between pre_exit() handlers and exit()
+ handlers so any caller that sees nfs_uuid_t ->net as not NULL can
+ safely manage the per-cpu-refcount for nfsd_serv.
+
+ - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it
+ can safely dereference ->net in a private rcu_read_lock() section
+ to allow safe access to the associated nfsd_net and nfsd_serv.
+
+So LOCALIO required the introduction and use of NFSD's percpu_ref to
+interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each
+nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and
+warrants a more detailed explanation:
+
+ nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its
+ nfsd_file handle and then the caller (NFS client) must drop the
+ reference for the nfsd_file and associated nn->nfsd_serv using
+ nfs_file_put_local() once it has completed its IO.
+
+ This interlock working relies heavily on nfsd_open_local_fh() being
+ afforded the ability to safely deal with the possibility that the
+ NFSD's net-ns (and nfsd_net by association) may have been destroyed
+ by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only
+ possible given the nfs_uuid_t ->net pointer management detailed
+ above.
+
+All told, this elaborate interlock of the NFS client and server has been
+verified to fix an easy-to-hit crash that would occur if an NFSD
+instance running in a container, with a LOCALIO client mounted, is
+shut down. Upon restart of the container and associated NFSD, the client
+would go on to crash due to a NULL pointer dereference caused by the
+LOCALIO client attempting to call nfsd_open_local_fh(), using
+nn->nfsd_serv, without having a proper reference on nn->nfsd_serv.
+
+NFS Client issues IO instead of Server
+======================================
+
+Because LOCALIO is focused on protocol bypass to achieve improved IO
+performance, alternatives to the traditional NFS wire protocol (SUNRPC
+with XDR) must be provided to access the backing filesystem.
+
+See fs/nfs/localio.c:nfs_local_open_fh() and
+fs/nfsd/localio.c:nfsd_open_local_fh() for the interface that makes
+focused use of select nfs server objects to allow a client local to a
+server to open a file pointer without needing to go over the network.
+
+The client's fs/nfs/localio.c:nfs_local_open_fh() will call into the
+server's fs/nfsd/localio.c:nfsd_open_local_fh() and carefully access
+both the associated nfsd network namespace and nn->nfsd_serv in terms of
+RCU. If nfsd_open_local_fh() finds that the client no longer sees valid
+nfsd objects (be it struct net or nn->nfsd_serv) it returns -ENXIO
+to nfs_local_open_fh() and the client will try to reestablish the
+LOCALIO resources needed by calling nfs_local_probe() again. This
+recovery is needed if/when an nfsd instance running in a container were
+to reboot while a LOCALIO client is connected to it.
+
+Once the client has an open nfsd_file pointer it will issue reads,
+writes and commits directly to the underlying local filesystem (normally
+done by the nfs server). As such, for these operations, the NFS client
+is issuing IO to the underlying local filesystem that it is sharing with
+the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and
+fs/nfs/localio.c:nfs_local_commit().
+
+Security
+========
+
+Localio is only supported when UNIX-style authentication (AUTH_UNIX, aka
+AUTH_SYS) is used.
+
+Care is taken to ensure the same NFS security mechanisms are used
+(authentication, etc) regardless of whether LOCALIO or regular NFS
+access is used. The auth_domain established as part of the traditional
+NFS client access to the NFS server is also used for LOCALIO.
+
+Relative to containers, LOCALIO gives the client access to the network
+namespace the server has. This is required to allow the client to access
+the server's per-namespace nfsd_net struct. With traditional NFS, the
+client is afforded this same level of access (albeit in terms of the NFS
+protocol via SUNRPC). No other namespaces (user, mount, etc) have been
+altered or purposely extended from the server to the client.
+
+Testing
+=======
+
+The LOCALIO auxiliary protocol and associated NFS LOCALIO read, write
+and commit access have proven stable against various test scenarios:
+
+- Client and server both on the same host.
+
+- All permutations of client and server support enablement for both
+ local and remote client and server.
+
+- Testing against NFS storage products that don't support the LOCALIO
+ protocol was also performed.
+
+- Client on host, server within a container (for both v3 and v4.2).
+ The container testing was in terms of podman managed containers and
+ includes a successful container stop/restart scenario.
+
+- Formalizing these test scenarios in terms of existing test
+ infrastructure is ongoing. Initial regular coverage is provided in
+ terms of ktest running xfstests against a LOCALIO-enabled NFS loopback
+ mount configuration, and includes lockdep and KASAN coverage, see:
+ https://evilpiepirate.org/~testdashboard/ci?user=snitzer&branch=snitm-nfs-next
+ https://github.com/koverstreet/ktest
+
+- Various kdevops testing (in terms of "Chuck's BuildBot") has been
+ performed to regularly verify the LOCALIO changes haven't caused any
+ regressions to non-LOCALIO NFS use cases.
+
+- All of Hammerspace's various sanity tests pass with LOCALIO enabled
+ (this includes numerous pNFS and flexfiles tests).
diff --git a/Documentation/iio/ad4000.rst b/Documentation/iio/ad4000.rst
new file mode 100644
index 000000000000..de8fd3ae6e62
--- /dev/null
+++ b/Documentation/iio/ad4000.rst
@@ -0,0 +1,131 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD4000 driver
+=============
+
+Device driver for Analog Devices Inc. AD4000 series of ADCs.
+
+Supported devices
+=================
+
+* `AD4000 <https://www.analog.com/AD4000>`_
+* `AD4001 <https://www.analog.com/AD4001>`_
+* `AD4002 <https://www.analog.com/AD4002>`_
+* `AD4003 <https://www.analog.com/AD4003>`_
+* `AD4004 <https://www.analog.com/AD4004>`_
+* `AD4005 <https://www.analog.com/AD4005>`_
+* `AD4006 <https://www.analog.com/AD4006>`_
+* `AD4007 <https://www.analog.com/AD4007>`_
+* `AD4008 <https://www.analog.com/AD4008>`_
+* `AD4010 <https://www.analog.com/AD4010>`_
+* `AD4011 <https://www.analog.com/AD4011>`_
+* `AD4020 <https://www.analog.com/AD4020>`_
+* `AD4021 <https://www.analog.com/AD4021>`_
+* `AD4022 <https://www.analog.com/AD4022>`_
+* `ADAQ4001 <https://www.analog.com/ADAQ4001>`_
+* `ADAQ4003 <https://www.analog.com/ADAQ4003>`_
+
+Wiring connections
+------------------
+
+Devices of the AD4000 series can be connected to the SPI host controller in a
+few different modes.
+
+CS mode, 3-wire turbo mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Datasheet "3-wire" mode is what most resembles a standard SPI connection which,
+for these devices, consists of connecting the controller CS line to the device
+CNV pin and the other SPI lines as usual. This configuration is (misleadingly)
+called "CS Mode, 3-Wire Turbo Mode" connection in datasheets.
+NOTE: The datasheet definition of 3-wire mode for the AD4000 series is NOT the
+same as the standard spi-3wire mode.
+This is the only connection mode that allows configuration register access, but
+it requires the SPI controller to support the ``SPI_MOSI_IDLE_HIGH`` feature.
+
+Omit the ``adi,sdi-pin`` property in device tree to select this mode.
+
+::
+
+ +-------------+
+ + ----------------------------------| SDO |
+ | | |
+ | +-------------------| CS |
+ | v | |
+ | +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+CS mode, 3-wire, without busy indicator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Another wiring configuration supported as "3-wire" mode has the SDI pin
+hard-wired to digital input/output interface supply (VIO). In this setup, the
+controller is not required to support ``SPI_MOSI_IDLE_HIGH`` but register access
+is not possible. This connection mode saves one wire and works with any SPI
+controller.
+
+Set the ``adi,sdi-pin`` device tree property to ``"high"`` to select this mode.
+
+::
+
+ +-------------+
+ +--------------------| CS |
+ v | |
+ VIO +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+Alternatively, a GPIO may be connected to the device CNV pin. This is similar to
+the previous wiring configuration but saves the use of a CS line.
+
+::
+
+ +-------------+
+ +--------------------| GPIO |
+ v | |
+ VIO +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+CS mode, 4-wire without busy indicator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In datasheet "4-wire" mode, the controller CS line is connected to the ADC SDI
+pin and a GPIO is connected to the ADC CNV pin. This connection mode may better
+suit scenarios where multiple ADCs can share one CNV trigger.
+
+Set ``adi,sdi-pin`` to ``"cs"`` to select this mode.
+
+
+::
+
+ +-------------+
+ + ----------------------------------| CS |
+ | | |
+ | +-------------------| GPIO |
+ | v | |
+ | +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
diff --git a/Documentation/iio/ad4695.rst b/Documentation/iio/ad4695.rst
new file mode 100644
index 000000000000..33ed29b7c98a
--- /dev/null
+++ b/Documentation/iio/ad4695.rst
@@ -0,0 +1,167 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD4695 driver
+=============
+
+ADC driver for Analog Devices Inc. AD4695 and similar devices. The module name
+is ``ad4695``.
+
+
+Supported devices
+=================
+
+The following chips are supported by this driver:
+
+* `AD4695 <https://www.analog.com/AD4695>`_
+* `AD4696 <https://www.analog.com/AD4696>`_
+* `AD4697 <https://www.analog.com/AD4697>`_
+* `AD4698 <https://www.analog.com/AD4698>`_
+
+
+Supported features
+==================
+
+SPI wiring modes
+----------------
+
+The driver currently supports the following SPI wiring configuration:
+
+4-wire mode
+^^^^^^^^^^^
+
+In this mode, CNV and CS are tied together and there is a single SDO line.
+
+.. code-block::
+
+ +-------------+ +-------------+
+ | CS |<-+------| CS |
+ | CNV |<-+ | |
+ | ADC | | HOST |
+ | | | |
+ | SDI |<--------| SDO |
+ | SDO |-------->| SDI |
+ | SCLK |<--------| SCLK |
+ +-------------+ +-------------+
+
+To use this mode, in the device tree, omit the ``cnv-gpios`` and
+``spi-rx-bus-width`` properties.
+
+Channel configuration
+---------------------
+
+Since the chip supports multiple ways to configure each channel, this must be
+described in the device tree based on what is actually wired up to the inputs.
+
+There are three typical configurations:
+
+An ``INx`` pin is used as the positive input with the ``REFGND``, ``COM`` or
+the next ``INx`` pin as the negative input.
+
+Pairing with REFGND
+^^^^^^^^^^^^^^^^^^^
+
+Each ``INx`` pin can be used as a pseudo-differential input in conjunction with
+the ``REFGND`` pin. The device tree will look like this:
+
+.. code-block::
+
+ channel@0 {
+ reg = <0>; /* IN0 */
+ };
+
+If no other channel properties are needed (e.g. ``adi,no-high-z``), the channel
+node can be omitted entirely.
+
+This will appear on the IIO bus as the ``voltage0`` channel. The processed value
+(*raw × scale*) will be the voltage present on the ``IN0`` pin relative to
+``REFGND``. (Offset is always 0 when pairing with ``REFGND``.)
+
+Pairing with COM
+^^^^^^^^^^^^^^^^
+
+Each ``INx`` pin can be used as a pseudo-differential input in conjunction with
+the ``COM`` pin. The device tree will look like this:
+
+.. code-block::
+
+ com-supply = <&vref_div_2>;
+
+ channel@1 {
+ reg = <1>; /* IN1 */
+ common-mode-channel = <AD4695_COMMON_MODE_COM>;
+ bipolar;
+ };
+
+This will appear on the IIO bus as the ``voltage1`` channel. The processed value
+(*(raw + offset) × scale*) will be the voltage measured on the ``IN1`` pin
+relative to ``REFGND``. (The offset is determined by the ``com-supply`` voltage.)
+
+The macro comes from:
+
+.. code-block::
+
+ #include <dt-bindings/iio/adi,ad4695.h>
+
+Pairing two INx pins
+^^^^^^^^^^^^^^^^^^^^
+
+An even-numbered ``INx`` pin and the following odd-numbered ``INx`` pin can be
+used as a pseudo-differential input. The device tree for using ``IN2`` as the
+positive input and ``IN3`` as the negative input will look like this:
+
+.. code-block::
+
+ in3-supply = <&vref_div_2>;
+
+ channel@2 {
+ reg = <2>; /* IN2 */
+ common-mode-channel = <3>; /* IN3 */
+ bipolar;
+ };
+
+This will appear on the IIO bus as the ``voltage2`` channel. The processed value
+(*(raw + offset) × scale*) will be the voltage measured on the ``IN2`` pin
+relative to ``REFGND``. (Offset is determined by the ``in3-supply`` voltage.)
+
+VCC supply
+----------
+
+The chip supports being powered by an external LDO via the ``VCC`` input or an
+internal LDO via the ``LDO_IN`` input. The driver looks at the device tree to
+determine which is being used. If ``ldo-supply`` is present, then the internal
+LDO is used. If ``vcc-supply`` is present, then the external LDO is used and
+the internal LDO is disabled.
+
+Reference voltage
+-----------------
+
+The chip supports an external reference voltage via the ``REF`` input or an
+internal buffered reference voltage via the ``REFIN`` input. The driver looks
+at the device tree to determine which is being used. If ``ref-supply`` is
+present, then the external reference voltage is used and the internal buffer is
+disabled. If ``refin-supply`` is present, then the internal buffered reference
+voltage is used.
+
+Gain/offset calibration
+-----------------------
+
+System calibration is supported using the channel gain and offset registers via
+the ``calibscale`` and ``calibbias`` attributes respectively.
+
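+For example, assuming channel 0 is configured, the calibration attributes can
+be exercised from sysfs as follows (the values shown are illustrative only):
+
+.. code-block::
+
+   root:/sys/bus/iio/devices/iio:device0> echo 30 > in_voltage0_calibbias
+   root:/sys/bus/iio/devices/iio:device0> cat in_voltage0_calibbias
+   30
+   root:/sys/bus/iio/devices/iio:device0> cat in_voltage0_calibscale
+   1.000000
+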
+Unimplemented features
+----------------------
+
+- Additional wiring modes
+- Threshold events
+- Oversampling
+- GPIO support
+- CRC support
+
+Device buffers
+==============
+
+This driver supports hardware triggered buffers. This uses the "advanced
+sequencer" feature of the chip to trigger a burst of conversions.
+
+Also see :doc:`iio_devbuf` for more general information.
diff --git a/Documentation/iio/ad7380.rst b/Documentation/iio/ad7380.rst
new file mode 100644
index 000000000000..9c784c1e652e
--- /dev/null
+++ b/Documentation/iio/ad7380.rst
@@ -0,0 +1,130 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD7380 driver
+=============
+
+ADC driver for Analog Devices Inc. AD7380 and similar devices. The module name
+is ``ad7380``.
+
+
+Supported devices
+=================
+
+The following chips are supported by this driver:
+
+* `AD7380 <https://www.analog.com/en/products/ad7380.html>`_
+* `AD7381 <https://www.analog.com/en/products/ad7381.html>`_
+* `AD7383 <https://www.analog.com/en/products/ad7383.html>`_
+* `AD7384 <https://www.analog.com/en/products/ad7384.html>`_
+* `AD7386 <https://www.analog.com/en/products/ad7386.html>`_
+* `AD7387 <https://www.analog.com/en/products/ad7387.html>`_
+* `AD7388 <https://www.analog.com/en/products/ad7388.html>`_
+* `AD7380-4 <https://www.analog.com/en/products/ad7380-4.html>`_
+* `AD7381-4 <https://www.analog.com/en/products/ad7381-4.html>`_
+* `AD7383-4 <https://www.analog.com/en/products/ad7383-4.html>`_
+* `AD7384-4 <https://www.analog.com/en/products/ad7384-4.html>`_
+* `AD7386-4 <https://www.analog.com/en/products/ad7386-4.html>`_
+* `AD7387-4 <https://www.analog.com/en/products/ad7387-4.html>`_
+* `AD7388-4 <https://www.analog.com/en/products/ad7388-4.html>`_
+
+
+Supported features
+==================
+
+SPI wiring modes
+----------------
+
+ad738x ADCs can output data on several SDO lines (1/2/4). The driver currently
+supports only 1 SDO line.
+
+Reference voltage
+-----------------
+
+2 possible reference voltage sources are supported:
+
+- Internal reference (2.5V)
+- External reference (2.5V to 3.3V)
+
+The source is determined by the device tree. If ``refio-supply`` is present,
+then the external reference is used, else the internal reference is used.
+
+Oversampling and resolution boost
+---------------------------------
+
+This family supports 2 types of oversampling: normal average and rolling
+average. Only normal average is supported by the driver, as rolling average can
+be achieved by processing a captured data buffer. The following ratios are
+available: 1 (oversampling disabled)/2/4/8/16/32.
+
+When the on-chip oversampling function is enabled, the performance of the ADC can
+exceed the default resolution. To accommodate the performance boost achievable,
+it is possible to enable an additional two bits of resolution. Because the
+resolution boost feature can only be enabled when oversampling is enabled and
+oversampling is not as useful without the resolution boost, the driver
+automatically enables the resolution boost if and only if oversampling is
+enabled.
+
+Since the resolution boost feature causes 16-bit chips to produce 18-bit data,
+which means the storagebits have to change from 16 to 32 bits, we use the new
+ext_scan_type feature to allow changing the scan_type at runtime. Unfortunately
+libiio does not support this, so when enabling or disabling oversampling, the
+user must restart iiod using the following command:
+
+.. code-block:: bash
+
+ root:~# systemctl restart iiod
+
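+Oversampling itself is controlled through the standard IIO sysfs attributes; a
+short sketch (device index assumed):
+
+.. code-block:: bash
+
+   root:/sys/bus/iio/devices/iio:device0> cat oversampling_ratio_available
+   1 2 4 8 16 32
+   root:/sys/bus/iio/devices/iio:device0> echo 4 > oversampling_ratio
+   root:/sys/bus/iio/devices/iio:device0> cat oversampling_ratio
+   4
+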
+Channel selection and sequencer (single-end chips only)
+-------------------------------------------------------
+
+Single-ended chips of this family (ad7386/7/8(-4)) have a 2:1 multiplexer in
+front of each ADC. They also include additional configuration registers that
+allow for either manual selection or automatic switching (sequencer mode) of
+the multiplexer inputs.
+
+From an IIO point of view, all inputs are exported, i.e. ad7386/7/8
+export 4 channels and ad7386-4/7-4/8-4 export 8 channels.
+
+Inputs ``AinX0`` of the multiplexers correspond to the first half of the IIO
+channels (i.e. 0-1 or 0-3) and inputs ``AinX1`` correspond to the second half
+(i.e. 2-3 or 4-7). Example for the AD7386/7/8 (2-channel parts):
+
+.. code-block::
+
+ IIO | AD7386/7/8
+ | +----------------------------
+ | | _____ ______
+ | | | | | |
+ voltage0 | AinA0 --|--->| | | |
+ | | | mux |----->| ADCA |---
+ voltage2 | AinA1 --|--->| | | |
+ | | |_____| |_____ |
+ | | _____ ______
+ | | | | | |
+ voltage1 | AinB0 --|--->| | | |
+ | | | mux |----->| ADCB |---
+ voltage3 | AinB1 --|--->| | | |
+ | | |_____| |______|
+ | |
+ | +----------------------------
+
+
+When enabling sequencer mode, the effective sampling rate is divided by two.
+
+Unimplemented features
+----------------------
+
+- 2/4 SDO lines
+- Rolling average oversampling
+- Power down mode
+- CRC indication
+- Alert
+
+
+Device buffers
+==============
+
+This driver supports IIO triggered buffers.
+
+See :doc:`iio_devbuf` for more information.
diff --git a/Documentation/iio/adxl380.rst b/Documentation/iio/adxl380.rst
new file mode 100644
index 000000000000..376dee5fe1dd
--- /dev/null
+++ b/Documentation/iio/adxl380.rst
@@ -0,0 +1,233 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ADXL380 driver
+===============
+
+This driver supports the Analog Devices ADXL380/382 on the SPI/I2C bus.
+
+1. Supported devices
+====================
+
+* `ADXL380 <https://www.analog.com/ADXL380>`_
+* `ADXL382 <https://www.analog.com/ADXL382>`_
+
+The ADXL380/ADXL382 is a low noise density, low power, 3-axis accelerometer with
+selectable measurement ranges. The ADXL380 supports the ±4 g, ±8 g, and ±16 g
+ranges, and the ADXL382 supports ±15 g, ±30 g, and ±60 g ranges.
+
+2. Device attributes
+====================
+
+Accelerometer measurements are always provided.
+
+Temperature data are also provided. This data can be used to monitor the
+internal system temperature or to improve the temperature stability of the
+device via calibration.
+
+Each IIO device has a device folder under ``/sys/bus/iio/devices/iio:deviceX``,
+where X is the IIO index of the device. Under these folders reside a set of
+device files, depending on the characteristics and features of the hardware
+device in question. These files are consistently generalized and documented in
+the IIO ABI documentation.
+
+The following tables show the adxl380 related device files, found in the
+specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+
++---------------------------------------------------+----------------------------------------------------------+
+| 3-Axis Accelerometer related device files | Description |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_scale | Scale for the accelerometer channels. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_high_pass_3db_frequency | High pass filter bandwidth. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_high_pass_3db_frequency_available | Available high pass filter bandwidth configurations. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_low_pass_3db_frequency | Low pass filter bandwidth. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_low_pass_3db_frequency_available | Available low pass filter bandwidth configurations. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_x_raw | Raw X-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibbias | Calibration offset for the Y-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_y_raw | Raw Y-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibbias | Calibration offset for the Z-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_z_raw | Raw Z-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+
++----------------------------------+--------------------------------------------+
+| Temperature sensor related files | Description |
++----------------------------------+--------------------------------------------+
+| in_temp_raw | Raw temperature channel value. |
++----------------------------------+--------------------------------------------+
+| in_temp_offset | Offset for the temperature sensor channel. |
++----------------------------------+--------------------------------------------+
+| in_temp_scale | Scale for the temperature sensor channel. |
++----------------------------------+--------------------------------------------+
+
++------------------------------+----------------------------------------------+
+| Miscellaneous device files | Description |
++------------------------------+----------------------------------------------+
+| name | Name of the IIO device. |
++------------------------------+----------------------------------------------+
+| sampling_frequency | Currently selected sample rate. |
++------------------------------+----------------------------------------------+
+| sampling_frequency_available | Available sampling frequency configurations. |
++------------------------------+----------------------------------------------+
+
+Channels processed values
+-------------------------
+
+A channel value can be read from its _raw attribute. The value returned is the
+raw value as reported by the devices. To get the processed value of the channel,
+apply the following formula:
+
+.. code-block:: bash
+
+ processed value = (_raw + _offset) * _scale
+
+Where _offset and _scale are device attributes. If no _offset attribute is
+present, simply assume its value is 0.
+
+The adxl380 driver offers data for 2 types of channels; the table below shows
+the measurement units for the processed value, which are defined by the IIO
+framework:
+
++-------------------------------------+---------------------------+
+| Channel type | Measurement unit |
++-------------------------------------+---------------------------+
+| Acceleration on X, Y, and Z axis | Meters per Second squared |
++-------------------------------------+---------------------------+
+| Temperature | Millidegrees Celsius |
++-------------------------------------+---------------------------+
+
+Usage examples
+--------------
+
+Show device name:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat name
+ adxl382
+
+Show accelerometer channels value:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_raw
+ -1771
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_y_raw
+ 282
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_z_raw
+ -1523
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_scale
+ 0.004903325
+
+- X-axis acceleration = in_accel_x_raw * in_accel_scale = -8.683788575 m/s^2
+- Y-axis acceleration = in_accel_y_raw * in_accel_scale = 1.38273765 m/s^2
+- Z-axis acceleration = in_accel_z_raw * in_accel_scale = -7.467763975 m/s^2
+
+Set calibration offset for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 50 > in_accel_x_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 50
+
+Set sampling frequency:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 16000
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency_available
+ 16000 32000 64000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 32000 > sampling_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 32000
+
+Set low pass filter bandwidth for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 32000
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency_available
+ 32000 8000 4000 2000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 2000 > in_accel_filter_low_pass_3db_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 2000
+
+3. Device buffers
+=================
+
+This driver supports IIO buffers.
+
+All devices support retrieving the raw acceleration and temperature measurements
+using buffers.
+
+Usage examples
+--------------
+
+Select channels for buffer read:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_x_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_y_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_z_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_temp_en
+
+Set the number of samples to be stored in the buffer:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 10 > buffer/length
+
+Enable buffer readings:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > buffer/enable
+
+Obtain buffered data:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> hexdump -C /dev/iio\:device0
+ ...
+ 002bc300 f7 e7 00 a8 fb c5 24 80 f7 e7 01 04 fb d6 24 80 |......$.......$.|
+ 002bc310 f7 f9 00 ab fb dc 24 80 f7 c3 00 b8 fb e2 24 80 |......$.......$.|
+ 002bc320 f7 fb 00 bb fb d1 24 80 f7 b1 00 5f fb d1 24 80 |......$...._..$.|
+ 002bc330 f7 c4 00 c6 fb a6 24 80 f7 a6 00 68 fb f1 24 80 |......$....h..$.|
+ 002bc340 f7 b8 00 a3 fb e7 24 80 f7 9a 00 b1 fb af 24 80 |......$.......$.|
+ 002bc350 f7 b1 00 67 fb ee 24 80 f7 96 00 be fb 92 24 80 |...g..$.......$.|
+ 002bc360 f7 ab 00 7a fc 1b 24 80 f7 b6 00 ae fb 76 24 80 |...z..$......v$.|
+ 002bc370 f7 ce 00 a3 fc 02 24 80 f7 c0 00 be fb 8b 24 80 |......$.......$.|
+ 002bc380 f7 c3 00 93 fb d0 24 80 f7 ce 00 d8 fb c8 24 80 |......$.......$.|
+ 002bc390 f7 bd 00 c0 fb 82 24 80 f8 00 00 e8 fb db 24 80 |......$.......$.|
+ 002bc3a0 f7 d8 00 d3 fb b4 24 80 f8 0b 00 e5 fb c3 24 80 |......$.......$.|
+ 002bc3b0 f7 eb 00 c8 fb 92 24 80 f7 e7 00 ea fb cb 24 80 |......$.......$.|
+ 002bc3c0 f7 fd 00 cb fb 94 24 80 f7 e3 00 f2 fb b8 24 80 |......$.......$.|
+ ...
+
+See ``Documentation/iio/iio_devbuf.rst`` for more information about how buffered
+data is structured.
+
+4. IIO Interfacing Tools
+========================
+
+See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO
+interfacing tools.
diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst
index 9cb4c50cb20d..dfcf9618568a 100644
--- a/Documentation/iio/index.rst
+++ b/Documentation/iio/index.rst
@@ -18,8 +18,12 @@ Industrial I/O Kernel Drivers
.. toctree::
:maxdepth: 1
+ ad4000
+ ad4695
+ ad7380
ad7944
adis16475
adis16480
+ adxl380
bno055
ep93xx_adc
diff --git a/Documentation/networking/tproxy.rst b/Documentation/networking/tproxy.rst
index 00dc3a1a66b4..7f7c1ff6f159 100644
--- a/Documentation/networking/tproxy.rst
+++ b/Documentation/networking/tproxy.rst
@@ -17,7 +17,7 @@ The idea is that you identify packets with destination address matching a local
socket on your box, set the packet mark to a certain value::
# iptables -t mangle -N DIVERT
- # iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
+ # iptables -t mangle -A PREROUTING -p tcp -m socket --transparent -j DIVERT
# iptables -t mangle -A DIVERT -j MARK --set-mark 1
# iptables -t mangle -A DIVERT -j ACCEPT
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index e3f388ef4ee4..6146b49b6a98 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -15,6 +15,8 @@ but not `std <https://doc.rust-lang.org/std/>`_. Crates for use in the
kernel must opt into this behavior using the ``#![no_std]`` attribute.
+.. _rust_code_documentation:
+
Code documentation
------------------
@@ -22,10 +24,17 @@ Rust kernel code is documented using ``rustdoc``, its built-in documentation
generator.
The generated HTML docs include integrated search, linked items (e.g. types,
-functions, constants), source code, etc. They may be read at (TODO: link when
-in mainline and generated alongside the rest of the documentation):
+functions, constants), source code, etc. They may be read at:
+
+ https://rust.docs.kernel.org
+
+For linux-next, please see:
+
+ https://rust.docs.kernel.org/next/
- http://kernel.org/
+There are also tags for each main release, e.g.:
+
+ https://rust.docs.kernel.org/6.10/
The docs can also be easily generated and read locally. This is quite fast
(same order as compiling the code itself) and no special tools or environment
@@ -75,7 +84,7 @@ should provide as-safe-as-possible abstractions as needed.
.. code-block::
rust/bindings/
- (rust/helpers.c)
+ (rust/helpers/)
include/ -----+ <-+
| |
@@ -112,7 +121,7 @@ output files in the ``rust/bindings/`` directory.
For parts of the C header that ``bindgen`` does not auto generate, e.g. C
``inline`` functions or non-trivial macros, it is acceptable to add a small
-wrapper function to ``rust/helpers.c`` to make it available for the Rust side as
+wrapper function to ``rust/helpers/`` to make it available for the Rust side as
well.
Abstractions
@@ -142,3 +151,11 @@ configuration:
#[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`)
#[cfg(CONFIG_X="m")] // Enabled as a module (`m`)
#[cfg(not(CONFIG_X))] // Disabled
+
+For other predicates that Rust's ``cfg`` does not support, e.g. expressions with
+numerical comparisons, one may define a new Kconfig symbol:
+
+.. code-block:: kconfig
+
+ config RUSTC_VERSION_MIN_107900
+ def_bool y if RUSTC_VERSION >= 107900
diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
index 46d35bd395cf..55dcde9e9e7e 100644
--- a/Documentation/rust/index.rst
+++ b/Documentation/rust/index.rst
@@ -25,13 +25,27 @@ support is still in development/experimental, especially for certain kernel
configurations.
+Code documentation
+------------------
+
+Given a kernel configuration, the kernel may generate Rust code documentation,
+i.e. HTML rendered by the ``rustdoc`` tool.
+
.. only:: rustdoc and html
- You can also browse `rustdoc documentation <rustdoc/kernel/index.html>`_.
+ This kernel documentation was built with `Rust code documentation
+ <rustdoc/kernel/index.html>`_.
.. only:: not rustdoc and html
- This documentation does not include rustdoc generated information.
+ This kernel documentation was not built with Rust code documentation.
+
+A pregenerated version is provided at:
+
+ https://rust.docs.kernel.org
+
+Please see the :ref:`Code documentation <rust_code_documentation>` section for
+more details.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index 8e3ad9678719..2d107982c87b 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -39,8 +39,8 @@ of the box, e.g.::
Debian
******
-Debian Unstable (Sid), outside of the freeze period, provides recent Rust
-releases and thus it should generally work out of the box, e.g.::
+Debian Testing and Debian Unstable (Sid), outside of the freeze period, provide
+recent Rust releases and thus they should generally work out of the box, e.g.::
apt install rustc rust-src bindgen rustfmt rust-clippy
diff --git a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
index f1e9ab18206c..472761938682 100644
--- a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
+++ b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
@@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC::
| Devices |
+---------+
+高级扩展IRQ模型
+===============
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接
+送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC
+统一收集,再直接到达CPUINTC::
+
+ +-----+ +-----------------------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +-----------------------+ +-------+
+ ^ ^ ^
+ | | |
+ +---------+ +----------+ +---------+ +-------+
+ | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +----------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | Devices | | PCH-LPC | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
ACPI相关的定义
==============
diff --git a/Documentation/usb/functionfs-desc.rst b/Documentation/usb/functionfs-desc.rst
new file mode 100644
index 000000000000..39649774da54
--- /dev/null
+++ b/Documentation/usb/functionfs-desc.rst
@@ -0,0 +1,39 @@
+======================
+FunctionFS Descriptors
+======================
+
+Some of the descriptors that can be written to the FFS gadget are
+described below. Device and configuration descriptors are handled
+by the composite gadget and are not written by the user to the
+FFS gadget.
+
+Descriptors are written to the "ep0" file in the FFS gadget
+following the descriptor header.
+
+.. kernel-doc:: include/uapi/linux/usb/functionfs.h
+ :doc: descriptors
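+
+For illustration only, a rough sketch of how user space might begin the blob it
+writes to "ep0" (``ep0_fd``, ``blob`` and ``total_length`` are placeholder
+names, and the per-speed descriptor counts plus the descriptors themselves must
+follow the header in the same write)::
+
+	#include <endian.h>
+	#include <linux/usb/functionfs.h>
+
+	struct usb_functionfs_descs_head_v2 header = {
+		.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
+		.flags  = htole32(FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC),
+		.length = htole32(total_length), /* header + counts + descriptors */
+	};
+
+	/* copy header, counts and descriptors into blob, then: */
+	write(ep0_fd, blob, total_length);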
+
+Interface Descriptors
+---------------------
+
+Standard USB interface descriptors may be written. The class/subclass of the
+most recent interface descriptor determines what type of class-specific
+descriptors are accepted.
+
+Class-Specific Descriptors
+--------------------------
+
+Class-specific descriptors are accepted only for the class/subclass of the
+most recent interface descriptor. The following are some of the
+class-specific descriptors that are supported.
+
+DFU Functional Descriptor
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the interface class is USB_CLASS_APP_SPEC and the interface subclass
+is USB_SUBCLASS_DFU, a DFU functional descriptor can be provided.
+The DFU functional descriptor is described in the USB specification for
+Device Firmware Upgrade (DFU), version 1.1 as of this writing.
+
+.. kernel-doc:: include/uapi/linux/usb/functionfs.h
+ :doc: usb_dfu_functional_descriptor
diff --git a/Documentation/usb/functionfs.rst b/Documentation/usb/functionfs.rst
index d05a775bc45b..f7487b0d8057 100644
--- a/Documentation/usb/functionfs.rst
+++ b/Documentation/usb/functionfs.rst
@@ -25,6 +25,8 @@ interface numbers starting from zero). The FunctionFS changes
them as needed also handling situation when numbers differ in
different configurations.
+For more information about FunctionFS descriptors, see :doc:`functionfs-desc`.
+
When descriptors and strings are written "ep#" files appear
(one for each declared endpoint) which handle communication on
a single endpoint. Again, FunctionFS takes care of the real
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index b086c7ab72f0..bf555c2270f5 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -765,6 +765,17 @@ The uac2 function provides these attributes in its function directory:
req_number the number of pre-allocated requests for both capture
and playback
function_name name of the interface
+ if_ctrl_name topology control name
+ clksrc_in_name input clock name
+ clksrc_out_name output clock name
+ p_it_name playback input terminal name
+ p_it_ch_name playback input first channel name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback function unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture input first channel name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture functional unit name
c_terminal_type code of the capture terminal type
p_terminal_type code of the playback terminal type
================ ====================================================
@@ -957,6 +968,14 @@ The uac1 function provides these attributes in its function directory:
req_number the number of pre-allocated requests for both capture
and playback
function_name name of the interface
+ p_it_name playback input terminal name
+ p_it_ch_name playback channels name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture channels name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
================ ====================================================
The attributes have sane default values.
diff --git a/Documentation/usb/index.rst b/Documentation/usb/index.rst
index 27955dad95e1..826492c813ac 100644
--- a/Documentation/usb/index.rst
+++ b/Documentation/usb/index.rst
@@ -11,6 +11,7 @@ USB support
dwc3
ehci
functionfs
+ functionfs-desc
gadget_configfs
gadget_hid
gadget_multi
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index b3be87489108..e32471977d0a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4214,7 +4214,9 @@ whether or not KVM_CAP_X86_USER_SPACE_MSR's KVM_MSR_EXIT_REASON_FILTER is
enabled. If KVM_MSR_EXIT_REASON_FILTER is enabled, KVM will exit to userspace
on denied accesses, i.e. userspace effectively intercepts the MSR access. If
KVM_MSR_EXIT_REASON_FILTER is not enabled, KVM will inject a #GP into the guest
-on denied accesses.
+on denied accesses. Note, if an MSR access is denied during emulation of MSR
+loads/stores as part of VMX transitions, KVM ignores KVM_MSR_EXIT_REASON_FILTER.
+See the warning below for full details.
If an MSR access is allowed by userspace, KVM will emulate and/or virtualize
the access in accordance with the vCPU model. Note, KVM may still ultimately
@@ -4229,9 +4231,22 @@ filtering. In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes
an error.
.. warning::
- MSR accesses as part of nested VM-Enter/VM-Exit are not filtered.
- This includes both writes to individual VMCS fields and reads/writes
- through the MSR lists pointed to by the VMCS.
+ MSR accesses that are side effects of instruction execution (emulated or
+ native) are not filtered as hardware does not honor MSR bitmaps outside of
+ RDMSR and WRMSR, and KVM mimics that behavior when emulating instructions
+ to avoid pointless divergence from hardware. E.g. RDPID reads MSR_TSC_AUX,
+ SYSENTER reads the SYSENTER MSRs, etc.
+
+ MSRs that are loaded/stored via dedicated VMCS fields are not filtered as
+ part of VM-Enter/VM-Exit emulation.
+
+ MSRs that are loaded/stored via VMX's load/store lists _are_ filtered as part
+ of VM-Enter/VM-Exit emulation. If an MSR access is denied on VM-Enter, KVM
+ synthesizes a consistency check VM-Exit (EXIT_REASON_MSR_LOAD_FAIL). If an
+ MSR access is denied on VM-Exit, KVM synthesizes a VM-Abort. In short, KVM
+ extends Intel's architectural list of MSRs that cannot be loaded/saved via
+ the VM-Enter/VM-Exit MSR list. It is the platform owner's responsibility
+ to communicate any such restrictions to their end users.
x2APIC MSR accesses cannot be filtered (KVM silently ignores filters that
cover any x2APIC MSRs).
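+
+For illustration only, a userspace VMM installs such a filter with the
+``KVM_X86_SET_MSR_FILTER`` VM ioctl; the range and bitmap below are placeholder
+values, not a recommendation::
+
+  __u8 bitmap[1] = { 0x01 };  /* bit 0 set: allow the first MSR of the range */
+
+  struct kvm_msr_filter filter = {
+      .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+      .ranges[0] = {
+          .flags  = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
+          .base   = 0x10,   /* first MSR index covered by this range */
+          .nmsrs  = 1,
+          .bitmap = bitmap,
+      },
+  };
+
+  ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);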
@@ -8082,6 +8097,14 @@ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if
guest CPUID on writes to MISC_ENABLE if
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is
disabled.
+
+KVM_X86_QUIRK_SLOT_ZAP_ALL By default, KVM invalidates all SPTEs in
+ a fast way for memslot deletion when the VM
+ type is KVM_X86_DEFAULT_VM.
+ When this quirk is disabled or when the VM
+ type is other than KVM_X86_DEFAULT_VM, KVM
+ zaps only leaf SPTEs that are within the
+ range of the memslot being deleted.
=================================== ============================================
7.32 KVM_CAP_MAX_VCPU_ID
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 02880d5552d5..20a9a37d1cdd 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -11,6 +11,8 @@ The acquisition orders for mutexes are as follows:
- cpus_read_lock() is taken outside kvm_lock
+- kvm_usage_lock is taken outside cpus_read_lock()
+
- kvm->lock is taken outside vcpu->mutex
- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
@@ -24,6 +26,13 @@ The acquisition orders for mutexes are as follows:
are taken on the waiting side when modifying memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
+cpus_read_lock() vs kvm_lock:
+
+- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
+ being the official ordering, as it is quite easy to unknowingly trigger
+ cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list,
+ e.g. avoid complex operations when possible.
+
For SRCU:
- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
@@ -227,10 +236,16 @@ time it will be set using the Dirty tracking mechanism described above.
:Type: mutex
:Arch: any
:Protects: - vm_list
- - kvm_usage_count
+
+``kvm_usage_lock``
+^^^^^^^^^^^^^^^^^^
+
+:Type: mutex
+:Arch: any
+:Protects: - kvm_usage_count
- hardware virtualization enable/disable
-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
- enable/disable.
+:Comment: Exists to allow taking cpus_read_lock() while kvm_usage_count is
+ protected, which simplifies the virtualization enabling logic.
``kvm->mn_invalidate_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -290,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
wakeup.
``vendor_module_lock``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: x86
:Protects: loading a vendor module (kvm_amd or kvm_intel)
-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
- taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
- many operations need to take cpu_hotplug_lock when loading a vendor module,
- e.g. updating static calls.
+:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
+ in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
+ cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
+ operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
+ updating static calls.
diff --git a/Documentation/virt/uml/user_mode_linux_howto_v2.rst b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
index 27942446f406..584000b743f3 100644
--- a/Documentation/virt/uml/user_mode_linux_howto_v2.rst
+++ b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
@@ -217,6 +217,8 @@ remote UML and other VM instances.
+-----------+--------+------------------------------------+------------+
| fd | vector | dependent on fd type | varies |
+-----------+--------+------------------------------------+------------+
+| vde | vector | dep. on VDE VPN: Virt.Net Locator | varies |
++-----------+--------+------------------------------------+------------+
| tuntap | legacy | none | ~ 500Mbit |
+-----------+--------+------------------------------------+------------+
| daemon | legacy | none | ~ 450Mbit |
@@ -573,6 +575,41 @@ https://github.com/NetSys/bess/wiki/Built-In-Modules-and-Ports
BESS transport does not require any special privileges.
+VDE vector transport
+--------------------
+
+Virtual Distributed Ethernet (VDE) is a project whose main goal is to provide
+highly flexible support for virtual networking.
+
+http://wiki.virtualsquare.org/#/tutorials/vdebasics
+
+Common usages of VDE include fast prototyping and teaching.
+
+Examples:
+
+ ``vecX:transport=vde,vnl=tap://tap0``
+
+use tap0
+
+ ``vecX:transport=vde,vnl=slirp://``
+
+use slirp
+
+ ``vec0:transport=vde,vnl=vde:///tmp/switch``
+
+connect to a vde switch
+
+ ``vecX:transport=\"vde,vnl=cmd://ssh remote.host //tmp/sshlirp\"``
+
+connect to a remote slirp (instant VPN: converts ssh into a VPN using sshlirp)
+https://github.com/virtualsquare/sshlirp
+
+ ``vec0:transport=vde,vnl=vxvde://234.0.0.1``
+
+connect to a local area cloud (all the UML nodes using the same
+multicast address and running on hosts in the same multicast domain (LAN)
+will be automagically connected together into a virtual LAN).
+
Configuring Legacy transports
=============================
diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.rst b/Documentation/watchdog/convert_drivers_to_kernel_api.rst
index a1c3f038ce0e..e83609a5d007 100644
--- a/Documentation/watchdog/convert_drivers_to_kernel_api.rst
+++ b/Documentation/watchdog/convert_drivers_to_kernel_api.rst
@@ -75,7 +75,6 @@ Example conversion::
-static const struct file_operations s3c2410wdt_fops = {
- .owner = THIS_MODULE,
- - .llseek = no_llseek,
- .write = s3c2410wdt_write,
- .unlocked_ioctl = s3c2410wdt_ioctl,
- .open = s3c2410wdt_open,
diff --git a/MAINTAINERS b/MAINTAINERS
index 01849d68e4ab..c27f3190737f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -449,6 +449,7 @@ S: Supported
W: https://wiki.analog.com/resources/tools-software/linux-drivers/iio-adc/ad738x
W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+F: Documentation/iio/ad7380.rst
F: drivers/iio/adc/ad7380.c
AD7877 TOUCHSCREEN DRIVER
@@ -631,6 +632,17 @@ F: drivers/iio/accel/adxl372.c
F: drivers/iio/accel/adxl372_i2c.c
F: drivers/iio/accel/adxl372_spi.c
+ADXL380 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
+M: Ramona Gradinariu <ramona.gradinariu@analog.com>
+M: Antoniu Miclaus <antoniu.miclaus@analog.com>
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
+F: drivers/iio/accel/adxl380.c
+F: drivers/iio/accel/adxl380.h
+F: drivers/iio/accel/adxl380_i2c.c
+F: drivers/iio/accel/adxl380_spi.c
+
AF8133J THREE-AXIS MAGNETOMETER DRIVER
M: Ondřej Jirman <megi@xff.cz>
S: Maintained
@@ -1242,6 +1254,8 @@ L: linux-iio@vger.kernel.org
S: Supported
W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
+F: Documentation/iio/ad4000.rst
+F: drivers/iio/adc/ad4000.c
ANALOG DEVICES INC AD4130 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
@@ -1252,6 +1266,18 @@ F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml
F: drivers/iio/adc/ad4130.c
+ANALOG DEVICES INC AD4695 DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+R: David Lechner <dlechner@baylibre.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
+F: Documentation/iio/ad4695.rst
+F: drivers/iio/adc/ad4695.c
+F: include/dt-bindings/iio/adi,ad4695.h
+
ANALOG DEVICES INC AD7091R DRIVER
M: Marcelo Schmitt <marcelo.schmitt@analog.com>
L: linux-iio@vger.kernel.org
@@ -1318,6 +1344,16 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
F: drivers/iio/adc/ad7780.c
+ANALOG DEVICES INC AD9467 DRIVER
+M: Michael Hennerich <Michael.Hennerich@analog.com>
+M: Nuno Sa <nuno.sa@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/ABI/testing/debugfs-iio-ad9467
+F: Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
+F: drivers/iio/adc/ad9467.c
+
ANALOG DEVICES INC AD9739a DRIVER
M: Nuno Sa <nuno.sa@analog.com>
M: Dragos Bogdan <dragos.bogdan@analog.com>
@@ -2236,6 +2272,7 @@ N: clps711x
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+M: Nikita Shubin <nikita.shubin@maquefel.me>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/iio/adc/cirrus,ep9301-adc.yaml
@@ -5691,8 +5728,7 @@ L: linux-cxl@vger.kernel.org
S: Maintained
F: Documentation/driver-api/cxl
F: drivers/cxl/
-F: include/linux/einj-cxl.h
-F: include/linux/cxl-event.h
+F: include/cxl/
F: include/uapi/linux/cxl_mem.h
F: tools/testing/cxl/
@@ -6557,6 +6593,12 @@ F: include/net/devlink.h
F: include/uapi/linux/devlink.h
F: net/devlink/
+DFROBOT SD2405AL RTC DRIVER
+M: Tóth János <gomba007@gmail.com>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+F: drivers/rtc/rtc-sd2405al.c
+
DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
L: kernel@dh-electronics.com
@@ -8466,6 +8508,7 @@ N: binfmt
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon@kernel.org>
M: Sungjong Seo <sj1557.seo@samsung.com>
+R: Yuezhang Mo <yuezhang.mo@sony.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat.git
@@ -10970,6 +11013,7 @@ M: Nuno Sa <nuno.sa@analog.com>
R: Olivier Moysan <olivier.moysan@foss.st.com>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/debugfs-iio-backend
F: drivers/iio/industrialio-backend.c
F: include/linux/iio/backend.h
@@ -13411,6 +13455,16 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
F: drivers/iio/dac/ltc1660.c
+LTC2664 IIO DAC DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Kim Seer Paller <kimseer.paller@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
+F: Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
+F: drivers/iio/dac/ltc2664.c
+
LTC2688 IIO DAC DRIVER
M: Nuno Sá <nuno.sa@analog.com>
L: linux-iio@vger.kernel.org
@@ -15179,6 +15233,13 @@ F: Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
F: drivers/nvmem/microchip-otpc.c
F: include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
+MICROCHIP PAC1921 POWER/CURRENT MONITOR DRIVER
+M: Matteo Martelli <matteomartelli3@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
+F: drivers/iio/adc/pac1921.c
+
MICROCHIP PAC1934 POWER/ENERGY MONITOR DRIVER
M: Marius Cristea <marius.cristea@microchip.com>
L: linux-iio@vger.kernel.org
@@ -15617,6 +15678,9 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h
MODULE SUPPORT
M: Luis Chamberlain <mcgrof@kernel.org>
+R: Petr Pavlu <petr.pavlu@suse.com>
+R: Sami Tolvanen <samitolvanen@google.com>
+R: Daniel Gomez <da.gomez@samsung.com>
L: linux-modules@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -17255,8 +17319,8 @@ M: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/oa-tc6-framework.rst
-F: drivers/include/linux/oa_tc6.h
F: drivers/net/ethernet/oa_tc6.c
+F: include/linux/oa_tc6.h
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh@kernel.org>
@@ -19283,10 +19347,7 @@ F: drivers/char/random.c
F: include/linux/random.h
F: include/uapi/linux/random.h
F: drivers/virt/vmgenid.c
-F: include/vdso/getrandom.h
-F: lib/vdso/getrandom.c
-F: arch/x86/entry/vdso/vgetrandom*
-F: arch/x86/include/asm/vdso/getrandom*
+N: ^.*/vdso/[^/]*getrandom[^/]+$
RAPIDIO SUBSYSTEM
M: Matt Porter <mporter@kernel.crashing.org>
@@ -19983,6 +20044,12 @@ S: Supported
F: drivers/power/supply/bd99954-charger.c
F: drivers/power/supply/bd99954-charger.h
+ROHM BH1745 COLOUR SENSOR
+M: Mudit Sharma <muditsharma.info@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/light/bh1745.c
+
ROHM BH1750 AMBIENT LIGHT SENSOR DRIVER
M: Tomasz Duszynski <tduszyns@gmail.com>
S: Maintained
@@ -20143,6 +20210,7 @@ R: Björn Roy Baron <bjorn3_gh@protonmail.com>
R: Benno Lossin <benno.lossin@proton.me>
R: Andreas Hindborg <a.hindborg@kernel.org>
R: Alice Ryhl <aliceryhl@google.com>
+R: Trevor Gross <tmgross@umich.edu>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
@@ -20835,6 +20903,12 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/sensirion,scd4x.yaml
F: drivers/iio/chemical/scd4x.c
+SENSIRION SDP500 DIFFERENTIAL PRESSURE SENSOR DRIVER
+M: Petar Stoykov <petar.stoykov@prodrive-technologies.com>
+S: Maintained
+F: Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
+F: drivers/iio/pressure/sdp500.c
+
SENSIRION SGP40 GAS SENSOR DRIVER
M: Andreas Klinger <ak@it-klinger.de>
S: Maintained
@@ -23597,7 +23671,8 @@ F: drivers/media/pci/tw686x/
U-BOOT ENVIRONMENT VARIABLES
M: Rafał Miłecki <rafal@milecki.pl>
S: Maintained
-F: Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
+F: Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
+F: drivers/nvmem/layouts/u-boot-env.c
F: drivers/nvmem/u-boot-env.c
UACCE ACCELERATOR FRAMEWORK
@@ -24456,6 +24531,7 @@ F: include/linux/vdpa.h
F: include/linux/virtio*.h
F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
+F: net/vmw_vsock/virtio*
F: tools/virtio/
F: tools/testing/selftests/drivers/net/virtio_net/
diff --git a/Makefile b/Makefile
index dfc7b0753e50..187a4ce2728e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 11
+PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -645,9 +645,11 @@ endif
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
-# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
-# and from include/config/auto.conf.cmd to detect the compiler upgrade.
+# CC_VERSION_TEXT and RUSTC_VERSION_TEXT are referenced from Kconfig (so they
+# need export), and from include/config/auto.conf.cmd to detect the compiler
+# upgrade.
CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1))
+RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null))
ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
include $(srctree)/scripts/Makefile.clang
@@ -668,7 +670,7 @@ ifdef config-build
# KBUILD_DEFCONFIG may point out an alternative default configuration
# used for 'make defconfig'
include $(srctree)/arch/$(SRCARCH)/Makefile
-export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT
+export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT RUSTC_VERSION_TEXT
config: outputmakefile scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts/kconfig $@
@@ -924,6 +926,7 @@ ifdef CONFIG_SHADOW_CALL_STACK
ifndef CONFIG_DYNAMIC_SCS
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
+KBUILD_RUSTFLAGS += -Zsanitizer=shadow-call-stack
endif
export CC_FLAGS_SCS
endif
@@ -948,6 +951,16 @@ endif
ifdef CONFIG_CFI_CLANG
CC_FLAGS_CFI := -fsanitize=kcfi
+ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS
+ CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
+endif
+ifdef CONFIG_RUST
+ # Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects
+ # CONFIG_CFI_ICALL_NORMALIZE_INTEGERS.
+ RUSTC_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers
+ KBUILD_RUSTFLAGS += $(RUSTC_FLAGS_CFI)
+ export RUSTC_FLAGS_CFI
+endif
KBUILD_CFLAGS += $(CC_FLAGS_CFI)
export CC_FLAGS_CFI
endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 405c85ab86f2..98157b38f5cf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -835,6 +835,22 @@ config CFI_CLANG
https://clang.llvm.org/docs/ControlFlowIntegrity.html
+config CFI_ICALL_NORMALIZE_INTEGERS
+ bool "Normalize CFI tags for integers"
+ depends on CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ help
+ This option normalizes the CFI tags for integer types so that all
+ integer types of the same size and signedness receive the same CFI
+ tag.
+
+ The option is separate from CONFIG_RUST because it affects the ABI.
+ When working with build systems that care about the ABI, it is
+ convenient to be able to turn on this flag first, before Rust is
+ turned on.
+
+ This option is necessary for using CFI with Rust. If unsure, say N.
+
config CFI_PERMISSIVE
bool "Use CFI in permissive mode"
depends on CFI_CLANG
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 91d4a4d9258c..ae1b96479d0c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -3,17 +3,232 @@
#define _ALPHA_CMPXCHG_H
/*
- * Atomic exchange routines.
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
*/
-#define ____xchg(type, args...) __arch_xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
+static inline unsigned long
+____xchg_u8(volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extbl %2,%4,%0\n"
+ " mskbl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u16(volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extwl %2,%4,%0\n"
+ " mskwl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+____xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid xchg(). */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____xchg(volatile void *ptr, unsigned long x, int size)
+{
+ return
+ size == 1 ? ____xchg_u8(ptr, x) :
+ size == 2 ? ____xchg_u16(ptr, x) :
+ size == 4 ? ____xchg_u32(ptr, x) :
+ size == 8 ? ____xchg_u64(ptr, x) :
+ (__xchg_called_with_bad_pointer(), x);
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
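+
+/*
+ * Illustrative use only (not part of this rework): callers normally go
+ * through the arch_cmpxchg() wrapper defined below (reached via the
+ * generic cmpxchg()), e.g. to claim a flag word only if it is still zero:
+ *
+ *	if (cmpxchg(&flag, 0, 1) == 0)
+ *		caller_now_owns_the_flag();
+ */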
+
+static inline unsigned long
+____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extbl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskbl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extwl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskwl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u32(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ return
+ size == 1 ? ____cmpxchg_u8(ptr, old, new) :
+ size == 2 ? ____cmpxchg_u16(ptr, old, new) :
+ size == 4 ? ____cmpxchg_u32(ptr, old, new) :
+ size == 8 ? ____cmpxchg_u64(ptr, old, new) :
+ (__cmpxchg_called_with_bad_pointer(), old);
+}
#define xchg_local(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
+ (__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_, \
sizeof(*(ptr))); \
})
@@ -21,7 +236,7 @@
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, \
sizeof(*(ptr))); \
})
@@ -32,12 +247,6 @@
cmpxchg_local((ptr), (o), (n)); \
})
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __arch_xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
/*
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
@@ -48,7 +257,7 @@
__typeof__(*(ptr)) _x_ = (x); \
smp_mb(); \
__ret = (__typeof__(*(ptr))) \
- __arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ ____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
smp_mb(); \
__ret; \
})
@@ -59,7 +268,7 @@
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
smp_mb(); \
- __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ __ret = (__typeof__(*(ptr))) ____cmpxchg((ptr), \
(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
smp_mb(); \
__ret; \
@@ -71,6 +280,4 @@
arch_cmpxchg((ptr), (o), (n)); \
})
-#undef ____cmpxchg
-
#endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
deleted file mode 100644
index 7adb80c6746a..000000000000
--- a/arch/alpha/include/asm/xchg.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_CMPXCHG_H
-#error Do not include xchg.h directly!
-#else
-/*
- * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
- * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/cmpxchg.h.
- */
-
-/*
- * Atomic exchange.
- * Since it can be used to implement critical sections
- * it must clobber "memory" (also for interrupts in UP).
- */
-
-static inline unsigned long
-____xchg(_u8, volatile char *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " insbl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extbl %2,%4,%0\n"
- " mskbl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u16, volatile short *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " inswl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extwl %2,%4,%0\n"
- " mskwl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u32, volatile int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stl_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-static inline unsigned long
-____xchg(_u64, volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stq_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____xchg(, volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return ____xchg(_u8, ptr, x);
- case 2:
- return ____xchg(_u16, ptr, x);
- case 4:
- return ____xchg(_u32, ptr, x);
- case 8:
- return ____xchg(_u64, ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-static inline unsigned long
-____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " insbl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extbl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskbl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " inswl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extwl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskwl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u32, volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- switch (size) {
- case 1:
- return ____cmpxchg(_u8, ptr, old, new);
- case 2:
- return ____cmpxchg(_u16, ptr, old, new);
- case 4:
- return ____cmpxchg(_u32, ptr, old, new);
- case 8:
- return ____cmpxchg(_u64, ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 71afdd98ddf2..aafebf145738 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -183,7 +183,6 @@ machine-$(CONFIG_ARCH_CLPS711X) += clps711x
machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
machine-$(CONFIG_ARCH_DOVE) += dove
-machine-$(CONFIG_ARCH_EP93XX) += ep93xx
machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge
machine-$(CONFIG_ARCH_GEMINI) += gemini
diff --git a/arch/arm/boot/dts/cirrus/Makefile b/arch/arm/boot/dts/cirrus/Makefile
index e944d3e2129d..e6015983e464 100644
--- a/arch/arm/boot/dts/cirrus/Makefile
+++ b/arch/arm/boot/dts/cirrus/Makefile
@@ -3,3 +3,7 @@ dtb-$(CONFIG_ARCH_CLPS711X) += \
ep7211-edb7211.dtb
dtb-$(CONFIG_ARCH_CLPS711X) += \
ep7211-edb7211.dtb
+dtb-$(CONFIG_ARCH_EP93XX) += \
+ ep93xx-edb9302.dtb \
+ ep93xx-bk3.dtb \
+ ep93xx-ts7250.dtb
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts
new file mode 100644
index 000000000000..40bc9b2a6ba8
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Liebherr controller BK3.1 based on Cirrus EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ model = "Liebherr controller BK3.1";
+ compatible = "liebherr,bk3", "cirrus,ep9301";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x00000000 0x02000000>,
+ <0x000530c0 0x01fdd000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&ebi {
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "System";
+ reg = <0x00000000 0x01e00000>;
+ read-only;
+ };
+
+ partition@1e00000 {
+ label = "Data";
+ reg = <0x01e00000 0x05f20000>;
+ };
+
+ partition@7d20000 {
+ label = "RedBoot";
+ reg = <0x07d20000 0x002e0000>;
+ read-only;
+ };
+ };
+ };
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&i2s {
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_on_ac97_pins>;
+ status = "okay";
+};
+
+&gpio1 {
+ /* PWM */
+ gpio-ranges = <&syscon 6 163 1>;
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+ status = "okay";
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+ status = "okay";
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+ status = "okay";
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts
new file mode 100644
index 000000000000..312b2be1c638
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/*
+ * Device Tree file for Cirrus Logic EDB9302 board based on EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cirrus,edb9302", "cirrus,ep9301";
+ model = "cirrus,edb9302";
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x0000000 0x800000>,
+ <0x1000000 0x800000>,
+ <0x4000000 0x800000>,
+ <0x5000000 0x800000>;
+ };
+
+ sound {
+ compatible = "audio-graph-card2";
+ label = "EDB93XX";
+ links = <&i2s_port>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&adc {
+ status = "okay";
+};
+
+&ebi {
+ flash@60000000 {
+ compatible = "cfi-flash";
+ reg = <0x60000000 0x1000000>;
+ bank-width = <2>;
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&gpio0 {
+ gpio-ranges = <&syscon 0 153 1>,
+ <&syscon 1 152 1>,
+ <&syscon 2 151 1>,
+ <&syscon 3 148 1>,
+ <&syscon 4 147 1>,
+ <&syscon 5 146 1>,
+ <&syscon 6 145 1>,
+ <&syscon 7 144 1>;
+};
+
+&gpio1 {
+ gpio-ranges = <&syscon 0 143 1>,
+ <&syscon 1 142 1>,
+ <&syscon 2 141 1>,
+ <&syscon 3 140 1>,
+ <&syscon 4 165 1>,
+ <&syscon 5 164 1>,
+ <&syscon 6 163 1>,
+ <&syscon 7 160 1>;
+};
+
+&gpio2 {
+ gpio-ranges = <&syscon 0 115 1>;
+};
+
+/* edb9302 doesn't have GPIO Port D present */
+&gpio3 {
+ status = "disabled";
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+};
+
+&gpio5 {
+ gpio-ranges = <&syscon 1 170 1>,
+ <&syscon 2 169 1>,
+ <&syscon 3 168 1>;
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+};
+
+&i2s {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_on_ac97_pins>;
+ status = "okay";
+ i2s_port: port {
+ i2s_ep: endpoint {
+ system-clock-direction-out;
+ frame-master;
+ bitclock-master;
+ mclk-fs = <256>;
+ dai-format = "i2s";
+ convert-channels = <2>;
+ convert-sample-format = "s32_le";
+ remote-endpoint = <&codec_ep>;
+ };
+ };
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&spi0 {
+ cs-gpios = <&gpio0 6 GPIO_ACTIVE_LOW
+ &gpio0 7 GPIO_ACTIVE_LOW>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ status = "okay";
+
+ cs4271: codec@0 {
+ compatible = "cirrus,cs4271";
+ reg = <0>;
+ #sound-dai-cells = <0>;
+ spi-max-frequency = <6000000>;
+ spi-cpol;
+ spi-cpha;
+ reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ port {
+ codec_ep: endpoint {
+ remote-endpoint = <&i2s_ep>;
+ };
+ };
+ };
+
+ at25f1024: eeprom@1 {
+ compatible = "atmel,at25";
+ reg = <1>;
+ address-width = <8>;
+ size = <0x20000>;
+ pagesize = <256>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts
new file mode 100644
index 000000000000..9e03f93d9fc8
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Technologic Systems ts7250 board based on Cirrus EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ compatible = "technologic,ts7250", "cirrus,ep9301";
+ model = "TS-7250 SBC";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x00000000 0x02000000>,
+ <0x000530c0 0x01fdd000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&ebi {
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "TS-BOOTROM";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+
+ partition@20000 {
+ label = "Linux";
+ reg = <0x00020000 0x07d00000>;
+ };
+
+ partition@7d20000 {
+ label = "RedBoot";
+ reg = <0x07d20000 0x002e0000>;
+ read-only;
+ };
+ };
+ };
+ };
+
+ rtc@10800000 {
+ compatible = "st,m48t86";
+ reg = <0x10800000 0x1>,
+ <0x11700000 0x1>;
+ };
+
+ watchdog@23800000 {
+ compatible = "technologic,ts7200-wdt";
+ reg = <0x23800000 0x01>,
+ <0x23c00000 0x01>;
+ timeout-sec = <30>;
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&gpio1 {
+ /* PWM */
+ gpio-ranges = <&syscon 6 163 1>;
+};
+
+/* ts7250 doesn't have GPIO Port D present */
+&gpio3 {
+ status = "disabled";
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+};
+
+&spi0 {
+ cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ status = "okay";
+
+ tmp122: temperature-sensor@0 {
+ compatible = "ti,tmp122";
+ reg = <0>;
+ spi-max-frequency = <2000000>;
+ };
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/cirrus/ep93xx.dtsi b/arch/arm/boot/dts/cirrus/ep93xx.dtsi
new file mode 100644
index 000000000000..0dd1eee346ca
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx.dtsi
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Cirrus Logic systems EP93XX SoC
+ */
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+/ {
+ soc: soc {
+ compatible = "simple-bus";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ syscon: syscon@80930000 {
+ compatible = "cirrus,ep9301-syscon", "syscon";
+ reg = <0x80930000 0x1000>;
+
+ #clock-cells = <1>;
+ clocks = <&xtali>;
+
+ spi_default_pins: pins-spi {
+ function = "spi";
+ groups = "ssp";
+ };
+
+ ac97_default_pins: pins-ac97 {
+ function = "ac97";
+ groups = "ac97";
+ };
+
+ i2s_on_ssp_pins: pins-i2sonssp {
+ function = "i2s";
+ groups = "i2s_on_ssp";
+ };
+
+ i2s_on_ac97_pins: pins-i2sonac97 {
+ function = "i2s";
+ groups = "i2s_on_ac97";
+ };
+
+ gpio1_default_pins: pins-gpio1 {
+ function = "gpio";
+ groups = "gpio1agrp";
+ };
+
+ pwm1_default_pins: pins-pwm1 {
+ function = "pwm";
+ groups = "pwm1";
+ };
+
+ gpio2_default_pins: pins-gpio2 {
+ function = "gpio";
+ groups = "gpio2agrp";
+ };
+
+ gpio3_default_pins: pins-gpio3 {
+ function = "gpio";
+ groups = "gpio3agrp";
+ };
+
+ keypad_default_pins: pins-keypad {
+ function = "keypad";
+ groups = "keypadgrp";
+ };
+
+ gpio4_default_pins: pins-gpio4 {
+ function = "gpio";
+ groups = "gpio4agrp";
+ };
+
+ gpio6_default_pins: pins-gpio6 {
+ function = "gpio";
+ groups = "gpio6agrp";
+ };
+
+ gpio7_default_pins: pins-gpio7 {
+ function = "gpio";
+ groups = "gpio7agrp";
+ };
+
+ ide_default_pins: pins-ide {
+ function = "pata";
+ groups = "idegrp";
+ };
+
+ lcd_on_dram0_pins: pins-rasteronsdram0 {
+ function = "lcd";
+ groups = "rasteronsdram0grp";
+ };
+
+ lcd_on_dram3_pins: pins-rasteronsdram3 {
+ function = "lcd";
+ groups = "rasteronsdram3grp";
+ };
+ };
+
+ adc: adc@80900000 {
+ compatible = "cirrus,ep9301-adc";
+ reg = <0x80900000 0x28>;
+ clocks = <&syscon EP93XX_CLK_ADC>;
+ interrupt-parent = <&vic0>;
+ interrupts = <30>;
+ status = "disabled";
+ };
+
+ /*
+		 * The EP93XX expansion bus is a set of up to 7 windows, each up
+		 * to 16MB, in the 256MB space from 0x50000000 to 0x5fffffff.
+		 * But since we don't need to set it up in any way, we can
+		 * represent it as a simple-bus.
+ */
+ ebi: bus@80080000 {
+ compatible = "simple-bus";
+ reg = <0x80080000 0x20>;
+ native-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+
+ dma0: dma-controller@80000000 {
+ compatible = "cirrus,ep9301-dma-m2p";
+ reg = <0x80000000 0x0040>,
+ <0x80000040 0x0040>,
+ <0x80000080 0x0040>,
+ <0x800000c0 0x0040>,
+ <0x80000240 0x0040>,
+ <0x80000200 0x0040>,
+ <0x800002c0 0x0040>,
+ <0x80000280 0x0040>,
+ <0x80000340 0x0040>,
+ <0x80000300 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2P0>,
+ <&syscon EP93XX_CLK_M2P1>,
+ <&syscon EP93XX_CLK_M2P2>,
+ <&syscon EP93XX_CLK_M2P3>,
+ <&syscon EP93XX_CLK_M2P4>,
+ <&syscon EP93XX_CLK_M2P5>,
+ <&syscon EP93XX_CLK_M2P6>,
+ <&syscon EP93XX_CLK_M2P7>,
+ <&syscon EP93XX_CLK_M2P8>,
+ <&syscon EP93XX_CLK_M2P9>;
+ clock-names = "m2p0", "m2p1",
+ "m2p2", "m2p3",
+ "m2p4", "m2p5",
+ "m2p6", "m2p7",
+ "m2p8", "m2p9";
+ interrupt-parent = <&vic0>;
+ interrupts = <7>, <8>, <9>, <10>, <11>,
+ <12>, <13>, <14>, <15>, <16>;
+ #dma-cells = <2>;
+ };
+
+ dma1: dma-controller@80000100 {
+ compatible = "cirrus,ep9301-dma-m2m";
+ reg = <0x80000100 0x0040>,
+ <0x80000140 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2M0>,
+ <&syscon EP93XX_CLK_M2M1>;
+ clock-names = "m2m0", "m2m1";
+ interrupt-parent = <&vic0>;
+ interrupts = <17>, <18>;
+ #dma-cells = <2>;
+ };
+
+ eth0: ethernet@80010000 {
+ compatible = "cirrus,ep9301-eth";
+ reg = <0x80010000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <7>;
+ mdio0: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ gpio0: gpio@80840000 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840000 0x04>,
+ <0x80840010 0x04>,
+ <0x80840090 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
+ };
+
+ gpio1: gpio@80840004 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840004 0x04>,
+ <0x80840014 0x04>,
+ <0x808400ac 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
+ };
+
+ gpio2: gpio@80840008 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840008 0x04>,
+ <0x80840018 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio2_default_pins>;
+ };
+
+ gpio3: gpio@8084000c {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x8084000c 0x04>,
+ <0x8084001c 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio3_default_pins>;
+ };
+
+ gpio4: gpio@80840020 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840020 0x04>,
+ <0x80840024 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio4_default_pins>;
+ };
+
+ gpio5: gpio@80840030 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840030 0x04>,
+ <0x80840034 0x04>,
+ <0x8084004c 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended = <&vic0 19>, <&vic0 20>,
+ <&vic0 21>, <&vic0 22>,
+ <&vic1 15>, <&vic1 16>,
+ <&vic1 17>, <&vic1 18>;
+ };
+
+ gpio6: gpio@80840038 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840038 0x04>,
+ <0x8084003c 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio6_default_pins>;
+ };
+
+ gpio7: gpio@80840040 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840040 0x04>,
+ <0x80840044 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio7_default_pins>;
+ };
+
+ i2s: i2s@80820000 {
+ compatible = "cirrus,ep9301-i2s";
+ reg = <0x80820000 0x100>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&vic1>;
+ interrupts = <28>;
+ clocks = <&syscon EP93XX_CLK_I2S_MCLK>,
+ <&syscon EP93XX_CLK_I2S_SCLK>,
+ <&syscon EP93XX_CLK_I2S_LRCLK>;
+ clock-names = "mclk", "sclk", "lrclk";
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ ide: ide@800a0000 {
+ compatible = "cirrus,ep9312-pata";
+ reg = <0x800a0000 0x38>;
+ interrupt-parent = <&vic1>;
+ interrupts = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ide_default_pins>;
+ status = "disabled";
+ };
+
+ vic0: interrupt-controller@800b0000 {
+ compatible = "arm,pl192-vic";
+ reg = <0x800b0000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-mask = <0x7ffffffc>;
+ valid-wakeup-mask = <0x0>;
+ };
+
+ vic1: interrupt-controller@800c0000 {
+ compatible = "arm,pl192-vic";
+ reg = <0x800c0000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-mask = <0x1fffffff>;
+ valid-wakeup-mask = <0x0>;
+ };
+
+ keypad: keypad@800f0000 {
+ compatible = "cirrus,ep9307-keypad";
+ reg = <0x800f0000 0x0c>;
+ interrupt-parent = <&vic0>;
+ interrupts = <29>;
+ clocks = <&syscon EP93XX_CLK_KEYPAD>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&keypad_default_pins>;
+ linux,keymap = <KEY_UP>,
+ <KEY_DOWN>,
+ <KEY_VOLUMEDOWN>,
+ <KEY_HOME>,
+ <KEY_RIGHT>,
+ <KEY_LEFT>,
+ <KEY_ENTER>,
+ <KEY_VOLUMEUP>,
+ <KEY_F6>,
+ <KEY_F8>,
+ <KEY_F9>,
+ <KEY_F10>,
+ <KEY_F1>,
+ <KEY_F2>,
+ <KEY_F3>,
+ <KEY_POWER>;
+ };
+
+ pwm0: pwm@80910000 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910000 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@80910020 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910020 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1_default_pins>;
+ status = "disabled";
+ };
+
+ rtc0: rtc@80920000 {
+ compatible = "cirrus,ep9301-rtc";
+ reg = <0x80920000 0x100>;
+ };
+
+ spi0: spi@808a0000 {
+ compatible = "cirrus,ep9301-spi";
+ reg = <0x808a0000 0x18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&vic1>;
+ interrupts = <21>;
+ clocks = <&syscon EP93XX_CLK_SPI>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_default_pins>;
+ status = "disabled";
+ };
+
+ timer: timer@80810000 {
+ compatible = "cirrus,ep9301-timer";
+ reg = <0x80810000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <19>;
+ };
+
+ uart0: serial@808c0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x808c0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART1>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "uartclk", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <20>;
+ status = "disabled";
+ };
+
+ uart1: uart@808d0000 {
+ compatible = "arm,primecell";
+ reg = <0x808d0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART2>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "apb:uart2", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <22>;
+ status = "disabled";
+ };
+
+ uart2: uart@808b0000 {
+ compatible = "arm,primecell";
+ reg = <0x808b0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART3>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "apb:uart3", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <23>;
+ status = "disabled";
+ };
+
+ usb0: usb@80020000 {
+ compatible = "generic-ohci";
+ reg = <0x80020000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <24>;
+ clocks = <&syscon EP93XX_CLK_USB>;
+ status = "disabled";
+ };
+
+ watchdog0: watchdog@80940000 {
+ compatible = "cirrus,ep9301-wdt";
+ reg = <0x80940000 0x08>;
+ };
+ };
+
+ xtali: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <14745600>;
+ clock-output-names = "xtali";
+ };
+};
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index 703f3d232a60..812b71dcf60e 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -3,27 +3,27 @@ menuconfig ARCH_EP93XX
bool "EP93xx-based"
depends on ATAGS
depends on ARCH_MULTI_V4T
+ # CONFIG_ARCH_MULTI_V7 is not set
depends on CPU_LITTLE_ENDIAN
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_SPARSEMEM_ENABLE
select ARM_AMBA
select ARM_VIC
+ select ARM_APPENDED_DTB # Old Redboot bootloaders deployed
+ select ARM_ATAG_DTB_COMPAT # we need this to update dt memory node
+ select COMMON_CLK_EP93XX
+ select EP93XX_TIMER
select CLKSRC_MMIO
select CPU_ARM920T
select GPIOLIB
+ select PINCTRL
+ select PINCTRL_EP93XX
help
This enables support for the Cirrus EP93xx series of CPUs.
if ARCH_EP93XX
-menu "Cirrus EP93xx Implementation Options"
-
-config EP93XX_SOC_COMMON
- bool
- default y
- select SOC_BUS
- select LEDS_GPIO_REGISTER
-
-comment "EP93xx Platforms"
+# menu "EP93xx Platforms"
config MACH_BK3
bool "Support Liebherr BK3.1"
@@ -103,6 +103,6 @@ config MACH_VISION_EP9307
Say 'Y' here if you want your kernel to support the
Vision Engraving Systems EP9307 SoM.
-endmenu
+# endmenu
endif
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
deleted file mode 100644
index 62e37403df14..000000000000
--- a/arch/arm/mach-ep93xx/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux kernel.
-#
-obj-y := core.o clock.o timer-ep93xx.o
-
-obj-$(CONFIG_EP93XX_DMA) += dma.o
-
-obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
-obj-$(CONFIG_MACH_TS72XX) += ts72xx.o
-obj-$(CONFIG_MACH_VISION_EP9307)+= vision_ep9307.o
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
deleted file mode 100644
index 85a496ddc619..000000000000
--- a/arch/arm/mach-ep93xx/clock.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/clock.c
- * Clock control for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/soc/cirrus/ep93xx.h>
-
-#include "hardware.h"
-
-#include <asm/div64.h>
-
-#include "soc.h"
-
-static DEFINE_SPINLOCK(clk_lock);
-
-static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
-static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
-static char pclk_divisors[] = { 1, 2, 4, 8 };
-
-static char adc_divisors[] = { 16, 4 };
-static char sclk_divisors[] = { 2, 4 };
-static char lrclk_divisors[] = { 32, 64, 128 };
-
-static const char * const mux_parents[] = {
- "xtali",
- "pll1",
- "pll2"
-};
-
-/*
- * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
- */
-static unsigned long calc_pll_rate(unsigned long long rate, u32 config_word)
-{
- int i;
-
- rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
- rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
- do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
- for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */
- rate >>= 1;
-
- return (unsigned long)rate;
-}
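
Editorial note: the formula in the comment above maps one-to-one onto the bit fields that calc_pll_rate() extracts. The following standalone sketch is an illustration only, not part of this patch, and the CLKSET register word in it is invented; it just replays the same decoding outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* Same field layout as calc_pll_rate() above; all values illustrative only. */
static unsigned long pll_rate(uint64_t xtali_hz, uint32_t clkset)
{
	unsigned int x1fbd = (clkset >> 11) & 0x1f;  /* bits 15:11 */
	unsigned int x2fbd = (clkset >> 5) & 0x3f;   /* bits 10:5  */
	unsigned int x2ipd = clkset & 0x1f;          /* bits 4:0   */
	unsigned int ps    = (clkset >> 16) & 0x3;   /* bits 17:16 */
	uint64_t rate = xtali_hz;

	rate *= x1fbd + 1;
	rate *= x2fbd + 1;
	rate /= x2ipd + 1;
	rate >>= ps;

	return (unsigned long)rate;
}

int main(void)
{
	/* made-up CLKSET word: X1FBD=9, X2FBD=10, X2IPD=1, PS=1 */
	uint32_t clkset = (1u << 16) | (9u << 11) | (10u << 5) | 1u;

	printf("PLL rate: %lu Hz\n", pll_rate(14745600ULL, clkset));
	return 0;
}
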
-
-struct clk_psc {
- struct clk_hw hw;
- void __iomem *reg;
- u8 bit_idx;
- u32 mask;
- u8 shift;
- u8 width;
- char *div;
- u8 num_div;
- spinlock_t *lock;
-};
-
-#define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw)
-
-static int ep93xx_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = readl(psc->reg);
-
- return (val & BIT(psc->bit_idx)) ? 1 : 0;
-}
-
-static int ep93xx_clk_enable(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val |= BIT(psc->bit_idx);
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-
- return 0;
-}
-
-static void ep93xx_clk_disable(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val &= ~BIT(psc->bit_idx);
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-}
-
-static const struct clk_ops clk_ep93xx_gate_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
-};
-
-static struct clk_hw *ep93xx_clk_register_gate(const char *name,
- const char *parent_name,
- void __iomem *reg,
- u8 bit_idx)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &clk_ep93xx_gate_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
- psc->reg = reg;
- psc->bit_idx = bit_idx;
- psc->hw.init = &init;
- psc->lock = &clk_lock;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
-
- return &psc->hw;
-}
-
-static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg);
-
- if (!(val & EP93XX_SYSCON_CLKDIV_ESEL))
- return 0;
-
- if (!(val & EP93XX_SYSCON_CLKDIV_PSEL))
- return 1;
-
- return 2;
-}
-
-static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (index >= ARRAY_SIZE(mux_parents))
- return -EINVAL;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val &= ~(EP93XX_SYSCON_CLKDIV_ESEL | EP93XX_SYSCON_CLKDIV_PSEL);
-
-
- if (index != 0) {
- val |= EP93XX_SYSCON_CLKDIV_ESEL;
- val |= (index - 1) ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
- }
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-
- return 0;
-}
-
-static bool is_best(unsigned long rate, unsigned long now,
- unsigned long best)
-{
- return abs(rate - now) < abs(rate - best);
-}
-
-static int ep93xx_mux_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long rate = req->rate;
- struct clk *best_parent = NULL;
- unsigned long __parent_rate;
- unsigned long best_rate = 0, actual_rate, mclk_rate;
- unsigned long best_parent_rate;
- int __div = 0, __pdiv = 0;
- int i;
-
- /*
- * Try the two pll's and the external clock
- * Because the valid predividers are 2, 2.5 and 3, we multiply
- * all the clocks by 2 to avoid floating point math.
- *
- * This is based on the algorithm in the ep93xx raster guide:
- * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
- *
- */
- for (i = 0; i < ARRAY_SIZE(mux_parents); i++) {
- struct clk *parent = clk_get_sys(mux_parents[i], NULL);
-
- __parent_rate = clk_get_rate(parent);
- mclk_rate = __parent_rate * 2;
-
- /* Try each predivider value */
- for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
- __div = mclk_rate / (rate * __pdiv);
- if (__div < 2 || __div > 127)
- continue;
-
- actual_rate = mclk_rate / (__pdiv * __div);
- if (is_best(rate, actual_rate, best_rate)) {
- best_rate = actual_rate;
- best_parent_rate = __parent_rate;
- best_parent = parent;
- }
- }
- }
-
- if (!best_parent)
- return -EINVAL;
-
- req->best_parent_rate = best_parent_rate;
- req->best_parent_hw = __clk_get_hw(best_parent);
- req->rate = best_rate;
-
- return 0;
-}
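
Editorial note: the comment inside ep93xx_mux_determine_rate() above explains the integer-only trick used throughout this file: the valid predividers are 2, 2.5 and 3, so every rate is doubled and __pdiv walks 4..6; the same search reappears in ep93xx_ddiv_set_rate() further down. The sketch below is an editorial illustration with made-up rates, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long parent_rate = 14745600;   /* illustrative parent (xtali) */
	unsigned long target      = 1843200;    /* made-up target rate */
	unsigned long mclk = parent_rate * 2;   /* doubled so 2.5 stays integral */
	unsigned long best = 0, now;
	int pdiv, div, best_pdiv = 0, best_div = 0;

	for (pdiv = 4; pdiv <= 6; pdiv++) {     /* doubled predividers 2, 2.5, 3 */
		div = mclk / (target * pdiv);
		if (div < 2 || div > 127)       /* 7-bit divider field */
			continue;

		now = mclk / (pdiv * div);
		if (labs((long)(target - now)) < labs((long)(target - best))) {
			best = now;
			best_pdiv = pdiv;
			best_div = div;
		}
	}

	printf("closest rate %lu Hz with predivider %d/2 and divider %d\n",
	       best, best_pdiv, best_div);
	return 0;
}
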
-
-static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long rate = 0;
- u32 val = __raw_readl(psc->reg);
- int __pdiv = ((val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & 0x03);
- int __div = val & 0x7f;
-
- if (__div > 0)
- rate = (parent_rate * 2) / ((__pdiv + 3) * __div);
-
- return rate;
-}
-
-static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- int pdiv = 0, div = 0;
- unsigned long best_rate = 0, actual_rate, mclk_rate;
- int __div = 0, __pdiv = 0;
- u32 val;
-
- mclk_rate = parent_rate * 2;
-
- for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
- __div = mclk_rate / (rate * __pdiv);
- if (__div < 2 || __div > 127)
- continue;
-
- actual_rate = mclk_rate / (__pdiv * __div);
- if (is_best(rate, actual_rate, best_rate)) {
- pdiv = __pdiv - 3;
- div = __div;
- best_rate = actual_rate;
- }
- }
-
- if (!best_rate)
- return -EINVAL;
-
- val = __raw_readl(psc->reg);
-
- /* Clear old dividers */
- val &= ~0x37f;
-
- /* Set the new pdiv and div bits for the new clock rate */
- val |= (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div;
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- return 0;
-}
-
-static const struct clk_ops clk_ddiv_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
- .get_parent = ep93xx_mux_get_parent,
- .set_parent = ep93xx_mux_set_parent_lock,
- .determine_rate = ep93xx_mux_determine_rate,
- .recalc_rate = ep93xx_ddiv_recalc_rate,
- .set_rate = ep93xx_ddiv_set_rate,
-};
-
-static struct clk_hw *clk_hw_register_ddiv(const char *name,
- void __iomem *reg,
- u8 bit_idx)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &clk_ddiv_ops;
- init.flags = 0;
- init.parent_names = mux_parents;
- init.num_parents = ARRAY_SIZE(mux_parents);
-
- psc->reg = reg;
- psc->bit_idx = bit_idx;
- psc->lock = &clk_lock;
- psc->hw.init = &init;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
- return &psc->hw;
-}
-
-static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg);
- u8 index = (val & psc->mask) >> psc->shift;
-
- if (index > psc->num_div)
- return 0;
-
- return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);
-}
-
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long best = 0, now, maxdiv;
- int i;
-
- maxdiv = psc->div[psc->num_div - 1];
-
- for (i = 0; i < psc->num_div; i++) {
- if ((rate * psc->div[i]) == *parent_rate)
- return DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
-
- now = DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
-
- if (is_best(rate, now, best))
- best = now;
- }
-
- if (!best)
- best = DIV_ROUND_UP_ULL(*parent_rate, maxdiv);
-
- return best;
-}
-
-static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg) & ~psc->mask;
- int i;
-
- for (i = 0; i < psc->num_div; i++)
- if (rate == parent_rate / psc->div[i]) {
- val |= i << psc->shift;
- break;
- }
-
- if (i == psc->num_div)
- return -EINVAL;
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- return 0;
-}
-
-static const struct clk_ops ep93xx_div_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
- .recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
- .set_rate = ep93xx_div_set_rate,
-};
-
-static struct clk_hw *clk_hw_register_div(const char *name,
- const char *parent_name,
- void __iomem *reg,
- u8 enable_bit,
- u8 shift,
- u8 width,
- char *clk_divisors,
- u8 num_div)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &ep93xx_div_ops;
- init.flags = 0;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = 1;
-
- psc->reg = reg;
- psc->bit_idx = enable_bit;
- psc->mask = GENMASK(shift + width - 1, shift);
- psc->shift = shift;
- psc->div = clk_divisors;
- psc->num_div = num_div;
- psc->lock = &clk_lock;
- psc->hw.init = &init;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
- return &psc->hw;
-}
-
-struct ep93xx_gate {
- unsigned int bit;
- const char *dev_id;
- const char *con_id;
-};
-
-static struct ep93xx_gate ep93xx_uarts[] = {
- {EP93XX_SYSCON_DEVCFG_U1EN, "apb:uart1", NULL},
- {EP93XX_SYSCON_DEVCFG_U2EN, "apb:uart2", NULL},
- {EP93XX_SYSCON_DEVCFG_U3EN, "apb:uart3", NULL},
-};
-
-static void __init ep93xx_uart_clock_init(void)
-{
- unsigned int i;
- struct clk_hw *hw;
- u32 value;
- unsigned int clk_uart_div;
-
- value = __raw_readl(EP93XX_SYSCON_PWRCNT);
- if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
- clk_uart_div = 1;
- else
- clk_uart_div = 2;
-
- hw = clk_hw_register_fixed_factor(NULL, "uart", "xtali", 0, 1, clk_uart_div);
-
- /* parenting uart gate clocks to uart clock */
- for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
- hw = ep93xx_clk_register_gate(ep93xx_uarts[i].dev_id,
- "uart",
- EP93XX_SYSCON_DEVCFG,
- ep93xx_uarts[i].bit);
-
- clk_hw_register_clkdev(hw, NULL, ep93xx_uarts[i].dev_id);
- }
-}
-
-static struct ep93xx_gate ep93xx_dmas[] = {
- {EP93XX_SYSCON_PWRCNT_DMA_M2P0, NULL, "m2p0"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P1, NULL, "m2p1"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P2, NULL, "m2p2"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P3, NULL, "m2p3"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P4, NULL, "m2p4"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P5, NULL, "m2p5"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P6, NULL, "m2p6"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P7, NULL, "m2p7"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P8, NULL, "m2p8"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P9, NULL, "m2p9"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2M0, NULL, "m2m0"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2M1, NULL, "m2m1"},
-};
-
-static void __init ep93xx_dma_clock_init(void)
-{
- unsigned int i;
- struct clk_hw *hw;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
- hw = clk_hw_register_gate(NULL, ep93xx_dmas[i].con_id,
- "hclk", 0,
- EP93XX_SYSCON_PWRCNT,
- ep93xx_dmas[i].bit,
- 0,
- &clk_lock);
-
- ret = clk_hw_register_clkdev(hw, ep93xx_dmas[i].con_id, NULL);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, ep93xx_dmas[i].con_id);
- }
-}
-
-static int __init ep93xx_clock_init(void)
-{
- u32 value;
- struct clk_hw *hw;
- unsigned long clk_pll1_rate;
- unsigned long clk_f_rate;
- unsigned long clk_h_rate;
- unsigned long clk_p_rate;
- unsigned long clk_pll2_rate;
- unsigned int clk_f_div;
- unsigned int clk_h_div;
- unsigned int clk_p_div;
- unsigned int clk_usb_div;
- unsigned long clk_spi_div;
-
- hw = clk_hw_register_fixed_rate(NULL, "xtali", NULL, 0, EP93XX_EXT_CLK_RATE);
- clk_hw_register_clkdev(hw, NULL, "xtali");
-
- /* Determine the bootloader configured pll1 rate */
- value = __raw_readl(EP93XX_SYSCON_CLKSET1);
- if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1))
- clk_pll1_rate = EP93XX_EXT_CLK_RATE;
- else
- clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
-
- hw = clk_hw_register_fixed_rate(NULL, "pll1", "xtali", 0, clk_pll1_rate);
- clk_hw_register_clkdev(hw, NULL, "pll1");
-
- /* Initialize the pll1 derived clocks */
- clk_f_div = fclk_divisors[(value >> 25) & 0x7];
- clk_h_div = hclk_divisors[(value >> 20) & 0x7];
- clk_p_div = pclk_divisors[(value >> 18) & 0x3];
-
- hw = clk_hw_register_fixed_factor(NULL, "fclk", "pll1", 0, 1, clk_f_div);
- clk_f_rate = clk_get_rate(hw->clk);
- hw = clk_hw_register_fixed_factor(NULL, "hclk", "pll1", 0, 1, clk_h_div);
- clk_h_rate = clk_get_rate(hw->clk);
- hw = clk_hw_register_fixed_factor(NULL, "pclk", "hclk", 0, 1, clk_p_div);
- clk_p_rate = clk_get_rate(hw->clk);
-
- clk_hw_register_clkdev(hw, "apb_pclk", NULL);
-
- ep93xx_dma_clock_init();
-
- /* Determine the bootloader configured pll2 rate */
- value = __raw_readl(EP93XX_SYSCON_CLKSET2);
- if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
- clk_pll2_rate = EP93XX_EXT_CLK_RATE;
- else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
- clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
- else
- clk_pll2_rate = 0;
-
- hw = clk_hw_register_fixed_rate(NULL, "pll2", "xtali", 0, clk_pll2_rate);
- clk_hw_register_clkdev(hw, NULL, "pll2");
-
- /* Initialize the pll2 derived clocks */
- /*
- * These four bits set the divide ratio between the PLL2
- * output and the USB clock.
- * 0000 - Divide by 1
- * 0001 - Divide by 2
- * 0010 - Divide by 3
- * 0011 - Divide by 4
- * 0100 - Divide by 5
- * 0101 - Divide by 6
- * 0110 - Divide by 7
- * 0111 - Divide by 8
- * 1000 - Divide by 9
- * 1001 - Divide by 10
- * 1010 - Divide by 11
- * 1011 - Divide by 12
- * 1100 - Divide by 13
- * 1101 - Divide by 14
- * 1110 - Divide by 15
- * 1111 - Divide by 1
- * On power-on-reset these bits are reset to 0000b.
- */
- clk_usb_div = (((value >> 28) & 0xf) + 1);
- hw = clk_hw_register_fixed_factor(NULL, "usb_clk", "pll2", 0, 1, clk_usb_div);
- hw = clk_hw_register_gate(NULL, "ohci-platform",
- "usb_clk", 0,
- EP93XX_SYSCON_PWRCNT,
- EP93XX_SYSCON_PWRCNT_USH_EN,
- 0,
- &clk_lock);
- clk_hw_register_clkdev(hw, NULL, "ohci-platform");
-
- /*
- * EP93xx SSP clock rate was doubled in version E2. For more information
- * see:
- * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
- */
- clk_spi_div = 1;
- if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2)
- clk_spi_div = 2;
- hw = clk_hw_register_fixed_factor(NULL, "ep93xx-spi.0", "xtali", 0, 1, clk_spi_div);
- clk_hw_register_clkdev(hw, NULL, "ep93xx-spi.0");
-
- /* pwm clock */
- hw = clk_hw_register_fixed_factor(NULL, "pwm_clk", "xtali", 0, 1, 1);
- clk_hw_register_clkdev(hw, "pwm_clk", NULL);
-
- pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
- clk_pll1_rate / 1000000, clk_pll2_rate / 1000000);
- pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
- clk_f_rate / 1000000, clk_h_rate / 1000000,
- clk_p_rate / 1000000);
-
- ep93xx_uart_clock_init();
-
- /* touchscreen/adc clock */
- hw = clk_hw_register_div("ep93xx-adc",
- "xtali",
- EP93XX_SYSCON_KEYTCHCLKDIV,
- EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
- EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
- 1,
- adc_divisors,
- ARRAY_SIZE(adc_divisors));
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-adc");
-
- /* keypad clock */
- hw = clk_hw_register_div("ep93xx-keypad",
- "xtali",
- EP93XX_SYSCON_KEYTCHCLKDIV,
- EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
- EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
- 1,
- adc_divisors,
- ARRAY_SIZE(adc_divisors));
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-keypad");
-
- /* On reset PDIV and VDIV are set to zero; PDIV zero
- * means the clock is disabled, and VDIV must not be zero.
- * So set both dividers to their minimum values.
- */
- /* ENA - Enable CLK divider. */
- /* PDIV - 00 - Disable clock */
- /* VDIV - at least 2 */
- /* Check and enable video clk registers */
- value = __raw_readl(EP93XX_SYSCON_VIDCLKDIV);
- value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
- ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_VIDCLKDIV);
-
- /* check and enable i2s clk registers */
- value = __raw_readl(EP93XX_SYSCON_I2SCLKDIV);
- value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
- ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_I2SCLKDIV);
-
- /* video clk */
- hw = clk_hw_register_ddiv("ep93xx-fb",
- EP93XX_SYSCON_VIDCLKDIV,
- EP93XX_SYSCON_CLKDIV_ENABLE);
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-fb");
-
- /* i2s clk */
- hw = clk_hw_register_ddiv("mclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_CLKDIV_ENABLE);
-
- clk_hw_register_clkdev(hw, "mclk", "ep93xx-i2s");
-
- /* i2s sclk */
-#define EP93XX_I2SCLKDIV_SDIV_SHIFT 16
-#define EP93XX_I2SCLKDIV_SDIV_WIDTH 1
- hw = clk_hw_register_div("sclk",
- "mclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_I2SCLKDIV_SENA,
- EP93XX_I2SCLKDIV_SDIV_SHIFT,
- EP93XX_I2SCLKDIV_SDIV_WIDTH,
- sclk_divisors,
- ARRAY_SIZE(sclk_divisors));
-
- clk_hw_register_clkdev(hw, "sclk", "ep93xx-i2s");
-
- /* i2s lrclk */
-#define EP93XX_I2SCLKDIV_LRDIV32_SHIFT 17
-#define EP93XX_I2SCLKDIV_LRDIV32_WIDTH 3
- hw = clk_hw_register_div("lrclk",
- "sclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_I2SCLKDIV_SENA,
- EP93XX_I2SCLKDIV_LRDIV32_SHIFT,
- EP93XX_I2SCLKDIV_LRDIV32_WIDTH,
- lrclk_divisors,
- ARRAY_SIZE(lrclk_divisors));
-
- clk_hw_register_clkdev(hw, "lrclk", "ep93xx-i2s");
-
- return 0;
-}
-postcore_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
deleted file mode 100644
index 8b1ec60a9a46..000000000000
--- a/arch/arm/mach-ep93xx/core.c
+++ /dev/null
@@ -1,1018 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/core.c
- * Core routines for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
- *
- * Thanks go to Michael Burian and Ray Lehtiniemi for their key
- * role in the ep93xx linux community.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/sys_soc.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/uaccess.h>
-#include <linux/termios.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/serial.h>
-#include <linux/mtd/physmap.h>
-#include <linux/i2c.h>
-#include <linux/gpio/machine.h>
-#include <linux/spi/spi.h>
-#include <linux/export.h>
-#include <linux/irqchip/arm-vic.h>
-#include <linux/reboot.h>
-#include <linux/usb/ohci_pdriver.h>
-#include <linux/random.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/keypad-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include <linux/soc/cirrus/ep93xx.h>
-
-#include "gpio-ep93xx.h"
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "soc.h"
-#include "irqs.h"
-
-/*************************************************************************
- * Static I/O mappings that are needed for all EP93xx platforms
- *************************************************************************/
-static struct map_desc ep93xx_io_desc[] __initdata = {
- {
- .virtual = EP93XX_AHB_VIRT_BASE,
- .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE),
- .length = EP93XX_AHB_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = EP93XX_APB_VIRT_BASE,
- .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE),
- .length = EP93XX_APB_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-void __init ep93xx_map_io(void)
-{
- iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc));
-}
-
-/*************************************************************************
- * EP93xx IRQ handling
- *************************************************************************/
-void __init ep93xx_init_irq(void)
-{
- vic_init(EP93XX_VIC1_BASE, IRQ_EP93XX_VIC0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
- vic_init(EP93XX_VIC2_BASE, IRQ_EP93XX_VIC1, EP93XX_VIC2_VALID_IRQ_MASK, 0);
-}
-
-
-/*************************************************************************
- * EP93xx System Controller Software Locked register handling
- *************************************************************************/
-
-/*
- * syscon_swlock prevents anything else from writing to the syscon
- * block while a software locked register is being written.
- */
-static DEFINE_SPINLOCK(syscon_swlock);
-
-void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&syscon_swlock, flags);
-
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(val, reg);
-
- spin_unlock_irqrestore(&syscon_swlock, flags);
-}
-
-void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits)
-{
- unsigned long flags;
- unsigned int val;
-
- spin_lock_irqsave(&syscon_swlock, flags);
-
- val = __raw_readl(EP93XX_SYSCON_DEVCFG);
- val &= ~clear_bits;
- val |= set_bits;
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(val, EP93XX_SYSCON_DEVCFG);
-
- spin_unlock_irqrestore(&syscon_swlock, flags);
-}
-
-/**
- * ep93xx_chip_revision() - returns the EP93xx chip revision
- *
- * See "platform.h" for more information.
- */
-unsigned int ep93xx_chip_revision(void)
-{
- unsigned int v;
-
- v = __raw_readl(EP93XX_SYSCON_SYSCFG);
- v &= EP93XX_SYSCON_SYSCFG_REV_MASK;
- v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
- return v;
-}
-EXPORT_SYMBOL_GPL(ep93xx_chip_revision);
-
-/*************************************************************************
- * EP93xx GPIO
- *************************************************************************/
-static struct resource ep93xx_gpio_resource[] = {
- DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO_AB),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO0MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO1MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO2MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO3MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO4MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO5MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO6MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO7MUX),
-};
-
-static struct platform_device ep93xx_gpio_device = {
- .name = "gpio-ep93xx",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_gpio_resource),
- .resource = ep93xx_gpio_resource,
-};
-
-/*************************************************************************
- * EP93xx peripheral handling
- *************************************************************************/
-#define EP93XX_UART_MCR_OFFSET (0x0100)
-
-static void ep93xx_uart_set_mctrl(struct amba_device *dev,
- void __iomem *base, unsigned int mctrl)
-{
- unsigned int mcr;
-
- mcr = 0;
- if (mctrl & TIOCM_RTS)
- mcr |= 2;
- if (mctrl & TIOCM_DTR)
- mcr |= 1;
-
- __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
-}
-
-static struct amba_pl010_data ep93xx_uart_data = {
- .set_mctrl = ep93xx_uart_set_mctrl,
-};
-
-static AMBA_APB_DEVICE(uart1, "apb:uart1", 0x00041010, EP93XX_UART1_PHYS_BASE,
- { IRQ_EP93XX_UART1 }, &ep93xx_uart_data);
-
-static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE,
- { IRQ_EP93XX_UART2 }, NULL);
-
-static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE,
- { IRQ_EP93XX_UART3 }, &ep93xx_uart_data);
-
-static struct resource ep93xx_rtc_resource[] = {
- DEFINE_RES_MEM(EP93XX_RTC_PHYS_BASE, 0x10c),
-};
-
-static struct platform_device ep93xx_rtc_device = {
- .name = "ep93xx-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_rtc_resource),
- .resource = ep93xx_rtc_resource,
-};
-
-/*************************************************************************
- * EP93xx OHCI USB Host
- *************************************************************************/
-
-static struct clk *ep93xx_ohci_host_clock;
-
-static int ep93xx_ohci_power_on(struct platform_device *pdev)
-{
- if (!ep93xx_ohci_host_clock) {
- ep93xx_ohci_host_clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ep93xx_ohci_host_clock))
- return PTR_ERR(ep93xx_ohci_host_clock);
- }
-
- return clk_prepare_enable(ep93xx_ohci_host_clock);
-}
-
-static void ep93xx_ohci_power_off(struct platform_device *pdev)
-{
- clk_disable(ep93xx_ohci_host_clock);
-}
-
-static struct usb_ohci_pdata ep93xx_ohci_pdata = {
- .power_on = ep93xx_ohci_power_on,
- .power_off = ep93xx_ohci_power_off,
- .power_suspend = ep93xx_ohci_power_off,
-};
-
-static struct resource ep93xx_ohci_resources[] = {
- DEFINE_RES_MEM(EP93XX_USB_PHYS_BASE, 0x1000),
- DEFINE_RES_IRQ(IRQ_EP93XX_USB),
-};
-
-static u64 ep93xx_ohci_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_ohci_resources),
- .resource = ep93xx_ohci_resources,
- .dev = {
- .dma_mask = &ep93xx_ohci_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &ep93xx_ohci_pdata,
- },
-};
-
-/*************************************************************************
- * EP93xx physmap'ed flash
- *************************************************************************/
-static struct physmap_flash_data ep93xx_flash_data;
-
-static struct resource ep93xx_flash_resource = {
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ep93xx_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ep93xx_flash_data,
- },
- .num_resources = 1,
- .resource = &ep93xx_flash_resource,
-};
-
-/**
- * ep93xx_register_flash() - Register the external flash device.
- * @width: bank width in octets
- * @start: resource start address
- * @size: resource size
- */
-void __init ep93xx_register_flash(unsigned int width,
- resource_size_t start, resource_size_t size)
-{
- ep93xx_flash_data.width = width;
-
- ep93xx_flash_resource.start = start;
- ep93xx_flash_resource.end = start + size - 1;
-
- platform_device_register(&ep93xx_flash);
-}
-
-
-/*************************************************************************
- * EP93xx ethernet peripheral handling
- *************************************************************************/
-static struct ep93xx_eth_data ep93xx_eth_data;
-
-static struct resource ep93xx_eth_resource[] = {
- DEFINE_RES_MEM(EP93XX_ETHERNET_PHYS_BASE, 0x10000),
- DEFINE_RES_IRQ(IRQ_EP93XX_ETHERNET),
-};
-
-static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_eth_device = {
- .name = "ep93xx-eth",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_eth_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_eth_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_eth_resource),
- .resource = ep93xx_eth_resource,
-};
-
-/**
- * ep93xx_register_eth - Register the built-in ethernet platform device.
- * @data: platform specific ethernet configuration (__initdata)
- * @copy_addr: flag indicating that the MAC address should be copied
- * from the IndAd registers (as programmed by the bootloader)
- */
-void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
-{
- if (copy_addr)
- memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6);
-
- ep93xx_eth_data = *data;
- platform_device_register(&ep93xx_eth_device);
-}
-
-
-/*************************************************************************
- * EP93xx i2c peripheral handling
- *************************************************************************/
-
-/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
-static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
- .dev_id = "i2c-gpio.0",
- .table = {
- /* Use local offsets on gpiochip/port "G" */
- GPIO_LOOKUP_IDX("G", 1, NULL, 0,
- GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP_IDX("G", 0, NULL, 1,
- GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- { }
- },
-};
-
-static struct platform_device ep93xx_i2c_device = {
- .name = "i2c-gpio",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- },
-};
-
-/**
- * ep93xx_register_i2c - Register the i2c platform device.
- * @devices: platform specific i2c bus device information (__initdata)
- * @num: the number of devices on the i2c bus
- */
-void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
-{
- /*
- * FIXME: this just sets the two pins as non-opendrain, as no
- * platform tries to do that anyway. Flag the applicable lines
- * as open drain in the GPIO_LOOKUP above and the driver or
- * gpiolib will handle open drain/open drain emulation as need
- * be. Right now i2c-gpio emulates open drain which is not
- * optimal.
- */
- __raw_writel((0 << 1) | (0 << 0),
- EP93XX_GPIO_EEDRIVE);
-
- i2c_register_board_info(0, devices, num);
- gpiod_add_lookup_table(&ep93xx_i2c_gpiod_table);
- platform_device_register(&ep93xx_i2c_device);
-}
-
-/*************************************************************************
- * EP93xx SPI peripheral handling
- *************************************************************************/
-static struct ep93xx_spi_info ep93xx_spi_master_data;
-
-static struct resource ep93xx_spi_resources[] = {
- DEFINE_RES_MEM(EP93XX_SPI_PHYS_BASE, 0x18),
- DEFINE_RES_IRQ(IRQ_EP93XX_SSP),
-};
-
-static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_spi_device = {
- .name = "ep93xx-spi",
- .id = 0,
- .dev = {
- .platform_data = &ep93xx_spi_master_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_spi_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_spi_resources),
- .resource = ep93xx_spi_resources,
-};
-
-/**
- * ep93xx_register_spi() - registers spi platform device
- * @info: ep93xx board specific spi master info (__initdata)
- * @devices: SPI devices to register (__initdata)
- * @num: number of SPI devices to register
- *
- * This function registers platform device for the EP93xx SPI controller and
- * also makes sure that SPI pins are muxed so that I2S is not using those pins.
- */
-void __init ep93xx_register_spi(struct ep93xx_spi_info *info,
- struct spi_board_info *devices, int num)
-{
- /*
- * When SPI is used, we need to make sure that I2S is muxed off from
- * SPI pins.
- */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP);
-
- ep93xx_spi_master_data = *info;
- spi_register_board_info(devices, num);
- platform_device_register(&ep93xx_spi_device);
-}
-
-/*************************************************************************
- * EP93xx LEDs
- *************************************************************************/
-static const struct gpio_led ep93xx_led_pins[] __initconst = {
- {
- .name = "platform:grled",
- }, {
- .name = "platform:rdled",
- },
-};
-
-static const struct gpio_led_platform_data ep93xx_led_data __initconst = {
- .num_leds = ARRAY_SIZE(ep93xx_led_pins),
- .leds = ep93xx_led_pins,
-};
-
-static struct gpiod_lookup_table ep93xx_leds_gpio_table = {
- .dev_id = "leds-gpio",
- .table = {
- /* Use local offsets on gpiochip/port "E" */
- GPIO_LOOKUP_IDX("E", 0, NULL, 0, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("E", 1, NULL, 1, GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-/*************************************************************************
- * EP93xx pwm peripheral handling
- *************************************************************************/
-static struct resource ep93xx_pwm0_resource[] = {
- DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE, 0x10),
-};
-
-static struct platform_device ep93xx_pwm0_device = {
- .name = "ep93xx-pwm",
- .id = 0,
- .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource),
- .resource = ep93xx_pwm0_resource,
-};
-
-static struct resource ep93xx_pwm1_resource[] = {
- DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE + 0x20, 0x10),
-};
-
-static struct platform_device ep93xx_pwm1_device = {
- .name = "ep93xx-pwm",
- .id = 1,
- .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource),
- .resource = ep93xx_pwm1_resource,
-};
-
-void __init ep93xx_register_pwm(int pwm0, int pwm1)
-{
- if (pwm0)
- platform_device_register(&ep93xx_pwm0_device);
-
- /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */
- if (pwm1)
- platform_device_register(&ep93xx_pwm1_device);
-}
-
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev)
-{
- int err;
-
- if (pdev->id == 0) {
- err = 0;
- } else if (pdev->id == 1) {
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO14,
- dev_name(&pdev->dev));
- if (err)
- return err;
- err = gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0);
- if (err)
- goto fail;
-
- /* PWM 1 output on EGPIO[14] */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG);
- } else {
- err = -ENODEV;
- }
-
- return err;
-
-fail:
- gpio_free(EP93XX_GPIO_LINE_EGPIO14);
- return err;
-}
-EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio);
-
-void ep93xx_pwm_release_gpio(struct platform_device *pdev)
-{
- if (pdev->id == 1) {
- gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14);
- gpio_free(EP93XX_GPIO_LINE_EGPIO14);
-
- /* EGPIO[14] used for GPIO */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG);
- }
-}
-EXPORT_SYMBOL(ep93xx_pwm_release_gpio);
-
-
-/*************************************************************************
- * EP93xx video peripheral handling
- *************************************************************************/
-static struct ep93xxfb_mach_info ep93xxfb_data;
-
-static struct resource ep93xx_fb_resource[] = {
- DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE, 0x800),
-};
-
-static struct platform_device ep93xx_fb_device = {
- .name = "ep93xx-fb",
- .id = -1,
- .dev = {
- .platform_data = &ep93xxfb_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_fb_resource),
- .resource = ep93xx_fb_resource,
-};
-
-/* The backlight use a single register in the framebuffer's register space */
-#define EP93XX_RASTER_REG_BRIGHTNESS 0x20
-
-static struct resource ep93xx_bl_resources[] = {
- DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE +
- EP93XX_RASTER_REG_BRIGHTNESS, 0x04),
-};
-
-static struct platform_device ep93xx_bl_device = {
- .name = "ep93xx-bl",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_bl_resources),
- .resource = ep93xx_bl_resources,
-};
-
-/**
- * ep93xx_register_fb - Register the framebuffer platform device.
- * @data: platform specific framebuffer configuration (__initdata)
- */
-void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
-{
- ep93xxfb_data = *data;
- platform_device_register(&ep93xx_fb_device);
- platform_device_register(&ep93xx_bl_device);
-}
-
-
-/*************************************************************************
- * EP93xx matrix keypad peripheral handling
- *************************************************************************/
-static struct ep93xx_keypad_platform_data ep93xx_keypad_data;
-
-static struct resource ep93xx_keypad_resource[] = {
- DEFINE_RES_MEM(EP93XX_KEY_MATRIX_PHYS_BASE, 0x0c),
- DEFINE_RES_IRQ(IRQ_EP93XX_KEY),
-};
-
-static struct platform_device ep93xx_keypad_device = {
- .name = "ep93xx-keypad",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_keypad_data,
- },
- .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
- .resource = ep93xx_keypad_resource,
-};
-
-/**
- * ep93xx_register_keypad - Register the keypad platform device.
- * @data: platform specific keypad configuration (__initdata)
- */
-void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data)
-{
- ep93xx_keypad_data = *data;
- platform_device_register(&ep93xx_keypad_device);
-}
-
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
-{
- int err;
- int i;
-
- for (i = 0; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_c;
- err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_d;
- }
-
- /* Enable the keypad controller; GPIO ports C and D used for keypad */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK);
-
- return 0;
-
-fail_gpio_d:
- gpio_free(EP93XX_GPIO_LINE_C(i));
-fail_gpio_c:
- for (--i; i >= 0; --i) {
- gpio_free(EP93XX_GPIO_LINE_C(i));
- gpio_free(EP93XX_GPIO_LINE_D(i));
- }
- return err;
-}
-EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio);
-
-void ep93xx_keypad_release_gpio(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < 8; i++) {
- gpio_free(EP93XX_GPIO_LINE_C(i));
- gpio_free(EP93XX_GPIO_LINE_D(i));
- }
-
- /* Disable the keypad controller; GPIO ports C and D used for GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK);
-}
-EXPORT_SYMBOL(ep93xx_keypad_release_gpio);
-
-/*************************************************************************
- * EP93xx I2S audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_i2s_resource[] = {
- DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100),
- DEFINE_RES_IRQ(IRQ_EP93XX_SAI),
-};
-
-static struct platform_device ep93xx_i2s_device = {
- .name = "ep93xx-i2s",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_i2s_resource),
- .resource = ep93xx_i2s_resource,
-};
-
-static struct platform_device ep93xx_pcm_device = {
- .name = "ep93xx-pcm-audio",
- .id = -1,
-};
-
-void __init ep93xx_register_i2s(void)
-{
- platform_device_register(&ep93xx_i2s_device);
- platform_device_register(&ep93xx_pcm_device);
-}
-
-#define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \
- EP93XX_SYSCON_DEVCFG_I2SONAC97)
-
-#define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \
- EP93XX_SYSCON_I2SCLKDIV_SPOL)
-
-int ep93xx_i2s_acquire(void)
-{
- unsigned val;
-
- ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97,
- EP93XX_SYSCON_DEVCFG_I2S_MASK);
-
- /*
- * This is potentially racy with the clock api for i2s_mclk, sclk and
- * lrclk. Since the i2s driver is the only user of those clocks we
- * rely on it to prevent parallel use of this function and the
- * clock api for the i2s clocks.
- */
- val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV);
- val &= ~EP93XX_I2SCLKDIV_MASK;
- val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL;
- ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV);
-
- return 0;
-}
-EXPORT_SYMBOL(ep93xx_i2s_acquire);
-
-void ep93xx_i2s_release(void)
-{
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK);
-}
-EXPORT_SYMBOL(ep93xx_i2s_release);
-
-/*************************************************************************
- * EP93xx AC97 audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_ac97_resources[] = {
- DEFINE_RES_MEM(EP93XX_AAC_PHYS_BASE, 0xac),
- DEFINE_RES_IRQ(IRQ_EP93XX_AACINTR),
-};
-
-static struct platform_device ep93xx_ac97_device = {
- .name = "ep93xx-ac97",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_ac97_resources),
- .resource = ep93xx_ac97_resources,
-};
-
-void __init ep93xx_register_ac97(void)
-{
- /*
- * Make sure that the AC97 pins are not used by I2S.
- */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97);
-
- platform_device_register(&ep93xx_ac97_device);
- platform_device_register(&ep93xx_pcm_device);
-}
-
-/*************************************************************************
- * EP93xx Watchdog
- *************************************************************************/
-static struct resource ep93xx_wdt_resources[] = {
- DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08),
-};
-
-static struct platform_device ep93xx_wdt_device = {
- .name = "ep93xx-wdt",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_wdt_resources),
- .resource = ep93xx_wdt_resources,
-};
-
-/*************************************************************************
- * EP93xx IDE
- *************************************************************************/
-static struct resource ep93xx_ide_resources[] = {
- DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38),
- DEFINE_RES_IRQ(IRQ_EP93XX_EXT3),
-};
-
-static struct platform_device ep93xx_ide_device = {
- .name = "ep93xx-ide",
- .id = -1,
- .dev = {
- .dma_mask = &ep93xx_ide_device.dev.coherent_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(ep93xx_ide_resources),
- .resource = ep93xx_ide_resources,
-};
-
-void __init ep93xx_register_ide(void)
-{
- platform_device_register(&ep93xx_ide_device);
-}
-
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev)
-{
- int err;
- int i;
-
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev));
- if (err)
- return err;
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev));
- if (err)
- goto fail_egpio15;
- for (i = 2; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_e;
- }
- for (i = 4; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_g;
- }
- for (i = 0; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_h;
- }
-
- /* GPIO ports E[7:2], G[7:4] and H used by IDE */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
- return 0;
-
-fail_gpio_h:
- for (--i; i >= 0; --i)
- gpio_free(EP93XX_GPIO_LINE_H(i));
- i = 8;
-fail_gpio_g:
- for (--i; i >= 4; --i)
- gpio_free(EP93XX_GPIO_LINE_G(i));
- i = 8;
-fail_gpio_e:
- for (--i; i >= 2; --i)
- gpio_free(EP93XX_GPIO_LINE_E(i));
- gpio_free(EP93XX_GPIO_LINE_EGPIO15);
-fail_egpio15:
- gpio_free(EP93XX_GPIO_LINE_EGPIO2);
- return err;
-}
-EXPORT_SYMBOL(ep93xx_ide_acquire_gpio);
-
-void ep93xx_ide_release_gpio(struct platform_device *pdev)
-{
- int i;
-
- for (i = 2; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_E(i));
- for (i = 4; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_G(i));
- for (i = 0; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_H(i));
- gpio_free(EP93XX_GPIO_LINE_EGPIO15);
- gpio_free(EP93XX_GPIO_LINE_EGPIO2);
-
-
- /* GPIO ports E[7:2], G[7:4] and H used by GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-}
-EXPORT_SYMBOL(ep93xx_ide_release_gpio);
-
-/*************************************************************************
- * EP93xx ADC
- *************************************************************************/
-static struct resource ep93xx_adc_resources[] = {
- DEFINE_RES_MEM(EP93XX_ADC_PHYS_BASE, 0x28),
- DEFINE_RES_IRQ(IRQ_EP93XX_TOUCH),
-};
-
-static struct platform_device ep93xx_adc_device = {
- .name = "ep93xx-adc",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_adc_resources),
- .resource = ep93xx_adc_resources,
-};
-
-void __init ep93xx_register_adc(void)
-{
- /* Power up ADC, deactivate Touch Screen Controller */
- ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_TIN,
- EP93XX_SYSCON_DEVCFG_ADCPD);
-
- platform_device_register(&ep93xx_adc_device);
-}
-
-/*************************************************************************
- * EP93xx Security peripheral
- *************************************************************************/
-
-/*
- * The Maverick Key is 256 bits of micro fuses blown at the factory during
- * manufacturing to uniquely identify a part.
- *
- * See: http://arm.cirrus.com/forum/viewtopic.php?t=486&highlight=maverick+key
- */
-#define EP93XX_SECURITY_REG(x) (EP93XX_SECURITY_BASE + (x))
-#define EP93XX_SECURITY_SECFLG EP93XX_SECURITY_REG(0x2400)
-#define EP93XX_SECURITY_FUSEFLG EP93XX_SECURITY_REG(0x2410)
-#define EP93XX_SECURITY_UNIQID EP93XX_SECURITY_REG(0x2440)
-#define EP93XX_SECURITY_UNIQCHK EP93XX_SECURITY_REG(0x2450)
-#define EP93XX_SECURITY_UNIQVAL EP93XX_SECURITY_REG(0x2460)
-#define EP93XX_SECURITY_SECID1 EP93XX_SECURITY_REG(0x2500)
-#define EP93XX_SECURITY_SECID2 EP93XX_SECURITY_REG(0x2504)
-#define EP93XX_SECURITY_SECCHK1 EP93XX_SECURITY_REG(0x2520)
-#define EP93XX_SECURITY_SECCHK2 EP93XX_SECURITY_REG(0x2524)
-#define EP93XX_SECURITY_UNIQID2 EP93XX_SECURITY_REG(0x2700)
-#define EP93XX_SECURITY_UNIQID3 EP93XX_SECURITY_REG(0x2704)
-#define EP93XX_SECURITY_UNIQID4 EP93XX_SECURITY_REG(0x2708)
-#define EP93XX_SECURITY_UNIQID5 EP93XX_SECURITY_REG(0x270c)
-
-static char ep93xx_soc_id[33];
-
-static const char __init *ep93xx_get_soc_id(void)
-{
- unsigned int id, id2, id3, id4, id5;
-
- if (__raw_readl(EP93XX_SECURITY_UNIQVAL) != 1)
- return "bad Hamming code";
-
- id = __raw_readl(EP93XX_SECURITY_UNIQID);
- id2 = __raw_readl(EP93XX_SECURITY_UNIQID2);
- id3 = __raw_readl(EP93XX_SECURITY_UNIQID3);
- id4 = __raw_readl(EP93XX_SECURITY_UNIQID4);
- id5 = __raw_readl(EP93XX_SECURITY_UNIQID5);
-
- if (id != id2)
- return "invalid";
-
- /* Toss the unique ID into the entropy pool */
- add_device_randomness(&id2, 4);
- add_device_randomness(&id3, 4);
- add_device_randomness(&id4, 4);
- add_device_randomness(&id5, 4);
-
- snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id),
- "%08x%08x%08x%08x", id2, id3, id4, id5);
-
- return ep93xx_soc_id;
-}
-
-static const char __init *ep93xx_get_soc_rev(void)
-{
- int rev = ep93xx_chip_revision();
-
- switch (rev) {
- case EP93XX_CHIP_REV_D0:
- return "D0";
- case EP93XX_CHIP_REV_D1:
- return "D1";
- case EP93XX_CHIP_REV_E0:
- return "E0";
- case EP93XX_CHIP_REV_E1:
- return "E1";
- case EP93XX_CHIP_REV_E2:
- return "E2";
- default:
- return "unknown";
- }
-}
-
-static const char __init *ep93xx_get_machine_name(void)
-{
- return kasprintf(GFP_KERNEL,"%s", machine_desc->name);
-}
-
-static struct device __init *ep93xx_init_soc(void)
-{
- struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return NULL;
-
- soc_dev_attr->machine = ep93xx_get_machine_name();
- soc_dev_attr->family = "Cirrus Logic EP93xx";
- soc_dev_attr->revision = ep93xx_get_soc_rev();
- soc_dev_attr->soc_id = ep93xx_get_soc_id();
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr->machine);
- kfree(soc_dev_attr);
- return NULL;
- }
-
- return soc_device_to_device(soc_dev);
-}
-
-struct device __init *ep93xx_init_devices(void)
-{
- struct device *parent;
-
- /* Disallow access to MaverickCrunch initially */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
-
- /* Default all ports to GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK |
- EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-
- parent = ep93xx_init_soc();
-
- /* Get the GPIO working early, other devices need it */
- platform_device_register(&ep93xx_gpio_device);
-
- amba_device_register(&uart1_device, &iomem_resource);
- amba_device_register(&uart2_device, &iomem_resource);
- amba_device_register(&uart3_device, &iomem_resource);
-
- platform_device_register(&ep93xx_rtc_device);
- platform_device_register(&ep93xx_ohci_device);
- platform_device_register(&ep93xx_wdt_device);
-
- gpiod_add_lookup_table(&ep93xx_leds_gpio_table);
- gpio_led_register_device(-1, &ep93xx_led_data);
-
- return parent;
-}
-
-void ep93xx_restart(enum reboot_mode mode, const char *cmd)
-{
- /*
- * Set then clear the SWRST bit to initiate a software reset
- */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
-
- while (1)
- ;
-}
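
Editorial note: one detail of core.c worth calling out is the syscon software lock described in the comment above ep93xx_syscon_swlocked_write(): writing 0xaa to SWLOCK arms exactly one following write to a protected syscon register, which is why that helper and ep93xx_devcfg_set_clear() keep both writes under a single spinlock. Below is a minimal sketch of the pattern; it is an illustration only, and the register pointers are placeholders, not real EP93xx addresses.

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_swlock);

/* Sketch of the unlock-then-write sequence; not part of this patch. */
static void example_swlocked_write(unsigned int val,
				   void __iomem *swlock_reg,
				   void __iomem *locked_reg)
{
	unsigned long flags;

	spin_lock_irqsave(&example_swlock, flags);
	writel(0xaa, swlock_reg);	/* arm the next (single) write */
	writel(val, locked_reg);	/* must follow immediately */
	spin_unlock_irqrestore(&example_swlock, flags);
}
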
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
deleted file mode 100644
index 74515acab8ef..000000000000
--- a/arch/arm/mach-ep93xx/dma.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/dma.c
- *
- * Platform support code for the EP93xx dmaengine driver.
- *
- * Copyright (C) 2011 Mika Westerberg
- *
- * This work is based on the original dma-m2p implementation with
- * following copyrights:
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <linux/platform_data/dma-ep93xx.h>
-#include "hardware.h"
-
-#include "soc.h"
-
-#define DMA_CHANNEL(_name, _base, _irq) \
- { .name = (_name), .base = (_base), .irq = (_irq) }
-
-/*
- * DMA M2P channels.
- *
- * On the EP93xx chip the following peripherals may be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- * I2S contains 3 Tx and 3 Rx DMA Channels
- * AAC contains 3 Tx and 3 Rx DMA Channels
- * UART1 contains 1 Tx and 1 Rx DMA Channels
- * UART2 contains 1 Tx and 1 Rx DMA Channels
- * UART3 contains 1 Tx and 1 Rx DMA Channels
- * IrDA contains 1 Tx and 1 Rx DMA Channels
- *
- * Registers are mapped statically in ep93xx_map_io().
- */
-static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
- DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
- DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
- DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
- DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
- DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
- DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
- DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
- DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
- DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
- DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
-};
-
-static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
- .channels = ep93xx_dma_m2p_channels,
- .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
-};
-
-static u64 ep93xx_dma_m2p_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_dma_m2p_device = {
- .name = "ep93xx-dma-m2p",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_dma_m2p_data,
- .dma_mask = &ep93xx_dma_m2p_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-/*
- * DMA M2M channels.
- *
- * There are 2 M2M channels which support memcpy/memset and in addition simple
- * hardware requests from/to SSP and IDE. We do not implement external
- * hardware requests.
- *
- * Registers are mapped statically in ep93xx_map_io().
- */
-static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
- DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
- DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
-};
-
-static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
- .channels = ep93xx_dma_m2m_channels,
- .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
-};
-
-static u64 ep93xx_dma_m2m_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_dma_m2m_device = {
- .name = "ep93xx-dma-m2m",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_dma_m2m_data,
- .dma_mask = &ep93xx_dma_m2m_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static int __init ep93xx_dma_init(void)
-{
- platform_device_register(&ep93xx_dma_m2p_device);
- platform_device_register(&ep93xx_dma_m2m_device);
- return 0;
-}
-arch_initcall(ep93xx_dma_init);
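
Editorial note: the DMA_CHANNEL() macro above is just a designated-initializer shorthand for the channel tables, so each entry is equivalent to spelling out the name/base/irq fields by hand. The toy program below illustrates the same table-building and table-walking pattern; it is an editorial sketch with placeholder values and a stand-in struct, not the real ep93xx_dma_chan_data.

#include <stdio.h>

struct chan_data {                       /* stand-in; values are placeholders */
	const char *name;
	unsigned long base_offset;
	int irq;
};

#define CHANNEL(_name, _off, _irq) \
	{ .name = (_name), .base_offset = (_off), .irq = (_irq) }

static const struct chan_data channels[] = {
	CHANNEL("m2p0", 0x0000, 1),
	CHANNEL("m2p1", 0x0040, 2),
	CHANNEL("m2m0", 0x0100, 3),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
		printf("%s at +0x%04lx, irq %d\n",
		       channels[i].name, channels[i].base_offset, channels[i].irq);
	return 0;
}
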
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
deleted file mode 100644
index dbdb822a0100..000000000000
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/edb93xx.c
- * Cirrus Logic EDB93xx Development Board support.
- *
- * EDB93XX, EDB9301, EDB9307A
- * Copyright (C) 2008-2009 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * EDB9302
- * Copyright (C) 2006 George Kashperko <george@chas.com.ua>
- *
- * EDB9302A, EDB9315, EDB9315A
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * EDB9307
- * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
- *
- * EDB9312
- * Copyright (C) 2006 Infosys Technologies Limited
- * Toufeeq Hussain <toufeeq_hussain@infosys.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio/machine.h>
-
-#include <sound/cs4271.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include "gpio-ep93xx.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-
-static void __init edb93xx_register_flash(void)
-{
- if (machine_is_edb9307() || machine_is_edb9312() ||
- machine_is_edb9315()) {
- ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M);
- } else {
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
- }
-}
-
-static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
- .phy_id = 1,
-};
-
-
-/*************************************************************************
- * EDB93xx i2c peripheral handling
- *************************************************************************/
-
-static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
- {
- I2C_BOARD_INFO("isl1208", 0x6f),
- },
-};
-
-static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
- {
- I2C_BOARD_INFO("ds1337", 0x68),
- },
-};
-
-static void __init edb93xx_register_i2c(void)
-{
- if (machine_is_edb9302a() || machine_is_edb9307a() ||
- machine_is_edb9315a()) {
- ep93xx_register_i2c(edb93xxa_i2c_board_info,
- ARRAY_SIZE(edb93xxa_i2c_board_info));
- } else if (machine_is_edb9302() || machine_is_edb9307()
- || machine_is_edb9312() || machine_is_edb9315()) {
- ep93xx_register_i2c(edb93xx_i2c_board_info,
- ARRAY_SIZE(edb93xx_i2c_board_info));
- }
-}
-
-
-/*************************************************************************
- * EDB93xx SPI peripheral handling
- *************************************************************************/
-static struct cs4271_platform_data edb93xx_cs4271_data = {
- /* Intentionally left blank */
-};
-
-static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
- {
- .modalias = "cs4271",
- .platform_data = &edb93xx_cs4271_data,
- .max_speed_hz = 6000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_3,
- },
-};
-
-static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
- /* Intentionally left blank */
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9301_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("A", 1, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9302_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("H", 2, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9315_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("B", 6, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static void __init edb93xx_register_spi(void)
-{
- if (machine_is_edb9301() || machine_is_edb9302())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9301_gpio_table);
- else if (machine_is_edb9302a() || machine_is_edb9307a())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9302_gpio_table);
- else if (machine_is_edb9315a())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9315_gpio_table);
-
- gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table);
- ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
- ARRAY_SIZE(edb93xx_spi_board_info));
-}
-
-
-/*************************************************************************
- * EDB93xx I2S
- *************************************************************************/
-static struct platform_device edb93xx_audio_device = {
- .name = "edb93xx-audio",
- .id = -1,
-};
-
-static int __init edb93xx_has_audio(void)
-{
- return (machine_is_edb9301() || machine_is_edb9302() ||
- machine_is_edb9302a() || machine_is_edb9307a() ||
- machine_is_edb9315a());
-}
-
-static void __init edb93xx_register_i2s(void)
-{
- if (edb93xx_has_audio()) {
- ep93xx_register_i2s();
- platform_device_register(&edb93xx_audio_device);
- }
-}
-
-
-/*************************************************************************
- * EDB93xx pwm
- *************************************************************************/
-static void __init edb93xx_register_pwm(void)
-{
- if (machine_is_edb9301() ||
- machine_is_edb9302() || machine_is_edb9302a()) {
- /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */
- ep93xx_register_pwm(0, 1);
- } else if (machine_is_edb9307() || machine_is_edb9307a()) {
- /* EP9307 only has pwm.0 (PWMOUT) */
- ep93xx_register_pwm(1, 0);
- } else {
- /* EP9312 and EP9315 have both */
- ep93xx_register_pwm(1, 1);
- }
-}
-
-
-/*************************************************************************
- * EDB93xx framebuffer
- *************************************************************************/
-static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
- .flags = 0,
-};
-
-static int __init edb93xx_has_fb(void)
-{
- /* These platforms have an ep93xx with video capability */
- return machine_is_edb9307() || machine_is_edb9307a() ||
- machine_is_edb9312() || machine_is_edb9315() ||
- machine_is_edb9315a();
-}
-
-static void __init edb93xx_register_fb(void)
-{
- if (!edb93xx_has_fb())
- return;
-
- if (machine_is_edb9307a() || machine_is_edb9315a())
- edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
- else
- edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
-
- ep93xx_register_fb(&edb93xxfb_info);
-}
-
-
-/*************************************************************************
- * EDB93xx IDE
- *************************************************************************/
-static int __init edb93xx_has_ide(void)
-{
- /*
- * Although EDB9312 and EDB9315 do have IDE capability, they have
- * INTRQ line wired as pull-up, which makes using IDE interface
- * problematic.
- */
- return machine_is_edb9312() || machine_is_edb9315() ||
- machine_is_edb9315a();
-}
-
-static void __init edb93xx_register_ide(void)
-{
- if (!edb93xx_has_ide())
- return;
-
- ep93xx_register_ide();
-}
-
-
-static void __init edb93xx_init_machine(void)
-{
- ep93xx_init_devices();
- edb93xx_register_flash();
- ep93xx_register_eth(&edb93xx_eth_data, 1);
- edb93xx_register_i2c();
- edb93xx_register_spi();
- edb93xx_register_i2s();
- edb93xx_register_pwm();
- edb93xx_register_fb();
- edb93xx_register_ide();
- ep93xx_register_adc();
-}
-
-
-#ifdef CONFIG_MACH_EDB9301
-MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9302
-MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
- /* Maintainer: George Kashperko <george@chas.com.ua> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9302A
-MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9307
-MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
- /* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9307A
-MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9312
-MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
- /* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9315
-MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9315A
-MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-ep93xx/ep93xx-regs.h b/arch/arm/mach-ep93xx/ep93xx-regs.h
deleted file mode 100644
index 8fa3646de0a4..000000000000
--- a/arch/arm/mach-ep93xx/ep93xx-regs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_EP93XX_REGS_H
-#define __ASM_ARCH_EP93XX_REGS_H
-
-/*
- * EP93xx linux memory map:
- *
- * virt phys size
- * fe800000 5M per-platform mappings
- * fed00000 80800000 2M APB
- * fef00000 80000000 1M AHB
- */
-
-#define EP93XX_AHB_PHYS_BASE 0x80000000
-#define EP93XX_AHB_VIRT_BASE 0xfef00000
-#define EP93XX_AHB_SIZE 0x00100000
-
-#define EP93XX_AHB_PHYS(x) (EP93XX_AHB_PHYS_BASE + (x))
-#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x))
-
-#define EP93XX_APB_PHYS_BASE 0x80800000
-#define EP93XX_APB_VIRT_BASE 0xfed00000
-#define EP93XX_APB_SIZE 0x00200000
-
-#define EP93XX_APB_PHYS(x) (EP93XX_APB_PHYS_BASE + (x))
-#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x))
-
-/* APB UARTs */
-#define EP93XX_UART1_PHYS_BASE EP93XX_APB_PHYS(0x000c0000)
-#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000)
-
-#define EP93XX_UART2_PHYS_BASE EP93XX_APB_PHYS(0x000d0000)
-#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000)
-
-#define EP93XX_UART3_PHYS_BASE EP93XX_APB_PHYS(0x000e0000)
-#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000)
-
-#endif
diff --git a/arch/arm/mach-ep93xx/gpio-ep93xx.h b/arch/arm/mach-ep93xx/gpio-ep93xx.h
deleted file mode 100644
index 7b46eb7e5507..000000000000
--- a/arch/arm/mach-ep93xx/gpio-ep93xx.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Include file for the EP93XX GPIO controller machine specifics */
-
-#ifndef __GPIO_EP93XX_H
-#define __GPIO_EP93XX_H
-
-#include "ep93xx-regs.h"
-
-#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000)
-#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
-#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
-#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
-#define EP93XX_GPIO_A_INT_STATUS EP93XX_GPIO_REG(0xa0)
-#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc)
-#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8)
-
-/* GPIO port A. */
-#define EP93XX_GPIO_LINE_A(x) ((x) + 0)
-#define EP93XX_GPIO_LINE_EGPIO0 EP93XX_GPIO_LINE_A(0)
-#define EP93XX_GPIO_LINE_EGPIO1 EP93XX_GPIO_LINE_A(1)
-#define EP93XX_GPIO_LINE_EGPIO2 EP93XX_GPIO_LINE_A(2)
-#define EP93XX_GPIO_LINE_EGPIO3 EP93XX_GPIO_LINE_A(3)
-#define EP93XX_GPIO_LINE_EGPIO4 EP93XX_GPIO_LINE_A(4)
-#define EP93XX_GPIO_LINE_EGPIO5 EP93XX_GPIO_LINE_A(5)
-#define EP93XX_GPIO_LINE_EGPIO6 EP93XX_GPIO_LINE_A(6)
-#define EP93XX_GPIO_LINE_EGPIO7 EP93XX_GPIO_LINE_A(7)
-
-/* GPIO port B. */
-#define EP93XX_GPIO_LINE_B(x) ((x) + 8)
-#define EP93XX_GPIO_LINE_EGPIO8 EP93XX_GPIO_LINE_B(0)
-#define EP93XX_GPIO_LINE_EGPIO9 EP93XX_GPIO_LINE_B(1)
-#define EP93XX_GPIO_LINE_EGPIO10 EP93XX_GPIO_LINE_B(2)
-#define EP93XX_GPIO_LINE_EGPIO11 EP93XX_GPIO_LINE_B(3)
-#define EP93XX_GPIO_LINE_EGPIO12 EP93XX_GPIO_LINE_B(4)
-#define EP93XX_GPIO_LINE_EGPIO13 EP93XX_GPIO_LINE_B(5)
-#define EP93XX_GPIO_LINE_EGPIO14 EP93XX_GPIO_LINE_B(6)
-#define EP93XX_GPIO_LINE_EGPIO15 EP93XX_GPIO_LINE_B(7)
-
-/* GPIO port C. */
-#define EP93XX_GPIO_LINE_C(x) ((x) + 40)
-#define EP93XX_GPIO_LINE_ROW0 EP93XX_GPIO_LINE_C(0)
-#define EP93XX_GPIO_LINE_ROW1 EP93XX_GPIO_LINE_C(1)
-#define EP93XX_GPIO_LINE_ROW2 EP93XX_GPIO_LINE_C(2)
-#define EP93XX_GPIO_LINE_ROW3 EP93XX_GPIO_LINE_C(3)
-#define EP93XX_GPIO_LINE_ROW4 EP93XX_GPIO_LINE_C(4)
-#define EP93XX_GPIO_LINE_ROW5 EP93XX_GPIO_LINE_C(5)
-#define EP93XX_GPIO_LINE_ROW6 EP93XX_GPIO_LINE_C(6)
-#define EP93XX_GPIO_LINE_ROW7 EP93XX_GPIO_LINE_C(7)
-
-/* GPIO port D. */
-#define EP93XX_GPIO_LINE_D(x) ((x) + 24)
-#define EP93XX_GPIO_LINE_COL0 EP93XX_GPIO_LINE_D(0)
-#define EP93XX_GPIO_LINE_COL1 EP93XX_GPIO_LINE_D(1)
-#define EP93XX_GPIO_LINE_COL2 EP93XX_GPIO_LINE_D(2)
-#define EP93XX_GPIO_LINE_COL3 EP93XX_GPIO_LINE_D(3)
-#define EP93XX_GPIO_LINE_COL4 EP93XX_GPIO_LINE_D(4)
-#define EP93XX_GPIO_LINE_COL5 EP93XX_GPIO_LINE_D(5)
-#define EP93XX_GPIO_LINE_COL6 EP93XX_GPIO_LINE_D(6)
-#define EP93XX_GPIO_LINE_COL7 EP93XX_GPIO_LINE_D(7)
-
-/* GPIO port E. */
-#define EP93XX_GPIO_LINE_E(x) ((x) + 32)
-#define EP93XX_GPIO_LINE_GRLED EP93XX_GPIO_LINE_E(0)
-#define EP93XX_GPIO_LINE_RDLED EP93XX_GPIO_LINE_E(1)
-#define EP93XX_GPIO_LINE_DIORn EP93XX_GPIO_LINE_E(2)
-#define EP93XX_GPIO_LINE_IDECS1n EP93XX_GPIO_LINE_E(3)
-#define EP93XX_GPIO_LINE_IDECS2n EP93XX_GPIO_LINE_E(4)
-#define EP93XX_GPIO_LINE_IDEDA0 EP93XX_GPIO_LINE_E(5)
-#define EP93XX_GPIO_LINE_IDEDA1 EP93XX_GPIO_LINE_E(6)
-#define EP93XX_GPIO_LINE_IDEDA2 EP93XX_GPIO_LINE_E(7)
-
-/* GPIO port F. */
-#define EP93XX_GPIO_LINE_F(x) ((x) + 16)
-#define EP93XX_GPIO_LINE_WP EP93XX_GPIO_LINE_F(0)
-#define EP93XX_GPIO_LINE_MCCD1 EP93XX_GPIO_LINE_F(1)
-#define EP93XX_GPIO_LINE_MCCD2 EP93XX_GPIO_LINE_F(2)
-#define EP93XX_GPIO_LINE_MCBVD1 EP93XX_GPIO_LINE_F(3)
-#define EP93XX_GPIO_LINE_MCBVD2 EP93XX_GPIO_LINE_F(4)
-#define EP93XX_GPIO_LINE_VS1 EP93XX_GPIO_LINE_F(5)
-#define EP93XX_GPIO_LINE_READY EP93XX_GPIO_LINE_F(6)
-#define EP93XX_GPIO_LINE_VS2 EP93XX_GPIO_LINE_F(7)
-
-/* GPIO port G. */
-#define EP93XX_GPIO_LINE_G(x) ((x) + 48)
-#define EP93XX_GPIO_LINE_EECLK EP93XX_GPIO_LINE_G(0)
-#define EP93XX_GPIO_LINE_EEDAT EP93XX_GPIO_LINE_G(1)
-#define EP93XX_GPIO_LINE_SLA0 EP93XX_GPIO_LINE_G(2)
-#define EP93XX_GPIO_LINE_SLA1 EP93XX_GPIO_LINE_G(3)
-#define EP93XX_GPIO_LINE_DD12 EP93XX_GPIO_LINE_G(4)
-#define EP93XX_GPIO_LINE_DD13 EP93XX_GPIO_LINE_G(5)
-#define EP93XX_GPIO_LINE_DD14 EP93XX_GPIO_LINE_G(6)
-#define EP93XX_GPIO_LINE_DD15 EP93XX_GPIO_LINE_G(7)
-
-/* GPIO port H. */
-#define EP93XX_GPIO_LINE_H(x) ((x) + 56)
-#define EP93XX_GPIO_LINE_DD0 EP93XX_GPIO_LINE_H(0)
-#define EP93XX_GPIO_LINE_DD1 EP93XX_GPIO_LINE_H(1)
-#define EP93XX_GPIO_LINE_DD2 EP93XX_GPIO_LINE_H(2)
-#define EP93XX_GPIO_LINE_DD3 EP93XX_GPIO_LINE_H(3)
-#define EP93XX_GPIO_LINE_DD4 EP93XX_GPIO_LINE_H(4)
-#define EP93XX_GPIO_LINE_DD5 EP93XX_GPIO_LINE_H(5)
-#define EP93XX_GPIO_LINE_DD6 EP93XX_GPIO_LINE_H(6)
-#define EP93XX_GPIO_LINE_DD7 EP93XX_GPIO_LINE_H(7)
-
-/* maximum value for gpio line identifiers */
-#define EP93XX_GPIO_LINE_MAX EP93XX_GPIO_LINE_H(7)
-
-/* maximum value for irq capable line identifiers */
-#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
-
-#endif /* __GPIO_EP93XX_H */
diff --git a/arch/arm/mach-ep93xx/hardware.h b/arch/arm/mach-ep93xx/hardware.h
deleted file mode 100644
index e7d850e04782..000000000000
--- a/arch/arm/mach-ep93xx/hardware.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/hardware.h
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include "platform.h"
-
-/*
- * The EP93xx has two external crystal oscillators. To generate the
- * required high-frequency clocks, the processor uses two phase-locked-
- * loops (PLLs) to multiply the incoming external clock signal to much
- * higher frequencies that are then divided down by programmable dividers
- * to produce the needed clocks. The PLLs operate independently of one
- * another.
- */
-#define EP93XX_EXT_CLK_RATE 14745600
-#define EP93XX_EXT_RTC_RATE 32768
-
-#define EP93XX_KEYTCHCLK_DIV4 (EP93XX_EXT_CLK_RATE / 4)
-#define EP93XX_KEYTCHCLK_DIV16 (EP93XX_EXT_CLK_RATE / 16)
-
-#endif
diff --git a/arch/arm/mach-ep93xx/irqs.h b/arch/arm/mach-ep93xx/irqs.h
deleted file mode 100644
index 353201b90c66..000000000000
--- a/arch/arm/mach-ep93xx/irqs.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-#define IRQ_EP93XX_VIC0 1
-
-#define IRQ_EP93XX_COMMRX (IRQ_EP93XX_VIC0 + 2)
-#define IRQ_EP93XX_COMMTX (IRQ_EP93XX_VIC0 + 3)
-#define IRQ_EP93XX_TIMER1 (IRQ_EP93XX_VIC0 + 4)
-#define IRQ_EP93XX_TIMER2 (IRQ_EP93XX_VIC0 + 5)
-#define IRQ_EP93XX_AACINTR (IRQ_EP93XX_VIC0 + 6)
-#define IRQ_EP93XX_DMAM2P0 (IRQ_EP93XX_VIC0 + 7)
-#define IRQ_EP93XX_DMAM2P1 (IRQ_EP93XX_VIC0 + 8)
-#define IRQ_EP93XX_DMAM2P2 (IRQ_EP93XX_VIC0 + 9)
-#define IRQ_EP93XX_DMAM2P3 (IRQ_EP93XX_VIC0 + 10)
-#define IRQ_EP93XX_DMAM2P4 (IRQ_EP93XX_VIC0 + 11)
-#define IRQ_EP93XX_DMAM2P5 (IRQ_EP93XX_VIC0 + 12)
-#define IRQ_EP93XX_DMAM2P6 (IRQ_EP93XX_VIC0 + 13)
-#define IRQ_EP93XX_DMAM2P7 (IRQ_EP93XX_VIC0 + 14)
-#define IRQ_EP93XX_DMAM2P8 (IRQ_EP93XX_VIC0 + 15)
-#define IRQ_EP93XX_DMAM2P9 (IRQ_EP93XX_VIC0 + 16)
-#define IRQ_EP93XX_DMAM2M0 (IRQ_EP93XX_VIC0 + 17)
-#define IRQ_EP93XX_DMAM2M1 (IRQ_EP93XX_VIC0 + 18)
-#define IRQ_EP93XX_GPIO0MUX (IRQ_EP93XX_VIC0 + 19)
-#define IRQ_EP93XX_GPIO1MUX (IRQ_EP93XX_VIC0 + 20)
-#define IRQ_EP93XX_GPIO2MUX (IRQ_EP93XX_VIC0 + 21)
-#define IRQ_EP93XX_GPIO3MUX (IRQ_EP93XX_VIC0 + 22)
-#define IRQ_EP93XX_UART1RX (IRQ_EP93XX_VIC0 + 23)
-#define IRQ_EP93XX_UART1TX (IRQ_EP93XX_VIC0 + 24)
-#define IRQ_EP93XX_UART2RX (IRQ_EP93XX_VIC0 + 25)
-#define IRQ_EP93XX_UART2TX (IRQ_EP93XX_VIC0 + 26)
-#define IRQ_EP93XX_UART3RX (IRQ_EP93XX_VIC0 + 27)
-#define IRQ_EP93XX_UART3TX (IRQ_EP93XX_VIC0 + 28)
-#define IRQ_EP93XX_KEY (IRQ_EP93XX_VIC0 + 29)
-#define IRQ_EP93XX_TOUCH (IRQ_EP93XX_VIC0 + 30)
-#define EP93XX_VIC1_VALID_IRQ_MASK 0x7ffffffc
-
-#define IRQ_EP93XX_VIC1 (IRQ_EP93XX_VIC0 + 32)
-
-#define IRQ_EP93XX_EXT0 (IRQ_EP93XX_VIC1 + 0)
-#define IRQ_EP93XX_EXT1 (IRQ_EP93XX_VIC1 + 1)
-#define IRQ_EP93XX_EXT2 (IRQ_EP93XX_VIC1 + 2)
-#define IRQ_EP93XX_64HZ (IRQ_EP93XX_VIC1 + 3)
-#define IRQ_EP93XX_WATCHDOG (IRQ_EP93XX_VIC1 + 4)
-#define IRQ_EP93XX_RTC (IRQ_EP93XX_VIC1 + 5)
-#define IRQ_EP93XX_IRDA (IRQ_EP93XX_VIC1 + 6)
-#define IRQ_EP93XX_ETHERNET (IRQ_EP93XX_VIC1 + 7)
-#define IRQ_EP93XX_EXT3 (IRQ_EP93XX_VIC1 + 8)
-#define IRQ_EP93XX_PROG (IRQ_EP93XX_VIC1 + 9)
-#define IRQ_EP93XX_1HZ (IRQ_EP93XX_VIC1 + 10)
-#define IRQ_EP93XX_VSYNC (IRQ_EP93XX_VIC1 + 11)
-#define IRQ_EP93XX_VIDEO_FIFO (IRQ_EP93XX_VIC1 + 12)
-#define IRQ_EP93XX_SSP1RX (IRQ_EP93XX_VIC1 + 13)
-#define IRQ_EP93XX_SSP1TX (IRQ_EP93XX_VIC1 + 14)
-#define IRQ_EP93XX_GPIO4MUX (IRQ_EP93XX_VIC1 + 15)
-#define IRQ_EP93XX_GPIO5MUX (IRQ_EP93XX_VIC1 + 16)
-#define IRQ_EP93XX_GPIO6MUX (IRQ_EP93XX_VIC1 + 17)
-#define IRQ_EP93XX_GPIO7MUX (IRQ_EP93XX_VIC1 + 18)
-#define IRQ_EP93XX_TIMER3 (IRQ_EP93XX_VIC1 + 19)
-#define IRQ_EP93XX_UART1 (IRQ_EP93XX_VIC1 + 20)
-#define IRQ_EP93XX_SSP (IRQ_EP93XX_VIC1 + 21)
-#define IRQ_EP93XX_UART2 (IRQ_EP93XX_VIC1 + 22)
-#define IRQ_EP93XX_UART3 (IRQ_EP93XX_VIC1 + 23)
-#define IRQ_EP93XX_USB (IRQ_EP93XX_VIC1 + 24)
-#define IRQ_EP93XX_ETHERNET_PME (IRQ_EP93XX_VIC1 + 25)
-#define IRQ_EP93XX_DSP (IRQ_EP93XX_VIC1 + 26)
-#define IRQ_EP93XX_GPIO_AB (IRQ_EP93XX_VIC1 + 27)
-#define IRQ_EP93XX_SAI (IRQ_EP93XX_VIC1 + 28)
-#define EP93XX_VIC2_VALID_IRQ_MASK 0x1fffffff
-
-#define NR_EP93XX_IRQS (IRQ_EP93XX_VIC1 + 32 + 24)
-
-#define EP93XX_BOARD_IRQ(x) (NR_EP93XX_IRQS + (x))
-#define EP93XX_BOARD_IRQS 32
-
-#endif
diff --git a/arch/arm/mach-ep93xx/platform.h b/arch/arm/mach-ep93xx/platform.h
deleted file mode 100644
index 5fb1b919133f..000000000000
--- a/arch/arm/mach-ep93xx/platform.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/platform.h
- */
-
-#ifndef __ASSEMBLY__
-
-#include <linux/platform_data/eth-ep93xx.h>
-#include <linux/reboot.h>
-
-struct device;
-struct i2c_board_info;
-struct spi_board_info;
-struct platform_device;
-struct ep93xxfb_mach_info;
-struct ep93xx_keypad_platform_data;
-struct ep93xx_spi_info;
-
-void ep93xx_map_io(void);
-void ep93xx_init_irq(void);
-
-void ep93xx_register_flash(unsigned int width,
- resource_size_t start, resource_size_t size);
-
-void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
-void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
-void ep93xx_register_spi(struct ep93xx_spi_info *info,
- struct spi_board_info *devices, int num);
-void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
-void ep93xx_register_pwm(int pwm0, int pwm1);
-void ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data);
-void ep93xx_register_i2s(void);
-void ep93xx_register_ac97(void);
-void ep93xx_register_ide(void);
-void ep93xx_register_adc(void);
-
-struct device *ep93xx_init_devices(void);
-extern void ep93xx_timer_init(void);
-
-void ep93xx_restart(enum reboot_mode, const char *);
-
-#endif
diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h
deleted file mode 100644
index 3245ebbd5069..000000000000
--- a/arch/arm/mach-ep93xx/soc.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-ep93xx/soc.h
- *
- * Copyright (C) 2012 Open Kernel Labs <www.ok-labs.com>
- * Copyright (C) 2012 Ryan Mallon <rmallon@gmail.com>
- */
-
-#ifndef _EP93XX_SOC_H
-#define _EP93XX_SOC_H
-
-#include "ep93xx-regs.h"
-#include "irqs.h"
-
-/*
- * EP93xx Physical Memory Map:
- *
- * The ASDO pin is sampled at system reset to select a synchronous or
- * asynchronous boot configuration. When ASDO is "1" (i.e. pulled-up)
- * the synchronous boot mode is selected. When ASDO is "0" (i.e
- * pulled-down) the asynchronous boot mode is selected.
- *
- * In synchronous boot mode nSDCE3 is decoded starting at physical address
- * 0x00000000 and nCS0 is decoded starting at 0xf0000000. For asynchronous
- * boot mode they are swapped with nCS0 decoded at 0x00000000 and nSDCE3
- * decoded at 0xf0000000.
- *
- * There is known errata for the EP93xx dealing with External Memory
- * Configurations. Please refer to "AN273: EP93xx Silicon Rev E Design
- * Guidelines" for more information. This document can be found at:
- *
- * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
- */
-
-#define EP93XX_CS0_PHYS_BASE_ASYNC 0x00000000 /* ASDO Pin = 0 */
-#define EP93XX_SDCE3_PHYS_BASE_SYNC 0x00000000 /* ASDO Pin = 1 */
-#define EP93XX_CS1_PHYS_BASE 0x10000000
-#define EP93XX_CS2_PHYS_BASE 0x20000000
-#define EP93XX_CS3_PHYS_BASE 0x30000000
-#define EP93XX_PCMCIA_PHYS_BASE 0x40000000
-#define EP93XX_CS6_PHYS_BASE 0x60000000
-#define EP93XX_CS7_PHYS_BASE 0x70000000
-#define EP93XX_SDCE0_PHYS_BASE 0xc0000000
-#define EP93XX_SDCE1_PHYS_BASE 0xd0000000
-#define EP93XX_SDCE2_PHYS_BASE 0xe0000000
-#define EP93XX_SDCE3_PHYS_BASE_ASYNC 0xf0000000 /* ASDO Pin = 0 */
-#define EP93XX_CS0_PHYS_BASE_SYNC 0xf0000000 /* ASDO Pin = 1 */
-
-/* AHB peripherals */
-#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000)
-
-#define EP93XX_ETHERNET_PHYS_BASE EP93XX_AHB_PHYS(0x00010000)
-#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000)
-
-#define EP93XX_USB_PHYS_BASE EP93XX_AHB_PHYS(0x00020000)
-#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000)
-
-#define EP93XX_RASTER_PHYS_BASE EP93XX_AHB_PHYS(0x00030000)
-#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000)
-
-#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000)
-
-#define EP93XX_SDRAM_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00060000)
-
-#define EP93XX_PCMCIA_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00080000)
-
-#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000)
-
-#define EP93XX_IDE_PHYS_BASE EP93XX_AHB_PHYS(0x000a0000)
-#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000)
-
-#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000)
-
-#define EP93XX_VIC2_BASE EP93XX_AHB_IOMEM(0x000c0000)
-
-/* APB peripherals */
-#define EP93XX_TIMER_BASE EP93XX_APB_IOMEM(0x00010000)
-
-#define EP93XX_I2S_PHYS_BASE EP93XX_APB_PHYS(0x00020000)
-#define EP93XX_I2S_BASE EP93XX_APB_IOMEM(0x00020000)
-
-#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
-
-#define EP93XX_AAC_PHYS_BASE EP93XX_APB_PHYS(0x00080000)
-#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
-
-#define EP93XX_SPI_PHYS_BASE EP93XX_APB_PHYS(0x000a0000)
-#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000)
-
-#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000)
-
-#define EP93XX_KEY_MATRIX_PHYS_BASE EP93XX_APB_PHYS(0x000f0000)
-#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000)
-
-#define EP93XX_ADC_PHYS_BASE EP93XX_APB_PHYS(0x00100000)
-#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000)
-#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000)
-
-#define EP93XX_PWM_PHYS_BASE EP93XX_APB_PHYS(0x00110000)
-#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000)
-
-#define EP93XX_RTC_PHYS_BASE EP93XX_APB_PHYS(0x00120000)
-#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000)
-
-#define EP93XX_WATCHDOG_PHYS_BASE EP93XX_APB_PHYS(0x00140000)
-#define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000)
-
-/* System controller */
-#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000)
-#define EP93XX_SYSCON_REG(x) (EP93XX_SYSCON_BASE + (x))
-#define EP93XX_SYSCON_POWER_STATE EP93XX_SYSCON_REG(0x00)
-#define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04)
-#define EP93XX_SYSCON_PWRCNT_FIR_EN (1<<31)
-#define EP93XX_SYSCON_PWRCNT_UARTBAUD (1<<29)
-#define EP93XX_SYSCON_PWRCNT_USH_EN 28
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
-#define EP93XX_SYSCON_HALT EP93XX_SYSCON_REG(0x08)
-#define EP93XX_SYSCON_STANDBY EP93XX_SYSCON_REG(0x0c)
-#define EP93XX_SYSCON_CLKSET1 EP93XX_SYSCON_REG(0x20)
-#define EP93XX_SYSCON_CLKSET1_NBYP1 (1<<23)
-#define EP93XX_SYSCON_CLKSET2 EP93XX_SYSCON_REG(0x24)
-#define EP93XX_SYSCON_CLKSET2_NBYP2 (1<<19)
-#define EP93XX_SYSCON_CLKSET2_PLL2_EN (1<<18)
-#define EP93XX_SYSCON_DEVCFG EP93XX_SYSCON_REG(0x80)
-#define EP93XX_SYSCON_DEVCFG_SWRST (1<<31)
-#define EP93XX_SYSCON_DEVCFG_D1ONG (1<<30)
-#define EP93XX_SYSCON_DEVCFG_D0ONG (1<<29)
-#define EP93XX_SYSCON_DEVCFG_IONU2 (1<<28)
-#define EP93XX_SYSCON_DEVCFG_GONK (1<<27)
-#define EP93XX_SYSCON_DEVCFG_TONG (1<<26)
-#define EP93XX_SYSCON_DEVCFG_MONG (1<<25)
-#define EP93XX_SYSCON_DEVCFG_U3EN 24
-#define EP93XX_SYSCON_DEVCFG_CPENA (1<<23)
-#define EP93XX_SYSCON_DEVCFG_A2ONG (1<<22)
-#define EP93XX_SYSCON_DEVCFG_A1ONG (1<<21)
-#define EP93XX_SYSCON_DEVCFG_U2EN 20
-#define EP93XX_SYSCON_DEVCFG_EXVC (1<<19)
-#define EP93XX_SYSCON_DEVCFG_U1EN 18
-#define EP93XX_SYSCON_DEVCFG_TIN (1<<17)
-#define EP93XX_SYSCON_DEVCFG_HC3IN (1<<15)
-#define EP93XX_SYSCON_DEVCFG_HC3EN (1<<14)
-#define EP93XX_SYSCON_DEVCFG_HC1IN (1<<13)
-#define EP93XX_SYSCON_DEVCFG_HC1EN (1<<12)
-#define EP93XX_SYSCON_DEVCFG_HONIDE (1<<11)
-#define EP93XX_SYSCON_DEVCFG_GONIDE (1<<10)
-#define EP93XX_SYSCON_DEVCFG_PONG (1<<9)
-#define EP93XX_SYSCON_DEVCFG_EONIDE (1<<8)
-#define EP93XX_SYSCON_DEVCFG_I2SONSSP (1<<7)
-#define EP93XX_SYSCON_DEVCFG_I2SONAC97 (1<<6)
-#define EP93XX_SYSCON_DEVCFG_RASONP3 (1<<4)
-#define EP93XX_SYSCON_DEVCFG_RAS (1<<3)
-#define EP93XX_SYSCON_DEVCFG_ADCPD (1<<2)
-#define EP93XX_SYSCON_DEVCFG_KEYS (1<<1)
-#define EP93XX_SYSCON_DEVCFG_SHENA (1<<0)
-#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84)
-#define EP93XX_SYSCON_CLKDIV_ENABLE 15
-#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14)
-#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13)
-#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
-#define EP93XX_SYSCON_I2SCLKDIV EP93XX_SYSCON_REG(0x8c)
-#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
-#define EP93XX_SYSCON_I2SCLKDIV_ORIDE (1<<29)
-#define EP93XX_SYSCON_I2SCLKDIV_SPOL (1<<19)
-#define EP93XX_I2SCLKDIV_SDIV (1 << 16)
-#define EP93XX_I2SCLKDIV_LRDIV32 (0 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV64 (1 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV128 (2 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV_MASK (3 << 17)
-#define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90)
-#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
-#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
-#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
-#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0)
-#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c)
-#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000)
-#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28)
-#define EP93XX_SYSCON_SYSCFG_SBOOT (1<<8)
-#define EP93XX_SYSCON_SYSCFG_LCSN7 (1<<7)
-#define EP93XX_SYSCON_SYSCFG_LCSN6 (1<<6)
-#define EP93XX_SYSCON_SYSCFG_LASDO (1<<5)
-#define EP93XX_SYSCON_SYSCFG_LEEDA (1<<4)
-#define EP93XX_SYSCON_SYSCFG_LEECLK (1<<3)
-#define EP93XX_SYSCON_SYSCFG_LCSN2 (1<<1)
-#define EP93XX_SYSCON_SYSCFG_LCSN1 (1<<0)
-#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0)
-
-/* EP93xx System Controller software locked register write */
-void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg);
-void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits);
-
-static inline void ep93xx_devcfg_set_bits(unsigned int bits)
-{
- ep93xx_devcfg_set_clear(bits, 0x00);
-}
-
-static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
-{
- ep93xx_devcfg_set_clear(0x00, bits);
-}
-
-#endif /* _EP93XX_SOC_H */
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
deleted file mode 100644
index a9efa7bc2fa1..000000000000
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <asm/mach/time.h>
-#include "soc.h"
-#include "platform.h"
-
-/*************************************************************************
- * Timer handling for EP93xx
- *************************************************************************
- * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and
- * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate
- * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz,
- * is free-running, and can't generate interrupts.
- *
- * The 508 kHz timers are ideal for use for the timer interrupt, as the
- * most common values of HZ divide 508 kHz nicely. We pick the 32 bit
- * timer (timer 3) to get as long sleep intervals as possible when using
- * CONFIG_NO_HZ.
- *
- * The higher clock rate of timer 4 makes it a better choice than the
- * other timers for use as clock source and for sched_clock(), providing
- * a stable 40 bit time base.
- *************************************************************************
- */
-#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x))
-#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00)
-#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04)
-#define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08)
-#define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7)
-#define EP93XX_TIMER123_CONTROL_MODE (1 << 6)
-#define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3)
-#define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c)
-#define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20)
-#define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24)
-#define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28)
-#define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c)
-#define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60)
-#define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64)
-#define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8)
-#define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80)
-#define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84)
-#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88)
-#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c)
-
-#define EP93XX_TIMER123_RATE 508469
-#define EP93XX_TIMER4_RATE 983040
-
-static u64 notrace ep93xx_read_sched_clock(void)
-{
- u64 ret;
-
- ret = readl(EP93XX_TIMER4_VALUE_LOW);
- ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
- return ret;
-}
-
-static u64 ep93xx_clocksource_read(struct clocksource *c)
-{
- u64 ret;
-
- ret = readl(EP93XX_TIMER4_VALUE_LOW);
- ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
- return (u64) ret;
-}
-
-static int ep93xx_clkevt_set_next_event(unsigned long next,
- struct clock_event_device *evt)
-{
- /* Default mode: periodic, off, 508 kHz */
- u32 tmode = EP93XX_TIMER123_CONTROL_MODE |
- EP93XX_TIMER123_CONTROL_CLKSEL;
-
- /* Clear timer */
- writel(tmode, EP93XX_TIMER3_CONTROL);
-
- /* Set next event */
- writel(next, EP93XX_TIMER3_LOAD);
- writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE,
- EP93XX_TIMER3_CONTROL);
- return 0;
-}
-
-
-static int ep93xx_clkevt_shutdown(struct clock_event_device *evt)
-{
- /* Disable timer */
- writel(0, EP93XX_TIMER3_CONTROL);
-
- return 0;
-}
-
-static struct clock_event_device ep93xx_clockevent = {
- .name = "timer1",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = ep93xx_clkevt_shutdown,
- .set_state_oneshot = ep93xx_clkevt_shutdown,
- .tick_resume = ep93xx_clkevt_shutdown,
- .set_next_event = ep93xx_clkevt_set_next_event,
- .rating = 300,
-};
-
-static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- /* Writing any value clears the timer interrupt */
- writel(1, EP93XX_TIMER3_CLEAR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-void __init ep93xx_timer_init(void)
-{
- int irq = IRQ_EP93XX_TIMER3;
- unsigned long flags = IRQF_TIMER | IRQF_IRQPOLL;
-
- /* Enable and register clocksource and sched_clock on timer 4 */
- writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE,
- EP93XX_TIMER4_VALUE_HIGH);
- clocksource_mmio_init(NULL, "timer4",
- EP93XX_TIMER4_RATE, 200, 40,
- ep93xx_clocksource_read);
- sched_clock_register(ep93xx_read_sched_clock, 40,
- EP93XX_TIMER4_RATE);
-
- /* Set up clockevent on timer 3 */
- if (request_irq(irq, ep93xx_timer_interrupt, flags, "ep93xx timer",
- &ep93xx_clockevent))
- pr_err("Failed to request irq %d (ep93xx timer)\n", irq);
- clockevents_config_and_register(&ep93xx_clockevent,
- EP93XX_TIMER123_RATE,
- 1,
- 0xffffffffU);
-}
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
deleted file mode 100644
index d3de7283ecb3..000000000000
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ /dev/null
@@ -1,422 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/ts72xx.c
- * Technologic Systems TS72xx SBC support.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/mtd/platnand.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/mmc_spi.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include <linux/gpio/machine.h>
-
-#include "gpio-ep93xx.h"
-#include "hardware.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-#include "ts72xx.h"
-
-/*************************************************************************
- * IO map
- *************************************************************************/
-static struct map_desc ts72xx_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)TS72XX_MODEL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_MODEL_PHYS_BASE),
- .length = TS72XX_MODEL_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_OPTIONS_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE),
- .length = TS72XX_OPTIONS_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_OPTIONS2_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE),
- .length = TS72XX_OPTIONS2_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_CPLDVER_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_CPLDVER_PHYS_BASE),
- .length = TS72XX_CPLDVER_SIZE,
- .type = MT_DEVICE,
- }
-};
-
-static void __init ts72xx_map_io(void)
-{
- ep93xx_map_io();
- iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
-}
-
-
-/*************************************************************************
- * NAND flash
- *************************************************************************/
-#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
-#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
-
-static void ts72xx_nand_hwcontrol(struct nand_chip *chip,
- int cmd, unsigned int ctrl)
-{
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr = chip->legacy.IO_ADDR_R;
- unsigned char bits;
-
- addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
-
- bits = __raw_readb(addr) & ~0x07;
- bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
- bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
- bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
-
- __raw_writeb(bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, chip->legacy.IO_ADDR_W);
-}
-
-static int ts72xx_nand_device_ready(struct nand_chip *chip)
-{
- void __iomem *addr = chip->legacy.IO_ADDR_R;
-
- addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
-
- return !!(__raw_readb(addr) & 0x20);
-}
-
-#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
-#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
-
-static struct mtd_partition ts72xx_nand_parts[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0,
- .size = TS72XX_BOOTROM_PART_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- }, {
- .name = "Linux",
- .offset = MTDPART_OFS_RETAIN,
- .size = TS72XX_REDBOOT_PART_SIZE,
- /* retain this much space at the end for the last partition */
- }, {
- .name = "RedBoot",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
-};
-
-static struct platform_nand_data ts72xx_nand_data = {
- .chip = {
- .nr_chips = 1,
- .chip_offset = 0,
- .chip_delay = 15,
- },
- .ctrl = {
- .cmd_ctrl = ts72xx_nand_hwcontrol,
- .dev_ready = ts72xx_nand_device_ready,
- },
-};
-
-static struct resource ts72xx_nand_resource[] = {
- {
- .start = 0, /* filled in later */
- .end = 0, /* filled in later */
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device ts72xx_nand_flash = {
- .name = "gen_nand",
- .id = -1,
- .dev.platform_data = &ts72xx_nand_data,
- .resource = ts72xx_nand_resource,
- .num_resources = ARRAY_SIZE(ts72xx_nand_resource),
-};
-
-static void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
- resource_size_t start)
-{
- /*
- * TS7200 has NOR flash; all other TS72xx boards have NAND flash.
- */
- if (board_is_ts7200()) {
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
- } else {
- ts72xx_nand_resource[0].start = start;
- ts72xx_nand_resource[0].end = start + SZ_16M - 1;
-
- ts72xx_nand_data.chip.partitions = parts;
- ts72xx_nand_data.chip.nr_partitions = n;
-
- platform_device_register(&ts72xx_nand_flash);
- }
-}
-
-/*************************************************************************
- * RTC M48T86
- *************************************************************************/
-#define TS72XX_RTC_INDEX_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x00800000)
-#define TS72XX_RTC_DATA_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x01700000)
-
-static struct resource ts72xx_rtc_resources[] = {
- DEFINE_RES_MEM(TS72XX_RTC_INDEX_PHYS_BASE, 0x01),
- DEFINE_RES_MEM(TS72XX_RTC_DATA_PHYS_BASE, 0x01),
-};
-
-static struct platform_device ts72xx_rtc_device = {
- .name = "rtc-m48t86",
- .id = -1,
- .resource = ts72xx_rtc_resources,
- .num_resources = ARRAY_SIZE(ts72xx_rtc_resources),
-};
-
-/*************************************************************************
- * Watchdog (in CPLD)
- *************************************************************************/
-#define TS72XX_WDT_CONTROL_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03800000)
-#define TS72XX_WDT_FEED_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03c00000)
-
-static struct resource ts72xx_wdt_resources[] = {
- DEFINE_RES_MEM(TS72XX_WDT_CONTROL_PHYS_BASE, 0x01),
- DEFINE_RES_MEM(TS72XX_WDT_FEED_PHYS_BASE, 0x01),
-};
-
-static struct platform_device ts72xx_wdt_device = {
- .name = "ts72xx-wdt",
- .id = -1,
- .resource = ts72xx_wdt_resources,
- .num_resources = ARRAY_SIZE(ts72xx_wdt_resources),
-};
-
-/*************************************************************************
- * ETH
- *************************************************************************/
-static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
- .phy_id = 1,
-};
-
-/*************************************************************************
- * SPI SD/MMC host
- *************************************************************************/
-#define BK3_EN_SDCARD_PHYS_BASE 0x12400000
-#define BK3_EN_SDCARD_PWR 0x0
-#define BK3_DIS_SDCARD_PWR 0x0C
-static void bk3_mmc_spi_setpower(struct device *dev, unsigned int vdd)
-{
- void __iomem *pwr_sd = ioremap(BK3_EN_SDCARD_PHYS_BASE, SZ_4K);
-
- if (!pwr_sd) {
- pr_err("Failed to enable SD card power!");
- return;
- }
-
- pr_debug("%s: SD card pwr %s VDD:0x%x\n", __func__,
- !!vdd ? "ON" : "OFF", vdd);
-
- if (!!vdd)
- __raw_writeb(BK3_EN_SDCARD_PWR, pwr_sd);
- else
- __raw_writeb(BK3_DIS_SDCARD_PWR, pwr_sd);
-
- iounmap(pwr_sd);
-}
-
-static struct mmc_spi_platform_data bk3_spi_mmc_data = {
- .detect_delay = 500,
- .powerup_msecs = 100,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .caps = MMC_CAP_NONREMOVABLE,
- .setpower = bk3_mmc_spi_setpower,
-};
-
-/*************************************************************************
- * SPI Bus - SD card access
- *************************************************************************/
-static struct spi_board_info bk3_spi_board_info[] __initdata = {
- {
- .modalias = "mmc_spi",
- .platform_data = &bk3_spi_mmc_data,
- .max_speed_hz = 7.4E6,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- },
-};
-
-/*
- * This is a stub -> the FGPIO[3] pin is not connected on the schematic.
- * All the work is performed automatically by !SPI_FRAME (SFRM1) and
- * goes through the CPLD.
- */
-static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info bk3_spi_master __initdata = {
- .use_dma = 1,
-};
-
-/*************************************************************************
- * TS72XX support code
- *************************************************************************/
-#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
-
-/* Relative to EP93XX_CS1_PHYS_BASE */
-#define TS73XX_FPGA_LOADER_BASE 0x03c00000
-
-static struct resource ts73xx_fpga_resources[] = {
- {
- .start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE,
- .end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device ts73xx_fpga_device = {
- .name = "ts73xx-fpga-mgr",
- .id = -1,
- .resource = ts73xx_fpga_resources,
- .num_resources = ARRAY_SIZE(ts73xx_fpga_resources),
-};
-
-#endif
-
-/*************************************************************************
- * SPI Bus
- *************************************************************************/
-static struct spi_board_info ts72xx_spi_devices[] __initdata = {
- {
- .modalias = "tmp122",
- .max_speed_hz = 2 * 1000 * 1000,
- .bus_num = 0,
- .chip_select = 0,
- },
-};
-
-static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- /* DIO_17 */
- GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info ts72xx_spi_info __initdata = {
- /* Intentionally left blank */
-};
-
-static void __init ts72xx_init_machine(void)
-{
- ep93xx_init_devices();
- ts72xx_register_flash(ts72xx_nand_parts, ARRAY_SIZE(ts72xx_nand_parts),
- is_ts9420_installed() ?
- EP93XX_CS7_PHYS_BASE : EP93XX_CS6_PHYS_BASE);
- platform_device_register(&ts72xx_rtc_device);
- platform_device_register(&ts72xx_wdt_device);
-
- ep93xx_register_eth(&ts72xx_eth_data, 1);
-#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
- if (board_is_ts7300())
- platform_device_register(&ts73xx_fpga_device);
-#endif
- gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table);
- ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
- ARRAY_SIZE(ts72xx_spi_devices));
-}
-
-MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ts72xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = ts72xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-
-/*************************************************************************
- * EP93xx I2S audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_i2s_resource[] = {
- DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100),
- DEFINE_RES_IRQ_NAMED(IRQ_EP93XX_SAI, "spilink i2s slave"),
-};
-
-static struct platform_device ep93xx_i2s_device = {
- .name = "ep93xx-spilink-i2s",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_i2s_resource),
- .resource = ep93xx_i2s_resource,
-};
-
-/*************************************************************************
- * BK3 support code
- *************************************************************************/
-static struct mtd_partition bk3_nand_parts[] = {
- {
- .name = "System",
- .offset = 0x00000000,
- .size = 0x01e00000,
- }, {
- .name = "Data",
- .offset = 0x01e00000,
- .size = 0x05f20000
- }, {
- .name = "RedBoot",
- .offset = 0x07d20000,
- .size = 0x002e0000,
- .mask_flags = MTD_WRITEABLE, /* force RO */
- },
-};
-
-static void __init bk3_init_machine(void)
-{
- ep93xx_init_devices();
-
- ts72xx_register_flash(bk3_nand_parts, ARRAY_SIZE(bk3_nand_parts),
- EP93XX_CS6_PHYS_BASE);
-
- ep93xx_register_eth(&ts72xx_eth_data, 1);
-
- gpiod_add_lookup_table(&bk3_spi_cs_gpio_table);
- ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info,
- ARRAY_SIZE(bk3_spi_board_info));
-
- /* Configure ep93xx's I2S to use AC97 pins */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97);
- platform_device_register(&ep93xx_i2s_device);
-}
-
-MACHINE_START(BK3, "Liebherr controller BK3.1")
- /* Maintainer: Lukasz Majewski <lukma@denx.de> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ts72xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = bk3_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.h b/arch/arm/mach-ep93xx/ts72xx.h
deleted file mode 100644
index 00b4941d29c9..000000000000
--- a/arch/arm/mach-ep93xx/ts72xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/ts72xx.h
- */
-
-/*
- * TS72xx memory map:
- *
- * virt phys size
- * febff000 22000000 4K model number register (bits 0-2)
- * febfe000 22400000 4K options register
- * febfd000 22800000 4K options register #2
- * febfc000 23400000 4K CPLD version register
- */
-
-#ifndef __TS72XX_H_
-#define __TS72XX_H_
-
-#define TS72XX_MODEL_PHYS_BASE 0x22000000
-#define TS72XX_MODEL_VIRT_BASE IOMEM(0xfebff000)
-#define TS72XX_MODEL_SIZE 0x00001000
-
-#define TS72XX_MODEL_TS7200 0x00
-#define TS72XX_MODEL_TS7250 0x01
-#define TS72XX_MODEL_TS7260 0x02
-#define TS72XX_MODEL_TS7300 0x03
-#define TS72XX_MODEL_TS7400 0x04
-#define TS72XX_MODEL_MASK 0x07
-
-
-#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
-#define TS72XX_OPTIONS_VIRT_BASE IOMEM(0xfebfe000)
-#define TS72XX_OPTIONS_SIZE 0x00001000
-
-#define TS72XX_OPTIONS_COM2_RS485 0x02
-#define TS72XX_OPTIONS_MAX197 0x01
-
-
-#define TS72XX_OPTIONS2_PHYS_BASE 0x22800000
-#define TS72XX_OPTIONS2_VIRT_BASE IOMEM(0xfebfd000)
-#define TS72XX_OPTIONS2_SIZE 0x00001000
-
-#define TS72XX_OPTIONS2_TS9420 0x04
-#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
-
-#define TS72XX_CPLDVER_PHYS_BASE 0x23400000
-#define TS72XX_CPLDVER_VIRT_BASE IOMEM(0xfebfc000)
-#define TS72XX_CPLDVER_SIZE 0x00001000
-
-#ifndef __ASSEMBLY__
-
-static inline int ts72xx_model(void)
-{
- return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
-}
-
-static inline int board_is_ts7200(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7200;
-}
-
-static inline int board_is_ts7250(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7250;
-}
-
-static inline int board_is_ts7260(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7260;
-}
-
-static inline int board_is_ts7300(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7300;
-}
-
-static inline int board_is_ts7400(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7400;
-}
-
-static inline int is_max197_installed(void)
-{
- return !!(__raw_readb(TS72XX_OPTIONS_VIRT_BASE) &
- TS72XX_OPTIONS_MAX197);
-}
-
-static inline int is_ts9420_installed(void)
-{
- return !!(__raw_readb(TS72XX_OPTIONS2_VIRT_BASE) &
- TS72XX_OPTIONS2_TS9420);
-}
-#endif
-#endif /* __TS72XX_H_ */
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
deleted file mode 100644
index 85f0dd7255a9..000000000000
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/vision_ep9307.c
- * Vision Engraving Systems EP9307 SoM support.
- *
- * Copyright (C) 2008-2011 Vision Engraving Systems
- * H Hartley Sweeten <hsweeten@visionengravers.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/mtd/partitions.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/mmc_spi.h>
-#include <linux/mmc/host.h>
-
-#include <sound/cs4271.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include "gpio-ep93xx.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-
-/*************************************************************************
- * Static I/O mappings for the FPGA
- *************************************************************************/
-#define VISION_PHYS_BASE EP93XX_CS7_PHYS_BASE
-#define VISION_VIRT_BASE 0xfebff000
-
-static struct map_desc vision_io_desc[] __initdata = {
- {
- .virtual = VISION_VIRT_BASE,
- .pfn = __phys_to_pfn(VISION_PHYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static void __init vision_map_io(void)
-{
- ep93xx_map_io();
-
- iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc));
-}
-
-/*************************************************************************
- * Ethernet
- *************************************************************************/
-static struct ep93xx_eth_data vision_eth_data __initdata = {
- .phy_id = 1,
-};
-
-/*************************************************************************
- * Framebuffer
- *************************************************************************/
-#define VISION_LCD_ENABLE EP93XX_GPIO_LINE_EGPIO1
-
-static int vision_lcd_setup(struct platform_device *pdev)
-{
- int err;
-
- err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_OUT_INIT_HIGH, dev_name(&pdev->dev));
- if (err)
- return err;
-
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS |
- EP93XX_SYSCON_DEVCFG_RASONP3 |
- EP93XX_SYSCON_DEVCFG_EXVC);
-
- return 0;
-}
-
-static void vision_lcd_teardown(struct platform_device *pdev)
-{
- gpio_free(VISION_LCD_ENABLE);
-}
-
-static void vision_lcd_blank(int blank_mode, struct fb_info *info)
-{
- if (blank_mode)
- gpio_set_value(VISION_LCD_ENABLE, 0);
- else
- gpio_set_value(VISION_LCD_ENABLE, 1);
-}
-
-static struct ep93xxfb_mach_info ep93xxfb_info __initdata = {
- .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
- .setup = vision_lcd_setup,
- .teardown = vision_lcd_teardown,
- .blank = vision_lcd_blank,
-};
-
-
-/*************************************************************************
- * GPIO Expanders
- *************************************************************************/
-#define PCA9539_74_GPIO_BASE (EP93XX_GPIO_LINE_MAX + 1)
-#define PCA9539_75_GPIO_BASE (PCA9539_74_GPIO_BASE + 16)
-#define PCA9539_76_GPIO_BASE (PCA9539_75_GPIO_BASE + 16)
-#define PCA9539_77_GPIO_BASE (PCA9539_76_GPIO_BASE + 16)
-
-static struct pca953x_platform_data pca953x_74_gpio_data = {
- .gpio_base = PCA9539_74_GPIO_BASE,
- .irq_base = EP93XX_BOARD_IRQ(0),
-};
-
-static struct pca953x_platform_data pca953x_75_gpio_data = {
- .gpio_base = PCA9539_75_GPIO_BASE,
- .irq_base = -1,
-};
-
-static struct pca953x_platform_data pca953x_76_gpio_data = {
- .gpio_base = PCA9539_76_GPIO_BASE,
- .irq_base = -1,
-};
-
-static struct pca953x_platform_data pca953x_77_gpio_data = {
- .gpio_base = PCA9539_77_GPIO_BASE,
- .irq_base = -1,
-};
-
-/*************************************************************************
- * I2C Bus
- *************************************************************************/
-
-static struct i2c_board_info vision_i2c_info[] __initdata = {
- {
- I2C_BOARD_INFO("isl1208", 0x6f),
- .irq = IRQ_EP93XX_EXT1,
- }, {
- I2C_BOARD_INFO("pca9539", 0x74),
- .platform_data = &pca953x_74_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x75),
- .platform_data = &pca953x_75_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x76),
- .platform_data = &pca953x_76_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x77),
- .platform_data = &pca953x_77_gpio_data,
- },
-};
-
-/*************************************************************************
- * SPI CS4271 Audio Codec
- *************************************************************************/
-static struct cs4271_platform_data vision_cs4271_data = {
- /* Intentionally left blank */
-};
-
-/*************************************************************************
- * SPI Flash
- *************************************************************************/
-static struct mtd_partition vision_spi_flash_partitions[] = {
- {
- .name = "SPI bootstrap",
- .offset = 0,
- .size = SZ_4K,
- }, {
- .name = "Bootstrap config",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_4K,
- }, {
- .name = "System config",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct flash_platform_data vision_spi_flash_data = {
- .name = "SPI Flash",
- .parts = vision_spi_flash_partitions,
- .nr_parts = ARRAY_SIZE(vision_spi_flash_partitions),
-};
-
-/*************************************************************************
- * SPI SD/MMC host
- *************************************************************************/
-static struct mmc_spi_platform_data vision_spi_mmc_data = {
- .detect_delay = 100,
- .powerup_msecs = 100,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .caps2 = MMC_CAP2_RO_ACTIVE_HIGH,
-};
-
-static struct gpiod_lookup_table vision_spi_mmc_gpio_table = {
- .dev_id = "mmc_spi.2", /* "mmc_spi @ CS2 */
- .table = {
- /* Card detect */
- GPIO_LOOKUP_IDX("B", 7, NULL, 0, GPIO_ACTIVE_LOW),
- /* Write protect */
- GPIO_LOOKUP_IDX("F", 0, NULL, 1, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-/*************************************************************************
- * SPI Bus
- *************************************************************************/
-static struct spi_board_info vision_spi_board_info[] __initdata = {
- {
- .modalias = "cs4271",
- .platform_data = &vision_cs4271_data,
- .max_speed_hz = 6000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_3,
- }, {
- .modalias = "sst25l",
- .platform_data = &vision_spi_flash_data,
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 1,
- .mode = SPI_MODE_3,
- }, {
- .modalias = "mmc_spi",
- .platform_data = &vision_spi_mmc_data,
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 2,
- .mode = SPI_MODE_3,
- },
-};
-
-static struct gpiod_lookup_table vision_spi_cs4271_gpio_table = {
- .dev_id = "spi0.0", /* cs4271 @ CS0 */
- .table = {
- /* RESET */
- GPIO_LOOKUP_IDX("H", 2, NULL, 0, GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info vision_spi_master __initdata = {
- .use_dma = 1,
-};
-
-/*************************************************************************
- * I2S Audio
- *************************************************************************/
-static struct platform_device vision_audio_device = {
- .name = "edb93xx-audio",
- .id = -1,
-};
-
-static void __init vision_register_i2s(void)
-{
- ep93xx_register_i2s();
- platform_device_register(&vision_audio_device);
-}
-
-/*************************************************************************
- * Machine Initialization
- *************************************************************************/
-static void __init vision_init_machine(void)
-{
- ep93xx_init_devices();
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M);
- ep93xx_register_eth(&vision_eth_data, 1);
- ep93xx_register_fb(&ep93xxfb_info);
- ep93xx_register_pwm(1, 0);
-
- /*
- * Request the gpio expander's interrupt gpio line now to prevent
- * the kernel from doing a WARN in gpiolib:gpio_ensure_requested().
- */
- if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_IN, "pca9539:74"))
- pr_warn("cannot request interrupt gpio for pca9539:74\n");
-
- vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
-
- ep93xx_register_i2c(vision_i2c_info,
- ARRAY_SIZE(vision_i2c_info));
- gpiod_add_lookup_table(&vision_spi_cs4271_gpio_table);
- gpiod_add_lookup_table(&vision_spi_mmc_gpio_table);
- gpiod_add_lookup_table(&vision_spi_cs_gpio_table);
- ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
- ARRAY_SIZE(vision_spi_board_info));
- vision_register_i2s();
-}
-
-MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS + EP93XX_BOARD_IRQS,
- .map_io = vision_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = vision_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 49f054dcd4de..3e29b44d2d7b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -235,7 +235,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
- select HAVE_RUST if CPU_LITTLE_ENDIAN
+ select HAVE_RUST if RUSTC_SUPPORTS_ARM64
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -270,6 +270,18 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
+config RUSTC_SUPPORTS_ARM64
+ def_bool y
+ depends on CPU_LITTLE_ENDIAN
+ # Shadow call stack is only supported on certain rustc versions.
+ #
+ # When using the UNWIND_PATCH_PAC_INTO_SCS option, rustc version 1.80+ is
+ # required due to use of the -Zfixed-x18 flag.
+ #
+ # Otherwise, rustc version 1.82+ is required due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 || RUSTC_VERSION >= 108000 && UNWIND_PATCH_PAC_INTO_SCS
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
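For reference, the version gate introduced above can be read as the following predicate. This is an illustrative restatement only, not kernel code; Kconfig's && binds tighter than ||, and RUSTC_VERSION encodes e.g. 1.82.0 as 108200.

#include <stdbool.h>

/* Illustrative restatement of the RUSTC_SUPPORTS_ARM64 dependency. */
static bool rustc_supports_arm64_scs(unsigned int rustc_version,   /* e.g. 108200 */
                                     bool shadow_call_stack,
                                     bool unwind_patch_pac_into_scs)
{
        return !shadow_call_stack ||
               rustc_version >= 108200 ||
               (rustc_version >= 108000 && unwind_patch_pac_into_scs);
}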
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f6bc3da1ef11..b058c4803efb 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -57,9 +57,11 @@ KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=n
else
KBUILD_CFLAGS += -fasynchronous-unwind-tables
KBUILD_AFLAGS += -fasynchronous-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=y -Zuse-sync-unwind=n
endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
@@ -114,6 +116,7 @@ endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS += -ffixed-x18
+KBUILD_RUSTFLAGS += -Zfixed-x18
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
index b096009ef99c..5cbea9cd411f 100644
--- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
@@ -94,6 +94,39 @@
#pwm-cells = <2>;
};
+ serial@11002000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11002000 0 0x100>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART0_SEL>,
+ <&infracfg CLK_INFRA_UART0_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ serial@11003000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x100>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART1_SEL>,
+ <&infracfg CLK_INFRA_UART1_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ serial@11004000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11004000 0 0x100>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART2_SEL>,
+ <&infracfg CLK_INFRA_UART2_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
i2c@11007000 {
compatible = "mediatek,mt7981-i2c";
reg = <0 0x11007000 0 0x1000>,
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 70de288d728e..a1cd47d7f5e3 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -888,7 +888,8 @@
mcu {
compatible = "ti,cc1352p7";
- reset-gpios = <&main_gpio0 72 GPIO_ACTIVE_LOW>;
+ bootloader-backdoor-gpios = <&main_gpio0 13 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&main_gpio0 14 GPIO_ACTIVE_HIGH>;
vdds-supply = <&vdd_3v3>;
};
};
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 55a8e310ea12..58d89d997d05 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -264,8 +264,7 @@ SECTIONS
EXIT_DATA
}
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
PERCPU_SECTION(L1_CACHE_BYTES)
HYPERVISOR_PERCPU_SECTION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index fe0764173cd0..a0d01c46e408 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2164,7 +2164,7 @@ static void cpu_hyp_uninit(void *discard)
}
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
/*
* Most calls to this function are made with migration
@@ -2184,7 +2184,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
kvm_timer_cpu_down();
kvm_vgic_cpu_down();
@@ -2380,7 +2380,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
/*
* The stub hypercalls are now disabled, so set our local flag to
- * prevent a later re-init attempt in kvm_arch_hardware_enable().
+ * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu().
*/
__this_cpu_write(kvm_hyp_initialized, 1);
preempt_enable();
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 0eb0436ad4ce..bb35c34f86d2 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -25,6 +25,8 @@ config LOONGARCH
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -82,6 +84,7 @@ config LOONGARCH
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
@@ -147,7 +150,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -267,7 +270,7 @@ config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
- def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 99af8b3160a8..c86f0ab922ec 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -15,6 +15,7 @@
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
+#define __AMOR "amor.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
@@ -22,6 +23,7 @@
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
+#define __AMOR "amor.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index 16a716f88a5c..fc83bb32f9f0 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -51,6 +51,7 @@
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
@@ -65,6 +66,7 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 843f9c4ec980..98cf4d7b4b0a 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -87,19 +87,21 @@ enum cpu_type_enum {
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
-#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */
-#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
-#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
-#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */
+#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
+#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP 21 /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
+#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
+#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -115,6 +117,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
@@ -128,6 +131,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 04bf1a7f903a..26542413a5b0 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -62,6 +62,7 @@
#define LOONGARCH_CPUCFG1 0x1
#define CPUCFG1_ISGR32 BIT(0)
#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_ISA GENMASK(1, 0)
#define CPUCFG1_PAGING BIT(2)
#define CPUCFG1_IOCSR BIT(3)
#define CPUCFG1_PABITS GENMASK(11, 4)
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
index 9f97c3453b9c..304363bd3935 100644
--- a/arch/loongarch/include/asm/mmu_context.h
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/* Normal, classic get_new_mmu_context */
static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
{
u64 asid = asid_cache(cpu);
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
- local_flush_tlb_user(); /* start new asid cycle */
+ *need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
+{
+ __asm__ __volatile__(
+ "csrwr %[pgdl_val], %[pgdl_reg] \n\t"
+ "csrwr %[asid_val], %[asid_reg] \n\t"
+ : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
+ : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
+ : "memory"
+ );
+}
+
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ bool need_flush = false;
unsigned int cpu = smp_processor_id();
/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
- get_new_mmu_context(next, cpu);
-
- write_csr_asid(cpu_asid(cpu, next));
+ get_new_mmu_context(next, cpu, &need_flush);
if (next != &init_mm)
- csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
else
- csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
+
+ if (need_flush)
+ local_flush_tlb_user(); /* Flush TLB after updating the ASID */
/*
* Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
if (asid == cpu_asid(cpu, mm)) {
+ bool need_flush = false;
+
if (!current->mm || (current->mm == mm)) {
- get_new_mmu_context(mm, cpu);
+ get_new_mmu_context(mm, cpu, &need_flush);
+
write_csr_asid(cpu_asid(cpu, mm));
+ if (need_flush)
+ local_flush_tlb_user(); /* Flush TLB after updating the ASID */
+
goto out;
}
}
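The reworked context-switch path above reduces to the ordering sketched below. This is an illustrative pseudo-C summary using the names from the patch; the init_mm case and the active_mm bookkeeping are elided.

static void switch_context_sketch(struct mm_struct *next, unsigned int cpu)
{
        bool need_flush = false;

        if (!asid_valid(next, cpu))
                get_new_mmu_context(next, cpu, &need_flush);    /* sets need_flush on ASID wrap */

        /* PGDL and ASID are written back to back so the hardware never
         * pairs the new ASID with the old page-table root (or vice versa). */
        atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);

        if (need_flush)
                local_flush_tlb_user();         /* flush only once the new ASID is live */
}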
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 8f290e5546cf..87be9b14e9da 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -68,75 +68,6 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
-{
- unsigned long ret;
-
- switch (size) {
- case 1:
- __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
-}
-
-static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- default:
- BUILD_BUG();
- }
-}
-
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
@@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return 0;
}
+#define __pcpu_op_1(op) op ".b "
+#define __pcpu_op_2(op) op ".h "
+#define __pcpu_op_4(op) op ".w "
+#define __pcpu_op_8(op) op ".d "
+
+#define _percpu_read(size, _pcp) \
+({ \
+ typeof(_pcp) __pcp_ret; \
+ \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \
+ : [ret] "=&r"(__pcp_ret) \
+ : [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+ \
+ __pcp_ret; \
+})
+
+#define _percpu_write(size, _pcp, _val) \
+do { \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \
+ : \
+ : [val] "r"(_val), [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+} while (0)
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
__ret; \
})
-#define _percpu_read(pcp) \
-({ \
- typeof(pcp) __retval; \
- __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
- __retval; \
-})
-
-#define _percpu_write(pcp, val) \
-do { \
- __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
-} while (0) \
-
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
@@ -215,15 +161,15 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_read_1(pcp) _percpu_read(pcp)
-#define this_cpu_read_2(pcp) _percpu_read(pcp)
-#define this_cpu_read_4(pcp) _percpu_read(pcp)
-#define this_cpu_read_8(pcp) _percpu_read(pcp)
+#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
+#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
+#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
+#define this_cpu_read_8(pcp) _percpu_read(8, pcp)
-#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
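The effect of the macro rewrite above is that the access width is picked at preprocessing time rather than via a sizeof() switch in an out-of-line inline function. A hypothetical use (the variable name is illustrative) expands as follows:

#include <linux/percpu.h>

DEFINE_PER_CPU(u32, demo_count);        /* illustrative per-CPU variable */

static inline u32 demo_read(void)
{
        /*
         * this_cpu_read(demo_count) resolves to this_cpu_read_4(), i.e.
         * _percpu_read(4, demo_count), which emits a single
         * "ldx.w %ret, $r21, %ptr" with $r21 holding the per-CPU base.
         */
        return this_cpu_read(demo_count);
}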
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 85431f20a14d..9965f52ef65b 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -331,29 +331,23 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+ if (pte_none(ptep_get(buddy))) {
#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- __WEAK_LLSC_MB
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
+ /*
+ * For SMP, multiple CPUs can race, so we need
+ * to do this atomically.
+ */
+ __asm__ __volatile__(
+ __AMOR "$zero, %[global], %[buddy] \n"
+ : [buddy] "+ZB" (buddy->pte)
+ : [global] "r" (_PAGE_GLOBAL)
+ : "memory");
+
+ DBAR(0b11000); /* o_wrw = 0b11000 */
#else /* !CONFIG_SMP */
- if (pte_none(ptep_get(buddy)))
WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
+ }
}
}
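Conceptually, the AMOR sequence above replaces the old LL/SC retry loop with a single atomic fetch-OR. A rough portable-C equivalent is sketched below; it is illustrative only (assuming pte_t wraps an unsigned long "pte" field, as on LoongArch) and is not what the kernel emits. The patch follows the AMOR with DBAR(0b11000), the o_wrw ordering hint, to order the update against later accesses.

static inline void mark_buddy_global(pte_t *buddy)
{
        /* amor.d $zero, global, buddy: atomically OR _PAGE_GLOBAL into
         * *buddy, discarding the fetched old value. */
        __atomic_fetch_or(&buddy->pte, _PAGE_GLOBAL, __ATOMIC_RELAXED);
}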
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
new file mode 100644
index 000000000000..d70505b6676c
--- /dev/null
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_SET_MEMORY_H
+#define _ASM_LOONGARCH_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+
+bool kernel_page_present(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page);
+
+#endif /* _ASM_LOONGARCH_SET_MEMORY_H */
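A hypothetical use of the new interface (the helper and buffer names are illustrative), e.g. sealing one page of generated code:

#include <asm/set_memory.h>

static int seal_generated_code(void *buf)       /* illustrative helper */
{
        unsigned long addr = (unsigned long)buf;
        int ret;

        /* Drop write permission first, then allow execution. */
        ret = set_memory_ro(addr, 1);
        if (ret)
                return ret;
        return set_memory_x(addr, 1);
}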
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
index 6955a7cb2c65..2b34e56cfa9e 100644
--- a/arch/loongarch/include/uapi/asm/hwcap.h
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -17,5 +17,6 @@
#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
+#define HWCAP_LOONGARCH_LSPW (1 << 14)
#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 6c22f616b8f1..5cd121275bac 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -9,7 +9,6 @@
#define _UAPI_ASM_SIGCONTEXT_H
#include <linux/types.h>
-#include <linux/posix_types.h>
/* FP context was used */
#define SC_USED_FP (1 << 0)
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 929a497c987e..f1a74b80f22c 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/efi-bgrt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -212,6 +213,9 @@ void __init acpi_boot_table_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+ if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+
return;
fdt_earlycon:
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 14f0449f5452..cbce099037b2 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -91,12 +91,30 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
unsigned int config;
unsigned long asid_mask;
- c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
- LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
+
+ switch (config & CPUCFG1_ISA) {
+ case 0:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32R);
+ break;
+ case 1:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ break;
+ case 2:
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ break;
+ default:
+ pr_warn("Warning: unknown ISA level\n");
+ }
+
+ if (config & CPUCFG1_PAGING)
+ c->options |= LOONGARCH_CPU_TLB;
+ if (config & CPUCFG1_IOCSR)
+ c->options |= LOONGARCH_CPU_IOCSR;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -139,6 +157,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
+ if (config & CPUCFG2_LSPW) {
+ c->options |= LOONGARCH_CPU_LSPW;
+ elf_hwcap |= HWCAP_LOONGARCH_LSPW;
+ }
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
@@ -162,22 +184,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
- config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
- if (config & IOCSRF_CSRIPI)
- c->options |= LOONGARCH_CPU_CSRIPI;
- if (config & IOCSRF_EXTIOI)
- c->options |= LOONGARCH_CPU_EXTIOI;
- if (config & IOCSRF_FREQSCALE)
- c->options |= LOONGARCH_CPU_SCALEFREQ;
- if (config & IOCSRF_FLATMODE)
- c->options |= LOONGARCH_CPU_FLATMODE;
- if (config & IOCSRF_EIODECODE)
- c->options |= LOONGARCH_CPU_EIODECODE;
- if (config & IOCSRF_AVEC)
- c->options |= LOONGARCH_CPU_AVECINT;
- if (config & IOCSRF_VM)
- c->options |= LOONGARCH_CPU_HYPERVISOR;
-
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
@@ -210,6 +216,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
default:
pr_warn("Warning: unknown TLB type\n");
}
+
+ if (get_num_brps() + get_num_wrps())
+ c->options |= LOONGARCH_CPU_WATCH;
}
#define MAX_NAME_LEN 32
@@ -220,52 +229,67 @@ static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
+ uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+ const char *core_name = "Unknown";
- if (!__cpu_full_name[cpu])
- __cpu_full_name[cpu] = cpu_full_name;
-
- *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
- *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
-
- switch (c->processor_id & PRID_SERIES_MASK) {
- case PRID_SERIES_LA132:
+ switch (BIT(fls(c->isa_level) - 1)) {
+ case LOONGARCH_CPU_ISA_LA32R:
+ case LOONGARCH_CPU_ISA_LA32S:
c->cputype = CPU_LOONGSON32;
- set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
- pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
break;
- case PRID_SERIES_LA264:
+ case LOONGARCH_CPU_ISA_LA64:
c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ }
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ core_name = "LA132";
+ break;
+ case PRID_SERIES_LA264:
+ core_name = "LA264";
break;
case PRID_SERIES_LA364:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ core_name = "LA364";
break;
case PRID_SERIES_LA464:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ core_name = "LA464";
break;
case PRID_SERIES_LA664:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ core_name = "LA664";
break;
- default: /* Default to 64 bit */
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
+
+ pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
+
+ if (!cpu_has_iocsr)
+ return;
+
+ if (!__cpu_full_name[cpu])
+ __cpu_full_name[cpu] = cpu_full_name;
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_AVEC)
+ c->options |= LOONGARCH_CPU_AVECINT;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
}
#ifdef CONFIG_64BIT
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 0d33cbc47e51..6ce46d92f1f1 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -31,6 +31,7 @@ int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
@@ -64,9 +65,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
- if (cpu_has_loongarch32)
- seq_printf(m, " loongarch32");
- if (cpu_has_loongarch64)
+ if (isa & LOONGARCH_CPU_ISA_LA32R)
+ seq_printf(m, " loongarch32r");
+ if (isa & LOONGARCH_CPU_ISA_LA32S)
+ seq_printf(m, " loongarch32s");
+ if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
@@ -81,6 +84,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
+ if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ba5d0930a74f..168bd97540f8 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -79,7 +79,3 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
syscall_exit_to_user_mode(regs);
}
-
-#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
-STACK_FRAME_NON_STANDARD(do_syscall);
-#endif
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 844736b99d38..27e9b94c0a0b 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -261,7 +261,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -ENOIOCTLCMD;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
unsigned long env, gcfg = 0;
@@ -300,7 +300,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
write_csr_gcfg(0);
write_csr_gstat(0);
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index e4d1e581dbae..278be2c8fc36 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y += init.o cache.o tlb.o tlbex.o extable.o \
- fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+ fault.o ioremap.o maccess.o mmap.o pgtable.o \
+ page.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 97b40defde06..deefd9617d00 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -31,11 +31,52 @@
int show_unhandled_signals = 1;
+static int __kprobes spurious_fault(unsigned long write, unsigned long address)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!(address & __UA_LIMIT))
+ return 0;
+
+ pgd = pgd_offset_k(address);
+ if (!pgd_present(pgdp_get(pgd)))
+ return 0;
+
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(p4dp_get(p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, address);
+ if (!pud_present(pudp_get(pud)))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(pmdp_get(pmd)))
+ return 0;
+
+ if (pmd_leaf(*pmd)) {
+ return write ? pmd_write(pmdp_get(pmd)) : 1;
+ } else {
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(ptep_get(pte)))
+ return 0;
+
+ return write ? pte_write(ptep_get(pte)) : 1;
+ }
+}
+
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
+ if (spurious_fault(write, address))
+ return;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
new file mode 100644
index 000000000000..ffd8d76021d4
--- /dev/null
+++ b/arch/loongarch/mm/pageattr.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pagewalk.h>
+#include <linux/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+struct pageattr_masks {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+{
+ unsigned long new_val = val;
+ struct pageattr_masks *masks = walk->private;
+
+ new_val &= ~(pgprot_val(masks->clear_mask));
+ new_val |= (pgprot_val(masks->set_mask));
+
+ return new_val;
+}
+
+static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgd_t val = pgdp_get(pgd);
+
+ if (pgd_leaf(val)) {
+ val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+ set_pgd(pgd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ p4d_t val = p4dp_get(p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+ set_p4d(p4d, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pud_t val = pudp_get(pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+ set_pud(pud, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pmd_t val = pmdp_get(pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+ set_pmd(pmd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t val = ptep_get(pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+
+ return 0;
+}
+
+static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk)
+{
+ return 0;
+}
+
+static const struct mm_walk_ops pageattr_ops = {
+ .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+ .pte_entry = pageattr_pte_entry,
+ .pte_hole = pageattr_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
+{
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+ };
+
+ if (!numpages)
+ return 0;
+
+ mmap_write_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+ return ret;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
+}
+
+bool kernel_page_present(struct page *page)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return true;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(pgdp_get(pgd)))
+ return false;
+ if (pgd_leaf(pgdp_get(pgd)))
+ return true;
+
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(p4dp_get(p4d)))
+ return false;
+ if (p4d_leaf(p4dp_get(p4d)))
+ return true;
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(pudp_get(pud)))
+ return false;
+ if (pud_leaf(pudp_get(pud)))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(pmdp_get(pmd)))
+ return false;
+ if (pmd_leaf(pmdp_get(pmd)))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(ptep_get(pte));
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+}
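Each walker callback in the new pageattr.c applies the same transform to a leaf entry, so the whole file reduces to the rule below (an illustrative restatement of set_pageattr_masks()):

static unsigned long apply_masks(unsigned long old_val, pgprot_t set_mask, pgprot_t clear_mask)
{
        /* e.g. set_memory_ro(): clear _PAGE_WRITE | _PAGE_DIRTY, set nothing;
         * set_direct_map_invalid_noflush(): clear _PAGE_PRESENT | _PAGE_VALID. */
        return (old_val & ~pgprot_val(clear_mask)) | pgprot_val(set_mask);
}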
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index 365f7de771cb..1da4dc46df43 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
+ kfree(root_ops);
} else {
struct pci_bus *child;
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index 7e86a50f6e85..c2733e6c3a8d 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -9,23 +9,11 @@
.text
-/* Salsa20 quarter-round */
-.macro QR a b c d
- add.w \a, \a, \b
- xor \d, \d, \a
- rotri.w \d, \d, 16
-
- add.w \c, \c, \d
- xor \b, \b, \c
- rotri.w \b, \b, 20
-
- add.w \a, \a, \b
- xor \d, \d, \a
- rotri.w \d, \d, 24
-
- add.w \c, \c, \d
- xor \b, \b, \c
- rotri.w \b, \b, 25
+.macro OP_4REG op d0 d1 d2 d3 s0 s1 s2 s3
+ \op \d0, \d0, \s0
+ \op \d1, \d1, \s1
+ \op \d2, \d2, \s2
+ \op \d3, \d3, \s3
.endm
/*
@@ -74,6 +62,23 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
/* Reuse i as copy3 */
#define copy3 i
+/* Packs to be used with OP_4REG */
+#define line0 state0, state1, state2, state3
+#define line1 state4, state5, state6, state7
+#define line2 state8, state9, state10, state11
+#define line3 state12, state13, state14, state15
+
+#define line1_perm state5, state6, state7, state4
+#define line2_perm state10, state11, state8, state9
+#define line3_perm state15, state12, state13, state14
+
+#define copy copy0, copy1, copy2, copy3
+
+#define _16 16, 16, 16, 16
+#define _20 20, 20, 20, 20
+#define _24 24, 24, 24, 24
+#define _25 25, 25, 25, 25
+
/*
* The ABI requires s0-s9 saved, and sp aligned to 16-byte.
* This does not violate the stack-less requirement: no sensitive data
@@ -126,16 +131,38 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w i, 10
.Lpermute:
/* odd round */
- QR state0, state4, state8, state12
- QR state1, state5, state9, state13
- QR state2, state6, state10, state14
- QR state3, state7, state11, state15
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _16
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _20
+
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _24
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _25
/* even round */
- QR state0, state5, state10, state15
- QR state1, state6, state11, state12
- QR state2, state7, state8, state13
- QR state3, state4, state9, state14
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _16
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _20
+
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _24
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _25
addi.w i, i, -1
bnez i, .Lpermute
@@ -147,10 +174,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy3, 0x6b206574
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
- add.w state0, state0, copy0
- add.w state1, state1, copy1
- add.w state2, state2, copy2
- add.w state3, state3, copy3
+ OP_4REG add.w line0, copy
st.w state0, output, 0
st.w state1, output, 4
st.w state2, output, 8
@@ -165,10 +189,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
ld.w state3, key, 12
/* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */
- add.w state4, state4, state0
- add.w state5, state5, state1
- add.w state6, state6, state2
- add.w state7, state7, state3
+ OP_4REG add.w line1, line0
st.w state4, output, 16
st.w state5, output, 20
st.w state6, output, 24
@@ -181,10 +202,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
ld.w state3, key, 28
/* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */
- add.w state8, state8, state0
- add.w state9, state9, state1
- add.w state10, state10, state2
- add.w state11, state11, state3
+ OP_4REG add.w line2, line0
st.w state8, output, 32
st.w state9, output, 36
st.w state10, output, 40
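For reference, the OP_4REG sequences above perform four ChaCha20 quarter rounds in lock-step, one per column (odd round) or diagonal (even round). A self-contained scalar sketch of one quarter round is below; note that rotri by 16/20/24/25 is the same as a left rotation by 16/12/8/7.

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
        return (v << n) | (v >> (32 - n));
}

/* One ChaCha20 quarter round; OP_4REG applies each step to four
 * (a, b, c, d) tuples at once. */
static void chacha_quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
}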
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6743a57c1ab4..f7222eb594ea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -728,8 +728,8 @@ struct kvm_mips_callbacks {
int (*handle_fpe)(struct kvm_vcpu *vcpu);
int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
- int (*hardware_enable)(void);
- void (*hardware_disable)(void);
+ int (*enable_virtualization_cpu)(void);
+ void (*disable_virtualization_cpu)(void);
int (*check_extension)(struct kvm *kvm, long ext);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index b5de770b092e..60b43ea85c12 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -125,14 +125,14 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
- return kvm_mips_callbacks->hardware_enable();
+ return kvm_mips_callbacks->enable_virtualization_cpu();
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
- kvm_mips_callbacks->hardware_disable();
+ kvm_mips_callbacks->disable_virtualization_cpu();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 99d5a71e4300..ccab4d76b126 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2869,7 +2869,7 @@ static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
return ret + 1;
}
-static int kvm_vz_hardware_enable(void)
+static int kvm_vz_enable_virtualization_cpu(void)
{
unsigned int mmu_size, guest_mmu_size, ftlb_size;
u64 guest_cvmctl, cvmvmconfig;
@@ -2983,7 +2983,7 @@ static int kvm_vz_hardware_enable(void)
return 0;
}
-static void kvm_vz_hardware_disable(void)
+static void kvm_vz_disable_virtualization_cpu(void)
{
u64 cvmvmconfig;
unsigned int mmu_size;
@@ -3280,8 +3280,8 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
- .hardware_enable = kvm_vz_hardware_enable,
- .hardware_disable = kvm_vz_hardware_disable,
+ .enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu,
+ .disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
.check_extension = kvm_vz_check_extension,
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index b0f0816879df..5e8e37a722ef 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 0e59b8fd9bc6..83fe99861eb1 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1574,6 +1574,104 @@ static int proc_eeh_show(struct seq_file *m, void *v)
}
#endif /* CONFIG_PROC_FS */
+static int eeh_break_device(struct pci_dev *pdev)
+{
+ struct resource *bar = NULL;
+ void __iomem *mapped;
+ u16 old, bit;
+ int i, pos;
+
+ /* Do we have an MMIO BAR to disable? */
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if (!r->flags || !r->start)
+ continue;
+ if (r->flags & IORESOURCE_IO)
+ continue;
+ if (r->flags & IORESOURCE_UNSET)
+ continue;
+
+ bar = r;
+ break;
+ }
+
+ if (!bar) {
+ pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
+ return -ENXIO;
+ }
+
+ pci_err(pdev, "Going to break: %pR\n", bar);
+
+ if (pdev->is_virtfn) {
+#ifndef CONFIG_PCI_IOV
+ return -ENXIO;
+#else
+ /*
+ * VFs don't have a per-function COMMAND register, so the best
+ * we can do is clear the Memory Space Enable bit in the PF's
+ * SRIOV control reg.
+ *
+ * Unfortunately, this requires that we have a PF (i.e. doesn't
+ * work for a passed-through VF) and it has the potential side
+ * effect of also causing an EEH on every other VF under the
+ * PF. Oh well.
+ */
+ pdev = pdev->physfn;
+ if (!pdev)
+ return -ENXIO; /* passed through VFs have no PF */
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pos += PCI_SRIOV_CTRL;
+ bit = PCI_SRIOV_CTRL_MSE;
+#endif /* !CONFIG_PCI_IOV */
+ } else {
+ bit = PCI_COMMAND_MEMORY;
+ pos = PCI_COMMAND;
+ }
+
+ /*
+ * Process here is:
+ *
+ * 1. Disable Memory space.
+ *
+ * 2. Perform an MMIO to the device. This should result in an error
+ * (CA / UR) being raised by the device which results in an EEH
+ * PE freeze. Using the in_8() accessor skips the EEH detection hook,
+ * so the EEH detection machinery won't be
+ * triggered here. This is to match the usual behaviour of EEH
+ * where the HW will asynchronously freeze a PE and it's up to
+ * the kernel to notice and deal with it.
+ *
+ * 3. Turn Memory space back on. This is more important for VFs
+ * since recovery will probably fail if we don't. For normal devices
+ * the COMMAND register is reset as a part of re-initialising
+ * the device.
+ *
+ * Breaking stuff is the point so who cares if it's racy ;)
+ */
+ pci_read_config_word(pdev, pos, &old);
+
+ mapped = ioremap(bar->start, PAGE_SIZE);
+ if (!mapped) {
+ pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
+ return -ENXIO;
+ }
+
+ pci_write_config_word(pdev, pos, old & ~bit);
+ in_8(mapped);
+ pci_write_config_word(pdev, pos, old);
+
+ iounmap(mapped);
+
+ return 0;
+}
+
+int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
+{
+ return eeh_break_device(pdev);
+}
+
#ifdef CONFIG_DEBUG_FS
@@ -1725,99 +1823,6 @@ static const struct file_operations eeh_dev_check_fops = {
.read = eeh_debugfs_dev_usage,
};
-static int eeh_debugfs_break_device(struct pci_dev *pdev)
-{
- struct resource *bar = NULL;
- void __iomem *mapped;
- u16 old, bit;
- int i, pos;
-
- /* Do we have an MMIO BAR to disable? */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
- struct resource *r = &pdev->resource[i];
-
- if (!r->flags || !r->start)
- continue;
- if (r->flags & IORESOURCE_IO)
- continue;
- if (r->flags & IORESOURCE_UNSET)
- continue;
-
- bar = r;
- break;
- }
-
- if (!bar) {
- pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
- return -ENXIO;
- }
-
- pci_err(pdev, "Going to break: %pR\n", bar);
-
- if (pdev->is_virtfn) {
-#ifndef CONFIG_PCI_IOV
- return -ENXIO;
-#else
- /*
- * VFs don't have a per-function COMMAND register, so the best
- * we can do is clear the Memory Space Enable bit in the PF's
- * SRIOV control reg.
- *
- * Unfortunately, this requires that we have a PF (i.e doesn't
- * work for a passed-through VF) and it has the potential side
- * effect of also causing an EEH on every other VF under the
- * PF. Oh well.
- */
- pdev = pdev->physfn;
- if (!pdev)
- return -ENXIO; /* passed through VFs have no PF */
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- pos += PCI_SRIOV_CTRL;
- bit = PCI_SRIOV_CTRL_MSE;
-#endif /* !CONFIG_PCI_IOV */
- } else {
- bit = PCI_COMMAND_MEMORY;
- pos = PCI_COMMAND;
- }
-
- /*
- * Process here is:
- *
- * 1. Disable Memory space.
- *
- * 2. Perform an MMIO to the device. This should result in an error
- * (CA / UR) being raised by the device which results in an EEH
- * PE freeze. Using the in_8() accessor skips the eeh detection hook
- * so the freeze hook so the EEH Detection machinery won't be
- * triggered here. This is to match the usual behaviour of EEH
- * where the HW will asynchronously freeze a PE and it's up to
- * the kernel to notice and deal with it.
- *
- * 3. Turn Memory space back on. This is more important for VFs
- * since recovery will probably fail if we don't. For normal
- * the COMMAND register is reset as a part of re-initialising
- * the device.
- *
- * Breaking stuff is the point so who cares if it's racy ;)
- */
- pci_read_config_word(pdev, pos, &old);
-
- mapped = ioremap(bar->start, PAGE_SIZE);
- if (!mapped) {
- pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
- return -ENXIO;
- }
-
- pci_write_config_word(pdev, pos, old & ~bit);
- in_8(mapped);
- pci_write_config_word(pdev, pos, old);
-
- iounmap(mapped);
-
- return 0;
-}
-
static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1829,7 +1834,7 @@ static ssize_t eeh_dev_break_write(struct file *filp,
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- ret = eeh_debugfs_break_device(pdev);
+ ret = eeh_break_device(pdev);
pci_dev_put(pdev);
if (ret < 0)
@@ -1844,11 +1849,6 @@ static const struct file_operations eeh_dev_break_fops = {
.read = eeh_debugfs_dev_usage,
};
-int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
-{
- return eeh_debugfs_break_device(pdev);
-}
-
static ssize_t eeh_dev_can_recover(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 7e5e1c28e56a..8967903c15e9 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -46,7 +46,7 @@
.section ".text"
-#ifndef CONFIG_PPC64
+#ifndef __powerpc64__
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b6d515db869b..22dc5ea4196c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -177,7 +177,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
- select HAVE_RUST if 64BIT
+ select HAVE_RUST if RUSTC_SUPPORTS_RISCV
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
@@ -209,6 +209,13 @@ config RISCV
select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
+config RUSTC_SUPPORTS_RISCV
+ def_bool y
+ depends on 64BIT
+ # Shadow call stack requires rustc version 1.82+ due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1817
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index bab2ec34cd87..f3427f6de608 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -20,7 +20,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);
@@ -35,7 +35,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
kvm_riscv_aia_disable();
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 7ec1b8cd0de9..9b57add02cd5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -59,6 +59,7 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 0e855c5e91c5..5d9effb0867c 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -76,7 +76,6 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations dbfs_ops = {
.read = dbfs_read,
- .llseek = no_llseek,
.unlocked_ioctl = dbfs_ioctl,
};
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 858beaf4a8cb..d428635abf08 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -443,7 +443,6 @@ static const struct file_operations hypfs_file_ops = {
.release = hypfs_release,
.read_iter = hypfs_read_iter,
.write_iter = hypfs_write_iter,
- .llseek = no_llseek,
};
static struct file_system_type hypfs_type = {
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bce50ca75ea7..e62bea9ab21e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -163,7 +163,6 @@ static const struct file_operations debug_file_ops = {
.write = debug_input,
.open = debug_open,
.release = debug_close,
- .llseek = no_llseek,
};
static struct dentry *debug_debugfs_root_entry;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 18b0d025f3a2..e2e0aa463fbd 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1698,7 +1698,6 @@ static const struct file_operations cfset_fops = {
.release = cfset_release,
.unlocked_ioctl = cfset_ioctl,
.compat_ioctl = cfset_ioctl,
- .llseek = no_llseek
};
static struct miscdevice cfset_dev = {
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2be30a96696a..88055f58fbda 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -498,7 +498,6 @@ static const struct file_operations stsi_##fc##_##s1##_##s2##_fs_ops = { \
.open = stsi_open_##fc##_##s1##_##s2, \
.release = stsi_release, \
.read = stsi_read, \
- .llseek = no_llseek, \
};
static int stsi_release(struct inode *inode, struct file *file)
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index e26e68675c08..aa06c85bcbd3 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -13,10 +13,7 @@
* for details.
*/
.macro vdso_func func
- .globl __kernel_\func
- .type __kernel_\func,@function
- __ALIGN
-__kernel_\func:
+SYM_FUNC_START(__kernel_\func)
CFI_STARTPROC
aghi %r15,-STACK_FRAME_VDSO_OVERHEAD
CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
@@ -32,7 +29,7 @@ __kernel_\func:
CFI_RESTORE 15
br %r14
CFI_ENDPROC
- .size __kernel_\func,.-__kernel_\func
+SYM_FUNC_END(__kernel_\func)
.endm
vdso_func gettimeofday
@@ -41,16 +38,13 @@ vdso_func clock_gettime
vdso_func getcpu
.macro vdso_syscall func,syscall
- .globl __kernel_\func
- .type __kernel_\func,@function
- __ALIGN
-__kernel_\func:
+SYM_FUNC_START(__kernel_\func)
CFI_STARTPROC
svc \syscall
/* Make sure we notice when a syscall returns, which shouldn't happen */
.word 0
CFI_ENDPROC
- .size __kernel_\func,.-__kernel_\func
+SYM_FUNC_END(__kernel_\func)
.endm
vdso_syscall restart_syscall,__NR_restart_syscall
diff --git a/arch/s390/kernel/vdso64/vgetrandom-chacha.S b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
index d802b0a96f41..09c034c2f853 100644
--- a/arch/s390/kernel/vdso64/vgetrandom-chacha.S
+++ b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
+#include <asm/dwarf.h>
#include <asm/fpu-insn.h>
#define STATE0 %v0
@@ -12,9 +14,6 @@
#define COPY1 %v5
#define COPY2 %v6
#define COPY3 %v7
-#define PERM4 %v16
-#define PERM8 %v17
-#define PERM12 %v18
#define BEPERM %v19
#define TMP0 %v20
#define TMP1 %v21
@@ -23,13 +22,11 @@
.section .rodata
- .balign 128
-.Lconstants:
+ .balign 32
+SYM_DATA_START_LOCAL(chacha20_constants)
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
- .long 0x04050607,0x08090a0b,0x0c0d0e0f,0x00010203 # rotl 4 bytes
- .long 0x08090a0b,0x0c0d0e0f,0x00010203,0x04050607 # rotl 8 bytes
- .long 0x0c0d0e0f,0x00010203,0x04050607,0x08090a0b # rotl 12 bytes
.long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap
+SYM_DATA_END(chacha20_constants)
.text
/*
@@ -43,13 +40,14 @@
* size_t nblocks)
*/
SYM_FUNC_START(__arch_chacha20_blocks_nostack)
- larl %r1,.Lconstants
+ CFI_STARTPROC
+ larl %r1,chacha20_constants
/* COPY0 = "expand 32-byte k" */
VL COPY0,0,,%r1
- /* PERM4-PERM12,BEPERM = byte selectors for VPERM */
- VLM PERM4,BEPERM,16,%r1
+ /* BEPERM = byte selectors for VPERM */
+ ALTERNATIVE __stringify(VL BEPERM,16,,%r1), "brcl 0,0", ALT_FACILITY(148)
/* COPY1,COPY2 = key */
VLM COPY1,COPY2,0,%r3
@@ -89,11 +87,11 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VERLLF STATE1,STATE1,7
/* STATE1[0,1,2,3] = STATE1[1,2,3,0] */
- VPERM STATE1,STATE1,STATE1,PERM4
+ VSLDB STATE1,STATE1,STATE1,4
/* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
- VPERM STATE2,STATE2,STATE2,PERM8
+ VSLDB STATE2,STATE2,STATE2,8
/* STATE3[0,1,2,3] = STATE3[3,0,1,2] */
- VPERM STATE3,STATE3,STATE3,PERM12
+ VSLDB STATE3,STATE3,STATE3,12
/* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */
VAF STATE0,STATE0,STATE1
@@ -116,32 +114,38 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VERLLF STATE1,STATE1,7
/* STATE1[0,1,2,3] = STATE1[3,0,1,2] */
- VPERM STATE1,STATE1,STATE1,PERM12
+ VSLDB STATE1,STATE1,STATE1,12
/* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
- VPERM STATE2,STATE2,STATE2,PERM8
+ VSLDB STATE2,STATE2,STATE2,8
/* STATE3[0,1,2,3] = STATE3[1,2,3,0] */
- VPERM STATE3,STATE3,STATE3,PERM4
+ VSLDB STATE3,STATE3,STATE3,4
brctg %r0,.Ldoubleround
- /* OUTPUT0 = STATE0 + STATE0 */
+ /* OUTPUT0 = STATE0 + COPY0 */
VAF STATE0,STATE0,COPY0
- /* OUTPUT1 = STATE1 + STATE1 */
+ /* OUTPUT1 = STATE1 + COPY1 */
VAF STATE1,STATE1,COPY1
- /* OUTPUT2 = STATE2 + STATE2 */
+ /* OUTPUT2 = STATE2 + COPY2 */
VAF STATE2,STATE2,COPY2
- /* OUTPUT2 = STATE3 + STATE3 */
+ /* OUTPUT3 = STATE3 + COPY3 */
VAF STATE3,STATE3,COPY3
- /*
- * 32 bit wise little endian store to OUTPUT. If the vector
- * enhancement facility 2 is not installed use the slow path.
- */
- ALTERNATIVE "brc 0xf,.Lstoreslow", "nop", ALT_FACILITY(148)
- VSTBRF STATE0,0,,%r2
- VSTBRF STATE1,16,,%r2
- VSTBRF STATE2,32,,%r2
- VSTBRF STATE3,48,,%r2
-.Lstoredone:
+ ALTERNATIVE \
+ __stringify( \
+ /* Convert STATE to little endian and store to OUTPUT */\
+ VPERM TMP0,STATE0,STATE0,BEPERM; \
+ VPERM TMP1,STATE1,STATE1,BEPERM; \
+ VPERM TMP2,STATE2,STATE2,BEPERM; \
+ VPERM TMP3,STATE3,STATE3,BEPERM; \
+ VSTM TMP0,TMP3,0,%r2), \
+ __stringify( \
+ /* 32 bit wise little endian store to OUTPUT */ \
+ VSTBRF STATE0,0,,%r2; \
+ VSTBRF STATE1,16,,%r2; \
+ VSTBRF STATE2,32,,%r2; \
+ VSTBRF STATE3,48,,%r2; \
+ brcl 0,0), \
+ ALT_FACILITY(148)
/* ++COPY3.COUNTER */
/* alsih %r3,1 */
@@ -173,13 +177,5 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VZERO TMP3
br %r14
-
-.Lstoreslow:
- /* Convert STATE to little endian format and store to OUTPUT */
- VPERM TMP0,STATE0,STATE0,BEPERM
- VPERM TMP1,STATE1,STATE1,BEPERM
- VPERM TMP2,STATE2,STATE2,BEPERM
- VPERM TMP3,STATE3,STATE3,BEPERM
- VSTM TMP0,TMP3,0,%r2
- j .Lstoredone
+ CFI_ENDPROC
SYM_FUNC_END(__arch_chacha20_blocks_nostack)
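As context for the vgetrandom-chacha.S hunk above, a minimal userspace C sketch (illustrative only; the rotl_words() helper is invented here, nothing below is kernel code) of the word rotation that VSLDB with byte offsets 4, 8 and 12 performs on a ChaCha20 state row, i.e. what the removed PERM4/PERM8/PERM12 VPERM constants used to encode:

#include <stdint.h>
#include <stdio.h>

/* Rotate the four 32-bit lanes of a ChaCha20 state row left by n words. */
static void rotl_words(uint32_t row[4], int n)
{
	uint32_t tmp[4];
	int i;

	for (i = 0; i < 4; i++)
		tmp[i] = row[(i + n) % 4];
	for (i = 0; i < 4; i++)
		row[i] = tmp[i];
}

int main(void)
{
	uint32_t state1[4] = { 0, 1, 2, 3 };

	/* STATE1[0,1,2,3] = STATE1[1,2,3,0], like VSLDB ...,4 in the patch */
	rotl_words(state1, 1);
	printf("%u %u %u %u\n", state1[0], state1[1], state1[2], state1[3]);
	return 0;
}

Because the rotation amounts are immediates, the shift-double form needs no preloaded permutation vectors, which is why the .rodata block shrinks to the "expand 32-byte k" words plus the BEPERM byte-swap selector.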
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ae5d0a9d6911..377b9aaf8c92 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -191,8 +191,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
PERCPU_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0fd96860fc45..bb7134faaebf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -348,20 +348,29 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
-static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
+static __always_inline void __sortl_query(u8 (*query)[32])
{
asm volatile(
" lghi 0,0\n"
- " lgr 1,%[query]\n"
+ " la 1,%[query]\n"
/* Parameter registers are ignored */
- " .insn rrf,%[opc] << 16,2,4,6,0\n"
+ " .insn rre,0xb9380000,2,4\n"
+ : [query] "=R" (*query)
:
- : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
- : "cc", "memory", "0", "1");
+ : "cc", "0", "1");
}
-#define INSN_SORTL 0xb938
-#define INSN_DFLTCC 0xb939
+static __always_inline void __dfltcc_query(u8 (*query)[32])
+{
+ asm volatile(
+ " lghi 0,0\n"
+ " la 1,%[query]\n"
+ /* Parameter registers are ignored */
+ " .insn rrf,0xb9390000,2,4,6,0\n"
+ : [query] "=R" (*query)
+ :
+ : "cc", "0", "1");
+}
static void __init kvm_s390_cpu_feat_init(void)
{
@@ -415,10 +424,10 @@ static void __init kvm_s390_cpu_feat_init(void)
kvm_s390_available_subfunc.kdsa);
if (test_facility(150)) /* SORTL */
- __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+ __sortl_query(&kvm_s390_available_subfunc.sortl);
if (test_facility(151)) /* DFLTCC */
- __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+ __dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 688abc65c79e..7a96623a9d2e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(zero_page_mask);
static void __init setup_zero_pages(void)
{
- unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long total_pages = memblock_estimated_nr_free_pages();
unsigned int order;
struct page *page;
int i;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index ee90a91ed888..6f55a59a0871 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -657,7 +657,6 @@ static const struct file_operations clp_misc_fops = {
.release = clp_misc_release,
.unlocked_ioctl = clp_misc_ioctl,
.compat_ioctl = clp_misc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice clp_misc_device = {
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 0f384b1f45ca..53fc18a3d4c2 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -14,12 +14,6 @@
#define NO_IRQ_IGNORE ((unsigned int)-1)
/*
- * Simple Mask Register Support
- */
-extern void make_maskreg_irq(unsigned int irq);
-extern unsigned short *irq_mask_register;
-
-/*
* PINT IRQs
*/
void make_imask_irq(unsigned int irq);
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
- unsigned int lvl, pte, paddrbase;
+ unsigned int lvl, pte;
unsigned int ctx;
unsigned int paddr_calc;
- paddrbase = 0;
-
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
- paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
- paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
- paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
- paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index dca84fd6d00a..c89575d05021 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -11,7 +11,6 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_NO_PREEMPT_DYNAMIC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 99a7144b229f..819aabb4ecdc 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -164,7 +164,6 @@ static const struct file_operations harddog_fops = {
.compat_ioctl = compat_ptr_ioctl,
.open = harddog_open,
.release = harddog_release,
- .llseek = no_llseek,
};
static struct miscdevice harddog_miscdev = {
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index c42b793bce65..9d228878cea2 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -291,7 +291,6 @@ static int hostmixer_release(struct inode *inode, struct file *file)
static const struct file_operations hostaudio_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = hostaudio_read,
.write = hostaudio_write,
.poll = hostaudio_poll,
@@ -304,7 +303,6 @@ static const struct file_operations hostaudio_fops = {
static const struct file_operations hostmixer_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = hostmixer_ioctl_mixdev,
.open = hostmixer_open_mixdev,
.release = hostmixer_release,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 2d473282ab51..c992da83268d 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/fs.h>
+#include <asm/atomic.h>
#include <uapi/linux/filter.h>
#include <init.h>
#include <irq_kern.h>
@@ -102,18 +103,33 @@ static const struct {
static void vector_reset_stats(struct vector_private *vp)
{
+ /* We reuse the existing queue locks for stats */
+
+ /* RX stats are modified with RX head_lock held
+ * in vector_poll.
+ */
+
+ spin_lock(&vp->rx_queue->head_lock);
vp->estats.rx_queue_max = 0;
vp->estats.rx_queue_running_average = 0;
- vp->estats.tx_queue_max = 0;
- vp->estats.tx_queue_running_average = 0;
vp->estats.rx_encaps_errors = 0;
+ vp->estats.sg_ok = 0;
+ vp->estats.sg_linearized = 0;
+ spin_unlock(&vp->rx_queue->head_lock);
+
+ /* TX stats are modified with TX head_lock held
+ * in vector_send.
+ */
+
+ spin_lock(&vp->tx_queue->head_lock);
vp->estats.tx_timeout_count = 0;
vp->estats.tx_restart_queue = 0;
vp->estats.tx_kicks = 0;
vp->estats.tx_flow_control_xon = 0;
vp->estats.tx_flow_control_xoff = 0;
- vp->estats.sg_ok = 0;
- vp->estats.sg_linearized = 0;
+ vp->estats.tx_queue_max = 0;
+ vp->estats.tx_queue_running_average = 0;
+ spin_unlock(&vp->tx_queue->head_lock);
}
static int get_mtu(struct arglist *def)
@@ -232,12 +248,6 @@ static int get_transport_options(struct arglist *def)
static char *drop_buffer;
-/* Array backed queues optimized for bulk enqueue/dequeue and
- * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
- * For more details and full design rationale see
- * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
- */
-
/*
* Advance the mmsg queue head by n = advance. Resets the queue to
@@ -247,27 +257,13 @@ static char *drop_buffer;
static int vector_advancehead(struct vector_queue *qi, int advance)
{
- int queue_depth;
-
qi->head =
(qi->head + advance)
% qi->max_depth;
- spin_lock(&qi->tail_lock);
- qi->queue_depth -= advance;
-
- /* we are at 0, use this to
- * reset head and tail so we can use max size vectors
- */
-
- if (qi->queue_depth == 0) {
- qi->head = 0;
- qi->tail = 0;
- }
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->tail_lock);
- return queue_depth;
+ atomic_sub(advance, &qi->queue_depth);
+ return atomic_read(&qi->queue_depth);
}
/* Advance the queue tail by n = advance.
@@ -277,16 +273,11 @@ static int vector_advancehead(struct vector_queue *qi, int advance)
static int vector_advancetail(struct vector_queue *qi, int advance)
{
- int queue_depth;
-
qi->tail =
(qi->tail + advance)
% qi->max_depth;
- spin_lock(&qi->head_lock);
- qi->queue_depth += advance;
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->head_lock);
- return queue_depth;
+ atomic_add(advance, &qi->queue_depth);
+ return atomic_read(&qi->queue_depth);
}
static int prep_msg(struct vector_private *vp,
@@ -339,9 +330,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
int iov_count;
spin_lock(&qi->tail_lock);
- spin_lock(&qi->head_lock);
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->head_lock);
+ queue_depth = atomic_read(&qi->queue_depth);
if (skb)
packet_len = skb->len;
@@ -360,6 +349,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
mmsg_vector->msg_hdr.msg_iovlen = iov_count;
mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
+ wmb(); /* Make the packet visible to the NAPI poll thread */
queue_depth = vector_advancetail(qi, 1);
} else
goto drop;
@@ -398,7 +388,7 @@ static int consume_vector_skbs(struct vector_queue *qi, int count)
}
/*
- * Generic vector deque via sendmmsg with support for forming headers
+ * Generic vector dequeue via sendmmsg with support for forming headers
* using transport specific callback. Allows GRE, L2TPv3, RAW and
* other transports to use a common dequeue procedure in vector mode
*/
@@ -408,69 +398,64 @@ static int vector_send(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *send_from;
- int result = 0, send_len, queue_depth = qi->max_depth;
+ int result = 0, send_len;
if (spin_trylock(&qi->head_lock)) {
- if (spin_trylock(&qi->tail_lock)) {
- /* update queue_depth to current value */
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->tail_lock);
- while (queue_depth > 0) {
- /* Calculate the start of the vector */
- send_len = queue_depth;
- send_from = qi->mmsg_vector;
- send_from += qi->head;
- /* Adjust vector size if wraparound */
- if (send_len + qi->head > qi->max_depth)
- send_len = qi->max_depth - qi->head;
- /* Try to TX as many packets as possible */
- if (send_len > 0) {
- result = uml_vector_sendmmsg(
- vp->fds->tx_fd,
- send_from,
- send_len,
- 0
- );
- vp->in_write_poll =
- (result != send_len);
- }
- /* For some of the sendmmsg error scenarios
- * we may end being unsure in the TX success
- * for all packets. It is safer to declare
- * them all TX-ed and blame the network.
- */
- if (result < 0) {
- if (net_ratelimit())
- netdev_err(vp->dev, "sendmmsg err=%i\n",
- result);
- vp->in_error = true;
- result = send_len;
- }
- if (result > 0) {
- queue_depth =
- consume_vector_skbs(qi, result);
- /* This is equivalent to an TX IRQ.
- * Restart the upper layers to feed us
- * more packets.
- */
- if (result > vp->estats.tx_queue_max)
- vp->estats.tx_queue_max = result;
- vp->estats.tx_queue_running_average =
- (vp->estats.tx_queue_running_average + result) >> 1;
- }
- netif_wake_queue(qi->dev);
- /* if TX is busy, break out of the send loop,
- * poll write IRQ will reschedule xmit for us
+ /* update queue_depth to current value */
+ while (atomic_read(&qi->queue_depth) > 0) {
+ /* Calculate the start of the vector */
+ send_len = atomic_read(&qi->queue_depth);
+ send_from = qi->mmsg_vector;
+ send_from += qi->head;
+ /* Adjust vector size if wraparound */
+ if (send_len + qi->head > qi->max_depth)
+ send_len = qi->max_depth - qi->head;
+ /* Try to TX as many packets as possible */
+ if (send_len > 0) {
+ result = uml_vector_sendmmsg(
+ vp->fds->tx_fd,
+ send_from,
+ send_len,
+ 0
+ );
+ vp->in_write_poll =
+ (result != send_len);
+ }
+ /* For some of the sendmmsg error scenarios
+			 * we may end up being unsure of the TX success
+ * for all packets. It is safer to declare
+ * them all TX-ed and blame the network.
+ */
+ if (result < 0) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "sendmmsg err=%i\n",
+ result);
+ vp->in_error = true;
+ result = send_len;
+ }
+ if (result > 0) {
+ consume_vector_skbs(qi, result);
+				/* This is equivalent to a TX IRQ.
+ * Restart the upper layers to feed us
+ * more packets.
*/
- if (result != send_len) {
- vp->estats.tx_restart_queue++;
- break;
- }
+ if (result > vp->estats.tx_queue_max)
+ vp->estats.tx_queue_max = result;
+ vp->estats.tx_queue_running_average =
+ (vp->estats.tx_queue_running_average + result) >> 1;
+ }
+ netif_wake_queue(qi->dev);
+ /* if TX is busy, break out of the send loop,
+ * poll write IRQ will reschedule xmit for us.
+ */
+ if (result != send_len) {
+ vp->estats.tx_restart_queue++;
+ break;
}
}
spin_unlock(&qi->head_lock);
}
- return queue_depth;
+ return atomic_read(&qi->queue_depth);
}
/* Queue destructor. Deliberately stateless so we can use
@@ -589,7 +574,7 @@ static struct vector_queue *create_queue(
}
spin_lock_init(&result->head_lock);
spin_lock_init(&result->tail_lock);
- result->queue_depth = 0;
+ atomic_set(&result->queue_depth, 0);
result->head = 0;
result->tail = 0;
return result;
@@ -668,18 +653,27 @@ done:
}
-/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
+/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */
static void prep_queue_for_rx(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *mmsg_vector = qi->mmsg_vector;
void **skbuff_vector = qi->skbuff_vector;
- int i;
+ int i, queue_depth;
+
+ queue_depth = atomic_read(&qi->queue_depth);
- if (qi->queue_depth == 0)
+ if (queue_depth == 0)
return;
- for (i = 0; i < qi->queue_depth; i++) {
+
+ /* RX is always emptied 100% during each cycle, so we do not
+ * have to do the tail wraparound math for it.
+ */
+
+ qi->head = qi->tail = 0;
+
+ for (i = 0; i < queue_depth; i++) {
/* it is OK if allocation fails - recvmmsg with NULL data in
* iov argument still performs an RX, just drops the packet
* This allows us stop faffing around with a "drop buffer"
@@ -689,7 +683,7 @@ static void prep_queue_for_rx(struct vector_queue *qi)
skbuff_vector++;
mmsg_vector++;
}
- qi->queue_depth = 0;
+ atomic_set(&qi->queue_depth, 0);
}
static struct vector_device *find_device(int n)
@@ -972,7 +966,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget)
budget = qi->max_depth;
packet_count = uml_vector_recvmmsg(
- vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
+ vp->fds->rx_fd, qi->mmsg_vector, budget, 0);
if (packet_count < 0)
vp->in_error = true;
@@ -985,7 +979,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget)
* many do we need to prep the next time prep_queue_for_rx() is called.
*/
- qi->queue_depth = packet_count;
+ atomic_add(packet_count, &qi->queue_depth);
for (i = 0; i < packet_count; i++) {
skb = (*skbuff_vector);
@@ -1172,6 +1166,7 @@ static int vector_poll(struct napi_struct *napi, int budget)
if ((vp->options & VECTOR_TX) != 0)
tx_enqueued = (vector_send(vp->tx_queue) > 0);
+ spin_lock(&vp->rx_queue->head_lock);
if ((vp->options & VECTOR_RX) > 0)
err = vector_mmsg_rx(vp, budget);
else {
@@ -1179,12 +1174,13 @@ static int vector_poll(struct napi_struct *napi, int budget)
if (err > 0)
err = 1;
}
+ spin_unlock(&vp->rx_queue->head_lock);
if (err > 0)
work_done += err;
if (tx_enqueued || err > 0)
napi_schedule(napi);
- if (work_done < budget)
+ if (work_done <= budget)
napi_complete_done(napi, work_done);
return work_done;
}
@@ -1225,7 +1221,7 @@ static int vector_net_open(struct net_device *dev)
vp->rx_header_size,
MAX_IOV_SIZE
);
- vp->rx_queue->queue_depth = get_depth(vp->parsed);
+ atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed));
} else {
vp->header_rxbuffer = kmalloc(
vp->rx_header_size,
@@ -1467,7 +1463,17 @@ static void vector_get_ethtool_stats(struct net_device *dev,
{
struct vector_private *vp = netdev_priv(dev);
+ /* Stats are modified in the dequeue portions of
+	 * rx/tx, which are protected by the head locks;
+ * grabbing these locks here ensures they are up
+ * to date.
+ */
+
+ spin_lock(&vp->tx_queue->head_lock);
+ spin_lock(&vp->rx_queue->head_lock);
memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
+ spin_unlock(&vp->rx_queue->head_lock);
+ spin_unlock(&vp->tx_queue->head_lock);
}
static int vector_get_coalesce(struct net_device *netdev,
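The vector_kern.c changes above replace a lock-protected int queue_depth with an atomic counter. A rough userspace analogue, assuming C11 <stdatomic.h> rather than the kernel atomic_t API (advance_tail/advance_head are made-up names mirroring the driver's helpers):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_depth;	/* stands in for vector_queue::queue_depth */

/* Producer side: entries were written behind the tail, publish their count. */
static int advance_tail(int advance)
{
	atomic_fetch_add(&queue_depth, advance);
	return atomic_load(&queue_depth);
}

/* Consumer side: entries were sent from the head, retire their count. */
static int advance_head(int advance)
{
	atomic_fetch_sub(&queue_depth, advance);
	return atomic_load(&queue_depth);
}

int main(void)
{
	advance_tail(4);	/* enqueue four packets */
	advance_head(1);	/* transmit one */
	printf("depth now %d\n", atomic_load(&queue_depth));	/* prints 3 */
	return 0;
}

With the depth no longer needing the opposite side's spinlock, head_lock and tail_lock are left to serialize only their own end of the ring, and the remaining ordering concern is the explicit wmb() before vector_advancetail() so the NAPI poller sees a fully written mmsghdr.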
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index 806df551be0b..417834793658 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <asm/atomic.h>
#include "vector_user.h"
@@ -44,7 +45,8 @@ struct vector_queue {
struct net_device *dev;
spinlock_t head_lock;
spinlock_t tail_lock;
- int queue_depth, head, tail, max_depth, max_iov_frags;
+ atomic_t queue_depth;
+ int head, tail, max_depth, max_iov_frags;
short options;
};
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index b16a5e5619d3..2ea67e6fd067 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -46,6 +46,9 @@
#define TRANS_FD "fd"
#define TRANS_FD_LEN strlen(TRANS_FD)
+#define TRANS_VDE "vde"
+#define TRANS_VDE_LEN strlen(TRANS_VDE)
+
#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
@@ -434,6 +437,84 @@ fd_cleanup:
return NULL;
}
+/* enough char to store an int type */
+#define ENOUGH(type) ((CHAR_BIT * sizeof(type) - 1) / 3 + 2)
+#define ENOUGH_OCTAL(type) ((CHAR_BIT * sizeof(type) + 2) / 3)
+/* vde_plug --descr xx --port2 xx --mod2 xx --group2 xx seqpacket://NN vnl (NULL) */
+#define VDE_MAX_ARGC 12
+#define VDE_SEQPACKET_HEAD "seqpacket://"
+#define VDE_SEQPACKET_HEAD_LEN (sizeof(VDE_SEQPACKET_HEAD) - 1)
+#define VDE_DEFAULT_DESCRIPTION "UML"
+
+static struct vector_fds *user_init_vde_fds(struct arglist *ifspec)
+{
+ char seqpacketvnl[VDE_SEQPACKET_HEAD_LEN + ENOUGH(int) + 1];
+ char *argv[VDE_MAX_ARGC] = {"vde_plug"};
+ int argc = 1;
+ int rv;
+ int sv[2];
+ struct vector_fds *result = NULL;
+
+ char *vnl = uml_vector_fetch_arg(ifspec,"vnl");
+ char *descr = uml_vector_fetch_arg(ifspec,"descr");
+ char *port = uml_vector_fetch_arg(ifspec,"port");
+ char *mode = uml_vector_fetch_arg(ifspec,"mode");
+ char *group = uml_vector_fetch_arg(ifspec,"group");
+ if (descr == NULL) descr = VDE_DEFAULT_DESCRIPTION;
+
+ argv[argc++] = "--descr";
+ argv[argc++] = descr;
+ if (port != NULL) {
+ argv[argc++] = "--port2";
+ argv[argc++] = port;
+ }
+ if (mode != NULL) {
+ argv[argc++] = "--mod2";
+ argv[argc++] = mode;
+ }
+ if (group != NULL) {
+ argv[argc++] = "--group2";
+ argv[argc++] = group;
+ }
+ argv[argc++] = seqpacketvnl;
+ argv[argc++] = vnl;
+ argv[argc++] = NULL;
+
+ rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
+ if (rv < 0) {
+ printk(UM_KERN_ERR "vde: seqpacket socketpair err %d", -errno);
+ return NULL;
+ }
+ rv = os_set_exec_close(sv[0]);
+ if (rv < 0) {
+ printk(UM_KERN_ERR "vde: seqpacket socketpair cloexec err %d", -errno);
+ goto vde_cleanup_sv;
+ }
+ snprintf(seqpacketvnl, sizeof(seqpacketvnl), VDE_SEQPACKET_HEAD "%d", sv[1]);
+
+ run_helper(NULL, NULL, argv);
+
+ close(sv[1]);
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result == NULL) {
+ printk(UM_KERN_ERR "fd open: allocation failed");
+ goto vde_cleanup;
+ }
+
+ result->rx_fd = sv[0];
+ result->tx_fd = sv[0];
+ result->remote_addr_size = 0;
+ result->remote_addr = NULL;
+ return result;
+
+vde_cleanup_sv:
+ close(sv[1]);
+vde_cleanup:
+ close(sv[0]);
+ return NULL;
+}
+
static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
int rxfd = -1, txfd = -1;
@@ -673,6 +754,8 @@ struct vector_fds *uml_vector_user_open(
return user_init_unix_fds(parsed, ID_BESS);
if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
return user_init_fd_fds(parsed);
+ if (strncmp(transport, TRANS_VDE, TRANS_VDE_LEN) == 0)
+ return user_init_vde_fds(parsed);
return NULL;
}
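For readers unfamiliar with how the new VDE transport hands a socket to its helper, below is a stripped-down userspace sketch of the same socketpair pattern (a hypothetical stand-in; it forks a dummy child instead of exec'ing the real vde_plug): keep one end as the packet fd, encode the other end's descriptor number into a seqpacket:// argument, and close that end once the helper owns it.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	int sv[2];
	char vnl[32];
	pid_t pid;

	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) < 0) {
		perror("socketpair");
		return 1;
	}
	snprintf(vnl, sizeof(vnl), "seqpacket://%d", sv[1]);

	pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		/* child: stands in for the vde_plug helper, inherits sv[1] */
		printf("helper would attach to %s\n", vnl);
		_exit(0);
	}
	close(sv[1]);			/* parent keeps sv[0] as rx_fd/tx_fd */
	waitpid(pid, NULL, 0);
	close(sv[0]);
	return 0;
}

In the driver the kept end is additionally marked close-on-exec before the helper is spawned, so only the deliberately passed descriptor leaks into vde_plug.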
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 5bb397b65efb..83373c9963e7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -359,11 +359,4 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one((vaddr)); \
-} while (0)
-
#endif
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5a7c05275aa7..bce4595798da 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -28,20 +28,10 @@ struct thread_struct {
struct arch_thread arch;
jmp_buf switch_buf;
struct {
- int op;
- union {
- struct {
- int pid;
- } fork, exec;
- struct {
- int (*proc)(void *);
- void *arg;
- } thread;
- struct {
- void (*proc)(void *);
- void *arg;
- } cb;
- } u;
+ struct {
+ int (*proc)(void *);
+ void *arg;
+ } thread;
} request;
};
@@ -51,7 +41,7 @@ struct thread_struct {
.fault_addr = NULL, \
.prev_sched = NULL, \
.arch = INIT_ARCH_THREAD, \
- .request = { 0 } \
+ .request = { } \
}
/*
diff --git a/arch/um/include/asm/sysrq.h b/arch/um/include/asm/sysrq.h
deleted file mode 100644
index 8fc8c65cd357..000000000000
--- a/arch/um/include/asm/sysrq.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_SYSRQ_H
-#define __UM_SYSRQ_H
-
-struct task_struct;
-extern void show_trace(struct task_struct* task, unsigned long *stack);
-
-#endif
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 1e76ba40feba..140388c282f6 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -7,10 +7,7 @@
#define __MM_ID_H
struct mm_id {
- union {
- int mm_fd;
- int pid;
- } u;
+ int pid;
unsigned long stack;
int syscall_data_len;
};
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index ebaa116de30b..85c50122ab98 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -10,10 +10,8 @@
extern int userspace_pid[];
-extern int user_thread(unsigned long stack, int flags);
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
-extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);
extern struct mm_id *current_mm_id(void);
extern void current_mm_sync(void);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 2c15bb2c104c..cb8b5cd9285c 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -35,8 +35,5 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
clear_thread_flag(TIF_SINGLESTEP);
-#ifdef SUBARCH_EXECVE1
- SUBARCH_EXECVE1(regs->regs);
-#endif
}
EXPORT_SYMBOL(start_thread);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f36b63f53bab..be2856af6d4c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -109,8 +109,8 @@ void new_thread_handler(void)
schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL;
- fn = current->thread.request.u.thread.proc;
- arg = current->thread.request.u.thread.arg;
+ fn = current->thread.request.thread.proc;
+ arg = current->thread.request.thread.arg;
/*
* callback returns only if the kernel thread execs a process
@@ -158,8 +158,8 @@ int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
arch_copy_thread(&current->thread.arch, &p->thread.arch);
} else {
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
- p->thread.request.u.thread.proc = args->fn;
- p->thread.request.u.thread.arg = args->fn_arg;
+ p->thread.request.thread.proc = args->fn;
+ p->thread.request.thread.arg = args->fn_arg;
handler = new_thread_handler;
}
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 3736bca626ba..680bce4bd8fa 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -29,7 +29,7 @@ static void kill_off_processes(void)
t = find_lock_task_mm(p);
if (!t)
continue;
- pid = t->mm->context.id.u.pid;
+ pid = t->mm->context.id.pid;
task_unlock(t);
os_kill_ptraced_process(pid, 1);
}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 47f98d87ea3c..886ed5e65674 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -32,11 +32,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
new_id->stack = stack;
block_signals_trace();
- new_id->u.pid = start_userspace(stack);
+ new_id->pid = start_userspace(stack);
unblock_signals_trace();
- if (new_id->u.pid < 0) {
- ret = new_id->u.pid;
+ if (new_id->pid < 0) {
+ ret = new_id->pid;
goto out_free;
}
@@ -83,12 +83,12 @@ void destroy_context(struct mm_struct *mm)
* whole UML suddenly dying. Also, cover negative and
* 1 cases, since they shouldn't happen either.
*/
- if (mmu->id.u.pid < 2) {
+ if (mmu->id.pid < 2) {
printk(KERN_ERR "corrupt mm_context - pid = %d\n",
- mmu->id.u.pid);
+ mmu->id.pid);
return;
}
- os_kill_ptraced_process(mmu->id.u.pid, 1);
+ os_kill_ptraced_process(mmu->id.pid, 1);
free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 5f9c1c5f36e2..68657988c8d1 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -39,8 +39,8 @@ int __init start_uml(void)
init_new_thread_signals();
- init_task.thread.request.u.thread.proc = start_kernel_proc;
- init_task.thread.request.u.thread.arg = NULL;
+ init_task.thread.request.thread.proc = start_kernel_proc;
+ init_task.thread.request.thread.arg = NULL;
return start_idle_thread(task_stack_page(&init_task),
&init_task.thread.switch_buf);
}
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 9ee19e566da3..b09e85279d2b 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -12,23 +12,13 @@
#include <sysdep/syscalls.h>
#include <linux/time-internal.h>
#include <asm/unistd.h>
+#include <asm/delay.h>
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
int syscall;
- /*
- * If we have infinite CPU resources, then make every syscall also a
- * preemption point, since we don't have any other preemption in this
- * case, and kernel threads would basically never run until userspace
- * went to sleep, even if said userspace interacts with the kernel in
- * various ways.
- */
- if (time_travel_mode == TT_MODE_INFCPU ||
- time_travel_mode == TT_MODE_EXTERNAL)
- schedule();
-
/* Initialize the syscall number and default return value. */
UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
@@ -41,9 +31,25 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
- if (syscall >= 0 && syscall < __NR_syscalls)
- PT_REGS_SET_SYSCALL_RETURN(regs,
- EXECUTE_SYSCALL(syscall, regs));
+ if (syscall >= 0 && syscall < __NR_syscalls) {
+ unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
+
+ PT_REGS_SET_SYSCALL_RETURN(regs, ret);
+
+ /*
+ * An error value here can be some form of -ERESTARTSYS
+ * and then we'd just loop. Make any error syscalls take
+ * some time, so that it won't just loop if something is
+ * not ready, and hopefully other things will make some
+ * progress.
+ */
+ if (IS_ERR_VALUE(ret) &&
+ (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)) {
+ um_udelay(1);
+ schedule();
+ }
+ }
out:
syscall_trace_leave(regs);
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 746715379f12..4bb8622dc512 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -11,7 +11,6 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-#include <asm/sysrq.h>
#include <asm/stacktrace.h>
#include <os.h>
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 47b9f5e63566..29b27b90581f 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -839,7 +839,7 @@ static irqreturn_t um_timer(int irq, void *dev)
if (get_current()->mm != NULL)
{
/* userspace - relay signal, results in correct userspace timers */
- os_alarm_process(get_current()->mm->context.id.u.pid);
+ os_alarm_process(get_current()->mm->context.id.pid);
}
(*timer_clockevent.event_handler)(&timer_clockevent);
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 44c6fc697f3a..548af31d4111 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -82,16 +82,12 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
(x ? UM_PROT_EXEC : 0));
if (pte_newpage(*pte)) {
if (pte_present(*pte)) {
- if (pte_newpage(*pte)) {
- __u64 offset;
- unsigned long phys =
- pte_val(*pte) & PAGE_MASK;
- int fd = phys_mapping(phys, &offset);
-
- ret = ops->mmap(ops->mm_idp, addr,
- PAGE_SIZE, prot, fd,
- offset);
- }
+ __u64 offset;
+ unsigned long phys = pte_val(*pte) & PAGE_MASK;
+ int fd = phys_mapping(phys, &offset);
+
+ ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
+ prot, fd, offset);
} else
ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
} else if (pte_newprot(*pte))
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 5adf8f630049..f1d03cf3957f 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -528,7 +528,8 @@ int os_shutdown_socket(int fd, int r, int w)
ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
void *data, size_t data_len)
{
- char buf[CMSG_SPACE(sizeof(*fds) * n_fds)];
+#define MAX_RCV_FDS 2
+ char buf[CMSG_SPACE(sizeof(*fds) * MAX_RCV_FDS)];
struct cmsghdr *cmsg;
struct iovec iov = {
.iov_base = data,
@@ -538,10 +539,13 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = buf,
- .msg_controllen = sizeof(buf),
+ .msg_controllen = CMSG_SPACE(sizeof(*fds) * n_fds),
};
int n;
+ if (n_fds > MAX_RCV_FDS)
+ return -EINVAL;
+
n = recvmsg(fd, &msg, 0);
if (n < 0)
return -errno;
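The os_rcv_fd_msg() change above swaps a variable-length control buffer for one sized for a fixed MAX_RCV_FDS, while msg_controllen still advertises only the space the caller asked for. A self-contained userspace rendering of the idea (reconstructed for illustration; only the shape of the helper mirrors the kernel function):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

#define MAX_RCV_FDS 2

static ssize_t rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
			  void *data, size_t data_len)
{
	/* fixed-size buffer: big enough for the most fds we ever accept */
	char buf[CMSG_SPACE(sizeof(*fds) * MAX_RCV_FDS)];
	struct iovec iov = { .iov_base = data, .iov_len = data_len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = buf,
		/* only advertise room for the fds the caller asked for */
		.msg_controllen = CMSG_SPACE(sizeof(*fds) * n_fds),
	};
	struct cmsghdr *cmsg;
	ssize_t n;

	if (n_fds > MAX_RCV_FDS)
		return -EINVAL;

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return -errno;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
	    cmsg->cmsg_type == SCM_RIGHTS)
		memcpy(fds, CMSG_DATA(cmsg), sizeof(*fds) * n_fds);

	return n;
}

int main(void)
{
	int sv[2], fds[MAX_RCV_FDS];
	char byte;

	socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
	/* asking for more fds than the fixed buffer can hold is rejected */
	printf("n_fds=3 -> %zd\n", rcv_fd_msg(sv[0], fds, 3, &byte, 1));
	return 0;
}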
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index c55430775efd..9a13ac23c606 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -78,7 +78,7 @@ static inline long do_syscall_stub(struct mm_id *mm_idp)
{
struct stub_data *proc_data = (void *)mm_idp->stack;
int n, i;
- int err, pid = mm_idp->u.pid;
+ int err, pid = mm_idp->pid;
n = ptrace_setregs(pid, syscall_regs);
if (n < 0) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index f7088345b3fc..b6f656bcffb1 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -588,5 +588,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
- userspace_pid[0] = mm_idp->u.pid;
+ userspace_pid[0] = mm_idp->pid;
}
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 801fd85c3ef6..cd75e78a06c1 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -24,11 +24,15 @@ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
ifdef CONFIG_MITIGATION_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETHUNK_RUSTFLAGS := -Zfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+RETPOLINE_RUSTFLAGS += $(RETHUNK_RUSTFLAGS)
endif
export RETHUNK_CFLAGS
+export RETHUNK_RUSTFLAGS
export RETPOLINE_CFLAGS
+export RETPOLINE_RUSTFLAGS
export RETPOLINE_VDSO_CFLAGS
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
@@ -218,9 +222,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_MITIGATION_RETPOLINE
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS)
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
- # clang turns off jump table generation by default when under
+ # LLVM turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86. This has
# only been fixed starting from gcc stable version 8.4.0 and
# onwards, but not for older ones. See gcc bug #86952.
@@ -237,6 +242,10 @@ ifdef CONFIG_CALL_PADDING
PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
KBUILD_CFLAGS += $(PADDING_CFLAGS)
export PADDING_CFLAGS
+
+PADDING_RUSTFLAGS := -Zpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
+KBUILD_RUSTFLAGS += $(PADDING_RUSTFLAGS)
+export PADDING_RUSTFLAGS
endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index da8b66dce0da..327c45c5013f 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -16,6 +16,7 @@
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
+#include <asm/traps.h>
/* MMIO direction */
#define EPT_READ 0
@@ -433,6 +434,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
return -EINVAL;
}
+ if (!fault_in_kernel_space(ve->gla)) {
+ WARN_ONCE(1, "Access to userspace address is not supported");
+ return -EINVAL;
+ }
+
/*
* Reject EPT violation #VEs that split pages.
*
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 8db2ec4d6cda..1f650b4dde50 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -163,20 +163,18 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
-static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
- return i;
}
-static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
- return i;
}
static __always_inline void arch_atomic64_inc(atomic64_t *v)
diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h
index 80cc6386d7b1..ca4243318aad 100644
--- a/arch/x86/include/asm/cpuid.h
+++ b/arch/x86/include/asm/cpuid.h
@@ -179,6 +179,7 @@ static __always_inline bool cpuid_function_is_indexed(u32 function)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x24:
case 0x8000001d:
return true;
}
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 44949f972826..1a42f829667a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -135,6 +135,8 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC)
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
@@ -178,4 +180,7 @@
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
+/* Family 19 */
+#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 68ad4f923664..861d080ed4c6 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -14,8 +14,8 @@ BUILD_BUG_ON(1)
* be __static_call_return0.
*/
KVM_X86_OP(check_processor_compatibility)
-KVM_X86_OP(hardware_enable)
-KVM_X86_OP(hardware_disable)
+KVM_X86_OP(enable_virtualization_cpu)
+KVM_X86_OP(disable_virtualization_cpu)
KVM_X86_OP(hardware_unsetup)
KVM_X86_OP(has_emulated_msr)
KVM_X86_OP(vcpu_after_set_cpuid)
@@ -125,7 +125,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
-KVM_X86_OP(get_msr_feature)
+KVM_X86_OP(get_feature_msr)
KVM_X86_OP(check_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)
KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4a68cb3eba78..6d9f763a7bb9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -36,6 +36,7 @@
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>
+#include <asm/reboot.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -211,6 +212,7 @@ enum exit_fastpath_completion {
EXIT_FASTPATH_NONE,
EXIT_FASTPATH_REENTER_GUEST,
EXIT_FASTPATH_EXIT_HANDLED,
+ EXIT_FASTPATH_EXIT_USERSPACE,
};
typedef enum exit_fastpath_completion fastpath_t;
@@ -280,10 +282,6 @@ enum x86_intercept_stage;
#define PFERR_PRIVATE_ACCESS BIT_ULL(49)
#define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
-#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
- PFERR_WRITE_MASK | \
- PFERR_PRESENT_MASK)
-
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
@@ -1629,8 +1627,10 @@ struct kvm_x86_ops {
int (*check_processor_compatibility)(void);
- int (*hardware_enable)(void);
- void (*hardware_disable)(void);
+ int (*enable_virtualization_cpu)(void);
+ void (*disable_virtualization_cpu)(void);
+ cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;
+
void (*hardware_unsetup)(void);
bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
@@ -1727,6 +1727,8 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+
+ const bool x2apic_icr_is_split;
const unsigned long required_apicv_inhibits;
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
@@ -1806,7 +1808,7 @@ struct kvm_x86_ops {
int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
void (*guest_memory_reclaimed)(struct kvm *kvm);
- int (*get_msr_feature)(struct kvm_msr_entry *entry);
+ int (*get_feature_msr)(u32 msr, u64 *data);
int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len);
@@ -2060,6 +2062,8 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
@@ -2136,7 +2140,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ bool always_retry);
+
+static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
+ gpa_t cr2_or_gpa)
+{
+ return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
+}
+
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
@@ -2254,6 +2266,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_extint(struct kvm_vcpu *v);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
@@ -2345,7 +2358,8 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
KVM_X86_QUIRK_OUT_7E_INC_RIP | \
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
- KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
+ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
+ KVM_X86_QUIRK_SLOT_ZAP_ALL)
/*
* KVM previously used a u32 field in kvm_run to indicate the hypercall was
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index a7c06a46fb76..3ae84c3b8e6d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,20 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC 1ull /* Write Combining */
+/* RESERVED 2 */
+/* RESERVED 3 */
+#define X86_MEMTYPE_WT 4ull /* Write Through */
+#define X86_MEMTYPE_WP 5ull /* Write Protected */
+#define X86_MEMTYPE_WB 6ull /* Write Back */
+#define X86_MEMTYPE_UC_MINUS	7ull	/* Weak Uncacheable (PAT only) */
+
/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
#define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@@ -365,6 +379,12 @@
#define MSR_IA32_CR_PAT 0x00000277
+#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \
+ ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
+ (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
+ (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
+ (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
+
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
@@ -1159,15 +1179,6 @@
#define MSR_IA32_VMX_VMFUNC 0x00000491
#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT 32
-#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
-#define VMX_BASIC_64 0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT 50
-#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
-#define VMX_BASIC_INOUT 0x0040000000000000LLU
-
/* Resctrl MSRs: */
/* - Intel: */
#define MSR_IA32_L3_QOS_CFG 0xc81
@@ -1185,11 +1196,6 @@
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
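To make the new PAT_VALUE()/X86_MEMTYPE_* definitions above concrete, a small standalone program (macro bodies copied from the hunk; the WB/WT/UC-/UC layout passed to PAT_VALUE() is just the architectural power-on default, used here as an example) that packs the eight PAT entries one byte apart:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define X86_MEMTYPE_UC		0ull	/* Uncacheable */
#define X86_MEMTYPE_WC		1ull	/* Write Combining */
#define X86_MEMTYPE_WT		4ull	/* Write Through */
#define X86_MEMTYPE_WP		5ull	/* Write Protected */
#define X86_MEMTYPE_WB		6ull	/* Write Back */
#define X86_MEMTYPE_UC_MINUS	7ull	/* Weak Uncacheable */

#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7)			\
	((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) |		\
	 (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) |	\
	 (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) |	\
	 (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))

int main(void)
{
	uint64_t pat = PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);

	printf("IA32_PAT = %#018" PRIx64 "\n", pat);	/* 0x0007040600070406 */
	return 0;
}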
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 7e9db77231ac..d1426b64c1b9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -270,5 +270,26 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
#include <asm/pgtable-invert.h>
-#endif /* !__ASSEMBLY__ */
+#else /* __ASSEMBLY__ */
+
+#define l4_index(x) (((x) >> 39) & 511)
+#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
+L4_START_KERNEL = l4_index(__START_KERNEL_map)
+
+L3_START_KERNEL = pud_index(__START_KERNEL_map)
+
+#define SYM_DATA_START_PAGE_ALIGNED(name) \
+ SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
+
+/* Automate the creation of 1 to 1 mapping pmd entries */
+#define PMDS(START, PERM, COUNT) \
+ i = 0 ; \
+ .rept (COUNT) ; \
+ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \
+ i = i + 1 ; \
+ .endr
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 6536873f8fc0..c02183d3cdd7 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,12 +25,14 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
-#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
typedef void (cpu_emergency_virt_cb)(void);
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_disable_virtualization(void);
#else
+static inline void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) {}
+static inline void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) {}
static inline void cpu_emergency_disable_virtualization(void) {}
#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f0dea3750ca9..2b59b9951c90 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -516,6 +516,20 @@ struct ghcb {
u32 ghcb_usage;
} __packed;
+struct vmcb {
+ struct vmcb_control_area control;
+ union {
+ struct vmcb_save_area save;
+
+ /*
+ * For SEV-ES VMs, the save area in the VMCB is used only to
+ * save/load host state. Guest state resides in a separate
+ * page, the aptly named VM Save Area (VMSA), that is encrypted
+ * with the guest's private key.
+ */
+ struct sev_es_save_area host_sev_es_save;
+ };
+} __packed;
#define EXPECTED_VMCB_SAVE_AREA_SIZE 744
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
@@ -532,6 +546,7 @@ static inline void __unused_size_checks(void)
BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
+ BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
/* Check offsets of reserved fields */
@@ -568,11 +583,6 @@ static inline void __unused_size_checks(void)
BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
}
-struct vmcb {
- struct vmcb_control_area control;
- struct vmcb_save_area save;
-} __packed;
-
#define SVM_CPUID_FUNC 0x8000000a
#define SVM_SELECTOR_S_SHIFT 4
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index d77a31039f24..f7fd4369b821 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -122,19 +122,17 @@
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
-#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
-#define VMX_MISC_SAVE_EFER_LMA 0x00000020
-#define VMX_MISC_ACTIVITY_HLT 0x00000040
-#define VMX_MISC_ACTIVITY_WAIT_SIPI 0x00000100
-#define VMX_MISC_ZERO_LEN_INS 0x40000000
-#define VMX_MISC_MSR_LIST_MULTIPLIER 512
-
/* VMFUNC functions */
#define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28)
#define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
#define VMFUNC_EPTP_ENTRIES 512
+#define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48)
+#define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49)
+#define VMX_BASIC_INOUT BIT_ULL(54)
+#define VMX_BASIC_TRUE_CTLS BIT_ULL(55)
+
static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
return vmx_basic & GENMASK_ULL(30, 0);
@@ -145,9 +143,30 @@ static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}
+static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
+{
+ return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
+}
+
+static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype)
+{
+ return revision | ((u64)size << 32) | ((u64)memtype << 50);
+}
+
+#define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5)
+#define VMX_MISC_ACTIVITY_HLT BIT_ULL(6)
+#define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7)
+#define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8)
+#define VMX_MISC_INTEL_PT BIT_ULL(14)
+#define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15)
+#define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28)
+#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29)
+#define VMX_MISC_ZERO_LEN_INS BIT_ULL(30)
+#define VMX_MISC_MSR_LIST_MULTIPLIER 512
+
static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
- return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ return vmx_misc & GENMASK_ULL(4, 0);
}
static inline int vmx_misc_cr3_count(u64 vmx_misc)
@@ -508,9 +527,10 @@ enum vmcs_field {
#define VMX_EPTP_PWL_4 0x18ull
#define VMX_EPTP_PWL_5 0x20ull
#define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
+/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
#define VMX_EPTP_MT_MASK 0x7ull
-#define VMX_EPTP_MT_WB 0x6ull
-#define VMX_EPTP_MT_UC 0x0ull
+#define VMX_EPTP_MT_WB X86_MEMTYPE_WB
+#define VMX_EPTP_MT_UC X86_MEMTYPE_UC
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
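A quick userspace round-trip of the reworked VMX_BASIC accessors above (GENMASK_ULL() is re-defined locally because this is not kernel code, and the revision/size/memtype fed to main() are arbitrary example values): the encoder packs the VMCS revision into bits 30:0, its size into bits 44:32 and the memory type into bits 53:50, and the accessors extract them again.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint32_t vmx_basic_vmcs_revision_id(uint64_t vmx_basic)
{
	return vmx_basic & GENMASK_ULL(30, 0);
}

static uint32_t vmx_basic_vmcs_size(uint64_t vmx_basic)
{
	return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

static uint32_t vmx_basic_vmcs_mem_type(uint64_t vmx_basic)
{
	return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
}

static uint64_t vmx_basic_encode_vmcs_info(uint32_t revision, uint16_t size,
					   uint8_t memtype)
{
	return revision | ((uint64_t)size << 32) | ((uint64_t)memtype << 50);
}

int main(void)
{
	/* revision 1, 4 KiB VMCS, write-back (6) memory type */
	uint64_t basic = vmx_basic_encode_vmcs_info(1, 4096, 6);

	printf("revision=%u size=%u memtype=%u\n",
	       vmx_basic_vmcs_revision_id(basic),
	       vmx_basic_vmcs_size(basic),
	       vmx_basic_vmcs_mem_type(basic));
	return 0;
}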
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index bf57a824f722..a8debbf2f702 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
+#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index a3aa0199222e..af44fd5dbd7c 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -331,7 +331,6 @@ static const struct file_operations mce_chrdev_ops = {
.poll = mce_chrdev_poll,
.unlocked_ioctl = mce_chrdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice mce_chrdev_device = {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2a2fc14955cd..989d368be04f 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -55,6 +55,12 @@
#include "mtrr.h"
+static_assert(X86_MEMTYPE_UC == MTRR_TYPE_UNCACHABLE);
+static_assert(X86_MEMTYPE_WC == MTRR_TYPE_WRCOMB);
+static_assert(X86_MEMTYPE_WT == MTRR_TYPE_WRTHROUGH);
+static_assert(X86_MEMTYPE_WP == MTRR_TYPE_WRPROT);
+static_assert(X86_MEMTYPE_WB == MTRR_TYPE_WRBACK);
+
/* arch_phys_wc_add returns an MTRR register index plus this offset. */
#define MTRR_TO_PHYS_WC_OFFSET 1000
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index e69489d48625..972e6b6b0481 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1567,7 +1567,6 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations pseudo_lock_dev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = NULL,
.write = NULL,
.open = pseudo_lock_dev_open,
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 330922b328bf..16752b8dfa89 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -32,13 +32,6 @@
* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
*/
-#define l4_index(x) (((x) >> 39) & 511)
-#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-
-L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
-L4_START_KERNEL = l4_index(__START_KERNEL_map)
-
-L3_START_KERNEL = pud_index(__START_KERNEL_map)
__HEAD
.code64
@@ -577,9 +570,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
SYM_CODE_END(vc_no_ghcb)
#endif
-#define SYM_DATA_START_PAGE_ALIGNED(name) \
- SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
-
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
* Each PGD needs to be 8k long and 8k aligned. We do not
@@ -601,14 +591,6 @@ SYM_CODE_END(vc_no_ghcb)
#define PTI_USER_PGD_FILL 0
#endif
-/* Automate the creation of 1 to 1 mapping pmd entries */
-#define PMDS(START, PERM, COUNT) \
- i = 0 ; \
- .rept (COUNT) ; \
- .quad (START) + (i << PMD_SHIFT) + (PERM) ; \
- i = i + 1 ; \
- .endr
-
__INITDATA
.balign 4
@@ -708,8 +690,6 @@ SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
.endr
SYM_DATA_END(level1_fixmap_pgt)
-#undef PMDS
-
.data
.align 16
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6e73403e874f..6726be89b7a6 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -357,8 +357,7 @@ SECTIONS
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2617be544480..41786b834b16 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -705,7 +705,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
- F(AMX_COMPLEX)
+ F(AMX_COMPLEX) | F(AVX10)
);
kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
@@ -721,6 +721,10 @@ void kvm_set_cpu_caps(void)
SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
);
+ kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX,
+ F(AVX10_128) | F(AVX10_256) | F(AVX10_512)
+ );
+
kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
@@ -949,7 +953,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
switch (function) {
case 0:
/* Limited to the highest leaf implemented in KVM. */
- entry->eax = min(entry->eax, 0x1fU);
+ entry->eax = min(entry->eax, 0x24U);
break;
case 1:
cpuid_entry_override(entry, CPUID_1_EDX);
@@ -1174,6 +1178,28 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
}
break;
+ case 0x24: {
+ u8 avx10_version;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
+ /*
+ * The AVX10 version is encoded in EBX[7:0]. Note, the version
+ * is guaranteed to be >=1 if AVX10 is supported. Note #2, the
+ * version needs to be captured before overriding EBX features!
+ */
+ avx10_version = min_t(u8, entry->ebx & 0xff, 1);
+ cpuid_entry_override(entry, CPUID_24_0_EBX);
+ entry->ebx |= avx10_version;
+
+ entry->eax = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
entry->eax = KVM_CPUID_FEATURES;
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 3d7eb11d0e45..63f66c51975a 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
* Read pending interrupt (from non-APIC source)
* vector and intack.
*/
-static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
if (!kvm_cpu_has_extint(v)) {
WARN_ON(!lapic_in_kernel(v));
@@ -131,6 +131,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
} else
return kvm_pic_read_irq(v->kvm); /* PIC */
}
+EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
/*
* Read pending interrupt vector and intack.
@@ -141,9 +142,12 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
if (vector != -1)
return vector; /* PIC */
- return kvm_get_apic_interrupt(v); /* APIC */
+ vector = kvm_apic_has_interrupt(v); /* APIC */
+ if (vector != -1)
+ kvm_apic_ack_interrupt(v, vector);
+
+ return vector;
}
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5bb481aefcbc..2098dc689088 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1944,7 +1944,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
u64 ns = 0;
ktime_t expire;
struct kvm_vcpu *vcpu = apic->vcpu;
- unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
+ u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
ktime_t now;
@@ -2453,6 +2453,43 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
+
+int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+{
+ if (data & X2APIC_ICR_RESERVED_BITS)
+ return 1;
+
+ /*
+ * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
+ * only AMD requires it to be zero, Intel essentially just ignores the
+ * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
+ * the CPU performs the reserved bits checks, i.e. the underlying CPU
+ * behavior will "win". Arbitrarily clear the BUSY bit, as there is no
+ * sane way to provide consistent behavior with respect to hardware.
+ */
+ data &= ~APIC_ICR_BUSY;
+
+ kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+ if (kvm_x86_ops.x2apic_icr_is_split) {
+ kvm_lapic_set_reg(apic, APIC_ICR, data);
+ kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
+ } else {
+ kvm_lapic_set_reg64(apic, APIC_ICR, data);
+ }
+ trace_kvm_apic_write(APIC_ICR, data);
+ return 0;
+}
+
+static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
+{
+ if (kvm_x86_ops.x2apic_icr_is_split)
+ return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
+ (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
+
+ return kvm_lapic_get_reg64(apic, APIC_ICR);
+}
+
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
@@ -2470,7 +2507,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
* maybe-unnecessary write, and both are in the noise anyways.
*/
if (apic_x2apic_mode(apic) && offset == APIC_ICR)
- kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
+ WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
else
kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
@@ -2922,14 +2959,13 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
}
}
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
{
- int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (vector == -1)
- return -1;
+ if (WARN_ON_ONCE(vector < 0 || !apic))
+ return;
/*
* We get here even with APIC virtualization enabled, if doing
@@ -2957,8 +2993,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
__apic_update_ppr(apic, &ppr);
}
- return vector;
}
+EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s, bool set)
@@ -2990,18 +3026,22 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
/*
* In x2APIC mode, the LDR is fixed and based on the id. And
- * ICR is internally a single 64-bit register, but needs to be
- * split to ICR+ICR2 in userspace for backwards compatibility.
+ * if the ICR is _not_ split, ICR is internally a single 64-bit
+ * register, but needs to be split to ICR+ICR2 in userspace for
+ * backwards compatibility.
*/
- if (set) {
+ if (set)
*ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
- icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
- (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
- __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
- } else {
- icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
- __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ if (!kvm_x86_ops.x2apic_icr_is_split) {
+ if (set) {
+ icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
+ (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
+ __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
+ } else {
+ icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
+ __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ }
}
}
@@ -3194,22 +3234,12 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
return 0;
}
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
-{
- data &= ~APIC_ICR_BUSY;
-
- kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
- kvm_lapic_set_reg64(apic, APIC_ICR, data);
- trace_kvm_apic_write(APIC_ICR, data);
- return 0;
-}
-
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
u32 low;
if (reg == APIC_ICR) {
- *data = kvm_lapic_get_reg64(apic, APIC_ICR);
+ *data = kvm_x2apic_icr_read(apic);
return 0;
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7ef8ae73e82d..1b8ef9856422 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -88,15 +88,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
-u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_recalculate_apic_map(struct kvm *kvm);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 4341e0e28571..9dc5dd43ae7f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -223,8 +223,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool kvm_mmu_may_ignore_guest_pat(void);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
-
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7813d28b082f..e52f990548df 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -614,32 +614,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
return __get_spte_lockless(sptep);
}
-/* Returns the Accessed status of the PTE and resets it at the same time. */
-static bool mmu_spte_age(u64 *sptep)
-{
- u64 spte = mmu_spte_get_lockless(sptep);
-
- if (!is_accessed_spte(spte))
- return false;
-
- if (spte_ad_enabled(spte)) {
- clear_bit((ffs(shadow_accessed_mask) - 1),
- (unsigned long *)sptep);
- } else {
- /*
- * Capture the dirty status of the page, so that it doesn't get
- * lost when the SPTE is marked for access tracking.
- */
- if (is_writable_pte(spte))
- kvm_set_pfn_dirty(spte_to_pfn(spte));
-
- spte = mark_spte_for_access_track(spte);
- mmu_spte_update_no_track(sptep, spte);
- }
-
- return true;
-}
-
static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
{
return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
@@ -938,6 +912,7 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
* in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
* pte_list_desc containing more mappings.
*/
+#define KVM_RMAP_MANY BIT(0)
/*
* Returns the number of pointers in the rmap chain, not counting the new one.
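The KVM_RMAP_MANY define added above is a low-bit pointer tag: pte_list_desc allocations are at least word-aligned, so bit 0 of rmap_head->val is free to record whether the value is a single SPTE pointer or a pointer to a descriptor chain. A minimal generic sketch of the idea, using hypothetical names rather than KVM's types:

/* Sketch only: tagging the low bit of an aligned pointer. */
#define MANY_BIT 0x1UL

/* Store either a single entry (bit 0 clear) or a descriptor (bit 0 set). */
static inline unsigned long pack_single(void *entry)
{
	return (unsigned long)entry;
}

static inline unsigned long pack_desc(void *desc)
{
	return (unsigned long)desc | MANY_BIT;
}

static inline int is_many(unsigned long val)
{
	return val & MANY_BIT;
}

static inline void *unpack(unsigned long val)
{
	return (void *)(val & ~MANY_BIT);	/* strip the tag */
}

int main(void)
{
	int x = 42;
	unsigned long v = pack_single(&x);

	return (!is_many(v) && unpack(v) == &x) ? 0 : 1;
}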
@@ -950,16 +925,16 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
if (!rmap_head->val) {
rmap_head->val = (unsigned long)spte;
- } else if (!(rmap_head->val & 1)) {
+ } else if (!(rmap_head->val & KVM_RMAP_MANY)) {
desc = kvm_mmu_memory_cache_alloc(cache);
desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
desc->spte_count = 2;
desc->tail_count = 0;
- rmap_head->val = (unsigned long)desc | 1;
+ rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
++count;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
count = desc->tail_count + desc->spte_count;
/*
@@ -968,10 +943,10 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
*/
if (desc->spte_count == PTE_LIST_EXT) {
desc = kvm_mmu_memory_cache_alloc(cache);
- desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc->more = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
desc->spte_count = 0;
desc->tail_count = count;
- rmap_head->val = (unsigned long)desc | 1;
+ rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
}
desc->sptes[desc->spte_count++] = spte;
}
@@ -982,7 +957,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i)
{
- struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
int j = head_desc->spte_count - 1;
/*
@@ -1011,7 +986,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
if (!head_desc->more)
rmap_head->val = 0;
else
- rmap_head->val = (unsigned long)head_desc->more | 1;
+ rmap_head->val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
mmu_free_pte_list_desc(head_desc);
}
@@ -1024,13 +999,13 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
return;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
return;
rmap_head->val = 0;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
while (desc) {
for (i = 0; i < desc->spte_count; ++i) {
if (desc->sptes[i] == spte) {
@@ -1063,12 +1038,12 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
if (!rmap_head->val)
return false;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
goto out;
}
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
@@ -1088,10 +1063,10 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
if (!rmap_head->val)
return 0;
- else if (!(rmap_head->val & 1))
+ else if (!(rmap_head->val & KVM_RMAP_MANY))
return 1;
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
return desc->tail_count + desc->spte_count;
}
@@ -1153,13 +1128,13 @@ static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
if (!rmap_head->val)
return NULL;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
iter->desc = NULL;
sptep = (u64 *)rmap_head->val;
goto out;
}
- iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ iter->desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
iter->pos = 0;
sptep = iter->desc->sptes[iter->pos];
out:
@@ -1307,15 +1282,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
return flush;
}
-/**
- * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
- * @kvm: kvm instance
- * @slot: slot to protect
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should protect
- *
- * Used when we do not need to care about huge page mappings.
- */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
@@ -1339,16 +1305,6 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
}
}
-/**
- * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
- * protect the page if the D-bit isn't supported.
- * @kvm: kvm instance
- * @slot: slot to clear D-bit
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should clear D-bit
- *
- * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
- */
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
@@ -1372,24 +1328,16 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
}
}
-/**
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * PT level pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- *
- * We need to care about huge page mappings: e.g. during dirty logging we may
- * have such mappings.
- */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
/*
- * Huge pages are NOT write protected when we start dirty logging in
- * initially-all-set mode; must write protect them here so that they
- * are split to 4K on the first write.
+ * If the slot was assumed to be "initially all dirty", write-protect
+ * huge pages to ensure they are split to 4KiB on the first write (KVM
+ * dirty logs at 4KiB granularity). If eager page splitting is enabled,
+ * immediately try to split huge pages, e.g. so that vCPUs don't get
+ * saddled with the cost of splitting.
*
* The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
* of memslot has no such restriction, so the range can cross two large
@@ -1411,7 +1359,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
PG_LEVEL_2M);
}
- /* Now handle 4K PTEs. */
+ /*
+ * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
+ * mask. If PML is enabled and the GFN doesn't need to be write-
+ * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
+ * Otherwise clear the Writable bit.
+ *
+ * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
+ * enabled, but it chooses between clearing the Dirty bit and the
+ * Writable bit based on the context.
+ */
if (kvm_x86_ops.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
@@ -1453,16 +1410,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot)
-{
- return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
+ const struct kvm_memory_slot *slot)
{
- return __kvm_zap_rmap(kvm, rmap_head, slot);
+ return kvm_zap_all_rmap_sptes(kvm, rmap_head);
}
struct slot_rmap_walk_iterator {
@@ -1513,7 +1464,7 @@ static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
while (++iterator->rmap <= iterator->end_rmap) {
- iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
+ iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
if (iterator->rmap->val)
return;
@@ -1534,23 +1485,71 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
slot_rmap_walk_okay(_iter_); \
slot_rmap_walk_next(_iter_))
-typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn,
- int level);
+/* The return value indicates if tlb flush on all vcpus is needed. */
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head,
+ const struct kvm_memory_slot *slot);
-static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
- struct kvm_gfn_range *range,
- rmap_handler_t handler)
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ int start_level, int end_level,
+ gfn_t start_gfn, gfn_t end_gfn,
+ bool can_yield, bool flush_on_yield,
+ bool flush)
{
struct slot_rmap_walk_iterator iterator;
- bool ret = false;
- for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- range->start, range->end - 1, &iterator)
- ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
- iterator.level);
+ lockdep_assert_held_write(&kvm->mmu_lock);
- return ret;
+ for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
+ end_gfn, &iterator) {
+ if (iterator.rmap)
+ flush |= fn(kvm, iterator.rmap, slot);
+
+ if (!can_yield)
+ continue;
+
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+ if (flush && flush_on_yield) {
+ kvm_flush_remote_tlbs_range(kvm, start_gfn,
+ iterator.gfn - start_gfn + 1);
+ flush = false;
+ }
+ cond_resched_rwlock_write(&kvm->mmu_lock);
+ }
+ }
+
+ return flush;
+}
+
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ int start_level, int end_level,
+ bool flush_on_yield)
+{
+ return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+ slot->base_gfn, slot->base_gfn + slot->npages - 1,
+ true, flush_on_yield, false);
+}
+
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ bool flush_on_yield)
+{
+ return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
+}
+
+static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ gfn_t start, gfn_t end, bool can_yield,
+ bool flush)
+{
+ return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
+ PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+ start, end - 1, can_yield, true, flush);
}
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1558,7 +1557,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool flush = false;
if (kvm_memslots_have_rmaps(kvm))
- flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+ flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
+ range->start, range->end,
+ range->may_block, flush);
if (tdp_mmu_enabled)
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1570,31 +1571,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return flush;
}
-static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
- u64 *sptep;
- struct rmap_iterator iter;
- int young = 0;
-
- for_each_rmap_spte(rmap_head, &iter, sptep)
- young |= mmu_spte_age(sptep);
-
- return young;
-}
-
-static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
- u64 *sptep;
- struct rmap_iterator iter;
-
- for_each_rmap_spte(rmap_head, &iter, sptep)
- if (is_accessed_spte(*sptep))
- return true;
- return false;
-}
-
#define RMAP_RECYCLE_THRESHOLD 1000
static void __rmap_add(struct kvm *kvm,
@@ -1629,12 +1605,52 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
}
+static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
+ struct kvm_gfn_range *range, bool test_only)
+{
+ struct slot_rmap_walk_iterator iterator;
+ struct rmap_iterator iter;
+ bool young = false;
+ u64 *sptep;
+
+ for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+ range->start, range->end - 1, &iterator) {
+ for_each_rmap_spte(iterator.rmap, &iter, sptep) {
+ u64 spte = *sptep;
+
+ if (!is_accessed_spte(spte))
+ continue;
+
+ if (test_only)
+ return true;
+
+ if (spte_ad_enabled(spte)) {
+ clear_bit((ffs(shadow_accessed_mask) - 1),
+ (unsigned long *)sptep);
+ } else {
+ /*
+ * Capture the dirty status of the page, so that
+ * it doesn't get lost when the SPTE is marked
+ * for access tracking.
+ */
+ if (is_writable_pte(spte))
+ kvm_set_pfn_dirty(spte_to_pfn(spte));
+
+ spte = mark_spte_for_access_track(spte);
+ mmu_spte_update_no_track(sptep, spte);
+ }
+ young = true;
+ }
+ }
+ return young;
+}
+
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
if (kvm_memslots_have_rmaps(kvm))
- young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+ young = kvm_rmap_age_gfn_range(kvm, range, false);
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1647,7 +1663,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool young = false;
if (kvm_memslots_have_rmaps(kvm))
- young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+ young = kvm_rmap_age_gfn_range(kvm, range, true);
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
@@ -2713,36 +2729,49 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
write_unlock(&kvm->mmu_lock);
}
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ bool always_retry)
{
- struct kvm_mmu_page *sp;
+ struct kvm *kvm = vcpu->kvm;
LIST_HEAD(invalid_list);
- int r;
+ struct kvm_mmu_page *sp;
+ gpa_t gpa = cr2_or_gpa;
+ bool r = false;
+
+ /*
+ * Bail early if there aren't any write-protected shadow pages to avoid
+ * unnecessarily taking mmu_lock, e.g. if the gfn is write-tracked
+ * by a third party. Reading indirect_shadow_pages without holding
+ * mmu_lock is safe, as this is purely an optimization, i.e. a false
+ * positive is benign, and a false negative will simply result in KVM
+ * skipping the unprotect+retry path, which is also an optimization.
+ */
+ if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
+ goto out;
+
+ if (!vcpu->arch.mmu->root_role.direct) {
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
+ if (gpa == INVALID_GPA)
+ goto out;
+ }
- r = 0;
write_lock(&kvm->mmu_lock);
- for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
- r = 1;
+ for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- }
+
+ /*
+ * Snapshot the result before zapping, as zapping will remove all list
+ * entries, i.e. checking the list later would yield a false negative.
+ */
+ r = !list_empty(&invalid_list);
kvm_mmu_commit_zap_page(kvm, &invalid_list);
write_unlock(&kvm->mmu_lock);
- return r;
-}
-
-static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
-{
- gpa_t gpa;
- int r;
-
- if (vcpu->arch.mmu->root_role.direct)
- return 0;
-
- gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
- r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
+out:
+ if (r || always_retry) {
+ vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
+ vcpu->arch.last_retry_addr = cr2_or_gpa;
+ }
return r;
}
@@ -2914,10 +2943,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
trace_kvm_mmu_set_spte(level, gfn, sptep);
}
- if (wrprot) {
- if (write_fault)
- ret = RET_PF_EMULATE;
- }
+ if (wrprot && write_fault)
+ ret = RET_PF_WRITE_PROTECTED;
if (flush)
kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
@@ -4549,7 +4576,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return RET_PF_RETRY;
if (page_fault_handle_page_track(vcpu, fault))
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
r = fast_page_fault(vcpu, fault);
if (r != RET_PF_INVALID)
@@ -4618,8 +4645,6 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
if (!flags) {
trace_kvm_page_fault(vcpu, fault_address, error_code);
- if (kvm_event_needs_reinjection(vcpu))
- kvm_mmu_unprotect_page_virt(vcpu, fault_address);
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
insn_len);
} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
@@ -4642,7 +4667,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
int r;
if (page_fault_handle_page_track(vcpu, fault))
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
r = fast_page_fault(vcpu, fault);
if (r != RET_PF_INVALID)
@@ -4719,6 +4744,7 @@ static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
switch (r) {
case RET_PF_FIXED:
case RET_PF_SPURIOUS:
+ case RET_PF_WRITE_PROTECTED:
return 0;
case RET_PF_EMULATE:
@@ -5963,6 +5989,106 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
write_unlock(&vcpu->kvm->mmu_lock);
}
+static bool is_write_to_guest_page_table(u64 error_code)
+{
+ const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
+
+ return (error_code & mask) == mask;
+}
+
+static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ u64 error_code, int *emulation_type)
+{
+ bool direct = vcpu->arch.mmu->root_role.direct;
+
+ /*
+ * Do not try to unprotect and retry if the vCPU re-faulted on the same
+ * RIP with the same address that was previously unprotected, as doing
+ * so will likely put the vCPU into an infinite loop. E.g. if the vCPU uses
+ * a non-page-table modifying instruction on the PDE that points to the
+ * instruction, then unprotecting the gfn will unmap the instruction's
+ * code, i.e. make it impossible for the instruction to ever complete.
+ */
+ if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
+ vcpu->arch.last_retry_addr == cr2_or_gpa)
+ return RET_PF_EMULATE;
+
+ /*
+ * Reset the unprotect+retry values that guard against infinite loops.
+ * The values will be refreshed if KVM explicitly unprotects a gfn and
+ * retries; in all other cases it's safe to retry in the future even if
+ * the next page fault happens on the same RIP+address.
+ */
+ vcpu->arch.last_retry_eip = 0;
+ vcpu->arch.last_retry_addr = 0;
+
+ /*
+ * It should be impossible to reach this point with an MMIO cache hit,
+ * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
+ * writable memslot, and creating a memslot should invalidate the MMIO
+ * cache by way of changing the memslot generation. WARN and disallow
+ * retry if MMIO is detected, as retrying MMIO emulation is pointless
+ * and could put the vCPU into an infinite loop because the processor
+ * will keep faulting on the non-existent MMIO address.
+ */
+ if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
+ return RET_PF_EMULATE;
+
+ /*
+ * Before emulating the instruction, check to see if the access was due
+ * to a read-only violation while the CPU was walking non-nested NPT
+ * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
+ * If L1 is sharing (a subset of) its page tables with L2, e.g. by
+ * having nCR3 share lower level page tables with hCR3, then when KVM
+ * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
+ * unknowingly write-protecting L1's guest page tables, which KVM isn't
+ * shadowing.
+ *
+ * Because the CPU (by default) walks NPT page tables using a write
+ * access (to ensure the CPU can do A/D updates), page walks in L1 can
+ * trigger write faults for the above case even when L1 isn't modifying
+ * PTEs. As a result, KVM will unnecessarily emulate (or at least, try
+ * to emulate) an excessive number of L1 instructions; because L1's MMU
+ * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
+ * and thus no need to emulate in order to guarantee forward progress.
+ *
+ * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
+ * proceed without triggering emulation. If one or more shadow pages
+ * was zapped, skip emulation and resume L1 to let it natively execute
+ * the instruction. If no shadow pages were zapped, then the write-
+ * fault is due to something else entirely, i.e. KVM needs to emulate,
+ * as resuming the guest will put it into an infinite loop.
+ *
+ * Note, this code also applies to Intel CPUs, even though it is *very*
+ * unlikely that an L1 will share its page tables (IA32/PAE/paging64
+ * format) with L2's page tables (EPT format).
+ *
+ * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
+ * unprotect the gfn and retry if an event is awaiting reinjection. If
+ * KVM emulates multiple instructions before completing event injection,
+ * the event could be delayed beyond what is architecturally allowed,
+ * e.g. KVM could inject an IRQ after the TPR has been raised.
+ */
+ if (((direct && is_write_to_guest_page_table(error_code)) ||
+ (!direct && kvm_event_needs_reinjection(vcpu))) &&
+ kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
+ return RET_PF_RETRY;
+
+ /*
+ * The gfn is write-protected, but if KVM detects it's emulating an
+ * instruction that is unlikely to be used to modify page tables, or if
+ * emulation fails, KVM can try to unprotect the gfn and let the CPU
+ * re-execute the instruction that caused the page fault. Do not allow
+ * retrying an instruction from a nested guest as KVM is only explicitly
+ * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
+ * going to magically fix whatever issue caused L2 to fail.
+ */
+ if (!is_guest_mode(vcpu))
+ *emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
+
+ return RET_PF_EMULATE;
+}
+
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
@@ -6008,6 +6134,10 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
if (r < 0)
return r;
+ if (r == RET_PF_WRITE_PROTECTED)
+ r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
+ &emulation_type);
+
if (r == RET_PF_FIXED)
vcpu->stat.pf_fixed++;
else if (r == RET_PF_EMULATE)
@@ -6018,32 +6148,6 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
if (r != RET_PF_EMULATE)
return 1;
- /*
- * Before emulating the instruction, check if the error code
- * was due to a RO violation while translating the guest page.
- * This can occur when using nested virtualization with nested
- * paging in both guests. If true, we simply unprotect the page
- * and resume the guest.
- */
- if (vcpu->arch.mmu->root_role.direct &&
- (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
- return 1;
- }
-
- /*
- * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
- * optimistically try to just unprotect the page and let the processor
- * re-execute the instruction that caused the page fault. Do not allow
- * retrying MMIO emulation, as it's not only pointless but could also
- * cause us to enter an infinite loop because the processor will keep
- * faulting on the non-existent MMIO address. Retrying an instruction
- * from a nested guest is also pointless and dangerous as we are only
- * explicitly shadowing L1's page tables, i.e. unprotecting something
- * for L1 isn't going to magically fix whatever issue cause L2 to fail.
- */
- if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
- emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
insn_len);
@@ -6202,59 +6306,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot);
-
-static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- int start_level, int end_level,
- gfn_t start_gfn, gfn_t end_gfn,
- bool flush_on_yield, bool flush)
-{
- struct slot_rmap_walk_iterator iterator;
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
- end_gfn, &iterator) {
- if (iterator.rmap)
- flush |= fn(kvm, iterator.rmap, slot);
-
- if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- if (flush && flush_on_yield) {
- kvm_flush_remote_tlbs_range(kvm, start_gfn,
- iterator.gfn - start_gfn + 1);
- flush = false;
- }
- cond_resched_rwlock_write(&kvm->mmu_lock);
- }
- }
-
- return flush;
-}
-
-static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- int start_level, int end_level,
- bool flush_on_yield)
-{
- return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
- slot->base_gfn, slot->base_gfn + slot->npages - 1,
- flush_on_yield, false);
-}
-
-static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- bool flush_on_yield)
-{
- return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
-}
-
static void free_mmu_pages(struct kvm_mmu *mmu)
{
if (!tdp_enabled && mmu->pae_root)
@@ -6528,9 +6579,8 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
if (WARN_ON_ONCE(start >= end))
continue;
- flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
- PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true, flush);
+ flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
+ end, true, flush);
}
}
@@ -6818,7 +6868,7 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
*/
for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
- level, level, start, end - 1, true, false);
+ level, level, start, end - 1, true, true, false);
}
/* Must be called with the mmu_lock held in write-mode. */
@@ -6997,10 +7047,42 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
kvm_mmu_zap_all(kvm);
}
+/*
+ * Zapping leaf SPTEs within the memslot range when a memslot is moved/deleted.
+ *
+ * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required; worst
+ * case scenario, we'll have unused shadow pages lying around until they
+ * are recycled due to age or when the VM is destroyed.
+ */
+static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_gfn_range range = {
+ .slot = slot,
+ .start = slot->base_gfn,
+ .end = slot->base_gfn + slot->npages,
+ .may_block = true,
+ };
+
+ write_lock(&kvm->mmu_lock);
+ if (kvm_unmap_gfn_range(kvm, &range))
+ kvm_flush_remote_tlbs_memslot(kvm, slot);
+
+ write_unlock(&kvm->mmu_lock);
+}
+
+static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
+{
+ return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
+ kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+}
+
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_zap_all_fast(kvm);
+ if (kvm_memslot_flush_zap_all(kvm))
+ kvm_mmu_zap_all_fast(kvm);
+ else
+ kvm_mmu_zap_memslot_leafs(kvm, slot);
}
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 1721d97743e9..c98827840e07 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -258,6 +258,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
* RET_PF_CONTINUE: So far, so good, keep handling the page fault.
* RET_PF_RETRY: let CPU fault again on the address.
* RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
+ * gfn and retry, or emulate the instruction directly.
* RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
* RET_PF_FIXED: The faulting entry has been fixed.
* RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
@@ -274,6 +276,7 @@ enum {
RET_PF_CONTINUE = 0,
RET_PF_RETRY,
RET_PF_EMULATE,
+ RET_PF_WRITE_PROTECTED,
RET_PF_INVALID,
RET_PF_FIXED,
RET_PF_SPURIOUS,
@@ -349,8 +352,6 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
-void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
-
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 195d98bc8de8..f35a830ce469 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -57,6 +57,7 @@
TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
+TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 69941cebb3a8..ae7d39ff2d07 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -646,10 +646,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
* really care if it changes underneath us after this point).
*/
if (FNAME(gpte_changed)(vcpu, gw, top_level))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
/*
* Load a new root and retry the faulting instruction in the extremely
@@ -659,7 +659,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
*/
if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
- goto out_gpte_changed;
+ return RET_PF_RETRY;
}
for_each_shadow_entry(vcpu, fault->addr, it) {
@@ -674,34 +674,38 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
false, access);
- if (sp != ERR_PTR(-EEXIST)) {
- /*
- * We must synchronize the pagetable before linking it
- * because the guest doesn't need to flush tlb when
- * the gpte is changed from non-present to present.
- * Otherwise, the guest may use the wrong mapping.
- *
- * For PG_LEVEL_4K, kvm_mmu_get_page() has already
- * synchronized it transiently via kvm_sync_page().
- *
- * For higher level pagetable, we synchronize it via
- * the slower mmu_sync_children(). If it needs to
- * break, some progress has been made; return
- * RET_PF_RETRY and retry on the next #PF.
- * KVM_REQ_MMU_SYNC is not necessary but it
- * expedites the process.
- */
- if (sp->unsync_children &&
- mmu_sync_children(vcpu, sp, false))
- return RET_PF_RETRY;
- }
+ /*
+ * Synchronize the new page before linking it, as the CPU (KVM)
+ * is architecturally disallowed from inserting non-present
+ * entries into the TLB, i.e. the guest isn't required to flush
+ * the TLB when changing the gPTE from non-present to present.
+ *
+ * For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already
+ * synchronized the page via kvm_sync_page().
+ *
+ * For higher level pages, which cannot be unsync themselves
+ * but can have unsync children, synchronize via the slower
+ * mmu_sync_children(). If KVM needs to drop mmu_lock due to
+ * contention or to reschedule, instruct the caller to retry
+ * the #PF (mmu_sync_children() ensures forward progress will
+ * be made).
+ */
+ if (sp != ERR_PTR(-EEXIST) && sp->unsync_children &&
+ mmu_sync_children(vcpu, sp, false))
+ return RET_PF_RETRY;
/*
- * Verify that the gpte in the page we've just write
- * protected is still there.
+ * Verify that the gpte in the page, which is now either
+ * write-protected or unsync, wasn't modified between the fault
+ * and acquiring mmu_lock. This needs to be done even when
+ * reusing an existing shadow page to ensure the information
+ * gathered by the walker matches the information stored in the
+ * shadow page (which could have been modified by a different
+ * vCPU even if the page was already linked). Holding mmu_lock
+ * prevents the shadow page from changing after this point.
*/
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
if (sp != ERR_PTR(-EEXIST))
link_shadow_page(vcpu, it.sptep, sp);
@@ -755,9 +759,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return ret;
-
-out_gpte_changed:
- return RET_PF_RETRY;
}
/*
@@ -805,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (page_fault_handle_page_track(vcpu, fault)) {
shadow_page_table_clear_flood(vcpu, fault->addr);
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
}
r = mmu_topup_memory_caches(vcpu, true);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 3c55955bcaf8..3b996c1fdaab 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1046,10 +1046,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
* protected, emulation is needed. If the emulation was skipped,
* the vCPU would have the same fault again.
*/
- if (wrprot) {
- if (fault->write)
- ret = RET_PF_EMULATE;
- }
+ if (wrprot && fault->write)
+ ret = RET_PF_WRITE_PROTECTED;
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 2f4e155080ba..0d17d6b70639 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -17,6 +17,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0007_EDX,
CPUID_8000_0022_EAX,
CPUID_7_2_EDX,
+ CPUID_24_0_EBX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,7 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
+#define X86_FEATURE_AVX10 KVM_X86_FEATURE(CPUID_7_1_EDX, 19)
/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
@@ -55,6 +57,11 @@ enum kvm_only_cpuid_leafs {
#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+/* Intel-defined sub-features, CPUID level 0x00000024:0 (EBX) */
+#define X86_FEATURE_AVX10_128 KVM_X86_FEATURE(CPUID_24_0_EBX, 16)
+#define X86_FEATURE_AVX10_256 KVM_X86_FEATURE(CPUID_24_0_EBX, 17)
+#define X86_FEATURE_AVX10_512 KVM_X86_FEATURE(CPUID_24_0_EBX, 18)
+
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
@@ -90,6 +97,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
+ [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
};
/*
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 00e3c27d2a87..85241c0c7f56 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -624,17 +624,31 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
#endif
/*
- * Give leave_smm() a chance to make ISA-specific changes to the vCPU
- * state (e.g. enter guest mode) before loading state from the SMM
- * state-save area.
+ * FIXME: When resuming L2 (a.k.a. guest mode), the transition to guest
+ * mode should happen _after_ loading state from SMRAM. However, KVM
+ * piggybacks the nested VM-Enter flows (which is wrong for many other
+ * reasons), and so nSVM/nVMX would clobber state that is loaded from
+ * SMRAM and from the VMCS/VMCB.
*/
if (kvm_x86_call(leave_smm)(vcpu, &smram))
return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- return rsm_load_state_64(ctxt, &smram.smram64);
+ ret = rsm_load_state_64(ctxt, &smram.smram64);
else
#endif
- return rsm_load_state_32(ctxt, &smram.smram32);
+ ret = rsm_load_state_32(ctxt, &smram.smram32);
+
+ /*
+ * If RSM fails and triggers shutdown, architecturally the shutdown
+ * occurs *before* the transition to guest mode. But due to KVM's
+ * flawed handling of RSM to L2 (see above), the vCPU may already be
+ * in_guest_mode(). Force the vCPU out of guest mode before delivering
+ * the shutdown, so that L1 enters shutdown instead of seeing a VM-Exit
+ * that architecturally shouldn't be possible.
+ */
+ if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu))
+ kvm_leave_nested(vcpu);
+ return ret;
}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 6f704c1037e5..d5314cb7dff4 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1693,8 +1693,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
ret = -ENOMEM;
- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
- save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ save = kzalloc(sizeof(*save), GFP_KERNEL);
if (!ctl || !save)
goto out_free;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5ab2c92c7331..9df3e1e5ae81 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -573,7 +573,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
- return page_address(sd->save_area) + 0x400;
+ return &sd->save_area->host_sev_es_save;
}
static inline void kvm_cpu_svm_disable(void)
@@ -592,14 +592,14 @@ static inline void kvm_cpu_svm_disable(void)
}
}
-static void svm_emergency_disable(void)
+static void svm_emergency_disable_virtualization_cpu(void)
{
kvm_rebooting = true;
kvm_cpu_svm_disable();
}
-static void svm_hardware_disable(void)
+static void svm_disable_virtualization_cpu(void)
{
/* Make sure we clean up behind us */
if (tsc_scaling)
@@ -610,7 +610,7 @@ static void svm_hardware_disable(void)
amd_pmu_disable_virt();
}
-static int svm_hardware_enable(void)
+static int svm_enable_virtualization_cpu(void)
{
struct svm_cpu_data *sd;
@@ -696,7 +696,7 @@ static void svm_cpu_uninit(int cpu)
return;
kfree(sd->sev_vmcbs);
- __free_page(sd->save_area);
+ __free_page(__sme_pa_to_page(sd->save_area_pa));
sd->save_area_pa = 0;
sd->save_area = NULL;
}
@@ -704,23 +704,24 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+ struct page *save_area_page;
int ret = -ENOMEM;
memset(sd, 0, sizeof(struct svm_cpu_data));
- sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
- if (!sd->save_area)
+ save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
+ if (!save_area_page)
return ret;
ret = sev_cpu_init(sd);
if (ret)
goto free_save_area;
- sd->save_area_pa = __sme_page_pa(sd->save_area);
+ sd->save_area = page_address(save_area_page);
+ sd->save_area_pa = __sme_page_pa(save_area_page);
return 0;
free_save_area:
- __free_page(sd->save_area);
- sd->save_area = NULL;
+ __free_page(save_area_page);
return ret;
}
@@ -1124,8 +1125,7 @@ static void svm_hardware_unsetup(void)
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
- __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
- get_order(IOPM_SIZE));
+ __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
iopm_base = 0;
}
@@ -1301,7 +1301,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
if (!kvm_hlt_in_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_HLT);
- control->iopm_base_pa = __sme_set(iopm_base);
+ control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
control->int_ctl = V_INTR_MASKING_MASK;
@@ -1503,7 +1503,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
sev_free_vcpu(vcpu);
- __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
+ __free_page(__sme_pa_to_page(svm->vmcb01.pa));
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
@@ -1533,7 +1533,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* TSC_AUX is always virtualized for SEV-ES guests when the feature is
* available. The user return MSR support is not required in this case
* because TSC_AUX is restored on #VMEXIT from the host save area
- * (which has been initialized in svm_hardware_enable()).
+ * (which has been initialized in svm_enable_virtualization_cpu()).
*/
if (likely(tsc_aux_uret_slot >= 0) &&
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@ -2825,17 +2825,17 @@ static int efer_trap(struct kvm_vcpu *vcpu)
return kvm_complete_insn_gp(vcpu, ret);
}
-static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+static int svm_get_feature_msr(u32 msr, u64 *data)
{
- msr->data = 0;
+ *data = 0;
- switch (msr->index) {
+ switch (msr) {
case MSR_AMD64_DE_CFG:
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
- msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+ *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
default:
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
@@ -3144,7 +3144,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
* feature is available. The user return MSR support is not
* required in this case because TSC_AUX is restored on #VMEXIT
* from the host save area (which has been initialized in
- * svm_hardware_enable()).
+ * svm_enable_virtualization_cpu()).
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
break;
@@ -3191,18 +3191,21 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
break;
case MSR_AMD64_DE_CFG: {
- struct kvm_msr_entry msr_entry;
+ u64 supported_de_cfg;
- msr_entry.index = msr->index;
- if (svm_get_msr_feature(&msr_entry))
+ if (svm_get_feature_msr(ecx, &supported_de_cfg))
return 1;
- /* Check the supported bits */
- if (data & ~msr_entry.data)
+ if (data & ~supported_de_cfg)
return 1;
- /* Don't allow the guest to change a bit, #GP */
- if (!msr->host_initiated && (data ^ msr_entry.data))
+ /*
+ * Don't let the guest change the host-programmed value. The
+ * MSR is very model specific, i.e. contains multiple bits that
+ * are completely unknown to KVM, and the one bit known to KVM
+ * is simply a reflection of hardware capabilities.
+ */
+ if (!msr->host_initiated && data != svm->msr_decfg)
return 1;
svm->msr_decfg = data;
@@ -4156,12 +4159,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
- if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
- to_svm(vcpu)->vmcb->control.exit_info_1)
+ switch (svm->vmcb->control.exit_code) {
+ case SVM_EXIT_MSR:
+ if (!svm->vmcb->control.exit_info_1)
+ break;
return handle_fastpath_set_msr_irqoff(vcpu);
+ case SVM_EXIT_HLT:
+ return handle_fastpath_hlt(vcpu);
+ default:
+ break;
+ }
return EXIT_FASTPATH_NONE;
}
@@ -4992,8 +5004,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.check_processor_compatibility = svm_check_processor_compat,
.hardware_unsetup = svm_hardware_unsetup,
- .hardware_enable = svm_hardware_enable,
- .hardware_disable = svm_hardware_disable,
+ .enable_virtualization_cpu = svm_enable_virtualization_cpu,
+ .disable_virtualization_cpu = svm_disable_virtualization_cpu,
+ .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
.has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_vcpu_create,
@@ -5011,7 +5024,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vcpu_unblocking = avic_vcpu_unblocking,
.update_exception_bitmap = svm_update_exception_bitmap,
- .get_msr_feature = svm_get_msr_feature,
+ .get_feature_msr = svm_get_feature_msr,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
@@ -5062,6 +5075,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.enable_nmi_window = svm_enable_nmi_window,
.enable_irq_window = svm_enable_irq_window,
.update_cr8_intercept = svm_update_cr8_intercept,
+
+ .x2apic_icr_is_split = true,
.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
.apicv_post_state_restore = avic_apicv_post_state_restore,
@@ -5266,7 +5281,7 @@ static __init int svm_hardware_setup(void)
iopm_va = page_address(iopm_pages);
memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
- iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ iopm_base = __sme_page_pa(iopm_pages);
init_msrpm_offsets();
@@ -5425,8 +5440,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
static void __svm_exit(void)
{
kvm_x86_vendor_exit();
-
- cpu_emergency_unregister_virt_callback(svm_emergency_disable);
}
static int __init svm_init(void)
@@ -5442,8 +5455,6 @@ static int __init svm_init(void)
if (r)
return r;
- cpu_emergency_register_virt_callback(svm_emergency_disable);
-
/*
* Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace!
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 76107c7d0595..43fa6a16eb19 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -25,7 +25,21 @@
#include "cpuid.h"
#include "kvm_cache_regs.h"
-#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+/*
+ * Helpers to convert to/from physical addresses for pages whose address is
+ * consumed directly by hardware. Even though it's a physical address, SVM
+ * often restricts the address to the natural width, hence 'unsigned long'
+ * instead of 'hpa_t'.
+ */
+static inline unsigned long __sme_page_pa(struct page *page)
+{
+ return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
+}
+
+static inline struct page *__sme_pa_to_page(unsigned long pa)
+{
+ return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
+}
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
@@ -321,7 +335,7 @@ struct svm_cpu_data {
u32 next_asid;
u32 min_asid;
- struct page *save_area;
+ struct vmcb *save_area;
unsigned long save_area_pa;
struct vmcb *current_vmcb;
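The __sme_page_pa()/__sme_pa_to_page() helpers added above rely on __sme_set()/__sme_clr(), which OR in and mask off the SME C-bit (sme_me_mask) around the usual pfn-to-physical-address conversion. A standalone sketch of that round trip, with a made-up C-bit position instead of the kernel's sme_me_mask:

/* Sketch only: setting/clearing an encryption bit in a physical address. */
#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT	12
#define SME_MASK	(1ULL << 47)	/* hypothetical C-bit position */

static uint64_t sme_page_pa(uint64_t pfn)
{
	/* like __sme_set(page_to_pfn(page) << PAGE_SHIFT) */
	return (pfn << PAGE_SHIFT) | SME_MASK;
}

static uint64_t sme_pa_to_pfn(uint64_t pa)
{
	/* like pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT) */
	return (pa & ~SME_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pfn = 0x1234;

	assert(sme_pa_to_pfn(sme_page_pa(pfn)) == pfn);
	return 0;
}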
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index a0c8eb37d3e1..2ed80aea3bb1 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -209,10 +209,8 @@ SYM_FUNC_START(__svm_vcpu_run)
7: vmload %_ASM_AX
8:
-#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
/* Clobbers RAX, RCX, RDX. */
RESTORE_HOST_SPEC_CTRL
@@ -348,10 +346,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
2: cli
-#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
RESTORE_HOST_SPEC_CTRL
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 41a4533f9989..cb6588238f46 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -54,9 +54,7 @@ struct nested_vmx_msrs {
};
struct vmcs_config {
- int size;
- u32 basic_cap;
- u32 revision_id;
+ u64 basic;
u32 pin_based_exec_ctrl;
u32 cpu_based_exec_ctrl;
u32 cpu_based_2nd_exec_ctrl;
@@ -76,7 +74,7 @@ extern struct vmx_capability vmx_capability __ro_after_init;
static inline bool cpu_has_vmx_basic_inout(void)
{
- return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
+ return vmcs_config.basic & VMX_BASIC_INOUT;
}
static inline bool cpu_has_virtual_nmis(void)
@@ -225,7 +223,7 @@ static inline bool cpu_has_vmx_vmfunc(void)
static inline bool cpu_has_vmx_shadow_vmcs(void)
{
/* check if the cpu supports writing r/o exit information fields */
- if (!(vmcs_config.misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+ if (!(vmcs_config.misc & VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
return false;
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -367,7 +365,7 @@ static inline bool cpu_has_vmx_invvpid_global(void)
static inline bool cpu_has_vmx_intel_pt(void)
{
- return (vmcs_config.misc & MSR_IA32_VMX_MISC_INTEL_PT) &&
+ return (vmcs_config.misc & VMX_MISC_INTEL_PT) &&
(vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) &&
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
}
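vmcs_config now carries the raw MSR_IA32_VMX_BASIC value as a single u64 instead of pre-split size/basic_cap/revision_id fields, and later hunks read it through accessors such as vmx_basic_vmcs_size(), vmx_basic_vmcs_revision_id() and vmx_basic_vmcs_mem_type(). A hedged sketch of what such accessors decode, following the SDM layout of the MSR (the real helpers live in the VMX headers, not here):

	/* Illustrative decoding only.  Layout per the SDM: bits 30:0 revision ID,
	 * bits 44:32 VMCS region size, bit 48 "32-bit physical addresses only",
	 * bits 53:50 VMCS memory type, bit 54 INS/OUTS info, bit 55 "true" controls. */
	static inline u32 example_vmcs_revision_id(u64 basic)
	{
		return basic & GENMASK_ULL(30, 0);
	}

	static inline u32 example_vmcs_size(u64 basic)
	{
		return (basic & GENMASK_ULL(44, 32)) >> 32;
	}

	static inline u8 example_vmcs_mem_type(u64 basic)
	{
		return (basic & GENMASK_ULL(53, 50)) >> 50;
	}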
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 0bf35ebe8a1b..7668e2fb8043 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -23,8 +23,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.hardware_unsetup = vmx_hardware_unsetup,
- .hardware_enable = vmx_hardware_enable,
- .hardware_disable = vmx_hardware_disable,
+ .enable_virtualization_cpu = vmx_enable_virtualization_cpu,
+ .disable_virtualization_cpu = vmx_disable_virtualization_cpu,
+ .emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
+
.has_emulated_msr = vmx_has_emulated_msr,
.vm_size = sizeof(struct kvm_vmx),
@@ -41,7 +43,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vcpu_put = vmx_vcpu_put,
.update_exception_bitmap = vmx_update_exception_bitmap,
- .get_msr_feature = vmx_get_msr_feature,
+ .get_feature_msr = vmx_get_feature_msr,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
@@ -89,6 +91,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.enable_nmi_window = vmx_enable_nmi_window,
.enable_irq_window = vmx_enable_irq_window,
.update_cr8_intercept = vmx_update_cr8_intercept,
+
+ .x2apic_icr_is_split = false,
.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
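The new x2apic_icr_is_split knob (set true for SVM earlier in this patch, false for VMX here) tells the common local APIC code whether the vendor keeps the 64-bit x2APIC ICR as two 32-bit halves in the vAPIC page. A rough sketch of the consumer side, with an illustrative function name and assuming the existing kvm_lapic_set_reg()/kvm_lapic_set_reg64() register accessors:

	/* Hypothetical sketch, not the lapic.c code: store a 64-bit ICR value
	 * according to the vendor's layout. */
	static void example_write_x2apic_icr(struct kvm_lapic *apic, u64 icr)
	{
		if (kvm_x86_ops.x2apic_icr_is_split) {
			/* AMD/AVIC: ICR and ICR2 remain two 32-bit registers. */
			kvm_lapic_set_reg(apic, APIC_ICR, (u32)icr);
			kvm_lapic_set_reg(apic, APIC_ICR2, (u32)(icr >> 32));
		} else {
			/* Intel: the x2APIC ICR occupies a single 64-bit register slot. */
			kvm_lapic_set_reg64(apic, APIC_ICR, icr);
		}
	}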
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 2392a7ef254d..a8e7bc04d9bf 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -981,7 +981,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
__func__, i, e.index, e.reserved);
goto fail;
}
- if (kvm_set_msr(vcpu, e.index, e.value)) {
+ if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
@@ -1017,7 +1017,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
}
}
- if (kvm_get_msr(vcpu, msr_index, data)) {
+ if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
msr_index);
return false;
@@ -1112,9 +1112,9 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
/*
* Emulated VMEntry does not fail here. Instead a less
* accurate value will be returned by
- * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
- * instead of reading the value from the vmcs02 VMExit
- * MSR-store area.
+ * nested_vmx_get_vmexit_msr_value() by reading KVM's
+ * internal MSR state instead of reading the value from
+ * the vmcs02 VMExit MSR-store area.
*/
pr_warn_ratelimited(
"Not enough msr entries in msr_autostore. Can't add msr %x\n",
@@ -1251,21 +1251,32 @@ static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
- const u64 feature_and_reserved =
- /* feature (except bit 48; see below) */
- BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
- /* reserved */
- BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+ const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
+ VMX_BASIC_INOUT |
+ VMX_BASIC_TRUE_CTLS;
+
+ const u64 reserved_bits = GENMASK_ULL(63, 56) |
+ GENMASK_ULL(47, 45) |
+ BIT_ULL(31);
+
u64 vmx_basic = vmcs_config.nested.basic;
- if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+ BUILD_BUG_ON(feature_bits & reserved_bits);
+
+ /*
+ * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has
+ * inverted polarity), the incoming value must not set feature bits or
+ * reserved bits that aren't allowed/supported by KVM. Fields, i.e.
+ * multi-bit values, are explicitly checked below.
+ */
+ if (!is_bitwise_subset(vmx_basic, data, feature_bits | reserved_bits))
return -EINVAL;
/*
* KVM does not emulate a version of VMX that constrains physical
* addresses of VMX structures (e.g. VMCS) to 32-bits.
*/
- if (data & BIT_ULL(48))
+ if (data & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
return -EINVAL;
if (vmx_basic_vmcs_revision_id(vmx_basic) !=
@@ -1334,16 +1345,29 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
- const u64 feature_and_reserved_bits =
- /* feature */
- BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
- BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
- /* reserved */
- GENMASK_ULL(13, 9) | BIT_ULL(31);
+ const u64 feature_bits = VMX_MISC_SAVE_EFER_LMA |
+ VMX_MISC_ACTIVITY_HLT |
+ VMX_MISC_ACTIVITY_SHUTDOWN |
+ VMX_MISC_ACTIVITY_WAIT_SIPI |
+ VMX_MISC_INTEL_PT |
+ VMX_MISC_RDMSR_IN_SMM |
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+ VMX_MISC_VMXOFF_BLOCK_SMI |
+ VMX_MISC_ZERO_LEN_INS;
+
+ const u64 reserved_bits = BIT_ULL(31) | GENMASK_ULL(13, 9);
+
u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
vmcs_config.nested.misc_high);
- if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+ BUILD_BUG_ON(feature_bits & reserved_bits);
+
+ /*
+ * The incoming value must not set feature bits or reserved bits that
+ * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are
+ * explicitly checked below.
+ */
+ if (!is_bitwise_subset(vmx_misc, data, feature_bits | reserved_bits))
return -EINVAL;
if ((vmx->nested.msrs.pinbased_ctls_high &
@@ -2317,10 +2341,12 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
/* Posted interrupts setting is only taken from vmcs12. */
vmx->nested.pi_pending = false;
- if (nested_cpu_has_posted_intr(vmcs12))
+ if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
- else
+ } else {
+ vmx->nested.posted_intr_nv = -1;
exec_control &= ~PIN_BASED_POSTED_INTR;
+ }
pin_controls_set(vmx, exec_control);
/*
@@ -2470,6 +2496,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
@@ -2507,7 +2534,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
- vmx->segment_cache.bitmask = 0;
+ vmx_segment_cache_clear(vmx);
}
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
@@ -4284,11 +4311,52 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
+ int irq;
+
if (block_nested_events)
return -EBUSY;
if (!nested_exit_on_intr(vcpu))
goto no_vmexit;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+
+ if (!nested_exit_intr_ack_set(vcpu)) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+ return 0;
+ }
+
+ irq = kvm_cpu_get_extint(vcpu);
+ if (irq != -1) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+ INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+ return 0;
+ }
+
+ irq = kvm_apic_has_interrupt(vcpu);
+ if (WARN_ON_ONCE(irq < 0))
+ goto no_vmexit;
+
+ /*
+ * If the IRQ is L2's PI notification vector, process posted
+ * interrupts for L2 instead of injecting VM-Exit, as the
+ * detection/morphing architecturally occurs when the IRQ is
+ * delivered to the CPU. Note, only interrupts that are routed
+ * through the local APIC trigger posted interrupt processing,
+ * and enabling posted interrupts requires ACK-on-exit.
+ */
+ if (irq == vmx->nested.posted_intr_nv) {
+ vmx->nested.pi_pending = true;
+ kvm_apic_clear_irr(vcpu, irq);
+ goto no_vmexit;
+ }
+
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+ INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+
+ /*
+ * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must
+ * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI
+ * if APICv is active.
+ */
+ kvm_apic_ack_interrupt(vcpu, irq);
return 0;
}
@@ -4806,7 +4874,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
goto vmabort;
}
- if (kvm_set_msr(vcpu, h.index, h.value)) {
+ if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
pr_debug_ratelimited(
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
__func__, j, h.index, h.value);
@@ -4969,14 +5037,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (likely(!vmx->fail)) {
- if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
- nested_exit_intr_ack_set(vcpu)) {
- int irq = kvm_cpu_get_interrupt(vcpu);
- WARN_ON(irq < 0);
- vmcs12->vm_exit_intr_info = irq |
- INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
- }
-
if (vm_exit_reason != -1)
trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
vmcs12->exit_qualification,
@@ -7051,7 +7111,7 @@ static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf,
{
msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
msrs->misc_low |=
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT |
VMX_MISC_ACTIVITY_WAIT_SIPI;
@@ -7066,12 +7126,10 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
* guest, and the VMCS structure we give it - not about the
* VMX support of the underlying hardware.
*/
- msrs->basic =
- VMCS12_REVISION |
- VMX_BASIC_TRUE_CTLS |
- ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
- (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+ msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE,
+ X86_MEMTYPE_WB);
+ msrs->basic |= VMX_BASIC_TRUE_CTLS;
if (cpu_has_vmx_basic_inout())
msrs->basic |= VMX_BASIC_INOUT;
}
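The rewritten vmx_restore_vmx_basic() and vmx_restore_vmx_misc() above spell out feature versus reserved bits by name and reject any userspace value that sets a bit KVM itself does not advertise. The subset check they rely on, whose prototype is visible in the earlier hunk header, amounts to roughly the following sketch:

	/* Sketch of the invariant: within 'mask', userspace ('subset') may only
	 * keep bits that KVM's own value ('superset') already has set, i.e. it
	 * may clear features but never invent new ones or set reserved bits. */
	static bool example_is_bitwise_subset(u64 superset, u64 subset, u64 mask)
	{
		superset &= mask;
		subset &= mask;

		return (superset | subset) == superset;
	}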
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index cce4e2aa30fb..2c296b6abb8c 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -39,11 +39,17 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
+ lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+ !refcount_read(&vcpu->kvm->users_count));
+
return to_vmx(vcpu)->nested.cached_vmcs12;
}
static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
+ lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+ !refcount_read(&vcpu->kvm->users_count));
+
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
@@ -109,7 +115,7 @@ static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->nested.msrs.misc_low &
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 6fef01e0536e..a3c3d2a51f47 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -274,7 +274,7 @@ static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
* simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
* enforce restriction of access to the PROVISIONKEY.
*/
- contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL);
if (!contents)
return -ENOMEM;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 733a0c45d1a6..1a4438358c5e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -525,10 +525,6 @@ static const struct kvm_vmx_segment_field {
VMX_SEGMENT_FIELD(LDTR),
};
-static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
-{
- vmx->segment_cache.bitmask = 0;
-}
static unsigned long host_idt_base;
@@ -755,7 +751,7 @@ fault:
return -EIO;
}
-static void vmx_emergency_disable(void)
+void vmx_emergency_disable_virtualization_cpu(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
@@ -1998,15 +1994,15 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
return !(msr->data & ~valid_bits);
}
-int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+int vmx_get_feature_msr(u32 msr, u64 *data)
{
- switch (msr->index) {
+ switch (msr) {
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
if (!nested)
return 1;
- return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
default:
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
}
@@ -2605,13 +2601,13 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
struct vmx_capability *vmx_cap)
{
- u32 vmx_msr_low, vmx_msr_high;
u32 _pin_based_exec_control = 0;
u32 _cpu_based_exec_control = 0;
u32 _cpu_based_2nd_exec_control = 0;
u64 _cpu_based_3rd_exec_control = 0;
u32 _vmexit_control = 0;
u32 _vmentry_control = 0;
+ u64 basic_msr;
u64 misc_msr;
int i;
@@ -2734,29 +2730,29 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
_vmexit_control &= ~x_ctrl;
}
- rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
+ rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
- if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
+ if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
return -EIO;
#ifdef CONFIG_X86_64
- /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
- if (vmx_msr_high & (1u<<16))
+ /*
+ * KVM expects to be able to shove all legal physical addresses into
+ * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always
+ * 0 for processors that support Intel 64 architecture".
+ */
+ if (basic_msr & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
return -EIO;
#endif
/* Require Write-Back (WB) memory type for VMCS accesses. */
- if (((vmx_msr_high >> 18) & 15) != 6)
+ if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
return -EIO;
rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
- vmcs_conf->size = vmx_msr_high & 0x1fff;
- vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
-
- vmcs_conf->revision_id = vmx_msr_low;
-
+ vmcs_conf->basic = basic_msr;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
@@ -2844,7 +2840,7 @@ fault:
return -EFAULT;
}
-int vmx_hardware_enable(void)
+int vmx_enable_virtualization_cpu(void)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2881,7 +2877,7 @@ static void vmclear_local_loaded_vmcss(void)
__loaded_vmcs_clear(v);
}
-void vmx_hardware_disable(void)
+void vmx_disable_virtualization_cpu(void)
{
vmclear_local_loaded_vmcss();
@@ -2903,13 +2899,13 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
if (!pages)
return NULL;
vmcs = page_address(pages);
- memset(vmcs, 0, vmcs_config.size);
+ memset(vmcs, 0, vmx_basic_vmcs_size(vmcs_config.basic));
/* KVM supports Enlightened VMCS v1 only */
if (kvm_is_using_evmcs())
vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
else
- vmcs->hdr.revision_id = vmcs_config.revision_id;
+ vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
if (shadow)
vmcs->hdr.shadow_vmcs = 1;
@@ -3002,7 +2998,7 @@ static __init int alloc_kvm_area(void)
* physical CPU.
*/
if (kvm_is_using_evmcs())
- vmcs->hdr.revision_id = vmcs_config.revision_id;
+ vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
per_cpu(vmxarea, cpu) = vmcs;
}
@@ -4219,6 +4215,13 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ /*
+ * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated
+ * and freed, and must not be accessed outside of vcpu->mutex. The
+ * vCPU's cached PI NV is valid if and only if posted interrupts
+	 * are enabled in its vmcs12, i.e. checking the vector also checks that
+ * L1 has enabled posted interrupts for L2.
+ */
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/*
@@ -5804,8 +5807,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
? PFERR_PRESENT_MASK : 0;
- error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
- PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+ if (error_code & EPT_VIOLATION_GVA_IS_VALID)
+ error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+ PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
/*
* Check that the GPA doesn't exceed physical memory limits, as that is
@@ -7265,6 +7269,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
return handle_fastpath_set_msr_irqoff(vcpu);
case EXIT_REASON_PREEMPTION_TIMER:
return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
+ case EXIT_REASON_HLT:
+ return handle_fastpath_hlt(vcpu);
default:
return EXIT_FASTPATH_NONE;
}
@@ -7965,6 +7971,7 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
kvm_cpu_cap_clear(X86_FEATURE_SGX1);
kvm_cpu_cap_clear(X86_FEATURE_SGX2);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA);
}
if (vmx_umip_emulated())
@@ -8515,7 +8522,7 @@ __init int vmx_hardware_setup(void)
u64 use_timer_freq = 5000ULL * 1000 * 1000;
cpu_preemption_timer_multi =
- vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ vmx_misc_preemption_timer_rate(vmcs_config.misc);
if (tsc_khz)
use_timer_freq = (u64)tsc_khz * 1000;
@@ -8582,8 +8589,6 @@ static void __vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
- cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
-
vmx_cleanup_l1d_flush();
}
@@ -8630,8 +8635,6 @@ static int __init vmx_init(void)
pi_init_cpu(cpu);
}
- cpu_emergency_register_virt_callback(vmx_emergency_disable);
-
vmx_check_vmcs12_offsets();
/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 42498fa63abb..2325f773a20b 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -17,10 +17,6 @@
#include "run_flags.h"
#include "../mmu.h"
-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-#define MSR_TYPE_RW 3
-
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64
@@ -756,4 +752,9 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
return lapic_in_kernel(vcpu) && enable_ipiv;
}
+static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
+{
+ vmx->segment_cache.bitmask = 0;
+}
+
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h
index eb48153bfd73..bba24ed99ee6 100644
--- a/arch/x86/kvm/vmx/vmx_onhyperv.h
+++ b/arch/x86/kvm/vmx/vmx_onhyperv.h
@@ -104,6 +104,14 @@ static inline void evmcs_load(u64 phys_addr)
struct hv_vp_assist_page *vp_ap =
hv_get_vp_assist_page(smp_processor_id());
+ /*
+ * When enabling eVMCS, KVM verifies that every CPU has a valid hv_vp_assist_page()
+ * and aborts enabling the feature otherwise. CPU onlining path is also checked in
+ * vmx_hardware_enable().
+ */
+ if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm))
+ return;
+
if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
vp_ap->nested_control.features.directhypercall = 1;
vp_ap->current_nested_vmcs = phys_addr;
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 8060e5fc6dbd..93e020dc88f6 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -47,7 +47,7 @@ static __always_inline void vmcs_check16(unsigned long field)
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
"16-bit accessor invalid for 64-bit high field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
- "16-bit accessor invalid for 32-bit high field");
+ "16-bit accessor invalid for 32-bit field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
"16-bit accessor invalid for natural width field");
}
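The message fix above reflects the VMCS field-encoding scheme these BUILD_BUG_ONs key on: bits 14:13 of a field encoding give its width and bit 0 selects the "high" half of a 64-bit field, so (field & 0x6000) == 0x4000 is simply "a 32-bit field" and has no high variant to mention. A small sketch of that decoding, with illustrative macro names:

	/* Illustrative only: VMCS field encodings per the SDM.
	 * bits 14:13 - width: 0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width
	 * bit      0 - access type: 1 = high 32 bits of a 64-bit field
	 */
	#define EXAMPLE_VMCS_FIELD_WIDTH(field)		(((field) >> 13) & 0x3)
	#define EXAMPLE_VMCS_FIELD_IS_HIGH(field)	((field) & 0x1)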
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index ce3221cd1d01..a55981c5216e 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -13,8 +13,9 @@ extern struct kvm_x86_init_ops vt_init_ops __initdata;
void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
-int vmx_hardware_enable(void);
-void vmx_hardware_disable(void);
+int vmx_enable_virtualization_cpu(void);
+void vmx_disable_virtualization_cpu(void);
+void vmx_emergency_disable_virtualization_cpu(void);
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
@@ -56,7 +57,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
-int vmx_get_msr_feature(struct kvm_msr_entry *msr);
+int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c983c8e434b8..83fe0a78146f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -305,24 +305,237 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
static struct kmem_cache *x86_emulator_cache;
/*
- * When called, it means the previous get/set msr reached an invalid msr.
- * Return true if we want to ignore/silent this failed msr access.
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
+ * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
+ * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
+ * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
+ * MSRs that KVM emulates without strictly requiring host support.
+ * msr_based_features holds MSRs that enumerate features, i.e. are effectively
+ * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
+ * msrs_to_save and emulated_msrs.
*/
-static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
+
+static const u32 msrs_to_save_base[] = {
+ MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+ MSR_STAR,
+#ifdef CONFIG_X86_64
+ MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+#endif
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+ MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
+ MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
+ MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
+ MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
+ MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
+ MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
+ MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+ MSR_IA32_UMWAIT_CONTROL,
+
+ MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+};
+
+static const u32 msrs_to_save_pmu[] = {
+ MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
+ MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
+ MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
+ MSR_CORE_PERF_GLOBAL_CTRL,
+ MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
+
+ /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
+ MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
+ MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
+ MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
+ MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
+ MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
+
+ MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+ MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+
+ /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
+ MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+ MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+ MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+ MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
+
+ MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+};
+
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+ ARRAY_SIZE(msrs_to_save_pmu)];
+static unsigned num_msrs_to_save;
+
+static const u32 emulated_msrs_all[] = {
+ MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
+ HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+ HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+ HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
+ HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+ HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
+ HV_X64_MSR_RESET,
+ HV_X64_MSR_VP_INDEX,
+ HV_X64_MSR_VP_RUNTIME,
+ HV_X64_MSR_SCONTROL,
+ HV_X64_MSR_STIMER0_CONFIG,
+ HV_X64_MSR_VP_ASSIST_PAGE,
+ HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
+ HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
+ HV_X64_MSR_SYNDBG_OPTIONS,
+ HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
+ HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
+ HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
+
+ MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+ MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
+
+ MSR_IA32_TSC_ADJUST,
+ MSR_IA32_TSC_DEADLINE,
+ MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_PERF_CAPABILITIES,
+ MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MCG_STATUS,
+ MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
+ MSR_IA32_SMBASE,
+ MSR_SMI_COUNT,
+ MSR_PLATFORM_INFO,
+ MSR_MISC_FEATURES_ENABLES,
+ MSR_AMD64_VIRT_SPEC_CTRL,
+ MSR_AMD64_TSC_RATIO,
+ MSR_IA32_POWER_CTL,
+ MSR_IA32_UCODE_REV,
+
+ /*
+ * KVM always supports the "true" VMX control MSRs, even if the host
+ * does not. The VMX MSRs as a whole are considered "emulated" as KVM
+ * doesn't strictly require them to exist in the host (ignoring that
+ * KVM would refuse to load in the first place if the core set of MSRs
+ * aren't supported).
+ */
+ MSR_IA32_VMX_BASIC,
+ MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ MSR_IA32_VMX_MISC,
+ MSR_IA32_VMX_CR0_FIXED0,
+ MSR_IA32_VMX_CR4_FIXED0,
+ MSR_IA32_VMX_VMCS_ENUM,
+ MSR_IA32_VMX_PROCBASED_CTLS2,
+ MSR_IA32_VMX_EPT_VPID_CAP,
+ MSR_IA32_VMX_VMFUNC,
+
+ MSR_K7_HWCR,
+ MSR_KVM_POLL_CONTROL,
+};
+
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
+static unsigned num_emulated_msrs;
+
+/*
+ * List of MSRs that control the existence of MSR-based features, i.e. MSRs
+ * that are effectively CPUID leafs. VMX MSRs are also included in the set of
+ * feature MSRs, but are handled separately to allow expedited lookups.
+ */
+static const u32 msr_based_features_all_except_vmx[] = {
+ MSR_AMD64_DE_CFG,
+ MSR_IA32_UCODE_REV,
+ MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_PERF_CAPABILITIES,
+};
+
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
+ (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
+static unsigned int num_msr_based_features;
+
+/*
+ * All feature MSRs except uCode revID, which tracks the currently loaded uCode
+ * patch, are immutable once the vCPU model is defined.
+ */
+static bool kvm_is_immutable_feature_msr(u32 msr)
{
- const char *op = write ? "wrmsr" : "rdmsr";
+ int i;
- if (ignore_msrs) {
- if (report_ignored_msrs)
- kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
- op, msr, data);
- /* Mask the error */
+ if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
return true;
- } else {
+
+ for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
+ if (msr == msr_based_features_all_except_vmx[i])
+ return msr != MSR_IA32_UCODE_REV;
+ }
+
+ return false;
+}
+
+static bool kvm_is_advertised_msr(u32 msr_index)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_msrs_to_save; i++) {
+ if (msrs_to_save[i] == msr_index)
+ return true;
+ }
+
+ for (i = 0; i < num_emulated_msrs; i++) {
+ if (emulated_msrs[i] == msr_index)
+ return true;
+ }
+
+ return false;
+}
+
+typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated);
+
+static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
+ u64 *data, bool host_initiated,
+ enum kvm_msr_access rw,
+ msr_access_t msr_access_fn)
+{
+ const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
+ int ret;
+
+ BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
+
+ /*
+ * Zero the data on read failures to avoid leaking stack data to the
+ * guest and/or userspace, e.g. if the failure is ignored below.
+ */
+ ret = msr_access_fn(vcpu, msr, data, host_initiated);
+ if (ret && rw == MSR_TYPE_R)
+ *data = 0;
+
+ if (ret != KVM_MSR_RET_UNSUPPORTED)
+ return ret;
+
+ /*
+ * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM
+ * advertises to userspace, even if an MSR isn't fully supported.
+ * Simply check that @data is '0', which covers both the write '0' case
+ * and all reads (in which case @data is zeroed on failure; see above).
+ */
+ if (host_initiated && !*data && kvm_is_advertised_msr(msr))
+ return 0;
+
+ if (!ignore_msrs) {
kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
- op, msr, data);
- return false;
+ op, msr, *data);
+ return ret;
}
+
+ if (report_ignored_msrs)
+ kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
+
+ return 0;
}
static struct kmem_cache *kvm_alloc_emulator_cache(void)
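kvm_do_msr_access() above centralizes the "unsupported MSR" policy (zero the data on failed reads, honour ignore_msrs, let host-initiated accesses touch advertised MSRs) behind a single msr_access_t callback. A hedged sketch of how a getter plugs into it, modelled on the do_get_feature_msr() and kvm_get_msr_ignored_check() wrappers that appear further down in this patch; the wrapper name is hypothetical:

	/* Hypothetical wrapper: the callback does the raw access,
	 * kvm_do_msr_access() applies the common failure handling. */
	static int example_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
	{
		return kvm_do_msr_access(vcpu, index, data, /*host_initiated=*/false,
					 MSR_TYPE_R, __kvm_get_msr);
	}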
@@ -355,7 +568,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
/*
* Disabling irqs at this point since the following code could be
- * interrupted and executed through kvm_arch_hardware_disable()
+ * interrupted and executed through kvm_arch_disable_virtualization_cpu()
*/
local_irq_save(flags);
if (msrs->registered) {
@@ -413,8 +626,7 @@ EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
static void kvm_user_return_msr_cpu_online(void)
{
- unsigned int cpu = smp_processor_id();
- struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+ struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
u64 value;
int i;
@@ -621,12 +833,6 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
ex->payload = payload;
}
-/* Forcibly leave the nested mode in cases like a vCPU reset */
-static void kvm_leave_nested(struct kvm_vcpu *vcpu)
-{
- kvm_x86_ops.nested_ops->leave_nested(vcpu);
-}
-
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool has_payload, unsigned long payload, bool reinject)
@@ -1412,178 +1618,6 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
- * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
- * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
- * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
- * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
- * MSRs that KVM emulates without strictly requiring host support.
- * msr_based_features holds MSRs that enumerate features, i.e. are effectively
- * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
- * msrs_to_save and emulated_msrs.
- */
-
-static const u32 msrs_to_save_base[] = {
- MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
- MSR_STAR,
-#ifdef CONFIG_X86_64
- MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
-#endif
- MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
- MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
- MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
- MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
- MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
- MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
- MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
- MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
- MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
- MSR_IA32_UMWAIT_CONTROL,
-
- MSR_IA32_XFD, MSR_IA32_XFD_ERR,
-};
-
-static const u32 msrs_to_save_pmu[] = {
- MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
- MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
- MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
- MSR_CORE_PERF_GLOBAL_CTRL,
- MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
-
- /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
- MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
- MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
- MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
- MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
- MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
- MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
- MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
- MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
-
- MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
- MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
-
- /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
- MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
- MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
- MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
- MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
-
- MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
- MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
- MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
-};
-
-static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
- ARRAY_SIZE(msrs_to_save_pmu)];
-static unsigned num_msrs_to_save;
-
-static const u32 emulated_msrs_all[] = {
- MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
- MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
-
-#ifdef CONFIG_KVM_HYPERV
- HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
- HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
- HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
- HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
- HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
- HV_X64_MSR_RESET,
- HV_X64_MSR_VP_INDEX,
- HV_X64_MSR_VP_RUNTIME,
- HV_X64_MSR_SCONTROL,
- HV_X64_MSR_STIMER0_CONFIG,
- HV_X64_MSR_VP_ASSIST_PAGE,
- HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
- HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
- HV_X64_MSR_SYNDBG_OPTIONS,
- HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
- HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
- HV_X64_MSR_SYNDBG_PENDING_BUFFER,
-#endif
-
- MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
- MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
-
- MSR_IA32_TSC_ADJUST,
- MSR_IA32_TSC_DEADLINE,
- MSR_IA32_ARCH_CAPABILITIES,
- MSR_IA32_PERF_CAPABILITIES,
- MSR_IA32_MISC_ENABLE,
- MSR_IA32_MCG_STATUS,
- MSR_IA32_MCG_CTL,
- MSR_IA32_MCG_EXT_CTL,
- MSR_IA32_SMBASE,
- MSR_SMI_COUNT,
- MSR_PLATFORM_INFO,
- MSR_MISC_FEATURES_ENABLES,
- MSR_AMD64_VIRT_SPEC_CTRL,
- MSR_AMD64_TSC_RATIO,
- MSR_IA32_POWER_CTL,
- MSR_IA32_UCODE_REV,
-
- /*
- * KVM always supports the "true" VMX control MSRs, even if the host
- * does not. The VMX MSRs as a whole are considered "emulated" as KVM
- * doesn't strictly require them to exist in the host (ignoring that
- * KVM would refuse to load in the first place if the core set of MSRs
- * aren't supported).
- */
- MSR_IA32_VMX_BASIC,
- MSR_IA32_VMX_TRUE_PINBASED_CTLS,
- MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
- MSR_IA32_VMX_TRUE_EXIT_CTLS,
- MSR_IA32_VMX_TRUE_ENTRY_CTLS,
- MSR_IA32_VMX_MISC,
- MSR_IA32_VMX_CR0_FIXED0,
- MSR_IA32_VMX_CR4_FIXED0,
- MSR_IA32_VMX_VMCS_ENUM,
- MSR_IA32_VMX_PROCBASED_CTLS2,
- MSR_IA32_VMX_EPT_VPID_CAP,
- MSR_IA32_VMX_VMFUNC,
-
- MSR_K7_HWCR,
- MSR_KVM_POLL_CONTROL,
-};
-
-static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
-static unsigned num_emulated_msrs;
-
-/*
- * List of MSRs that control the existence of MSR-based features, i.e. MSRs
- * that are effectively CPUID leafs. VMX MSRs are also included in the set of
- * feature MSRs, but are handled separately to allow expedited lookups.
- */
-static const u32 msr_based_features_all_except_vmx[] = {
- MSR_AMD64_DE_CFG,
- MSR_IA32_UCODE_REV,
- MSR_IA32_ARCH_CAPABILITIES,
- MSR_IA32_PERF_CAPABILITIES,
-};
-
-static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
- (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
-static unsigned int num_msr_based_features;
-
-/*
- * All feature MSRs except uCode revID, which tracks the currently loaded uCode
- * patch, are immutable once the vCPU model is defined.
- */
-static bool kvm_is_immutable_feature_msr(u32 msr)
-{
- int i;
-
- if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
- return true;
-
- for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
- if (msr == msr_based_features_all_except_vmx[i])
- return msr != MSR_IA32_UCODE_REV;
- }
-
- return false;
-}
-
-/*
* Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
* does not yet virtualize. These include:
* 10 - MISC_PACKAGE_CTRLS
@@ -1660,40 +1694,31 @@ static u64 kvm_get_arch_capabilities(void)
return data;
}
-static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
{
- switch (msr->index) {
+ WARN_ON_ONCE(!host_initiated);
+
+ switch (index) {
case MSR_IA32_ARCH_CAPABILITIES:
- msr->data = kvm_get_arch_capabilities();
+ *data = kvm_get_arch_capabilities();
break;
case MSR_IA32_PERF_CAPABILITIES:
- msr->data = kvm_caps.supported_perf_cap;
+ *data = kvm_caps.supported_perf_cap;
break;
case MSR_IA32_UCODE_REV:
- rdmsrl_safe(msr->index, &msr->data);
+ rdmsrl_safe(index, data);
break;
default:
- return kvm_x86_call(get_msr_feature)(msr);
+ return kvm_x86_call(get_feature_msr)(index, data);
}
return 0;
}
-static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
- struct kvm_msr_entry msr;
- int r;
-
- /* Unconditionally clear the output for simplicity */
- msr.data = 0;
- msr.index = index;
- r = kvm_get_msr_feature(&msr);
-
- if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
- r = 0;
-
- *data = msr.data;
-
- return r;
+ return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
+ kvm_get_feature_msr);
}
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -1880,16 +1905,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
return kvm_x86_call(set_msr)(vcpu, &msr);
}
+static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
+{
+ return __kvm_set_msr(vcpu, index, *data, host_initiated);
+}
+
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 data, bool host_initiated)
{
- int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
-
- if (ret == KVM_MSR_RET_INVALID)
- if (kvm_msr_ignored_check(index, data, true))
- ret = 0;
-
- return ret;
+ return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
+ _kvm_set_msr);
}
/*
@@ -1928,31 +1954,25 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 *data, bool host_initiated)
{
- int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
-
- if (ret == KVM_MSR_RET_INVALID) {
- /* Unconditionally clear *data for simplicity */
- *data = 0;
- if (kvm_msr_ignored_check(index, 0, false))
- ret = 0;
- }
-
- return ret;
+ return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
+ __kvm_get_msr);
}
-static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
return KVM_MSR_RET_FILTERED;
return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
+EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
-static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
return KVM_MSR_RET_FILTERED;
return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
+EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
@@ -1999,7 +2019,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
static u64 kvm_msr_reason(int r)
{
switch (r) {
- case KVM_MSR_RET_INVALID:
+ case KVM_MSR_RET_UNSUPPORTED:
return KVM_MSR_EXIT_REASON_UNKNOWN;
case KVM_MSR_RET_FILTERED:
return KVM_MSR_EXIT_REASON_FILTER;
@@ -2162,31 +2182,34 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
u32 msr = kvm_rcx_read(vcpu);
u64 data;
- fastpath_t ret = EXIT_FASTPATH_NONE;
+ fastpath_t ret;
+ bool handled;
kvm_vcpu_srcu_read_lock(vcpu);
switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
data = kvm_read_edx_eax(vcpu);
- if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
- kvm_skip_emulated_instruction(vcpu);
- ret = EXIT_FASTPATH_EXIT_HANDLED;
- }
+ handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
break;
case MSR_IA32_TSC_DEADLINE:
data = kvm_read_edx_eax(vcpu);
- if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
- kvm_skip_emulated_instruction(vcpu);
- ret = EXIT_FASTPATH_REENTER_GUEST;
- }
+ handled = !handle_fastpath_set_tscdeadline(vcpu, data);
break;
default:
+ handled = false;
break;
}
- if (ret != EXIT_FASTPATH_NONE)
+ if (handled) {
+ if (!kvm_skip_emulated_instruction(vcpu))
+ ret = EXIT_FASTPATH_EXIT_USERSPACE;
+ else
+ ret = EXIT_FASTPATH_REENTER_GUEST;
trace_kvm_msr_write(msr, data);
+ } else {
+ ret = EXIT_FASTPATH_NONE;
+ }
kvm_vcpu_srcu_read_unlock(vcpu);
@@ -3746,18 +3769,6 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
-static bool kvm_is_msr_to_save(u32 msr_index)
-{
- unsigned int i;
-
- for (i = 0; i < num_msrs_to_save; i++) {
- if (msrs_to_save[i] == msr_index)
- return true;
- }
-
- return false;
-}
-
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u32 msr = msr_info->index;
@@ -4139,15 +4150,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
- /*
- * Userspace is allowed to write '0' to MSRs that KVM reports
- * as to-be-saved, even if an MSRs isn't fully supported.
- */
- if (msr_info->host_initiated && !data &&
- kvm_is_msr_to_save(msr))
- break;
-
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
}
@@ -4498,17 +4501,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info);
- /*
- * Userspace is allowed to read MSRs that KVM reports as
- * to-be-saved, even if an MSR isn't fully supported.
- */
- if (msr_info->host_initiated &&
- kvm_is_msr_to_save(msr_info->index)) {
- msr_info->data = 0;
- break;
- }
-
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
}
@@ -4946,7 +4939,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
break;
}
case KVM_GET_MSRS:
- r = msr_io(NULL, argp, do_get_msr_feature, 1);
+ r = msr_io(NULL, argp, do_get_feature_msr, 1);
break;
#ifdef CONFIG_KVM_HYPERV
case KVM_GET_SUPPORTED_HV_CPUID:
@@ -7383,11 +7376,9 @@ out:
static void kvm_probe_feature_msr(u32 msr_index)
{
- struct kvm_msr_entry msr = {
- .index = msr_index,
- };
+ u64 data;
- if (kvm_get_msr_feature(&msr))
+ if (kvm_get_feature_msr(NULL, msr_index, &data, true))
return;
msr_based_features[num_msr_based_features++] = msr_index;
@@ -8865,60 +8856,13 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- int emulation_type)
+static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
+ gpa_t cr2_or_gpa,
+ int emulation_type)
{
- gpa_t gpa = cr2_or_gpa;
- kvm_pfn_t pfn;
-
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
- if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
- WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
- return false;
-
- if (!vcpu->arch.mmu->root_role.direct) {
- /*
- * Write permission should be allowed since only
- * write access need to be emulated.
- */
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
- /*
- * If the mapping is invalid in guest, let cpu retry
- * it to generate fault.
- */
- if (gpa == INVALID_GPA)
- return true;
- }
-
- /*
- * Do not retry the unhandleable instruction if it faults on the
- * readonly host memory, otherwise it will goto a infinite loop:
- * retry instruction -> write #PF -> emulation fail -> retry
- * instruction -> ...
- */
- pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-
- /*
- * If the instruction failed on the error pfn, it can not be fixed,
- * report the error to userspace.
- */
- if (is_error_noslot_pfn(pfn))
- return false;
-
- kvm_release_pfn_clean(pfn);
-
- /*
- * If emulation may have been triggered by a write to a shadowed page
- * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
- * guest to let the CPU re-execute the instruction in the hope that the
- * CPU can cleanly execute the instruction that KVM failed to emulate.
- */
- if (vcpu->kvm->arch.indirect_shadow_pages)
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-
/*
* If the failed instruction faulted on an access to page tables that
* are used to translate any part of the instruction, KVM can't resolve
@@ -8929,54 +8873,24 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* then zap the SPTE to unprotect the gfn, and then do it all over
* again. Report the error to userspace.
*/
- return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
-}
-
-static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
- gpa_t cr2_or_gpa, int emulation_type)
-{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
-
- last_retry_eip = vcpu->arch.last_retry_eip;
- last_retry_addr = vcpu->arch.last_retry_addr;
+ if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
+ return false;
/*
- * If the emulation is caused by #PF and it is non-page_table
- * writing instruction, it means the VM-EXIT is caused by shadow
- * page protected, we can zap the shadow page and retry this
- * instruction directly.
- *
- * Note: if the guest uses a non-page-table modifying instruction
- * on the PDE that points to the instruction, then we will unmap
- * the instruction and go to an infinite loop. So, we cache the
- * last retried eip and the last fault address, if we meet the eip
- * and the address again, we can break out of the potential infinite
- * loop.
+ * If emulation may have been triggered by a write to a shadowed page
+ * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
+ * guest to let the CPU re-execute the instruction in the hope that the
+ * CPU can cleanly execute the instruction that KVM failed to emulate.
*/
- vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
-
- if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
- return false;
-
- if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
- WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
- return false;
-
- if (x86_page_table_writing_insn(ctxt))
- return false;
-
- if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
- return false;
-
- vcpu->arch.last_retry_eip = ctxt->eip;
- vcpu->arch.last_retry_addr = cr2_or_gpa;
-
- if (!vcpu->arch.mmu->root_role.direct)
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+ __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
+ /*
+ * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
+ * all SPTEs were already zapped by a different task. The alternative
+ * is to report the error to userspace and likely terminate the guest,
+ * and the last_retry_{eip,addr} checks will prevent retrying the page
+ * fault indefinitely, i.e. there's nothing to lose by retrying.
+ */
return true;
}
@@ -9176,6 +9090,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
bool writeback = true;
+ if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+ (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
+ WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
+ emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
+
r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
if (r != X86EMUL_CONTINUE) {
if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
@@ -9206,8 +9125,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- if (reexecute_instruction(vcpu, cr2_or_gpa,
- emulation_type))
+ if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+ emulation_type))
return 1;
if (ctxt->have_exception &&
@@ -9254,7 +9173,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
return 1;
}
- if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
+ /*
+ * If emulation was caused by a write-protection #PF on a non-page_table
+ * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
+ * and retry the instruction, as the vCPU is likely no longer using the
+ * gfn as a page table.
+ */
+ if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+ !x86_page_table_writing_insn(ctxt) &&
+ kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
return 1;
/* this is needed for vmware backdoor interface to work since it
@@ -9285,7 +9212,8 @@ restart:
return 1;
if (r == EMULATION_FAILED) {
- if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
+ if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+ emulation_type))
return 1;
return handle_emulation_failure(vcpu, emulation_type);
@@ -9753,7 +9681,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
guard(mutex)(&vendor_module_lock);
- if (kvm_x86_ops.hardware_enable) {
+ if (kvm_x86_ops.enable_virtualization_cpu) {
pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
return -EEXIST;
}
@@ -9880,7 +9808,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
return 0;
out_unwind_ops:
- kvm_x86_ops.hardware_enable = NULL;
+ kvm_x86_ops.enable_virtualization_cpu = NULL;
kvm_x86_call(hardware_unsetup)();
out_mmu_exit:
kvm_mmu_vendor_module_exit();
@@ -9921,56 +9849,11 @@ void kvm_x86_vendor_exit(void)
WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
#endif
mutex_lock(&vendor_module_lock);
- kvm_x86_ops.hardware_enable = NULL;
+ kvm_x86_ops.enable_virtualization_cpu = NULL;
mutex_unlock(&vendor_module_lock);
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
-static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
-{
- /*
- * The vCPU has halted, e.g. executed HLT. Update the run state if the
- * local APIC is in-kernel, the run loop will detect the non-runnable
- * state and halt the vCPU. Exit to userspace if the local APIC is
- * managed by userspace, in which case userspace is responsible for
- * handling wake events.
- */
- ++vcpu->stat.halt_exits;
- if (lapic_in_kernel(vcpu)) {
- vcpu->arch.mp_state = state;
- return 1;
- } else {
- vcpu->run->exit_reason = reason;
- return 0;
- }
-}
-
-int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
-{
- return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
- int ret = kvm_skip_emulated_instruction(vcpu);
- /*
- * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
- * KVM_EXIT_DEBUG here.
- */
- return kvm_emulate_halt_noskip(vcpu) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-
-int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
-{
- int ret = kvm_skip_emulated_instruction(vcpu);
-
- return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
- KVM_EXIT_AP_RESET_HOLD) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
-
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
unsigned long clock_type)
@@ -11207,6 +11090,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
+ if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE))
+ return 0;
+
r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
return r;
@@ -11220,6 +11106,67 @@ out:
return r;
}
+static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+ !vcpu->arch.apf.halted);
+}
+
+static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+
+ if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
+ kvm_apic_init_sipi_allowed(vcpu))
+ return true;
+
+ if (vcpu->arch.pv.pv_unhalted)
+ return true;
+
+ if (kvm_is_exception_pending(vcpu))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+ (vcpu->arch.nmi_pending &&
+ kvm_x86_call(nmi_allowed)(vcpu, false)))
+ return true;
+
+#ifdef CONFIG_KVM_SMM
+ if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+ (vcpu->arch.smi_pending &&
+ kvm_x86_call(smi_allowed)(vcpu, false)))
+ return true;
+#endif
+
+ if (kvm_test_request(KVM_REQ_PMI, vcpu))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
+ return true;
+
+ if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
+ return true;
+
+ if (kvm_hv_has_stimer_pending(vcpu))
+ return true;
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+ kvm_x86_ops.nested_ops->has_events(vcpu, false))
+ return true;
+
+ if (kvm_xen_has_pending_events(vcpu))
+ return true;
+
+ return false;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+}
+
/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
@@ -11291,12 +11238,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
return 1;
}
-static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
- !vcpu->arch.apf.halted);
-}
-
/* Called within kvm->srcu read side. */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -11348,6 +11289,98 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
return r;
}
+static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+{
+ /*
+ * The vCPU has halted, e.g. executed HLT. Update the run state if the
+ * local APIC is in-kernel, the run loop will detect the non-runnable
+ * state and halt the vCPU. Exit to userspace if the local APIC is
+ * managed by userspace, in which case userspace is responsible for
+ * handling wake events.
+ */
+ ++vcpu->stat.halt_exits;
+ if (lapic_in_kernel(vcpu)) {
+ if (kvm_vcpu_has_events(vcpu))
+ vcpu->arch.pv.pv_unhalted = false;
+ else
+ vcpu->arch.mp_state = state;
+ return 1;
+ } else {
+ vcpu->run->exit_reason = reason;
+ return 0;
+ }
+}
+
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
+{
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+ /*
+ * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_emulate_halt_noskip(vcpu) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
+fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ kvm_vcpu_srcu_read_lock(vcpu);
+ ret = kvm_emulate_halt(vcpu);
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
+ if (!ret)
+ return EXIT_FASTPATH_EXIT_USERSPACE;
+
+ if (kvm_vcpu_running(vcpu))
+ return EXIT_FASTPATH_REENTER_GUEST;
+
+ return EXIT_FASTPATH_EXIT_HANDLED;
+}
+EXPORT_SYMBOL_GPL(handle_fastpath_hlt);
+
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
+ KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_apicv_active(vcpu) &&
+ kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
+}
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.preempted_in_kernel;
+}
+
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
+ kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
+ kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return true;
+
+ return kvm_arch_dy_has_pending_interrupt(vcpu);
+}
+
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
@@ -12264,8 +12297,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
- vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
-
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
@@ -12431,6 +12462,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!init_event) {
vcpu->arch.smbase = 0x30000;
+ vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
+
vcpu->arch.msr_misc_features_enables = 0;
vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
@@ -12516,7 +12549,17 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
-int kvm_arch_hardware_enable(void)
+void kvm_arch_enable_virtualization(void)
+{
+ cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
+}
+
+void kvm_arch_disable_virtualization(void)
+{
+ cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
+}
+
+int kvm_arch_enable_virtualization_cpu(void)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
@@ -12532,7 +12575,7 @@ int kvm_arch_hardware_enable(void)
if (ret)
return ret;
- ret = kvm_x86_call(hardware_enable)();
+ ret = kvm_x86_call(enable_virtualization_cpu)();
if (ret != 0)
return ret;
@@ -12612,9 +12655,9 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
- kvm_x86_call(hardware_disable)();
+ kvm_x86_call(disable_virtualization_cpu)();
drop_user_return_notifiers();
}
@@ -13162,87 +13205,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_arch_free_memslot(kvm, old);
}
-static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
-{
- if (!list_empty_careful(&vcpu->async_pf.done))
- return true;
-
- if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
- kvm_apic_init_sipi_allowed(vcpu))
- return true;
-
- if (vcpu->arch.pv.pv_unhalted)
- return true;
-
- if (kvm_is_exception_pending(vcpu))
- return true;
-
- if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
- (vcpu->arch.nmi_pending &&
- kvm_x86_call(nmi_allowed)(vcpu, false)))
- return true;
-
-#ifdef CONFIG_KVM_SMM
- if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
- (vcpu->arch.smi_pending &&
- kvm_x86_call(smi_allowed)(vcpu, false)))
- return true;
-#endif
-
- if (kvm_test_request(KVM_REQ_PMI, vcpu))
- return true;
-
- if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
- return true;
-
- if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
- return true;
-
- if (kvm_hv_has_stimer_pending(vcpu))
- return true;
-
- if (is_guest_mode(vcpu) &&
- kvm_x86_ops.nested_ops->has_events &&
- kvm_x86_ops.nested_ops->has_events(vcpu, false))
- return true;
-
- if (kvm_xen_has_pending_events(vcpu))
- return true;
-
- return false;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
-}
-
-bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_apicv_active(vcpu) &&
- kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
-}
-
-bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.preempted_in_kernel;
-}
-
-bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
-{
- if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
- return true;
-
- if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-#ifdef CONFIG_KVM_SMM
- kvm_test_request(KVM_REQ_SMI, vcpu) ||
-#endif
- kvm_test_request(KVM_REQ_EVENT, vcpu))
- return true;
-
- return kvm_arch_dy_has_pending_interrupt(vcpu);
-}
-
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.guest_state_protected)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 50596f6f8320..a84c48ef5278 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -103,11 +103,18 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
return max(val, min);
}
-#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
+#define MSR_IA32_CR_PAT_DEFAULT \
+ PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_vmentry_cpu != -1;
@@ -334,6 +341,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;
@@ -504,13 +512,26 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+enum kvm_msr_access {
+ MSR_TYPE_R = BIT(0),
+ MSR_TYPE_W = BIT(1),
+ MSR_TYPE_RW = MSR_TYPE_R | MSR_TYPE_W,
+};
+
/*
* Internal error codes that are used to indicate that MSR emulation encountered
- * an error that should result in #GP in the guest, unless userspace
- * handles it.
+ * an error that should result in #GP in the guest, unless userspace handles it.
+ * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
+ * as part of KVM's lightly documented internal KVM_RUN return codes.
+ *
+ * UNSUPPORTED - The MSR isn't supported, either because it is completely
+ * unknown to KVM, or because the MSR should not exist according
+ * to the vCPU model.
+ *
+ * FILTERED - Access to the MSR is denied by a userspace MSR filter.
*/
-#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */
-#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */
+#define KVM_MSR_RET_UNSUPPORTED 2
+#define KVM_MSR_RET_FILTERED 3
#define __cr4_reserved_bits(__cpu_has, __c) \
({ \
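As a quick illustration of the new x86.h definitions above (a standalone sketch, not KVM code — check_msr_access() and its callers are made up): the MSR_TYPE_* values are plain permission bits, and the KVM_MSR_RET_* codes deliberately avoid 0, 1 and negative values, which KVM_RUN already reserves.

/* Standalone sketch of the MSR_TYPE_* / KVM_MSR_RET_* conventions above. */
#include <stdio.h>

enum kvm_msr_access_sketch {
	MSR_TYPE_R  = 1 << 0,
	MSR_TYPE_W  = 1 << 1,
	MSR_TYPE_RW = MSR_TYPE_R | MSR_TYPE_W,
};

#define KVM_MSR_RET_UNSUPPORTED 2	/* #GP unless userspace intercepts */
#define KVM_MSR_RET_FILTERED    3	/* denied by a userspace MSR filter */

/* Hypothetical helper: 0 on success, a KVM_MSR_RET_* code otherwise. */
static int check_msr_access(unsigned int allowed, unsigned int requested)
{
	if (requested & ~allowed)
		return KVM_MSR_RET_UNSUPPORTED;
	return 0;
}

int main(void)
{
	printf("%d\n", check_msr_access(MSR_TYPE_R, MSR_TYPE_RW)); /* 2 */
	printf("%d\n", check_msr_access(MSR_TYPE_RW, MSR_TYPE_W)); /* 0 */
	return 0;
}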
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 90afb488b396..b2eff07d65e4 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -16,6 +16,11 @@
cmpxchg8b (\reg)
.endm
+.macro read64_nonatomic reg
+ movl (\reg), %eax
+ movl 4(\reg), %edx
+.endm
+
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
RET
@@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
movl %edx, %edi
movl %ecx, %ebp
- read64 %ecx
+ read64_nonatomic %ecx
1:
movl %eax, %ebx
movl %edx, %ecx
@@ -79,7 +84,7 @@ addsub_return sub sub sbb
SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx
- read64 %esi
+ read64_nonatomic %esi
1:
movl %eax, %ebx
movl %edx, %ecx
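The read64 → read64_nonatomic change above is safe because the value is only used to seed a cmpxchg8b retry loop: a torn initial read simply fails the compare and the loop reloads. A standalone C sketch of the same pattern, using GCC/Clang __atomic builtins instead of the kernel's asm:

/* Sketch: the initial 64-bit read may be torn on 32-bit, but the compare-and-
 * swap only succeeds when the full 8-byte value still matches, so a torn read
 * costs one extra loop iteration at worst. Illustration only. */
#include <stdint.h>

static int64_t atomic64_add_return_sketch(int64_t delta, int64_t *v)
{
	/* Plain (possibly non-atomic) read is fine here. */
	int64_t old = *(volatile int64_t *)v;
	int64_t new;

	do {
		new = old + delta;
		/* On failure, 'old' is reloaded with the current value. */
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return new;
}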
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index f73b5ce270b3..feb8cc6a12bf 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -176,15 +176,6 @@ static inline void set_page_memtype(struct page *pg,
}
#endif
-enum {
- PAT_UC = 0, /* uncached */
- PAT_WC = 1, /* Write combining */
- PAT_WT = 4, /* Write Through */
- PAT_WP = 5, /* Write Protected */
- PAT_WB = 6, /* Write Back (default) */
- PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */
-};
-
#define CM(c) (_PAGE_CACHE_MODE_ ## c)
static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
@@ -194,13 +185,13 @@ static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
char *cache_mode;
switch (pat_val) {
- case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
- case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
- case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
- case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
- case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
- case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
- default: cache = CM(WB); cache_mode = "WB "; break;
+ case X86_MEMTYPE_UC: cache = CM(UC); cache_mode = "UC "; break;
+ case X86_MEMTYPE_WC: cache = CM(WC); cache_mode = "WC "; break;
+ case X86_MEMTYPE_WT: cache = CM(WT); cache_mode = "WT "; break;
+ case X86_MEMTYPE_WP: cache = CM(WP); cache_mode = "WP "; break;
+ case X86_MEMTYPE_WB: cache = CM(WB); cache_mode = "WB "; break;
+ case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+ default: cache = CM(WB); cache_mode = "WB "; break;
}
memcpy(msg, cache_mode, 4);
@@ -257,12 +248,6 @@ void pat_cpu_init(void)
void __init pat_bp_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
-#define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \
- (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \
- ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \
- ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \
- ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56))
-
if (!IS_ENABLED(CONFIG_X86_PAT))
pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
@@ -293,7 +278,7 @@ void __init pat_bp_init(void)
* NOTE: When WC or WP is used, it is redirected to UC- per
* the default setup in __cachemode2pte_tbl[].
*/
- pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);
+ pat_msr_val = PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);
}
/*
@@ -328,7 +313,7 @@ void __init pat_bp_init(void)
* NOTE: When WT or WP is used, it is redirected to UC- per
* the default setup in __cachemode2pte_tbl[].
*/
- pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC);
+ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC);
} else {
/*
* Full PAT support. We put WT in slot 7 to improve
@@ -356,13 +341,12 @@ void __init pat_bp_init(void)
* The reserved slots are unused, but mapped to their
* corresponding types in the presence of PAT errata.
*/
- pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
+ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
}
memory_caching_control |= CACHE_PAT;
init_cache_modes(pat_msr_val);
-#undef PAT
}
static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
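The PAT() → PAT_VALUE() conversion above is purely mechanical. Assuming PAT_VALUE() packs one X86_MEMTYPE_* byte per PAT entry exactly as the removed local PAT() macro did, the KVM default in x86.h still expands to the old 0x0007040600070406ULL literal. A standalone check (the macro below mirrors the removed one; the real definition lives in the x86 headers):

#include <assert.h>
#include <stdint.h>

/* SDM memory type encodings, as used by the X86_MEMTYPE_* names. */
enum { X86_MEMTYPE_UC = 0, X86_MEMTYPE_WC = 1, X86_MEMTYPE_WT = 4,
       X86_MEMTYPE_WP = 5, X86_MEMTYPE_WB = 6, X86_MEMTYPE_UC_MINUS = 7 };

#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7)			\
	((uint64_t)X86_MEMTYPE_ ## p0       | (uint64_t)X86_MEMTYPE_ ## p1 << 8  | \
	 (uint64_t)X86_MEMTYPE_ ## p2 << 16 | (uint64_t)X86_MEMTYPE_ ## p3 << 24 | \
	 (uint64_t)X86_MEMTYPE_ ## p4 << 32 | (uint64_t)X86_MEMTYPE_ ## p5 << 40 | \
	 (uint64_t)X86_MEMTYPE_ ## p6 << 48 | (uint64_t)X86_MEMTYPE_ ## p7 << 56)

int main(void)
{
	assert(PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC) ==
	       0x0007040600070406ULL);
	return 0;
}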
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index f7235ef87bc3..64fca49cd88f 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -7,6 +7,7 @@
.code32
.text
#define _pa(x) ((x) - __START_KERNEL_map)
+#define rva(x) ((x) - pvh_start_xen)
#include <linux/elfnote.h>
#include <linux/init.h>
@@ -15,6 +16,7 @@
#include <asm/segment.h>
#include <asm/asm.h>
#include <asm/boot.h>
+#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
@@ -54,7 +56,25 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
UNWIND_HINT_END_OF_STACK
cld
- lgdt (_pa(gdt))
+ /*
+ * See the comment for startup_32 for more details. We need to
+ * execute a call to get the execution address to be position
+ * independent, but we don't have a stack. Save and restore the
+ * magic field of start_info in ebx, and use that as the stack.
+ */
+ mov (%ebx), %eax
+ leal 4(%ebx), %esp
+ ANNOTATE_INTRA_FUNCTION_CALL
+ call 1f
+1: popl %ebp
+ mov %eax, (%ebx)
+ subl $rva(1b), %ebp
+ movl $0, %esp
+
+ leal rva(gdt)(%ebp), %eax
+ leal rva(gdt_start)(%ebp), %ecx
+ movl %ecx, 2(%eax)
+ lgdt (%eax)
mov $PVH_DS_SEL,%eax
mov %eax,%ds
@@ -62,14 +82,14 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
mov %eax,%ss
/* Stash hvm_start_info. */
- mov $_pa(pvh_start_info), %edi
+ leal rva(pvh_start_info)(%ebp), %edi
mov %ebx, %esi
- mov _pa(pvh_start_info_sz), %ecx
+ movl rva(pvh_start_info_sz)(%ebp), %ecx
shr $2,%ecx
rep
movsl
- mov $_pa(early_stack_end), %esp
+ leal rva(early_stack_end)(%ebp), %esp
/* Enable PAE mode. */
mov %cr4, %eax
@@ -83,31 +103,86 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
btsl $_EFER_LME, %eax
wrmsr
+ mov %ebp, %ebx
+ subl $_pa(pvh_start_xen), %ebx /* offset */
+ jz .Lpagetable_done
+
+ /* Fixup page-tables for relocation. */
+ leal rva(pvh_init_top_pgt)(%ebp), %edi
+ movl $PTRS_PER_PGD, %ecx
+2:
+ testl $_PAGE_PRESENT, 0x00(%edi)
+ jz 1f
+ addl %ebx, 0x00(%edi)
+1:
+ addl $8, %edi
+ decl %ecx
+ jnz 2b
+
+ /* L3 ident has a single entry. */
+ leal rva(pvh_level3_ident_pgt)(%ebp), %edi
+ addl %ebx, 0x00(%edi)
+
+ leal rva(pvh_level3_kernel_pgt)(%ebp), %edi
+ addl %ebx, (PAGE_SIZE - 16)(%edi)
+ addl %ebx, (PAGE_SIZE - 8)(%edi)
+
+ /* pvh_level2_ident_pgt is fine - large pages */
+
+ /* pvh_level2_kernel_pgt needs adjustment - large pages */
+ leal rva(pvh_level2_kernel_pgt)(%ebp), %edi
+ movl $PTRS_PER_PMD, %ecx
+2:
+ testl $_PAGE_PRESENT, 0x00(%edi)
+ jz 1f
+ addl %ebx, 0x00(%edi)
+1:
+ addl $8, %edi
+ decl %ecx
+ jnz 2b
+
+.Lpagetable_done:
/* Enable pre-constructed page tables. */
- mov $_pa(init_top_pgt), %eax
+ leal rva(pvh_init_top_pgt)(%ebp), %eax
mov %eax, %cr3
mov $(X86_CR0_PG | X86_CR0_PE), %eax
mov %eax, %cr0
/* Jump to 64-bit mode. */
- ljmp $PVH_CS_SEL, $_pa(1f)
+ pushl $PVH_CS_SEL
+ leal rva(1f)(%ebp), %eax
+ pushl %eax
+ lretl
/* 64-bit entry point. */
.code64
1:
+ UNWIND_HINT_END_OF_STACK
+
/* Set base address in stack canary descriptor. */
mov $MSR_GS_BASE,%ecx
- mov $_pa(canary), %eax
+ leal canary(%rip), %eax
xor %edx, %edx
wrmsr
+ /*
+ * Calculate load offset and store in phys_base. __pa() needs
+ * phys_base set to calculate the hypercall page in xen_pvh_init().
+ */
+ movq %rbp, %rbx
+ subq $_pa(pvh_start_xen), %rbx
+ movq %rbx, phys_base(%rip)
call xen_prepare_pvh
+ /*
+ * Clear phys_base. __startup_64 will *add* to its value,
+ * so reset to 0.
+ */
+ xor %rbx, %rbx
+ movq %rbx, phys_base(%rip)
/* startup_64 expects boot_params in %rsi. */
- mov $_pa(pvh_bootparams), %rsi
- mov $_pa(startup_64), %rax
- ANNOTATE_RETPOLINE_SAFE
- jmp *%rax
+ lea pvh_bootparams(%rip), %rsi
+ jmp startup_64
#else /* CONFIG_X86_64 */
@@ -143,7 +218,7 @@ SYM_CODE_END(pvh_start_xen)
.balign 8
SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
- .long _pa(gdt_start)
+ .long _pa(gdt_start) /* x86-64 will overwrite if relocated. */
.word 0
SYM_DATA_END(gdt)
SYM_DATA_START_LOCAL(gdt_start)
@@ -163,5 +238,67 @@ SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
+#ifdef CONFIG_X86_64
+/*
+ * Xen PVH needs a set of identity mapped and kernel high mapping
+ * page tables. pvh_start_xen starts running on the identity mapped
+ * page tables, but xen_prepare_pvh calls into the high mapping.
+ * These page tables need to be relocatable and are only used until
+ * startup_64 transitions to init_top_pgt.
+ */
+SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
+ .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .org pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0
+ .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .org pvh_init_top_pgt + L4_START_KERNEL * 8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+SYM_DATA_END(pvh_init_top_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt)
+ .quad pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .fill 511, 8, 0
+SYM_DATA_END(pvh_level3_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt)
+ /*
+ * Since I easily can, map the first 1G.
+ * Don't set NX because code runs from these pages.
+ *
+ * Note: This sets _PAGE_GLOBAL regardless of whether
+ * the CPU supports it or whether it is enabled, but
+ * the CPU should ignore the bit.
+ */
+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+SYM_DATA_END(pvh_level2_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt)
+ .fill L3_START_KERNEL, 8, 0
+ /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
+ .quad pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .quad 0 /* no fixmap */
+SYM_DATA_END(pvh_level3_kernel_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt)
+ /*
+ * Kernel high mapping.
+ *
+ * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+ * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+ * 512 MiB otherwise.
+ *
+ * (NOTE: after that starts the module area, see MODULES_VADDR.)
+ *
+ * This table is eventually used by the kernel during normal runtime.
+ * Care must be taken to clear out undesired bits later, like _PAGE_RW
+ * or _PAGE_GLOBAL in some cases.
+ */
+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE)
+SYM_DATA_END(pvh_level2_kernel_pgt)
+
+ ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_RELOC,
+ .long CONFIG_PHYSICAL_ALIGN;
+ .long LOAD_PHYSICAL_ADDR;
+ .long KERNEL_IMAGE_SIZE - 1)
+#endif
+
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
_ASM_PTR (pvh_start_xen - __START_KERNEL_map))
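The page-table fixup loops added to pvh_start_xen boil down to "add the physical load offset to every present entry", where the offset is the runtime load address minus the link-time address. A C rendering of that loop, for illustration only (the real code runs from 32-bit asm before paging is enabled):

#include <stdint.h>

#define PAGE_PRESENT 0x1ULL

/* Relocate one page-table page in place: skip non-present entries, add the
 * load offset to the physical address embedded in every present entry. */
static void relocate_pgtable(uint64_t *table, unsigned int nr_entries,
			     uint64_t load_offset)
{
	for (unsigned int i = 0; i < nr_entries; i++) {
		if (table[i] & PAGE_PRESENT)
			table[i] += load_offset;
	}
}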
diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c
index f2383484840d..a1ee415c008d 100644
--- a/arch/x86/um/sysrq_32.c
+++ b/arch/x86/um/sysrq_32.c
@@ -9,7 +9,6 @@
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <asm/ptrace.h>
-#include <asm/sysrq.h>
/* This is declared by <linux/sched.h> */
void show_regs(struct pt_regs *regs)
diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c
index 0bf6de40abff..340d8a243c8a 100644
--- a/arch/x86/um/sysrq_64.c
+++ b/arch/x86/um/sysrq_64.c
@@ -12,7 +12,6 @@
#include <linux/utsname.h>
#include <asm/current.h>
#include <asm/ptrace.h>
-#include <asm/sysrq.h>
void show_regs(struct pt_regs *regs)
{
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 728a4366ca85..bf68c329fc01 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -4,6 +4,7 @@
#include <linux/mm.h>
#include <xen/hvc-console.h>
+#include <xen/acpi.h>
#include <asm/bootparam.h>
#include <asm/io_apic.h>
@@ -28,6 +29,28 @@
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
+#ifdef CONFIG_XEN_DOM0
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ int ret;
+ struct physdev_setup_gsi setup_gsi;
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (trigger == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (ret == -EEXIST) {
+ xen_raw_printk("Already setup the GSI :%d\n", gsi);
+ ret = 0;
+ } else if (ret)
+ xen_raw_printk("Fail to setup GSI (%d)!\n", gsi);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_pvh_setup_gsi);
+#endif
+
/*
* Reserve e820 UNUSABLE regions to inflate the memory balloon.
*
diff --git a/block/bdev.c b/block/bdev.c
index 33f9c4605e3a..738e3c8457e7 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -555,7 +555,7 @@ retry:
/* if claiming is already in progress, wait for it to finish */
if (whole->bd_claiming) {
- wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+ wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
DEFINE_WAIT(wait);
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
@@ -578,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
/* tell others that we're done */
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
+ wake_up_var(&whole->bd_claiming);
}
/**
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 96a2653905ae..88e3ad73c385 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -367,7 +367,6 @@ free_bvec:
kfree(bvec);
return ret;
}
-EXPORT_SYMBOL_GPL(bio_integrity_map_user);
/**
* bio_integrity_prep - Prepare bio for integrity I/O
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 010decc892ea..0a2b1c5d0ebf 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -53,7 +53,6 @@ new_segment:
return segments;
}
-EXPORT_SYMBOL(blk_rq_count_integrity_sg);
/**
* blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
@@ -63,19 +62,20 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
*
* Description: Map the integrity vectors in request into a
* scatterlist. The scatterlist must be big enough to hold all
- * elements. I.e. sized using blk_rq_count_integrity_sg().
+ * elements. I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
*/
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
struct bio_vec iv, ivprv = { NULL };
+ struct request_queue *q = rq->q;
struct scatterlist *sg = NULL;
+ struct bio *bio = rq->bio;
unsigned int segments = 0;
struct bvec_iter iter;
int prev = 0;
bio_for_each_integrity_vec(iv, bio, iter) {
-
if (prev) {
if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
@@ -103,10 +103,30 @@ new_segment:
if (sg)
sg_mark_end(sg);
+ /*
+ * Something must have gone wrong if the computed number of segments
+ * is bigger than the number of the request's physical integrity segments.
+ */
+ BUG_ON(segments > rq->nr_integrity_segments);
+ BUG_ON(segments > queue_max_integrity_segments(q));
return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed)
+{
+ int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
+
+ if (ret)
+ return ret;
+
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
+ rq->cmd_flags |= REQ_INTEGRITY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
+
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -134,7 +154,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
struct bio *bio)
{
int nr_integrity_segs;
- struct bio *next = bio->bi_next;
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
return true;
@@ -145,16 +164,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
return false;
- bio->bi_next = NULL;
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
- bio->bi_next = next;
-
if (req->nr_integrity_segments + nr_integrity_segs >
q->limits.max_integrity_segments)
return false;
- req->nr_integrity_segments += nr_integrity_segs;
-
return true;
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 56769c4bcd79..ad763ec313b6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -639,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
* counters.
*/
req->nr_phys_segments += nr_phys_segs;
+ if (bio_integrity(bio))
+ req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
+ bio);
return 1;
no_merge:
@@ -731,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
+ req->nr_integrity_segments += next->nr_integrity_segments;
return 1;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 831c5cf5d874..4b2c8e940f59 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->io_start_time_ns = 0;
rq->stats_sectors = 0;
rq->nr_phys_segments = 0;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
-#endif
rq->end_io = NULL;
rq->end_io_data = NULL;
@@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
rq->__sector = bio->bi_iter.bi_sector;
rq->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(rq, bio, nr_segs);
+ if (bio_integrity(bio))
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
+ bio);
/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cd8a8eabc9a5..a446654ddee5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);
-/**
- * blk_limits_io_min - set minimum request size for a device
- * @limits: the queue limits
- * @min: smallest I/O size in bytes
- *
- * Description:
- * Some devices have an internal block size bigger than the reported
- * hardware sector size. This function can be used to signal the
- * smallest I/O the device can perform without incurring a performance
- * penalty.
- */
-void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
-{
- limits->io_min = min;
-
- if (limits->io_min < limits->logical_block_size)
- limits->io_min = limits->logical_block_size;
-
- if (limits->io_min < limits->physical_block_size)
- limits->io_min = limits->physical_block_size;
-}
-EXPORT_SYMBOL(blk_limits_io_min);
-
-/**
- * blk_limits_io_opt - set optimal request size for a device
- * @limits: the queue limits
- * @opt: smallest I/O size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
-{
- limits->io_opt = opt;
-}
-EXPORT_SYMBOL(blk_limits_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
diff --git a/block/elevator.c b/block/elevator.c
index c355b55d0107..4122026b11f1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
strscpy(elevator_name, buf, sizeof(elevator_name));
- return request_module("%s-iosched", strstrip(elevator_name));
+ request_module("%s-iosched", strstrip(elevator_name));
+
+ return 0;
}
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e3a7c2aedd5f..d67f63d93b2a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -451,7 +451,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64)
+ depends on EFI && (X86 || ARM64 || LOONGARCH)
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index 8b8be0c90709..4f81a119ec08 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -7,9 +7,9 @@
*
* Author: Ben Cheatham <benjamin.cheatham@amd.com>
*/
-#include <linux/einj-cxl.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
+#include <cxl/einj.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 8bc71cdc2270..246076341e8c 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -199,7 +199,6 @@ static const struct file_operations erst_dbg_ops = {
.read = erst_dbg_read,
.write = erst_dbg_write,
.unlocked_ioctl = erst_dbg_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice erst_dbg_dev = {
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 623cc0cb4a65..ada93cfde9ba 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -27,7 +27,6 @@
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/cleanup.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -50,6 +49,7 @@
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
+#include <cxl/event.h>
#include <ras/ras_event.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index ff30ceca2203..630fe0a34bc6 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -288,7 +288,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
}
#endif /* CONFIG_X86_IO_APIC */
-static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e8643c69d426..978740537a1a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -277,7 +277,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
}
/**
- * binder_proc_unlock() - Release spinlock for given binder_proc
+ * binder_proc_unlock() - Release outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
@@ -1352,6 +1352,7 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
+ kfree(ref->freeze);
kfree(ref);
}
@@ -1546,7 +1547,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
* by threads that are being released. When done with the binder_proc,
* this function is called to decrement the counter and free the
* proc if appropriate (proc has been released, all threads have
- * been released and not currenly in-use to process a transaction).
+ * been released and not currently in-use to process a transaction).
*/
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
@@ -3842,6 +3843,155 @@ err_invalid_target_handle:
}
}
+static int
+binder_request_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+ bool is_frozen;
+
+ freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ if (!freeze)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (ref->freeze || !ref->node->proc) {
+ binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
+ proc->pid, thread->pid,
+ ref->freeze ? "already set" : "dead node");
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+ binder_inner_proc_lock(ref->node->proc);
+ is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_stats_created(BINDER_STAT_FREEZE);
+ INIT_LIST_HEAD(&freeze->work.entry);
+ freeze->cookie = handle_cookie->cookie;
+ freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ freeze->is_frozen = is_frozen;
+
+ ref->freeze = freeze;
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_clear_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (!ref->freeze) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ freeze = ref->freeze;
+ binder_inner_proc_lock(proc);
+ if (freeze->cookie != handle_cookie->cookie) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)freeze->cookie,
+ (u64)handle_cookie->cookie);
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ ref->freeze = NULL;
+ /*
+ * Take the existing freeze object and overwrite its work type. There are three cases here:
+ * 1. No pending notification. In this case just add the work to the queue.
+ * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
+ * should resend with the new work type.
+ * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
+ * needs to be done here.
+ */
+ freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
+ if (list_empty(&freeze->work.entry)) {
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ } else if (freeze->sent) {
+ freeze->resend = true;
+ }
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_freeze_notification_done(struct binder_proc *proc,
+ struct binder_thread *thread,
+ binder_uintptr_t cookie)
+{
+ struct binder_ref_freeze *freeze = NULL;
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ struct binder_ref_freeze *tmp_freeze =
+ container_of(w, struct binder_ref_freeze, work);
+
+ if (tmp_freeze->cookie == cookie) {
+ freeze = tmp_freeze;
+ break;
+ }
+ }
+ if (!freeze) {
+ binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
+ return -EINVAL;
+ }
+ binder_dequeue_work_ilocked(&freeze->work);
+ freeze->sent = false;
+ if (freeze->resend) {
+ freeze->resend = false;
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ return 0;
+}
+
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
@@ -4325,6 +4475,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
+ case BC_REQUEST_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_request_freeze_notification(proc, thread,
+ &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_FREEZE_NOTIFICATION_DONE: {
+ binder_uintptr_t cookie;
+ int error;
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(cookie);
+ error = binder_freeze_notification_done(proc, thread, cookie);
+ if (error)
+ return error;
+ } break;
+
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
@@ -4714,6 +4902,46 @@ retry:
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+
+ case BINDER_WORK_FROZEN_BINDER: {
+ struct binder_ref_freeze *freeze;
+ struct binder_frozen_state_info info;
+
+ memset(&info, 0, sizeof(info));
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ info.is_frozen = freeze->is_frozen;
+ info.cookie = freeze->cookie;
+ freeze->sent = true;
+ binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
+ binder_inner_proc_unlock(proc);
+
+ if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &info, sizeof(info)))
+ return -EFAULT;
+ ptr += sizeof(info);
+ binder_stat_br(proc, thread, BR_FROZEN_BINDER);
+ goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
+ } break;
+
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze =
+ container_of(w, struct binder_ref_freeze, work);
+ binder_uintptr_t cookie = freeze->cookie;
+
+ binder_inner_proc_unlock(proc);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
+ } break;
+
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@@ -5322,6 +5550,48 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
+static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+{
+ struct rb_node *n;
+ struct binder_ref *ref;
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
+ struct binder_node *node;
+
+ node = rb_entry(n, struct binder_node, rb_node);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * freeze notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->freeze) {
+ binder_inner_proc_unlock(ref->proc);
+ continue;
+ }
+ ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ if (list_empty(&ref->freeze->work.entry)) {
+ ref->freeze->is_frozen = is_frozen;
+ binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ } else {
+ if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
+ ref->freeze->resend = true;
+ ref->freeze->is_frozen = is_frozen;
+ }
+ binder_inner_proc_unlock(ref->proc);
+ }
+ binder_node_unlock(node);
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -5333,6 +5603,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ binder_add_freeze_work(target_proc, false);
return 0;
}
@@ -5365,6 +5636,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ } else {
+ binder_add_freeze_work(target_proc, true);
}
return ret;
@@ -5740,6 +6013,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@@ -6291,7 +6565,9 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
- "BR_TRANSACTION_PENDING_FROZEN"
+ "BR_TRANSACTION_PENDING_FROZEN",
+ "BR_FROZEN_BINDER",
+ "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
@@ -6314,6 +6590,9 @@ static const char * const binder_command_strings[] = {
"BC_DEAD_BINDER_DONE",
"BC_TRANSACTION_SG",
"BC_REPLY_SG",
+ "BC_REQUEST_FREEZE_NOTIFICATION",
+ "BC_CLEAR_FREEZE_NOTIFICATION",
+ "BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
@@ -6323,7 +6602,8 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
- "transaction_complete"
+ "transaction_complete",
+ "freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7d4fc53f7a73..f8d6be682f23 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -130,12 +130,13 @@ enum binder_stat_types {
BINDER_STAT_DEATH,
BINDER_STAT_TRANSACTION,
BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_FREEZE,
BINDER_STAT_COUNT
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
- atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
+ atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
@@ -160,6 +161,8 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ BINDER_WORK_FROZEN_BINDER,
+ BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
} type;
};
@@ -276,6 +279,14 @@ struct binder_ref_death {
binder_uintptr_t cookie;
};
+struct binder_ref_freeze {
+ struct binder_work work;
+ binder_uintptr_t cookie;
+ bool is_frozen:1;
+ bool sent:1;
+ bool resend:1;
+};
+
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
@@ -308,6 +319,8 @@ struct binder_ref_data {
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
+ * @freeze: pointer to freeze notification (ref_freeze) if requested
+ * (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
@@ -324,6 +337,7 @@ struct binder_ref {
struct binder_proc *proc;
struct binder_node *node;
struct binder_ref_death *death;
+ struct binder_ref_freeze *freeze;
};
/**
@@ -377,6 +391,8 @@ struct binder_ref {
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
* (protected by @inner_lock)
+ * @delivered_freeze: list of delivered freeze notification
+ * (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* (protected by @inner_lock)
* @requested_threads: number of binder threads requested but not
@@ -424,6 +440,7 @@ struct binder_proc {
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
+ struct list_head delivered_freeze;
u32 max_threads;
int requested_threads;
int requested_threads_started;
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 3001d754ac36..ad1fa7abc323 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -58,6 +58,7 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
bool extended_error;
+ bool freeze_notification;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -74,6 +75,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
+ .freeze_notification = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -608,6 +610,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
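From userspace, the new freeze notifications are driven like death notifications: a BC_* command plus a struct binder_handle_cookie written through BINDER_WRITE_READ. A rough sketch, assuming the updated uapi header that defines BC_REQUEST_FREEZE_NOTIFICATION (error handling and the read side, which delivers BR_FROZEN_BINDER, are omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* fd is an open binder device, handle a valid remote ref; cookie is echoed
 * back in BR_FROZEN_BINDER / BR_CLEAR_FREEZE_NOTIFICATION_DONE. */
static int request_freeze_notification(int fd, uint32_t handle, uint64_t cookie)
{
	uint8_t buf[sizeof(uint32_t) + sizeof(struct binder_handle_cookie)];
	uint32_t cmd = BC_REQUEST_FREEZE_NOTIFICATION;
	struct binder_handle_cookie hc = { .handle = handle, .cookie = cookie };
	struct binder_write_read bwr = {
		.write_size = sizeof(buf),
		.write_buffer = (uintptr_t)buf,
	};

	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), &hc, sizeof(hc));

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}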
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3328a6febc13..a4aedf7e1775 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2256,10 +2256,15 @@ static inline u16 ata_xlat_cdl_limit(u8 *buf)
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
u8 spg)
{
- u8 *b, *cdl = dev->cdl->desc_log_buf, *desc;
+ u8 *b, *cdl, *desc;
u32 policy;
int i;
+ if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl)
+ return 0;
+
+ cdl = dev->cdl->desc_log_buf;
+
/*
* Fill the subpage. The first four bytes of the T2A/T2B mode pages
* are a header. The PAGE LENGTH field is the size of the page
@@ -2356,7 +2361,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
case ALL_SUB_MPAGES:
n = ata_msense_control_spg0(dev, buf, changeable);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
- n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
n += ata_msense_control_ata_feature(dev, buf + n);
return n;
default:
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index a34e56a9d535..f3f5b2b0ecc9 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,8 +44,8 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
-#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
@@ -126,7 +126,7 @@ enum {
};
struct ep93xx_pata_data {
- const struct platform_device *pdev;
+ struct platform_device *pdev;
void __iomem *ide_base;
struct ata_timing t;
bool iordy;
@@ -135,9 +135,7 @@ struct ep93xx_pata_data {
unsigned long udma_out_phys;
struct dma_chan *dma_rx_channel;
- struct ep93xx_dma_data dma_rx_data;
struct dma_chan *dma_tx_channel;
- struct ep93xx_dma_data dma_tx_data;
};
static void ep93xx_pata_clear_regs(void __iomem *base)
@@ -637,20 +635,13 @@ static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
}
}
-static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
+static int ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
- if (ep93xx_dma_chan_is_m2p(chan))
- return false;
-
- chan->private = filter_param;
- return true;
-}
-
-static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
-{
- const struct platform_device *pdev = drv_data->pdev;
+ struct platform_device *pdev = drv_data->pdev;
+ struct device *dev = &pdev->dev;
dma_cap_mask_t mask;
struct dma_slave_config conf;
+ int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -660,22 +651,16 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
 * to request only one channel, and reprogram its direction at
 * the start of a new transfer.
*/
- drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
- drv_data->dma_rx_data.name = "ep93xx-pata-rx";
- drv_data->dma_rx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
- if (!drv_data->dma_rx_channel)
- return;
-
- drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
- drv_data->dma_tx_data.name = "ep93xx-pata-tx";
- drv_data->dma_tx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
- if (!drv_data->dma_tx_channel) {
- dma_release_channel(drv_data->dma_rx_channel);
- return;
+ drv_data->dma_rx_channel = dma_request_chan(dev, "rx");
+ if (IS_ERR(drv_data->dma_rx_channel))
+ return dev_err_probe(dev, PTR_ERR(drv_data->dma_rx_channel),
+ "rx DMA setup failed\n");
+
+ drv_data->dma_tx_channel = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(drv_data->dma_tx_channel)) {
+ ret = dev_err_probe(dev, PTR_ERR(drv_data->dma_tx_channel),
+ "tx DMA setup failed\n");
+ goto fail_release_rx;
}
/* Configure receive channel direction and source address */
@@ -683,10 +668,10 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure rx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
- return;
+ ret = dmaengine_slave_config(drv_data->dma_rx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure rx dma channel");
+ goto fail_release_dma;
}
/* Configure transmit channel direction and destination address */
@@ -694,10 +679,20 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure tx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
+ ret = dmaengine_slave_config(drv_data->dma_tx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure tx dma channel");
+ goto fail_release_dma;
}
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(drv_data->dma_rx_channel);
+fail_release_dma:
+ ep93xx_pata_release_dma(drv_data);
+
+ return ret;
}
static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
@@ -925,34 +920,26 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
void __iomem *ide_base;
int err;
- err = ep93xx_ide_acquire_gpio(pdev);
- if (err)
- return err;
-
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto err_rel_gpio;
- }
+ if (irq < 0)
+ return irq;
ide_base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
- if (IS_ERR(ide_base)) {
- err = PTR_ERR(ide_base);
- goto err_rel_gpio;
- }
+ if (IS_ERR(ide_base))
+ return PTR_ERR(ide_base);
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data) {
- err = -ENOMEM;
- goto err_rel_gpio;
- }
+ if (!drv_data)
+ return -ENOMEM;
drv_data->pdev = pdev;
drv_data->ide_base = ide_base;
drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
- ep93xx_pata_dma_init(drv_data);
+ err = ep93xx_pata_dma_init(drv_data);
+ if (err)
+ return err;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -1003,8 +990,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
err_rel_dma:
ep93xx_pata_release_dma(drv_data);
-err_rel_gpio:
- ep93xx_ide_release_gpio(pdev);
return err;
}
@@ -1016,12 +1001,18 @@ static void ep93xx_pata_remove(struct platform_device *pdev)
ata_host_detach(host);
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
- ep93xx_ide_release_gpio(pdev);
}
+static const struct of_device_id ep93xx_pata_of_ids[] = {
+ { .compatible = "cirrus,ep9312-pata" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_pata_of_ids);
+
static struct platform_driver ep93xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
.remove_new = ep93xx_pata_remove,
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index bb9463814454..19b619376d48 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -526,7 +526,6 @@ static const struct file_operations charlcd_fops = {
.write = charlcd_write,
.open = charlcd_open,
.release = charlcd_release,
- .llseek = no_llseek,
};
static struct miscdevice charlcd_dev = {
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 01ef796c2055..b6f941a6ab69 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -346,8 +346,7 @@ attribute_container_device_trigger_safe(struct device *dev,
* @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
- * the container and the classdev. If you only care about the
- * container, then use attribute_container_trigger() instead.
+ * the container and the classdev.
*/
void
attribute_container_device_trigger(struct device *dev,
@@ -379,33 +378,6 @@ attribute_container_device_trigger(struct device *dev,
}
/**
- * attribute_container_trigger - trigger a function for each matching container
- *
- * @dev: The generic device to activate the trigger for
- * @fn: the function to trigger
- *
- * This routine triggers a function that only needs to know the
- * matching containers (not the classdev) associated with a device.
- * It is more lightweight than attribute_container_device_trigger, so
- * should be used in preference unless the triggering function
- * actually needs to know the classdev.
- */
-void
-attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- if (cont->match(cont, dev))
- fn(cont, dev);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
* attribute_container_add_attrs - add attributes
*
* @classdev: The class device
@@ -459,24 +431,6 @@ attribute_container_add_class_device(struct device *classdev)
}
/**
- * attribute_container_add_class_device_adapter - simple adapter for triggers
- *
- * @cont: the container to register.
- * @dev: the generic device to activate the trigger for
- * @classdev: the class device to add
- *
- * This function is identical to attribute_container_add_class_device except
- * that it is designed to be called from the triggers
- */
-int
-attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- return attribute_container_add_class_device(classdev);
-}
-
-/**
* attribute_container_remove_attrs - remove any attribute files
*
* @classdev: The class device to remove the files from
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 54b92839e05c..7823888af4f6 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
*/
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct device *dev;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0b53593372d7..8cf04a557bdb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -145,7 +145,7 @@ void auxiliary_bus_init(void);
static inline void auxiliary_bus_init(void) { }
#endif
-struct kobject *virtual_device_parent(struct device *dev);
+struct kobject *virtual_device_parent(void);
int bus_add_device(struct device *dev);
void bus_probe_device(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index ffea0728b8b2..657c93c38b0d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for reading a bus attribute without show() */
+ ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for writing a bus attribute without store() */
+ ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -920,6 +922,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
+ /* Above kset_unregister() will kfree @priv */
+ priv = NULL;
out:
kfree(priv);
return retval;
@@ -1294,7 +1298,7 @@ int subsys_virtual_register(const struct bus_type *subsys,
{
struct kobject *virtual_dir;
- virtual_dir = virtual_device_parent(NULL);
+ virtual_dir = virtual_device_parent();
if (!virtual_dir)
return -ENOMEM;
@@ -1385,8 +1389,13 @@ int __init buses_init(void)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
+ if (!system_kset) {
+ /* Do error handling here as devices_init() does */
+ kset_unregister(bus_kset);
+ bus_kset = NULL;
+ pr_err("%s: failed to create and add kset 'bus'\n", __func__);
return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 23b8cba4a2a3..7a7609298e18 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -202,29 +202,24 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
static int cache_setup_of_node(unsigned int cpu)
{
- struct device_node *np, *prev;
struct cacheinfo *this_leaf;
unsigned int index = 0;
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
- prev = np;
-
while (index < cache_leaves(cpu)) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1) {
+ struct device_node *prev __free(device_node) = np;
np = of_find_next_cache_node(np);
- of_node_put(prev);
- prev = np;
if (!np)
break;
}
@@ -233,8 +228,6 @@ static int cache_setup_of_node(unsigned int cpu)
index++;
}
- of_node_put(np);
-
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
return -ENOENT;
@@ -243,17 +236,14 @@ static int cache_setup_of_node(unsigned int cpu)
static bool of_check_cache_nodes(struct device_node *np)
{
- struct device_node *next;
-
if (of_property_present(np, "cache-size") ||
of_property_present(np, "i-cache-size") ||
of_property_present(np, "d-cache-size") ||
of_property_present(np, "cache-unified"))
return true;
- next = of_find_next_cache_node(np);
+ struct device_node *next __free(device_node) = of_find_next_cache_node(np);
if (next) {
- of_node_put(next);
return true;
}
@@ -287,12 +277,10 @@ static int of_count_cache_leaves(struct device_node *np)
int init_of_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct device_node *np = of_cpu_device_node_get(cpu);
- struct device_node *prev = NULL;
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
unsigned int levels = 0, leaves, level;
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
@@ -300,30 +288,27 @@ int init_of_cache_level(unsigned int cpu)
if (leaves > 0)
levels = 1;
- prev = np;
- while ((np = of_find_next_cache_node(np))) {
- of_node_put(prev);
- prev = np;
+ while (1) {
+ struct device_node *prev __free(device_node) = np;
+ np = of_find_next_cache_node(np);
+ if (!np)
+ break;
+
if (!of_device_is_compatible(np, "cache"))
- goto err_out;
+ return -EINVAL;
if (of_property_read_u32(np, "cache-level", &level))
- goto err_out;
+ return -EINVAL;
if (level <= levels)
- goto err_out;
+ return -EINVAL;
leaves += of_count_cache_leaves(np);
levels = level;
}
- of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
-
-err_out:
- of_node_put(np);
- return -EINVAL;
}
#else
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7b38fdf8e1d7..cb5359235c70 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -183,6 +183,17 @@ int class_register(const struct class *cls)
pr_debug("device class '%s': registering\n", cls->name);
+ if (cls->ns_type && !cls->namespace) {
+ pr_err("%s: class '%s' does not have namespace\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+ if (!cls->ns_type && cls->namespace) {
+ pr_err("%s: class '%s' does not have ns_type\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -433,8 +444,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data,
- int (*match)(struct device *, const void *))
+ const void *data, device_match_t match)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8c0733d3aad8..a4c853411a6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,29 +9,30 @@
*/
#include <linux/acpi.h>
+#include <linux/blkdev.h>
+#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-#include <linux/netdevice.h>
#include <linux/rcupdate.h>
-#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "physical_location.h"
@@ -97,12 +98,9 @@ static int __fwnode_link_add(struct fwnode_handle *con,
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
u8 flags)
{
- int ret;
+ guard(mutex)(&fwnode_link_lock);
- mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, flags);
- mutex_unlock(&fwnode_link_lock);
- return ret;
+ return __fwnode_link_add(con, sup, flags);
}
/**
@@ -143,10 +141,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -159,10 +157,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -563,20 +561,11 @@ static struct class devlink_class = {
static int devlink_add_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
int ret;
- size_t len;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
-
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
@@ -586,58 +575,64 @@ static int devlink_add_symlinks(struct device *dev)
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con) {
+ ret = -ENOMEM;
+ goto err_con_dev;
+ }
+
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup) {
+ ret = -ENOMEM;
+ goto err_sup_dev;
+ }
+
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
+ sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
- kfree(buf);
return ret;
}
static void devlink_remove_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
struct device_link *link = to_devlink(dev);
- size_t len;
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf) {
- WARN(1, "Unable to properly free device link symlinks!\n");
- return;
- }
-
if (device_is_registered(con)) {
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup)
+ goto out;
+ sysfs_remove_link(&con->kobj, buf_sup);
}
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
- kfree(buf);
+
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con)
+ goto out;
+ sysfs_remove_link(&sup->kobj, buf_con);
+
+ return;
+
+out:
+ WARN(1, "Unable to properly free device link symlinks!\n");
}
static struct class_interface devlink_class_intf = {
@@ -678,6 +673,9 @@ postcore_initcall(devlink_class_init);
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
+ * Return: On success, a device_link struct will be returned.
+ * On error or invalid flag settings, NULL will be returned.
+ *
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
@@ -1061,20 +1059,16 @@ int device_links_check_suppliers(struct device *dev)
* Device waiting for supplier to become available is not allowed to
* probe.
*/
- mutex_lock(&fwnode_link_lock);
- sup_fw = fwnode_links_check_suppliers(dev->fwnode);
- if (sup_fw) {
- if (!dev_is_best_effort(dev)) {
- fwnode_ret = -EPROBE_DEFER;
- dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwf\n", sup_fw);
- } else {
- fwnode_ret = -EAGAIN;
+ scoped_guard(mutex, &fwnode_link_lock) {
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
+ if (dev_is_best_effort(dev))
+ fwnode_ret = -EAGAIN;
+ else
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwf\n", sup_fw);
}
}
- mutex_unlock(&fwnode_link_lock);
- if (fwnode_ret == -EPROBE_DEFER)
- return fwnode_ret;
device_links_write_lock();
@@ -1093,10 +1087,8 @@ int device_links_check_suppliers(struct device *dev)
}
device_links_missing_supplier(dev);
- dev_err_probe(dev, -EPROBE_DEFER,
- "supplier %s not ready\n",
- dev_name(link->supplier));
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n", dev_name(link->supplier));
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
@@ -1249,9 +1241,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- mutex_lock(&fwnode_link_lock);
- val = !!fwnode_links_check_suppliers(dev->fwnode);
- mutex_unlock(&fwnode_link_lock);
+ scoped_guard(mutex, &fwnode_link_lock)
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
@@ -1324,13 +1315,15 @@ void device_links_driver_bound(struct device *dev)
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
+
fwnode_links_purge_suppliers(dev->fwnode);
- mutex_lock(&fwnode_link_lock);
+
+ guard(mutex)(&fwnode_link_lock);
+
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
- mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
@@ -2339,10 +2332,10 @@ static void fw_devlink_link_device(struct device *dev)
fw_devlink_parse_fwtree(fwnode);
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
- mutex_unlock(&fwnode_link_lock);
}
/* Device links support end. */
@@ -2591,7 +2584,7 @@ static const void *device_namespace(const struct kobject *kobj)
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -3170,7 +3163,7 @@ void device_initialize(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_initialize);
-struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(void)
{
static struct kobject *virtual_dir = NULL;
@@ -3248,7 +3241,7 @@ static struct kobject *get_device_parent(struct device *dev,
* in a "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
- parent_kobj = virtual_device_parent(dev);
+ parent_kobj = virtual_device_parent();
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
@@ -4003,7 +3996,7 @@ int device_for_each_child(struct device *parent, void *data,
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4033,7 +4026,7 @@ int device_for_each_child_reverse(struct device *parent, void *data,
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4067,7 +4060,7 @@ struct device *device_find_child(struct device *parent, void *data,
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4515,9 +4508,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
+ bool is_link_renamed = false;
dev = get_device(dev);
if (!dev)
@@ -4532,7 +4527,7 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- struct subsys_private *sp = class_to_subsys(dev->class);
+ sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
@@ -4541,16 +4536,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
- subsys_put(sp);
if (error)
goto out;
+
+ is_link_renamed = true;
}
error = kobject_rename(kobj, new_name);
- if (error)
- goto out;
-
out:
+ if (error && is_link_renamed)
+ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
+ old_device_name, kobject_namespace(kobj));
+ subsys_put(sp);
+
put_device(dev);
kfree(old_device_name);
@@ -4872,7 +4870,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
else
return;
- strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+ strscpy(dev_info->subsystem, subsys);
/*
* Add device identifier DEVICE=:
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 964111361497..f0e4b4aba885 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -248,7 +248,7 @@ static int deferred_devs_show(struct seq_file *s, void *data)
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\t%s", dev_name(curr->device),
- curr->device->p->deferred_probe_reason ?: "\n");
+ curr->deferred_probe_reason ?: "\n");
mutex_unlock(&deferred_probe_mutex);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index a2ce0ead06a6..2152eec0c135 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1231,6 +1231,6 @@ void devm_free_percpu(struct device *dev, void __percpu *pdata)
* devm_free_pages() does.
*/
WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (__force void *)pdata));
+ (void *)(__force unsigned long)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 88c6fd1f1992..b4eb5b89c4ee 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
*/
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct klist_iter i;
struct device *dev;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index a03ee4b11134..324a9a3c087a 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -849,6 +849,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
+/*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from device-supplied
+ * strings, and we don't want some device to be able to tell us "I would like to
+ * be sent my firmware from ../../../etc/shadow, please".
+ *
+ * Search for ".." surrounded by either '/' or start/end of string.
+ *
+ * This intentionally only looks at the firmware name, not at the firmware base
+ * directory or at symlink contents.
+ */
+static bool name_contains_dotdot(const char *name)
+{
+ size_t name_len = strlen(name);
+
+ return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -869,6 +889,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+ if (name_contains_dotdot(name)) {
+ dev_warn(device,
+ "Firmware load for '%s' refused, path contains '..' component\n",
+ name);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
@@ -946,6 +974,8 @@ out:
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
+ * It must not contain any ".." path components - "foo/bar..bin" is
+ * allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f742ad2a21da..c4eaa1158d54 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
driver_name = make_driver_name(drv);
if (!driver_name) {
ret = -ENOMEM;
- goto out;
+ goto out_remove_kobj;
}
module_create_drivers_dir(mk);
if (!mk->drivers_dir) {
ret = -EINVAL;
- goto out;
+ goto out_free_driver_name;
}
ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
if (ret)
- goto out;
+ goto out_remove_drivers_dir;
kfree(driver_name);
return 0;
-out:
- sysfs_remove_link(&drv->p->kobj, "module");
+
+out_remove_drivers_dir:
sysfs_remove_link(mk->drivers_dir, driver_name);
+
+out_free_driver_name:
kfree(driver_name);
+out_remove_kobj:
+ sysfs_remove_link(&drv->p->kobj, "module");
return ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4c3ee6521ba5..6f2a33722c52 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1474,7 +1474,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = {
USE_PLATFORM_PM_SLEEP_OPS
};
-struct bus_type platform_bus_type = {
+const struct bus_type platform_bus_type = {
.name = "platform",
.dev_groups = platform_dev_groups,
.match = platform_match,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 449123eb54bf..0d74d75260ef 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
+ }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 11901f2812ad..223faa9d5ffd 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2259,14 +2259,12 @@ static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_registers,
- .llseek = no_llseek,
};
static const struct file_operations mtip_flags_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_flags,
- .llseek = no_llseek,
};
static void mtip_hw_debugfs_init(struct driver_data *dd)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3edb37a41312..499c110465e3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2835,7 +2835,6 @@ static const struct file_operations pkt_ctl_fops = {
.compat_ioctl = pkt_ctl_compat_ioctl,
#endif
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
static struct miscdevice pkt_misc = {
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index bca06bfb4bc3..a6c8e5cc6051 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1983,7 +1983,6 @@ static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
- .llseek = no_llseek,
.read_iter = ublk_ch_read_iter,
.write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c3d245617083..ad9c9bc3ccfc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2115,8 +2115,10 @@ static void zram_destroy_comps(struct zram *zram)
zram->num_active_comps--;
}
- for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
- kfree(zram->comp_algs[prio]);
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ /* Do not free statically defined compression algorithms */
+ if (zram->comp_algs[prio] != default_compressor)
+ kfree(zram->comp_algs[prio]);
zram->comp_algs[prio] = NULL;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 43e9ac5a3324..aa6af351d02d 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -679,7 +679,6 @@ static const struct file_operations vhci_fops = {
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
- .llseek = no_llseek,
};
static struct miscdevice vhci_miscdev = {
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index dd68b8191a0a..930d8a3ba722 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -309,7 +309,7 @@ static struct attribute *fsl_mc_bus_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_bus);
-struct bus_type fsl_mc_bus_type = {
+const struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index ce7d2e62c2f1..a9b1f8beee7b 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1464,7 +1464,7 @@ static int mhi_match(struct device *dev, const struct device_driver *drv)
return 0;
};
-struct bus_type mhi_bus_type = {
+const struct bus_type mhi_bus_type = {
.name = "mhi",
.dev_name = "mhi",
.match = mhi_match,
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index aaad40a07f69..d057e877932e 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -9,7 +9,7 @@
#include "../common.h"
-extern struct bus_type mhi_bus_type;
+extern const struct bus_type mhi_bus_type;
/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 14a11880bcea..9938bb034c1c 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -26,6 +26,7 @@
/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES 0x1269
#define PCI_VENDOR_ID_QUECTEL 0x1eac
+#define PCI_VENDOR_ID_NETPRISMA 0x203e
#define MHI_EDL_DB 91
#define MHI_EDL_COOKIE 0xEDEDEDED
@@ -433,8 +434,8 @@ static const struct mhi_controller_config modem_foxconn_sdx72_config = {
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.name = "foxconn-sdx55",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -444,8 +445,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
.name = "foxconn-t99w175",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -455,8 +456,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
.name = "foxconn-dw5930e",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -466,6 +467,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
.name = "foxconn-t99w368",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -475,6 +478,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
.name = "foxconn-t99w373",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -484,6 +489,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
.name = "foxconn-t99w510",
+ .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -493,6 +500,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
.name = "foxconn-dw5932e",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -502,7 +511,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
.name = "foxconn-t99w515",
- .edl = "fox/sdx72m/edl.mbn",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
@@ -513,7 +522,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
.name = "foxconn-dw5934e",
- .edl = "fox/sdx72m/edl.mbn",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
@@ -680,6 +689,35 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.mru_default = 32768,
};
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
+ .name = "telit-fe990a",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
+ .name = "netprisma-lcur57",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
+ .name = "netprisma-fcun69",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@@ -697,9 +735,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
- /* Telit FE990 */
+ /* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
@@ -778,6 +816,12 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
+ /* NETPRISMA LCUR57 (SDX24) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
+ /* NETPRISMA FCUN69 (SDX6X) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 8412406c4f1d..6276551d7968 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -484,7 +484,6 @@ static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
- .llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
@@ -549,7 +548,6 @@ static const struct file_operations output_fops = {
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
- .llseek = no_llseek,
};
static int moxtet_register_debugfs(struct moxtet *moxtet)
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index 1eedc5eeb315..e760f8d347cc 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -27,10 +27,6 @@
#include "bitfield.h"
#include "mcdi.h"
-struct cdx_mcdi_copy_buffer {
- struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)];
-};
-
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 69314532f38c..9fed9706d9cd 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = ac_read,
.write = ac_write,
.unlocked_ioctl = ac_ioctl,
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index a4f4291b4492..44a1cdbd4bfb 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = {
.open = ds1620_open,
.read = ds1620_read,
.unlocked_ioctl = ds1620_unlocked_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice ds1620_miscdev = {
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 5a1a73310e97..27f5f9d19531 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -107,7 +107,6 @@ static const struct file_operations dtlk_fops =
.unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
- .llseek = no_llseek,
};
/* local prototypes */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index da32e8ed0830..e904e476e49a 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -700,7 +700,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations hpet_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
.unlocked_ioctl = hpet_ioctl,
@@ -808,7 +807,7 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpets *hpetp;
struct hpet __iomem *hpet;
static struct hpets *last;
- unsigned long period;
+ u32 period;
unsigned long long temp;
u32 remainder;
@@ -865,11 +864,11 @@ int hpet_alloc(struct hpet_data *hdp)
do_div(temp, period);
hpetp->hp_tick_freq = temp; /* ticks per second */
- printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 9a459257489f..335eea80054e 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -903,7 +903,6 @@ static const struct file_operations ipmi_wdog_fops = {
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
- .llseek = no_llseek,
};
static struct miscdevice ipmi_wdog_miscdev = {
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index c39a836ebd15..5f4696813cea 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = {
.open = pc8736x_gpio_open,
.write = nsc_gpio_write,
.read = nsc_gpio_read,
- .llseek = no_llseek,
};
static void __init pc8736x_init_shadow(void)
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index eaff98dbaa8c..d1dfbd8d4d42 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -786,7 +786,6 @@ static const struct class ppdev_class = {
static const struct file_operations pp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pp_read,
.write = pp_write,
.poll = pp_poll,
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 9f701dcba95c..700e6affea6f 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = {
.read = nsc_gpio_read,
.open = scx200_gpio_open,
.release = scx200_gpio_release,
- .llseek = no_llseek,
};
static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index bb5115b1736a..0f8185e541ed 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1054,7 +1054,6 @@ static const struct file_operations sonypi_misc_fops = {
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
.unlocked_ioctl = sonypi_misc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index e2c0baa69fef..97c94b5e9340 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file)
const struct file_operations tpm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 11c502039faf..8fe4a01eea12 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
static const struct file_operations vtpm_proxy_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = vtpm_proxy_fops_read,
.write = vtpm_proxy_fops_write,
.poll = vtpm_proxy_fops_poll,
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index eef0fb06ea83..c25df7ea064e 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file)
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index de7d720d99fa..99a7f2441e70 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1093,7 +1093,6 @@ static const struct file_operations port_fops = {
.poll = port_fops_poll,
.release = port_fops_release,
.fasync = port_fops_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 260961668e48..299bc678ed1b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -218,6 +218,14 @@ config COMMON_CLK_EN7523
This driver provides the fixed clocks and gates present on Airoha
ARM silicon.
+config COMMON_CLK_EP93XX
+ tristate "Clock driver for Cirrus Logic ep93xx SoC"
+ depends on ARCH_EP93XX || COMPILE_TEST
+ select AUXILIARY_BUS
+ select REGMAP_MMIO
+ help
+ This driver supports the SoC clocks on the Cirrus Logic ep93xx.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 9b783c3e5d2f..fb8878a5d7d9 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
+obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
new file mode 100644
index 000000000000..f888aed79b11
--- /dev/null
+++ b/drivers/clk/clk-ep93xx.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Clock control for Cirrus EP93xx chips.
+ * Copyright (C) 2021 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/clock.c:
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ */
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+#include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+
+#include <asm/div64.h>
+
+#define EP93XX_EXT_CLK_RATE 14745600
+#define EP93XX_EXT_RTC_RATE 32768
+
+#define EP93XX_SYSCON_POWER_STATE 0x00
+#define EP93XX_SYSCON_PWRCNT 0x04
+#define EP93XX_SYSCON_PWRCNT_UARTBAUD BIT(29)
+#define EP93XX_SYSCON_PWRCNT_USH_EN 28
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
+#define EP93XX_SYSCON_CLKSET1 0x20
+#define EP93XX_SYSCON_CLKSET1_NBYP1 BIT(23)
+#define EP93XX_SYSCON_CLKSET2 0x24
+#define EP93XX_SYSCON_CLKSET2_NBYP2 BIT(19)
+#define EP93XX_SYSCON_CLKSET2_PLL2_EN BIT(18)
+#define EP93XX_SYSCON_DEVCFG 0x80
+#define EP93XX_SYSCON_DEVCFG_U3EN 24
+#define EP93XX_SYSCON_DEVCFG_U2EN 20
+#define EP93XX_SYSCON_DEVCFG_U1EN 18
+#define EP93XX_SYSCON_VIDCLKDIV 0x84
+#define EP93XX_SYSCON_CLKDIV_ENABLE 15
+#define EP93XX_SYSCON_CLKDIV_ESEL BIT(14)
+#define EP93XX_SYSCON_CLKDIV_PSEL BIT(13)
+#define EP93XX_SYSCON_CLKDIV_MASK GENMASK(14, 13)
+#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
+#define EP93XX_SYSCON_I2SCLKDIV 0x8c
+#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
+#define EP93XX_SYSCON_I2SCLKDIV_ORIDE BIT(29)
+#define EP93XX_SYSCON_I2SCLKDIV_SPOL BIT(19)
+#define EP93XX_SYSCON_KEYTCHCLKDIV 0x90
+#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
+#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV 0
+#define EP93XX_SYSCON_CHIPID 0x94
+#define EP93XX_SYSCON_CHIPID_ID 0x9213
+
+#define EP93XX_FIXED_CLK_COUNT 21
+
+static const char ep93xx_adc_divisors[] = { 16, 4 };
+static const char ep93xx_sclk_divisors[] = { 2, 4 };
+static const char ep93xx_lrclk_divisors[] = { 32, 64, 128 };
+
+struct ep93xx_clk {
+ struct clk_hw hw;
+ u16 idx;
+ u16 reg;
+ u32 mask;
+ u8 bit_idx;
+ u8 shift;
+ u8 width;
+ u8 num_div;
+ const char *div;
+};
+
+struct ep93xx_clk_priv {
+ spinlock_t lock;
+ struct ep93xx_regmap_adev *aux_dev;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *map;
+ struct clk_hw *fixed[EP93XX_FIXED_CLK_COUNT];
+ struct ep93xx_clk reg[];
+};
+
+static struct ep93xx_clk *ep93xx_clk_from(struct clk_hw *hw)
+{
+ return container_of(hw, struct ep93xx_clk, hw);
+}
+
+static struct ep93xx_clk_priv *ep93xx_priv_from(struct ep93xx_clk *clk)
+{
+ return container_of(clk, struct ep93xx_clk_priv, reg[clk->idx]);
+}
+
+static void ep93xx_clk_write(struct ep93xx_clk_priv *priv, unsigned int reg, unsigned int val)
+{
+ struct ep93xx_regmap_adev *aux = priv->aux_dev;
+
+ aux->write(aux->map, aux->lock, reg, val);
+}
+
+static int ep93xx_clk_is_enabled(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ return !!(val & BIT(clk->bit_idx));
+}
+
+static int ep93xx_clk_enable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val |= BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static void ep93xx_clk_disable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+}
+
+static const struct clk_ops clk_ep93xx_gate_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+};
+
+static int ep93xx_clk_register_gate(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ unsigned long flags,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ep93xx_gate_ops;
+ init.flags = flags;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ val &= EP93XX_SYSCON_CLKDIV_MASK;
+
+ switch (val) {
+ case EP93XX_SYSCON_CLKDIV_ESEL:
+ return 1; /* PLL1 */
+ case EP93XX_SYSCON_CLKDIV_MASK:
+ return 2; /* PLL2 */
+ default:
+ return 0; /* XTALI */
+ }
+}
+
+static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ if (index >= 3)
+ return -EINVAL;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~(EP93XX_SYSCON_CLKDIV_MASK);
+ val |= index > 0 ? EP93XX_SYSCON_CLKDIV_ESEL : 0;
+ val |= index > 1 ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static bool is_best(unsigned long rate, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(rate, now) < abs_diff(rate, best);
+}
+
+static int ep93xx_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long best_rate = 0, actual_rate, mclk_rate;
+ unsigned long rate = req->rate;
+ struct clk_hw *parent_best = NULL;
+ unsigned long parent_rate_best;
+ unsigned long parent_rate;
+ int div, pdiv;
+ unsigned int i;
+
+ /*
+ * Try the two PLLs and the external clock. Because the valid
+ * predividers are 2, 2.5 and 3, we multiply all the clocks by 2
+ * to avoid floating point math.
+ *
+ * This is based on the algorithm in the ep93xx raster guide:
+ * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+
+ parent_rate = clk_hw_get_rate(parent);
+ mclk_rate = parent_rate * 2;
+
+ /* Try each predivider value */
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (is_best(rate, actual_rate, best_rate)) {
+ best_rate = actual_rate;
+ parent_rate_best = parent_rate;
+ parent_best = parent;
+ }
+ }
+ }
+
+ if (!parent_best)
+ return -EINVAL;
+
+ req->best_parent_rate = parent_rate_best;
+ req->best_parent_hw = parent_best;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int pdiv, div;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ pdiv = (val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & GENMASK(1, 0);
+ div = val & GENMASK(6, 0);
+ if (!div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate * 2, (pdiv + 3) * div);
+}
+
+static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ int pdiv, div, npdiv, ndiv;
+ unsigned long actual_rate, mclk_rate, rate_err = ULONG_MAX;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ mclk_rate = parent_rate * 2;
+
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (abs(actual_rate - rate) < rate_err) {
+ npdiv = pdiv - 3;
+ ndiv = div;
+ rate_err = abs(actual_rate - rate);
+ }
+ }
+
+ if (rate_err == ULONG_MAX)
+ return -EINVAL;
+
+ /*
+ * Clear the old dividers.
+ * Bit 7 is a reserved bit in all ClkDiv registers.
+ */
+ val &= ~(GENMASK(9, 0) & ~BIT(7));
+
+ /* Set the new pdiv and div bits for the new clock rate */
+ val |= (npdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | ndiv;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ddiv_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .get_parent = ep93xx_mux_get_parent,
+ .set_parent = ep93xx_mux_set_parent_lock,
+ .determine_rate = ep93xx_mux_determine_rate,
+ .recalc_rate = ep93xx_ddiv_recalc_rate,
+ .set_rate = ep93xx_ddiv_set_rate,
+};
+
+static int ep93xx_clk_register_ddiv(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ u8 num_parents,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ddiv_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+ u8 index;
+
+ regmap_read(priv->map, clk->reg, &val);
+ index = (val & clk->mask) >> clk->shift;
+ if (index >= clk->num_div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
+}
+
+static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ unsigned long best = 0, now;
+ unsigned int i;
+
+ for (i = 0; i < clk->num_div; i++) {
+ if ((rate * clk->div[i]) == *parent_rate)
+ return rate;
+
+ now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
+ if (!best || is_best(rate, now, best))
+ best = now;
+ }
+
+ return best;
+}
+
+static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int i;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~clk->mask;
+ for (i = 0; i < clk->num_div; i++)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate, clk->div[i]))
+ break;
+
+ if (i == clk->num_div)
+ return -EINVAL;
+
+ val |= i << clk->shift;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops ep93xx_div_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .recalc_rate = ep93xx_div_recalc_rate,
+ .round_rate = ep93xx_div_round_rate,
+ .set_rate = ep93xx_div_set_rate,
+};
+
+static int ep93xx_register_div(struct ep93xx_clk *clk,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ unsigned int reg,
+ u8 enable_bit,
+ u8 shift,
+ u8 width,
+ const char *clk_divisors,
+ u8 num_div)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &ep93xx_div_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = enable_bit;
+ clk->mask = GENMASK(shift + width - 1, shift);
+ clk->shift = shift;
+ clk->div = clk_divisors;
+ clk->num_div = num_div;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+struct ep93xx_gate {
+ unsigned int idx;
+ unsigned int bit;
+ const char *name;
+};
+
+static const struct ep93xx_gate ep93xx_uarts[] = {
+ { EP93XX_CLK_UART1, EP93XX_SYSCON_DEVCFG_U1EN, "uart1" },
+ { EP93XX_CLK_UART2, EP93XX_SYSCON_DEVCFG_U2EN, "uart2" },
+ { EP93XX_CLK_UART3, EP93XX_SYSCON_DEVCFG_U3EN, "uart3" },
+};
+
+static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx, ret, clk_uart_div;
+ struct ep93xx_clk *clk;
+ u32 val;
+
+ regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
+ if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
+ clk_uart_div = 1;
+ else
+ clk_uart_div = 2;
+
+ priv->fixed[EP93XX_CLK_UART] =
+ devm_clk_hw_register_fixed_factor_index(priv->dev, "uart",
+ 0, /* XTALI external clock */
+ 0, 1, clk_uart_div);
+ parent_data.hw = priv->fixed[EP93XX_CLK_UART];
+
+ /* parenting uart gate clocks to uart clock */
+ for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
+ idx = ep93xx_uarts[i].idx - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_gate(clk,
+ ep93xx_uarts[i].name,
+ &parent_data, CLK_SET_RATE_PARENT,
+ EP93XX_SYSCON_DEVCFG,
+ ep93xx_uarts[i].bit);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "failed to register uart[%d] clock\n", i);
+ }
+
+ return 0;
+}
+
+static const struct ep93xx_gate ep93xx_dmas[] = {
+ { EP93XX_CLK_M2M0, EP93XX_SYSCON_PWRCNT_DMA_M2M0, "m2m0" },
+ { EP93XX_CLK_M2M1, EP93XX_SYSCON_PWRCNT_DMA_M2M1, "m2m1" },
+ { EP93XX_CLK_M2P0, EP93XX_SYSCON_PWRCNT_DMA_M2P0, "m2p0" },
+ { EP93XX_CLK_M2P1, EP93XX_SYSCON_PWRCNT_DMA_M2P1, "m2p1" },
+ { EP93XX_CLK_M2P2, EP93XX_SYSCON_PWRCNT_DMA_M2P2, "m2p2" },
+ { EP93XX_CLK_M2P3, EP93XX_SYSCON_PWRCNT_DMA_M2P3, "m2p3" },
+ { EP93XX_CLK_M2P4, EP93XX_SYSCON_PWRCNT_DMA_M2P4, "m2p4" },
+ { EP93XX_CLK_M2P5, EP93XX_SYSCON_PWRCNT_DMA_M2P5, "m2p5" },
+ { EP93XX_CLK_M2P6, EP93XX_SYSCON_PWRCNT_DMA_M2P6, "m2p6" },
+ { EP93XX_CLK_M2P7, EP93XX_SYSCON_PWRCNT_DMA_M2P7, "m2p7" },
+ { EP93XX_CLK_M2P8, EP93XX_SYSCON_PWRCNT_DMA_M2P8, "m2p8" },
+ { EP93XX_CLK_M2P9, EP93XX_SYSCON_PWRCNT_DMA_M2P9, "m2p9" },
+};
+
+static int ep93xx_dma_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx;
+
+ parent_data.hw = priv->fixed[EP93XX_CLK_HCLK];
+ for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
+ idx = ep93xx_dmas[i].idx;
+ priv->fixed[idx] = devm_clk_hw_register_gate_parent_data(priv->dev,
+ ep93xx_dmas[i].name,
+ &parent_data, 0,
+ priv->base + EP93XX_SYSCON_PWRCNT,
+ ep93xx_dmas[i].bit,
+ 0,
+ &priv->lock);
+ if (IS_ERR(priv->fixed[idx]))
+ return PTR_ERR(priv->fixed[idx]);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *of_clk_ep93xx_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct ep93xx_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < EP93XX_CLK_UART1)
+ return priv->fixed[idx];
+
+ if (idx <= EP93XX_CLK_I2S_LRCLK)
+ return &priv->reg[idx - EP93XX_CLK_UART1].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
+ */
+static unsigned long calc_pll_rate(u64 rate, u32 config_word)
+{
+ rate *= ((config_word >> 11) & GENMASK(4, 0)) + 1; /* X1FBD */
+ rate *= ((config_word >> 5) & GENMASK(5, 0)) + 1; /* X2FBD */
+ do_div(rate, (config_word & GENMASK(4, 0)) + 1); /* X2IPD */
+ rate >>= (config_word >> 16) & GENMASK(1, 0); /* PS */
+
+ return rate;
+}
+
+static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
+{
+ const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ const char pclk_divisors[] = { 1, 2, 4, 8 };
+ struct clk_parent_data xtali = { .index = 0 };
+ unsigned int clk_f_div, clk_h_div, clk_p_div;
+ unsigned long clk_pll1_rate, clk_pll2_rate;
+ struct device *dev = priv->dev;
+ struct clk_hw *hw, *pll1;
+ u32 value;
+
+ /* Determine the bootloader configured pll1 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET1, &value);
+
+ if (value & EP93XX_SYSCON_CLKSET1_NBYP1)
+ clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll1_rate = EP93XX_EXT_CLK_RATE;
+
+ pll1 = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll1", &xtali,
+ 0, clk_pll1_rate);
+ if (IS_ERR(pll1))
+ return PTR_ERR(pll1);
+
+ priv->fixed[EP93XX_CLK_PLL1] = pll1;
+
+ /* Initialize the pll1 derived clocks */
+ clk_f_div = fclk_divisors[(value >> 25) & GENMASK(2, 0)];
+ clk_h_div = hclk_divisors[(value >> 20) & GENMASK(2, 0)];
+ clk_p_div = pclk_divisors[(value >> 18) & GENMASK(1, 0)];
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "fclk", pll1, 0, 1, clk_f_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_FCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "hclk", pll1, 0, 1, clk_h_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_HCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "pclk", hw, 0, 1, clk_p_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PCLK] = hw;
+
+ /* Determine the bootloader configured pll2 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
+ clk_pll2_rate = EP93XX_EXT_CLK_RATE;
+ else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
+ clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll2_rate = 0;
+
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll2", &xtali,
+ 0, clk_pll2_rate);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PLL2] = hw;
+
+ return 0;
+}
+
+static int ep93xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct clk_parent_data xtali = { .index = 0 };
+ struct clk_parent_data ddiv_pdata[3] = { };
+ unsigned int clk_spi_div, clk_usb_div;
+ struct clk_parent_data pdata = {};
+ struct device *dev = &adev->dev;
+ struct ep93xx_clk_priv *priv;
+ struct ep93xx_clk *clk;
+ struct clk_hw *hw;
+ unsigned int idx;
+ int ret;
+ u32 value;
+
+ priv = devm_kzalloc(dev, struct_size(priv, reg, 10), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->aux_dev = rdev;
+ priv->map = rdev->map;
+ priv->base = rdev->base;
+
+ ret = ep93xx_plls_init(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ clk_usb_div = (value >> 28 & GENMASK(3, 0)) + 1;
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "usb_clk",
+ priv->fixed[EP93XX_CLK_PLL2], 0, 1,
+ clk_usb_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ret = ep93xx_uart_clock_init(priv);
+ if (ret)
+ return ret;
+
+ ret = ep93xx_dma_clock_init(priv);
+ if (ret)
+ return ret;
+
+ clk_spi_div = id->driver_data;
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "ep93xx-spi.0",
+ 0, /* XTALI external clock */
+ 0, 1, clk_spi_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_SPI] = hw;
+
+ /* PWM clock */
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "pwm_clk", 0, /* XTALI external clock */
+ 0, 1, 1);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PWM] = hw;
+
+ /* USB clock */
+ pdata.hw = priv->fixed[EP93XX_CLK_USB];
+ hw = devm_clk_hw_register_gate_parent_data(priv->dev, "ohci-platform", &pdata,
+ 0, priv->base + EP93XX_SYSCON_PWRCNT,
+ EP93XX_SYSCON_PWRCNT_USH_EN, 0,
+ &priv->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ddiv_pdata[0].index = 0; /* XTALI external clock */
+ ddiv_pdata[1].hw = priv->fixed[EP93XX_CLK_PLL1];
+ ddiv_pdata[2].hw = priv->fixed[EP93XX_CLK_PLL2];
+
+ /* touchscreen/ADC clock */
+ idx = EP93XX_CLK_ADC - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-adc", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
+ 1,
+ ep93xx_adc_divisors,
+ ARRAY_SIZE(ep93xx_adc_divisors));
+
+ /* keypad clock */
+ idx = EP93XX_CLK_KEYPAD - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-keypad", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
+ 1,
+ ep93xx_adc_divisors,
+ ARRAY_SIZE(ep93xx_adc_divisors));
+
+ /*
+ * On reset, PDIV and VDIV are set to zero. PDIV of zero means the clock
+ * is disabled, and VDIV must not be zero, so set both the video and I2S
+ * dividers to their minimum values.
+ * ENA - Enable CLK divider.
+ * PDIV - 00 - Disable clock
+ * VDIV - at least 2
+ */
+
+ /* Check and enable video clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_VIDCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_VIDCLKDIV, value);
+
+ /* Check and enable i2s clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_I2SCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+
+ /*
+ * Override the SAI_MSTR_CLK_CFG from the I2S block and use the
+ * I2SClkDiv Register settings. LRCLK transitions on the falling SCLK
+ * edge.
+ */
+ value |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_I2SCLKDIV, value);
+
+ /* video clk */
+ idx = EP93XX_CLK_VIDEO - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "ep93xx-fb",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_VIDCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+
+ /* i2s clk */
+ idx = EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "mclk",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+
+ /* i2s sclk */
+ idx = EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "sclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 16, /* EP93XX_I2SCLKDIV_SDIV_SHIFT */
+ 1, /* EP93XX_I2SCLKDIV_SDIV_WIDTH */
+ ep93xx_sclk_divisors,
+ ARRAY_SIZE(ep93xx_sclk_divisors));
+
+ /* i2s lrclk */
+ idx = EP93XX_CLK_I2S_LRCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "lrclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 17, /* EP93XX_I2SCLKDIV_LRDIV32_SHIFT */
+ 2, /* EP93XX_I2SCLKDIV_LRDIV32_WIDTH */
+ ep93xx_lrclk_divisors,
+ ARRAY_SIZE(ep93xx_lrclk_divisors));
+
+ /* The IrDA clock follows the same pattern, but the original clock driver had no init code for it */
+ return devm_of_clk_add_hw_provider(priv->dev, of_clk_ep93xx_get, priv);
+}
+
+static const struct auxiliary_device_id ep93xx_clk_ids[] = {
+ { .name = "soc_ep93xx.clk-ep93xx", .driver_data = 2, },
+ { .name = "soc_ep93xx.clk-ep93xx.e2", .driver_data = 1, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_clk_ids);
+
+static struct auxiliary_driver ep93xx_clk_driver = {
+ .probe = ep93xx_clk_probe,
+ .id_table = ep93xx_clk_ids,
+};
+module_auxiliary_driver(ep93xx_clk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Clock control for Cirrus EP93xx chips");
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 8876a1d24c56..330ae1c58800 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -79,6 +79,15 @@
#include "ni_stc.h"
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
+};
+
/* AT specific setup */
static const struct ni_board_struct ni_boards[] = {
{
diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c
index 980f309d6de7..3acb449d293c 100644
--- a/drivers/comedi/drivers/ni_mio_common.c
+++ b/drivers/comedi/drivers/ni_mio_common.c
@@ -166,15 +166,6 @@ static const struct comedi_lrange range_ni_M_ai_628x = {
}
};
-static const struct comedi_lrange range_ni_E_ao_ext = {
- 4, {
- BIP_RANGE(10),
- UNI_RANGE(10),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1)
- }
-};
-
static const struct comedi_lrange *const ni_range_lkup[] = {
[ai_gain_16] = &range_ni_E_ai,
[ai_gain_8] = &range_ni_E_ai_limited,
diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c
index 0b055321023d..f63c390314e1 100644
--- a/drivers/comedi/drivers/ni_pcimio.c
+++ b/drivers/comedi/drivers/ni_pcimio.c
@@ -102,6 +102,15 @@
#define PCIDMA
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
+};
+
/*
* These are not all the possible ao ranges for 628x boards.
* They can do OFFSET +- REFERENCE where OFFSET can be
diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
index d55521b5bdcb..892a66b2cea6 100644
--- a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
+++ b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
@@ -140,6 +140,11 @@ int main(void)
{
FILE *fp = fopen("ni_values.py", "w");
+ if (fp == NULL) {
+ fprintf(stderr, "Could not open file!\n");
+ return -1;
+ }
+
/* write route register values */
fprintf(fp, "ni_route_values = {\n");
for (int i = 0; ni_all_route_values[i]; ++i)
diff --git a/drivers/comedi/drivers/ni_stc.h b/drivers/comedi/drivers/ni_stc.h
index fbc0b753a0f5..7837e4683c6d 100644
--- a/drivers/comedi/drivers/ni_stc.h
+++ b/drivers/comedi/drivers/ni_stc.h
@@ -1137,6 +1137,4 @@ struct ni_private {
u8 rgout0_usage;
};
-static const struct comedi_lrange range_ni_E_ao_ext;
-
#endif /* _COMEDI_NI_STC_H */
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index afc94d0062b1..3ee75e1a78cd 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -454,7 +454,6 @@ out_unlock:
static const struct file_operations counter_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = counter_chrdev_read,
.poll = counter_chrdev_poll,
.unlocked_ioctl = counter_chrdev_ioctl,
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index bb83867d9fec..ef1621d40f05 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -9,13 +9,12 @@
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"
-#include "core.h"
struct dsmas_entry {
struct range dpa_range;
u8 handle;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
-
+ struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
@@ -163,7 +162,7 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
dslbis->data_type);
- cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);
return 0;
}
@@ -220,7 +219,7 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
+ cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root,
&dent->coord[ACCESS_COORDINATE_CPU],
@@ -241,8 +240,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
- for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
dpa_perf->coord[i] = dent->coord[i];
+ dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
+ }
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
@@ -546,19 +547,37 @@ void cxl_coordinates_combine(struct access_coordinate *out,
MODULE_IMPORT_NS(CXL);
-void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+static void cxl_bandwidth_add(struct access_coordinate *coord,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ coord[i].read_bandwidth = c1[i].read_bandwidth +
+ c2[i].read_bandwidth;
+ coord[i].write_bandwidth = c1[i].write_bandwidth +
+ c2[i].write_bandwidth;
+ }
+}
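The walk below reduces to two array operations: cxl_coordinates_combine(), which per the comments further down takes the bandwidth minimum of two access_coordinate arrays, and cxl_bandwidth_add() above, which sums them. A toy, self-contained sketch of how that min-then-sum composition behaves for two endpoints behind one shared switch uplink, with entirely made-up bandwidth numbers:

#include <stdio.h>

/* Simplified stand-in for struct access_coordinate: bandwidth only. */
struct bw { unsigned int read, write; };

static struct bw bw_min(struct bw a, struct bw b)
{
	return (struct bw){ a.read < b.read ? a.read : b.read,
			    a.write < b.write ? a.write : b.write };
}

static struct bw bw_add(struct bw a, struct bw b)
{
	return (struct bw){ a.read + b.read, a.write + b.write };
}

int main(void)
{
	/* Two endpoints, each limited by min(CDAT, own link), behind one switch. */
	struct bw ep0 = bw_min((struct bw){ 8000, 8000 }, (struct bw){ 4000, 4000 });
	struct bw ep1 = bw_min((struct bw){ 6000, 6000 }, (struct bw){ 4000, 4000 });
	struct bw below_switch = bw_add(ep0, ep1);		/* 8000 MB/s summed */
	struct bw uplink = { 6000, 6000 };
	struct bw region = bw_min(below_switch, uplink);	/* capped by shared uplink */

	printf("region: %u MB/s read, %u MB/s write\n", region.read, region.write);
	return 0;
}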
+
+static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
+ struct resource *dpa_res)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct range dpa = {
- .start = cxled->dpa_res->start,
- .end = cxled->dpa_res->end,
+ .start = dpa_res->start,
+ .end = dpa_res->end,
};
+
+ return range_contains(&perf->dpa_range, &dpa);
+}
+
+static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dpa_perf *perf;
- switch (cxlr->mode) {
+ switch (mode) {
case CXL_DECODER_RAM:
perf = &mds->ram_perf;
break;
@@ -566,12 +585,473 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
perf = &mds->pmem_perf;
break;
default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!dpa_perf_contains(perf, cxled->dpa_res))
+ return ERR_PTR(-EINVAL);
+
+ return perf;
+}
+
+/*
+ * Transient context holding the in-progress bandwidth calculation while
+ * walking the port hierarchy to deal with shared upstream links.
+ */
+struct cxl_perf_ctx {
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct cxl_port *port;
+};
+
+/**
+ * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
+ * @cxlr: CXL region for the bandwidth calculation
+ * @cxled: endpoint decoder to start on
+ * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
+ * indexed by the upstream device with data of 'struct cxl_perf_ctx'.
+ * @gp_is_root: (output) bool of whether the grandparent is cxl root.
+ *
+ * Return: 0 for success or -errno
+ *
+ * Collects the aggregated endpoint bandwidth and stores it in an xarray
+ * indexed by the upstream device of the switch or the RP device. Each
+ * endpoint's bandwidth is the minimum of the DSLBIS bandwidth from the
+ * endpoint CDAT, the endpoint upstream link bandwidth, and the SSLBIS
+ * bandwidth from the switch CDAT for the switch upstream port to the
+ * downstream port associated with the endpoint. If the device is directly
+ * connected to an RP, then no SSLBIS is involved.
+ */
+static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct xarray *usp_xa,
+ bool *gp_is_root)
+{
+ struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
+ struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
+ struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
+ struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct cxl_perf_ctx *perf_ctx;
+ struct cxl_dpa_perf *perf;
+ unsigned long index;
+ void *ptr;
+ int rc;
+
+ if (cxlds->rcd)
+ return -ENODEV;
+
+ perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ if (IS_ERR(perf))
+ return PTR_ERR(perf);
+
+ gp_port = to_cxl_port(parent_port->dev.parent);
+ *gp_is_root = is_cxl_root(gp_port);
+
+ /*
+ * If the grandparent is cxl root, then index is the root port,
+ * otherwise it's the parent switch upstream device.
+ */
+ if (*gp_is_root)
+ index = (unsigned long)endpoint->parent_dport->dport_dev;
+ else
+ index = (unsigned long)parent_port->uport_dev;
+
+ perf_ctx = xa_load(usp_xa, index);
+ if (!perf_ctx) {
+ struct cxl_perf_ctx *c __free(kfree) =
+ kzalloc(sizeof(*perf_ctx), GFP_KERNEL);
+
+ if (!c)
+ return -ENOMEM;
+ ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return xa_err(ptr);
+ perf_ctx = no_free_ptr(c);
+ perf_ctx->port = parent_port;
+ }
+
+ /* Direct upstream link from EP bandwidth */
+ rc = cxl_pci_get_bandwidth(pdev, pci_coord);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
+ * DSLBIS.
+ */
+ cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);
+
+ /*
+ * If grandparent port is root, then there's no switch involved and
+ * the endpoint is connected to a root port.
+ */
+ if (!*gp_is_root) {
+ /*
+ * Retrieve the switch SSLBIS for switch downstream port
+ * associated with the endpoint bandwidth.
+ */
+ rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
+ if (rc)
+ return rc;
+
+ /*
+ * Min of the earlier coordinates with the switch SSLBIS
+ * bandwidth
+ */
+ cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
+ }
+
+ /*
+ * Aggregate the computed bandwidth with the current aggregated bandwidth
+ * of the endpoints with the same switch upstream device or RP.
+ */
+ cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);
+
+ return 0;
+}
+
+static void free_perf_xa(struct xarray *xa)
+{
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!xa)
return;
+
+ xa_for_each(xa, index, ctx)
+ kfree(ctx);
+ xa_destroy(xa);
+ kfree(xa);
+}
+DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))
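DEFINE_FREE() and the __free() annotations used below come from the kernel's scope-based cleanup helpers (linux/cleanup.h), built on the compiler's cleanup attribute: the allocation is released automatically when the variable leaves scope unless ownership is handed off via no_free_ptr(). A rough user-space analogue, assuming only GCC/Clang's __attribute__((cleanup)):

#include <stdio.h>
#include <stdlib.h>

/*
 * Rough analogue of __free(kfree): run a hook when the variable goes out of
 * scope, unless ownership was transferred (pointer set to NULL) beforehand.
 */
static void free_ptr(void *p)
{
	void **slot = p;

	if (*slot) {
		puts("auto-freeing");
		free(*slot);
	}
}
#define cleanup_free __attribute__((cleanup(free_ptr)))

int main(void)
{
	cleanup_free char *tmp = malloc(32);

	if (!tmp)
		return 1;

	/* Hand off ownership, similar in spirit to no_free_ptr(). */
	char *owned = tmp;

	tmp = NULL;
	free(owned);
	return 0;
}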
+
+/**
+ * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
+ * @cxlr: The region being operated on
+ * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
+ * cxl_perf_ctx'
+ * @gp_is_root: (output) bool of whether the grandparent is cxl root.
+ *
+ * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port,
+ * or ERR_PTR(-errno)
+ *
+ * Iterate through the xarray. Take the minimum of the calculated downstream
+ * bandwidth, the upstream link bandwidth, and the SSLBIS bandwidth of the
+ * upstream switch, if one exists. Sum the resulting bandwidth under the
+ * switch upstream device or an RP device. The function can be called
+ * iteratively when multiple switch levels are present.
+ */
+static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
+ struct xarray *input_xa,
+ bool *gp_is_root)
+{
+ struct xarray *res_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*res_xa), GFP_KERNEL);
+ struct access_coordinate coords[ACCESS_COORDINATE_MAX];
+ struct cxl_perf_ctx *ctx, *us_ctx;
+ unsigned long index, us_index;
+ int dev_count = 0;
+ int gp_count = 0;
+ void *ptr;
+ int rc;
+
+ if (!res_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(res_xa);
+
+ xa_for_each(input_xa, index, ctx) {
+ struct device *dev = (struct device *)index;
+ struct cxl_port *port = ctx->port;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
+ struct cxl_dport *dport = port->parent_dport;
+ bool is_root = false;
+
+ dev_count++;
+ if (is_cxl_root(gp_port)) {
+ is_root = true;
+ gp_count++;
+ }
+
+ /*
+ * If the grandparent is cxl root, then index is the root port,
+ * otherwise it's the parent switch upstream device.
+ */
+ if (is_root)
+ us_index = (unsigned long)port->parent_dport->dport_dev;
+ else
+ us_index = (unsigned long)parent_port->uport_dev;
+
+ us_ctx = xa_load(res_xa, us_index);
+ if (!us_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ us_ctx = no_free_ptr(n);
+ us_ctx->port = parent_port;
+ }
+
+ /*
+ * If the device isn't an upstream PCIe port, there's something
+ * wrong with the topology.
+ */
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-EINVAL);
+
+ /* Retrieve the upstream link bandwidth */
+ rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
+ if (rc)
+ return ERR_PTR(-ENXIO);
+
+ /*
+ * Take the min of downstream bandwidth and the upstream link
+ * bandwidth.
+ */
+ cxl_coordinates_combine(coords, coords, ctx->coord);
+
+ /*
+ * Take the min of the calculated bandwidth and the upstream
+ * switch SSLBIS bandwidth if there's a parent switch
+ */
+ if (!is_root)
+ cxl_coordinates_combine(coords, coords, dport->coord);
+
+ /*
+ * Aggregate the calculated bandwidth common to an upstream
+ * switch.
+ */
+ cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
}
+ /* Asymmetric topology detected. */
+ if (gp_count) {
+ if (gp_count != dev_count) {
+ dev_dbg(&cxlr->dev,
+ "Asymmetric hierarchy detected, bandwidth not updated\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ *gp_is_root = true;
+ }
+
+ return no_free_ptr(res_xa);
+}
+
+/**
+ * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
+ * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
+ * below each root port device.
+ *
+ * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
+ */
+static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
+{
+ struct xarray *hb_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*hb_xa), GFP_KERNEL);
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!hb_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(hb_xa);
+
+ xa_for_each(xa, index, ctx) {
+ struct cxl_port *port = ctx->port;
+ unsigned long hb_index = (unsigned long)port->uport_dev;
+ struct cxl_perf_ctx *hb_ctx;
+ void *ptr;
+
+ hb_ctx = xa_load(hb_xa, hb_index);
+ if (!hb_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ hb_ctx = no_free_ptr(n);
+ hb_ctx->port = port;
+ }
+
+ cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
+ }
+
+ return no_free_ptr(hb_xa);
+}
+
+/**
+ * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
+ * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
+ * below each host bridge.
+ *
+ * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
+ */
+static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
+{
+ struct xarray *mw_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*mw_xa), GFP_KERNEL);
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!mw_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(mw_xa);
+
+ xa_for_each(xa, index, ctx) {
+ struct cxl_port *port = ctx->port;
+ struct cxl_port *parent_port;
+ struct cxl_perf_ctx *mw_ctx;
+ struct cxl_dport *dport;
+ unsigned long mw_index;
+ void *ptr;
+
+ parent_port = to_cxl_port(port->dev.parent);
+ mw_index = (unsigned long)parent_port->uport_dev;
+
+ mw_ctx = xa_load(mw_xa, mw_index);
+ if (!mw_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ mw_ctx = no_free_ptr(n);
+ }
+
+ dport = port->parent_dport;
+ cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
+ cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
+ }
+
+ return no_free_ptr(mw_xa);
+}
+
+/**
+ * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
+ * @cxlr: The region being operated on
+ * @input_xa: xarray holding cxl_perf_ctx with the calculated bandwidth per ACPI0017 instance
+ */
+static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
+ struct xarray *input_xa)
+{
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ memset(coord, 0, sizeof(coord));
+ xa_for_each(input_xa, index, ctx)
+ cxl_bandwidth_add(coord, coord, ctx->coord);
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
+ }
+}
+
+/**
+ * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
+ * the region
+ * @cxlr: the cxl region to recalculate
+ *
+ * The function walks the topology from the bottom up and calculates the
+ * bandwidth. It starts at the endpoints, processes the switch level if any,
+ * then the root port level, then the host bridge level, and finally
+ * aggregates at the region.
+ */
+void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
+{
+ struct xarray *working_xa;
+ int root_count = 0;
+ bool is_root;
+ int rc;
+
+ lockdep_assert_held(&cxl_dpa_rwsem);
+
+ struct xarray *usp_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*usp_xa), GFP_KERNEL);
+
+ if (!usp_xa)
+ return;
+
+ xa_init(usp_xa);
+
+ /* Collect bandwidth data from all the endpoints. */
+ for (int i = 0; i < cxlr->params.nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];
+
+ is_root = false;
+ rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
+ if (rc)
+ return;
+ root_count += is_root;
+ }
+
+ /* Detect asymmetric hierarchy with some direct attached endpoints. */
+ if (root_count && root_count != cxlr->params.nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "Asymmetric hierarchy detected, bandwidth not updated\n");
+ return;
+ }
+
+ /*
+ * Walk up one or more switches to deal with the bandwidth of the
+ * switches if they exist. Endpoints directly attached to RPs skip
+ * over this part.
+ */
+ if (!root_count) {
+ do {
+ working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
+ &is_root);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+ } while (!is_root);
+ }
+
+ /* Handle the bandwidth at the root port of the hierarchy */
+ working_xa = cxl_rp_gather_bandwidth(usp_xa);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+
+ /* Handle the bandwidth at the host bridge of the hierarchy */
+ working_xa = cxl_hb_gather_bandwidth(usp_xa);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+
+ /*
+ * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
+ * update the region bandwidth with the final calculated values.
+ */
+ cxl_region_update_bandwidth(cxlr, usp_xa);
+}
+
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_dpa_perf *perf;
+
lockdep_assert_held(&cxl_dpa_rwsem);
- if (!range_contains(&perf->dpa_range, &dpa))
+ perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ if (IS_ERR(perf))
return;
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 72a506c9dbd0..0c62b4069ba0 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -103,9 +103,11 @@ enum cxl_poison_trace_type {
};
long cxl_pci_get_latency(struct pci_dev *pdev);
-
+int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
enum access_coordinate_class access);
bool cxl_need_node_perf_attrs_update(int nid);
+int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
+ struct access_coordinate *c);
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index e5cdeafdf76e..946f8e44455f 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -225,7 +225,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
/**
* cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
- * @mds: The driver data for the operation
+ * @cxl_mbox: CXL mailbox context
* @mbox_cmd: initialized command to execute
*
* Context: Any context.
@@ -241,19 +241,19 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
*/
-int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
+int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd)
{
size_t out_size, min_out;
int rc;
- if (mbox_cmd->size_in > mds->payload_size ||
- mbox_cmd->size_out > mds->payload_size)
+ if (mbox_cmd->size_in > cxl_mbox->payload_size ||
+ mbox_cmd->size_out > cxl_mbox->payload_size)
return -E2BIG;
out_size = mbox_cmd->size_out;
min_out = mbox_cmd->min_out;
- rc = mds->mbox_send(mds, mbox_cmd);
+ rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
/*
* EIO is reserved for a payload size mismatch and mbox_send()
* may not return this error.
@@ -353,6 +353,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
struct cxl_memdev_state *mds, u16 opcode,
size_t in_size, size_t out_size, u64 in_payload)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
*mbox = (struct cxl_mbox_cmd) {
.opcode = opcode,
.size_in = in_size,
@@ -374,7 +375,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
/* Prepare to handle a full payload for variable sized output */
if (out_size == CXL_VARIABLE_PAYLOAD)
- mbox->size_out = mds->payload_size;
+ mbox->size_out = cxl_mbox->payload_size;
else
mbox->size_out = out_size;
@@ -398,6 +399,8 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
if (send_cmd->raw.rsvd)
return -EINVAL;
@@ -406,7 +409,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > mds->payload_size)
+ if (send_cmd->out.size > cxl_mbox->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -494,6 +497,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
struct cxl_memdev_state *mds,
const struct cxl_send_command *send_cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mem_command mem_cmd;
int rc;
@@ -505,7 +509,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > mds->payload_size)
+ if (send_cmd->in.size > cxl_mbox->payload_size)
return -EINVAL;
/* Sanitize and construct a cxl_mem_command */
@@ -542,7 +546,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
/*
- * otherwise, return max(n_commands, total commands) cxl_command_info
+ * otherwise, return min(n_commands, total commands) cxl_command_info
* structures.
*/
cxl_for_each_cmd(cmd) {
@@ -591,6 +595,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
u64 out_payload, s32 *size_out,
u32 *retval)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct device *dev = mds->cxlds.dev;
int rc;
@@ -601,7 +606,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
cxl_mem_opcode_to_name(mbox_cmd->opcode),
mbox_cmd->opcode, mbox_cmd->size_in);
- rc = mds->mbox_send(mds, mbox_cmd);
+ rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
if (rc)
goto out;
@@ -659,11 +664,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
u32 *size, u8 *out)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
u32 remaining = *size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, mds->payload_size);
+ u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_get_log log;
int rc;
@@ -682,7 +688,7 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
.payload_out = out,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* The output payload length that indicates the number
@@ -752,22 +758,23 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_supported_logs *ret;
struct cxl_mbox_cmd mbox_cmd;
int rc;
- ret = kvmalloc(mds->payload_size, GFP_KERNEL);
+ ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.payload_out = ret,
/* At least the record number field must be valid */
.min_out = 2,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -910,6 +917,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
enum cxl_event_log_type log,
struct cxl_get_event_payload *get_pl)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_clear_event_payload *payload;
u16 total = le16_to_cpu(get_pl->record_count);
u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
@@ -920,8 +928,8 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
int i;
/* Payload size may limit the max handles */
- if (pl_size > mds->payload_size) {
- max_handles = (mds->payload_size - sizeof(*payload)) /
+ if (pl_size > cxl_mbox->payload_size) {
+ max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
sizeof(__le16);
pl_size = struct_size(payload, handles, max_handles);
}
@@ -955,7 +963,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
if (i == max_handles) {
payload->nr_recs = i;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto free_pl;
i = 0;
@@ -966,7 +974,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
if (i) {
payload->nr_recs = i;
mbox_cmd.size_in = struct_size(payload, handles, i);
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto free_pl;
}
@@ -979,6 +987,7 @@ free_pl:
static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
enum cxl_event_log_type type)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
@@ -995,11 +1004,11 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.min_out = struct_size(payload, records, 0),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc) {
dev_err_ratelimited(dev,
"Event log '%d': Failed to query event records : %d",
@@ -1070,6 +1079,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_partition_info pi;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -1079,7 +1089,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
.size_out = sizeof(pi),
.payload_out = &pi,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
return rc;
@@ -1106,6 +1116,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
*/
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
struct cxl_mbox_cmd mbox_cmd;
@@ -1120,7 +1131,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
.size_out = sizeof(id),
.payload_out = &id,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -1148,6 +1159,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
int rc;
u32 sec_out = 0;
struct cxl_get_security_output {
@@ -1159,14 +1171,13 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
.size_out = sizeof(out),
};
struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
- struct cxl_dev_state *cxlds = &mds->cxlds;
if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
return -EINVAL;
- rc = cxl_internal_send_cmd(mds, &sec_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
if (rc < 0) {
- dev_err(cxlds->dev, "Failed to get security state : %d", rc);
+ dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
return rc;
}
@@ -1183,9 +1194,9 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
sec_out & CXL_PMEM_SEC_STATE_LOCKED)
return -EINVAL;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
- dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
+ dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
return rc;
}
@@ -1214,7 +1225,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */
- device_lock(&cxlmd->dev);
+ guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
down_read(&cxl_region_rwsem);
/*
@@ -1226,7 +1237,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
else
rc = -EBUSY;
up_read(&cxl_region_rwsem);
- device_unlock(&cxlmd->dev);
return rc;
}
@@ -1300,6 +1310,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_set_timestamp_in pi;
int rc;
@@ -1311,7 +1322,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds)
.payload_in = &pi,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* Command is optional. Devices may have another way of providing
* a timestamp, or may return all 0s in timestamp fields.
@@ -1328,6 +1339,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
int nr_records = 0;
@@ -1346,12 +1358,12 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
.opcode = CXL_MBOX_OP_GET_POISON,
.size_in = sizeof(pi),
.payload_in = &pi,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.payload_out = po,
.min_out = struct_size(po, record, 0),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
break;
@@ -1382,7 +1394,9 @@ static void free_poison_buf(void *buf)
/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
- mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+ mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!mds->poison.list_out)
return -ENOMEM;
@@ -1408,6 +1422,19 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
+{
+ if (!cxl_mbox || !host)
+ return -EINVAL;
+
+ cxl_mbox->host = host;
+ mutex_init(&cxl_mbox->mbox_mutex);
+ rcuwait_init(&cxl_mbox->mbox_wait);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);
+
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
struct cxl_memdev_state *mds;
@@ -1418,7 +1445,6 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- mutex_init(&mds->mbox_mutex);
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
mds->cxlds.reg_map.host = dev;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 0277726afd04..84fefb76dafa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -58,7 +58,7 @@ static ssize_t payload_max_show(struct device *dev,
if (!mds)
return sysfs_emit(buf, "\n");
- return sysfs_emit(buf, "%zu\n", mds->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -124,15 +124,16 @@ static ssize_t security_state_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
unsigned long state = mds->security.state;
int rc = 0;
/* sync with latest submission state */
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_active)
rc = sysfs_emit(buf, "sanitize\n");
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
if (rc)
return rc;
@@ -277,7 +278,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -307,13 +308,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.size_in = sizeof(inject),
.payload_in = &inject,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(mds->cxlds.dev,
+ dev_warn_once(cxl_mbox->host,
"poison inject dpa:%#llx region: %s\n", dpa,
dev_name(&cxlr->dev));
@@ -332,7 +333,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -371,13 +372,13 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.payload_in = &clear,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(mds->cxlds.dev,
+ dev_warn_once(cxl_mbox->host,
"poison clear dpa:%#llx region: %s\n", dpa,
dev_name(&cxlr->dev));
@@ -714,6 +715,7 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
*/
static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_fw_info info;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -724,7 +726,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
.payload_out = &info,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -748,6 +750,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
*/
static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_activate_fw activate;
struct cxl_mbox_cmd mbox_cmd;
@@ -764,7 +767,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
activate.action = CXL_FW_ACTIVATE_OFFLINE;
activate.slot = slot;
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
/**
@@ -779,6 +782,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
*/
static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_transfer_fw *transfer;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -798,7 +802,7 @@ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
kfree(transfer);
return rc;
}
@@ -829,12 +833,13 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_mbox_transfer_fw *transfer;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
if (!size)
return FW_UPLOAD_ERR_INVALID_SIZE;
mds->fw.oneshot = struct_size(transfer, data, size) <
- mds->payload_size;
+ cxl_mbox->payload_size;
if (cxl_mem_get_fw_info(mds))
return FW_UPLOAD_ERR_HW_ERROR;
@@ -854,6 +859,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct cxl_mbox_transfer_fw *transfer;
struct cxl_mbox_cmd mbox_cmd;
@@ -877,7 +883,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
* sizeof(*transfer) is 128. These constraints imply that @cur_size
* will always be 128b aligned.
*/
- cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
+ cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
remaining = size - cur_size;
size_in = struct_size(transfer, data, cur_size);
@@ -921,7 +927,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
.poll_count = 30,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
rc = FW_UPLOAD_ERR_RW_ERROR;
goto out_free;
@@ -1059,16 +1065,17 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
static void sanitize_teardown_notifier(void *data)
{
struct cxl_memdev_state *mds = data;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct kernfs_node *state;
/*
* Prevent new irq triggered invocations of the workqueue and
* flush inflight invocations.
*/
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
state = mds->security.sanitize_node;
mds->security.sanitize_node = NULL;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
cancel_delayed_work_sync(&mds->security.poll_dwork);
sysfs_put(state);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 51132a575b27..5b46bc46aaa9 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -211,37 +211,6 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
-static int wait_for_valid(struct pci_dev *pdev, int d)
-{
- u32 val;
- int rc;
-
- /*
- * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
- * and Size Low registers are valid. Must be set within 1 second of
- * deassertion of reset to CXL device. Likely it is already set by the
- * time this runs, but otherwise give a 1.5 second timeout in case of
- * clock skew.
- */
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- msleep(1500);
-
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- return -ETIMEDOUT;
-}
-
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
@@ -322,11 +291,13 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, int d,
+int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
int hdm_count, rc, i, ranges = 0;
+ int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
if (!d) {
@@ -353,12 +324,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
if (!hdm_count || hdm_count > 2)
return -EINVAL;
- rc = wait_for_valid(pdev, d);
- if (rc) {
- dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
- return rc;
- }
-
/*
* The current DVSEC values are moot if the memory capability is
* disabled, and they will remain moot after the HDM Decoder
@@ -376,6 +341,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
u64 base, size;
u32 temp;
+ rc = cxl_dvsec_mem_range_valid(cxlds, i);
+ if (rc)
+ return rc;
+
rc = pci_read_config_dword(
pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
if (rc)
@@ -390,10 +359,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
if (!size) {
- info->dvsec_range[i] = (struct range) {
- .start = 0,
- .end = CXL_RESOURCE_NONE,
- };
continue;
}
@@ -411,12 +376,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
- info->dvsec_range[i] = (struct range) {
+ info->dvsec_range[ranges++] = (struct range) {
.start = base,
.end = base + size - 1
};
-
- ranges++;
}
info->ranges = ranges;
@@ -463,7 +426,15 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENODEV;
}
- for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
+ if (!info->mem_enabled) {
+ rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enable_mem(&port->dev, cxlds);
+ }
+
+ for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
@@ -477,7 +448,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed && info->mem_enabled) {
+ if (!allowed) {
dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
return -ENXIO;
}
@@ -491,14 +462,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
* match. If at least one DVSEC range is enabled and allowed, skip HDM
* Decoder Capability Enable.
*/
- if (info->mem_enabled)
- return 0;
-
- rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
- if (rc)
- return rc;
-
- return devm_cxl_enable_mem(&port->dev, cxlds);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
@@ -772,22 +736,20 @@ static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
- struct cxl_rcrb_info *ri = &dport->rcrb;
- void __iomem *dport_aer = NULL;
resource_size_t aer_phys;
struct device *host;
+ u16 aer_cap;
- if (dport->rch && ri->aer_cap) {
+ aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
+ if (aer_cap) {
host = dport->reg_map.host;
- aer_phys = ri->aer_cap + ri->base;
- dport_aer = devm_cxl_iomap_block(host, aer_phys,
- sizeof(struct aer_capability_regs));
+ aer_phys = aer_cap + dport->rcrb.base;
+ dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys,
+ sizeof(struct aer_capability_regs));
}
-
- dport->regs.dport_aer = dport_aer;
}
-static void cxl_dport_map_regs(struct cxl_dport *dport)
+static void cxl_dport_map_ras(struct cxl_dport *dport)
{
struct cxl_register_map *map = &dport->reg_map;
struct device *dev = dport->dport_dev;
@@ -797,22 +759,16 @@ static void cxl_dport_map_regs(struct cxl_dport *dport)
else if (cxl_map_component_regs(map, &dport->regs.component,
BIT(CXL_CM_CAP_CAP_ID_RAS)))
dev_dbg(dev, "Failed to map RAS capability.\n");
-
- if (dport->rch)
- cxl_dport_map_rch_aer(dport);
}
static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
void __iomem *aer_base = dport->regs.dport_aer;
- struct pci_host_bridge *bridge;
u32 aer_cmd_mask, aer_cmd;
if (!aer_base)
return;
- bridge = to_pci_host_bridge(dport->dport_dev);
-
/*
* Disable RCH root port command interrupts.
* CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
@@ -821,34 +777,35 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
* the root cmd register's interrupts is required. But, PCI spec
* shows these are disabled by default on reset.
*/
- if (bridge->native_aer) {
- aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
- PCI_ERR_ROOT_CMD_NONFATAL_EN |
- PCI_ERR_ROOT_CMD_FATAL_EN);
- aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
- aer_cmd &= ~aer_cmd_mask;
- writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
- }
+ aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+ aer_cmd &= ~aer_cmd_mask;
+ writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
}
-void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+/**
+ * cxl_dport_init_ras_reporting - Set up CXL RAS reporting on this dport
+ * @dport: the cxl_dport that needs to be initialized
+ * @host: host device for devm operations
+ */
+void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
{
- struct device *dport_dev = dport->dport_dev;
+ dport->reg_map.host = host;
+ cxl_dport_map_ras(dport);
if (dport->rch) {
- struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport_dev);
-
- if (host_bridge->native_aer)
- dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
- }
+ struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev);
- dport->reg_map.host = host;
- cxl_dport_map_regs(dport);
+ if (!host_bridge->native_aer)
+ return;
- if (dport->rch)
+ cxl_dport_map_rch_aer(dport);
cxl_disable_rch_root_ints(dport);
+ }
}
-EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, CXL);
static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
struct cxl_dport *dport)
@@ -915,15 +872,13 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
struct aer_capability_regs aer_regs;
struct cxl_dport *dport;
- struct cxl_port *port;
int severity;
- port = cxl_pci_find_port(pdev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
if (!port)
return;
- put_device(&port->dev);
-
if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
return;
@@ -1076,3 +1031,26 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
__cxl_endpoint_decoder_reset_detected);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
+
+int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
+{
+ int speed, bw;
+ u16 lnksta;
+ u32 width;
+
+ speed = pcie_link_speed_mbps(pdev);
+ if (speed < 0)
+ return speed;
+ speed /= BITS_PER_BYTE;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ bw = speed * width;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].read_bandwidth = bw;
+ c[i].write_bandwidth = bw;
+ }
+
+ return 0;
+}
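For a sense of scale, a 16 GT/s (Gen4) link reported by pcie_link_speed_mbps() as roughly 16000 Mb/s per lane works out here to 2000 MB/s per lane, or 16000 MB/s for a x8 link; line-encoding and protocol overhead are not subtracted. A self-contained sketch of the same arithmetic with assumed link parameters:

#include <stdio.h>

/* Same arithmetic as cxl_pci_get_bandwidth(), with assumed link parameters
 * rather than values read from PCIe config space. */
int main(void)
{
	int speed_mbps = 16000;			/* hypothetical 16 GT/s (Gen4) link */
	unsigned int width = 8;			/* hypothetical x8 link */
	int bw = speed_mbps / 8 * width;	/* MB/s, encoding overhead ignored */

	printf("link bandwidth: %d MB/s\n", bw);
	return 0;
}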
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 1d5007e3795a..e666ec6a9085 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -3,7 +3,6 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
-#include <linux/einj-cxl.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -11,6 +10,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
+#include <cxl/einj.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
@@ -828,27 +828,20 @@ static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
&cxl_einj_inject_fops);
}
-static struct cxl_port *__devm_cxl_add_port(struct device *host,
- struct device *uport_dev,
- resource_size_t component_reg_phys,
- struct cxl_dport *parent_dport)
+static int cxl_port_add(struct cxl_port *port,
+ resource_size_t component_reg_phys,
+ struct cxl_dport *parent_dport)
{
- struct cxl_port *port;
- struct device *dev;
+ struct device *dev __free(put_device) = &port->dev;
int rc;
- port = cxl_port_alloc(uport_dev, parent_dport);
- if (IS_ERR(port))
- return port;
-
- dev = &port->dev;
- if (is_cxl_memdev(uport_dev)) {
- struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+ if (is_cxl_memdev(port->uport_dev)) {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
rc = dev_set_name(dev, "endpoint%d", port->id);
if (rc)
- goto err;
+ return rc;
/*
* The endpoint driver already enumerated the component and RAS
@@ -861,19 +854,41 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
} else if (parent_dport) {
rc = dev_set_name(dev, "port%d", port->id);
if (rc)
- goto err;
+ return rc;
rc = cxl_port_setup_regs(port, component_reg_phys);
if (rc)
- goto err;
- } else
+ return rc;
+ } else {
rc = dev_set_name(dev, "root%d", port->id);
- if (rc)
- goto err;
+ if (rc)
+ return rc;
+ }
rc = device_add(dev);
if (rc)
- goto err;
+ return rc;
+
+ /* Inhibit the put_device() cleanup now that device_add() has succeeded */
+ dev = NULL;
+ return 0;
+}
+
+static struct cxl_port *__devm_cxl_add_port(struct device *host,
+ struct device *uport_dev,
+ resource_size_t component_reg_phys,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *port;
+ int rc;
+
+ port = cxl_port_alloc(uport_dev, parent_dport);
+ if (IS_ERR(port))
+ return port;
+
+ rc = cxl_port_add(port, component_reg_phys, parent_dport);
+ if (rc)
+ return ERR_PTR(rc);
rc = devm_add_action_or_reset(host, unregister_port, port);
if (rc)
@@ -891,10 +906,6 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
return port;
-
-err:
- put_device(dev);
- return ERR_PTR(rc);
}
/**
@@ -941,7 +952,7 @@ struct cxl_root *devm_cxl_add_root(struct device *host,
port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(port))
- return (struct cxl_root *)port;
+ return ERR_CAST(port);
cxl_root = to_cxl_root(port);
cxl_root->ops = ops;
@@ -1258,18 +1269,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
static int add_ep(struct cxl_ep *new)
{
struct cxl_port *port = new->dport->port;
- int rc;
- device_lock(&port->dev);
- if (port->dead) {
- device_unlock(&port->dev);
+ guard(device)(&port->dev);
+ if (port->dead)
return -ENXIO;
- }
- rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
- GFP_KERNEL);
- device_unlock(&port->dev);
- return rc;
+ return xa_insert(&port->endpoints, (unsigned long)new->ep,
+ new, GFP_KERNEL);
}
/**
@@ -1393,14 +1399,14 @@ static void delete_endpoint(void *data)
struct cxl_port *endpoint = cxlmd->endpoint;
struct device *host = endpoint_host(endpoint);
- device_lock(host);
- if (host->driver && !endpoint->dead) {
- devm_release_action(host, cxl_unlink_parent_dport, endpoint);
- devm_release_action(host, cxl_unlink_uport, endpoint);
- devm_release_action(host, unregister_port, endpoint);
+ scoped_guard(device, host) {
+ if (host->driver && !endpoint->dead) {
+ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+ devm_release_action(host, cxl_unlink_uport, endpoint);
+ devm_release_action(host, unregister_port, endpoint);
+ }
+ cxlmd->endpoint = NULL;
}
- cxlmd->endpoint = NULL;
- device_unlock(host);
put_device(&endpoint->dev);
put_device(host);
}
@@ -1477,12 +1483,11 @@ static void cxl_detach_ep(void *data)
.cxlmd = cxlmd,
.depth = i,
};
- struct device *dev;
struct cxl_ep *ep;
bool died = false;
- dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
- port_has_memdev);
+ struct device *dev __free(put_device) =
+ bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev);
if (!dev)
continue;
port = to_cxl_port(dev);
@@ -1512,7 +1517,6 @@ static void cxl_detach_ep(void *data)
dev_name(&port->dev));
delete_switch_port(port);
}
- put_device(&port->dev);
device_unlock(&parent_port->dev);
}
}
@@ -1540,7 +1544,6 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
- struct cxl_port *port, *parent_port = NULL;
struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1556,50 +1559,52 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent, &parent_dport);
+ struct cxl_port *parent_port __free(put_cxl_port) =
+ find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- device_lock(&parent_port->dev);
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- port = ERR_PTR(-ENXIO);
- goto out;
- }
+	/*
+	 * Declare 'port' with __free() here so that its device reference
+	 * is dropped before the parent_port reference above is released.
+	 */
+ struct cxl_port *port __free(put_cxl_port) = NULL;
+ scoped_guard(device, &parent_port->dev) {
+ if (!parent_port->dev.driver) {
+ dev_warn(&cxlmd->dev,
+ "port %s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev));
+ return -ENXIO;
+ }
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- /* retry find to pick up the new dport information */
- if (!IS_ERR(port))
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ /* retry find to pick up the new dport information */
port = find_cxl_port_at(parent_port, dport_dev, &dport);
+ if (!port)
+ return -ENXIO;
+ }
}
-out:
- device_unlock(&parent_port->dev);
- if (IS_ERR(port))
- rc = PTR_ERR(port);
- else {
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
- rc = cxl_add_ep(dport, &cxlmd->dev);
- if (rc == -EBUSY) {
- /*
- * "can't" happen, but this error code means
- * something to the caller, so translate it.
- */
- rc = -ENXIO;
- }
- put_device(&port->dev);
+ dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
+ /*
+ * "can't" happen, but this error code means
+ * something to the caller, so translate it.
+ */
+ rc = -ENXIO;
}
- put_device(&parent_port->dev);
return rc;
}
@@ -1630,7 +1635,6 @@ retry:
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
struct cxl_dport *dport;
- struct cxl_port *port;
/*
* The terminal "grandparent" in PCI is NULL and @platform_bus
@@ -1649,7 +1653,8 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
@@ -1664,18 +1669,13 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EBUSY) {
- put_device(&port->dev);
+ if (rc && rc != -EBUSY)
return rc;
- }
/* Any more ports to add between this one and the root? */
- if (!dev_is_cxl_root_child(&port->dev)) {
- put_device(&port->dev);
+ if (!dev_is_cxl_root_child(&port->dev))
continue;
- }
- put_device(&port->dev);
return 0;
}
@@ -1983,7 +1983,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
struct cxl_port *port;
- int rc;
if (WARN_ON_ONCE(!cxld))
return -EINVAL;
@@ -1993,11 +1992,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
- device_lock(&port->dev);
- rc = cxl_decoder_add_locked(cxld, target_map);
- device_unlock(&port->dev);
-
- return rc;
+ guard(device)(&port->dev);
+ return cxl_decoder_add_locked(cxld, target_map);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
@@ -2241,6 +2237,26 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
+ struct access_coordinate *c)
+{
+ struct cxl_dport *dport = port->parent_dport;
+
+	/* Check that this port is connected to a switch DSP and not an RP */
+ if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent)))
+ return -ENODEV;
+
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].read_bandwidth = dport->coord[i].read_bandwidth;
+ c[i].write_bandwidth = dport->coord[i].write_bandwidth;
+ }
+
+ return 0;
+}
+
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
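
The locking conversions in this file rely on the scope-based helpers from <linux/cleanup.h>: guard(device)(dev) takes the device lock and drops it automatically when the enclosing scope ends, while scoped_guard(device, dev) { ... } confines the lock to an explicit block. That is what lets the early return statements above replace the old unlock-and-goto error paths. A generic sketch of the idiom (not code from this patch):

#include <linux/cleanup.h>
#include <linux/device.h>

static int locked_example(struct device *dev)
{
	guard(device)(dev);	/* device_lock(dev); device_unlock() runs on every return path */

	if (!dev->driver)
		return -ENXIO;	/* no explicit unlock needed */

	return 0;
}
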
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 21ad5f242875..e701e4b04032 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1983,6 +1983,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
* then the region is already committed.
*/
p->state = CXL_CONFIG_COMMIT;
+ cxl_region_shared_upstream_bandwidth_update(cxlr);
return 0;
}
@@ -2004,6 +2005,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
return rc;
p->state = CXL_CONFIG_ACTIVE;
+ cxl_region_shared_upstream_bandwidth_update(cxlr);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -2313,8 +2315,6 @@ static void unregister_region(void *_cxlr)
struct cxl_region_params *p = &cxlr->params;
int i;
- unregister_memory_notifier(&cxlr->memory_notifier);
- unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
device_del(&cxlr->dev);
/*
@@ -2391,18 +2391,6 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
return true;
}
-static int cxl_region_nid(struct cxl_region *cxlr)
-{
- struct cxl_region_params *p = &cxlr->params;
- struct resource *res;
-
- guard(rwsem_read)(&cxl_region_rwsem);
- res = p->res;
- if (!res)
- return NUMA_NO_NODE;
- return phys_to_target_node(res->start);
-}
-
static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
unsigned long action, void *arg)
{
@@ -2415,7 +2403,11 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
return NOTIFY_DONE;
- region_nid = cxl_region_nid(cxlr);
+ /*
+ * No need to hold cxl_region_rwsem; region parameters are stable
+ * within the cxl_region driver.
+ */
+ region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_DONE;
@@ -2434,7 +2426,11 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int *adist = data;
int region_nid;
- region_nid = cxl_region_nid(cxlr);
+ /*
+ * No need to hold cxl_region_rwsem; region parameters are stable
+ * within the cxl_region driver.
+ */
+ region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_OK;
@@ -2484,14 +2480,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
if (rc)
goto err;
- cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
- cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
- register_memory_notifier(&cxlr->memory_notifier);
-
- cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
- cxlr->adist_notifier.priority = 100;
- register_mt_adistance_algorithm(&cxlr->adist_notifier);
-
rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
if (rc)
return ERR_PTR(rc);
@@ -3094,11 +3082,11 @@ static void cxlr_release_nvdimm(void *_cxlr)
struct cxl_region *cxlr = _cxlr;
struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
- device_lock(&cxl_nvb->dev);
- if (cxlr->cxlr_pmem)
- devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
- cxlr->cxlr_pmem);
- device_unlock(&cxl_nvb->dev);
+ scoped_guard(device, &cxl_nvb->dev) {
+ if (cxlr->cxlr_pmem)
+ devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
+ cxlr->cxlr_pmem);
+ }
cxlr->cxl_nvb = NULL;
put_device(&cxl_nvb->dev);
}
@@ -3134,13 +3122,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev));
- device_lock(&cxl_nvb->dev);
- if (cxl_nvb->dev.driver)
- rc = devm_add_action_or_reset(&cxl_nvb->dev,
- cxlr_pmem_unregister, cxlr_pmem);
- else
- rc = -ENXIO;
- device_unlock(&cxl_nvb->dev);
+ scoped_guard(device, &cxl_nvb->dev) {
+ if (cxl_nvb->dev.driver)
+ rc = devm_add_action_or_reset(&cxl_nvb->dev,
+ cxlr_pmem_unregister,
+ cxlr_pmem);
+ else
+ rc = -ENXIO;
+ }
if (rc)
goto err_bridge;
@@ -3386,6 +3375,14 @@ static int is_system_ram(struct resource *res, void *arg)
return 1;
}
+static void shutdown_notifiers(void *_cxlr)
+{
+ struct cxl_region *cxlr = _cxlr;
+
+ unregister_memory_notifier(&cxlr->memory_notifier);
+ unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
+}
+
static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
@@ -3421,6 +3418,18 @@ out:
if (rc)
return rc;
+ cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
+ register_memory_notifier(&cxlr->memory_notifier);
+
+ cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
+ cxlr->adist_notifier.priority = 100;
+ register_mt_adistance_algorithm(&cxlr->adist_notifier);
+
+ rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr);
+ if (rc)
+ return rc;
+
switch (cxlr->mode) {
case CXL_DECODER_PMEM:
return devm_cxl_add_pmem_region(cxlr);
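
Moving the notifier registration into cxl_region_probe() and tearing it down from shutdown_notifiers() ties the notifier lifetime to a devm action rather than to unregister_region(). The general devm_add_action_or_reset() pattern used here, sketched with hypothetical names:

#include <linux/device.h>
#include <linux/memory.h>

static void example_shutdown(void *data)
{
	struct notifier_block *nb = data;

	unregister_memory_notifier(nb);
}

static int example_register(struct device *dev, struct notifier_block *nb)
{
	int rc;

	rc = register_memory_notifier(nb);
	if (rc)
		return rc;

	/* runs example_shutdown() on driver unbind, or immediately if this call fails */
	return devm_add_action_or_reset(dev, example_shutdown, nb);
}
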
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 9afb407d438f..0d8b810a51f0 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -744,6 +744,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port);
void put_cxl_root(struct cxl_root *cxl_root);
DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -762,9 +763,10 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
#ifdef CONFIG_PCIEAER_CXL
void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host);
#else
-static inline void cxl_setup_parent_dport(struct device *host,
- struct cxl_dport *dport) { }
+static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
+ struct device *host) { }
#endif
struct cxl_decoder *to_cxl_decoder(struct device *dev);
@@ -809,7 +811,7 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, int dvsec,
+int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -889,6 +891,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled);
+void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr);
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
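
The DEFINE_FREE(put_cxl_port, ...) declaration above is what enables the __free() conversions in port.c, mem.c and pci.c: a pointer declared with __free(put_cxl_port) drops its port device reference automatically when it goes out of scope, including on early returns. A usage sketch modelled on the call sites in this series:

static int find_parent_example(struct cxl_memdev *cxlmd)
{
	struct cxl_dport *dport;
	struct cxl_port *parent_port __free(put_cxl_port) =
		cxl_mem_find_port(cxlmd, &dport);

	if (!parent_port)
		return -ENXIO;

	/* use parent_port; put_device(&parent_port->dev) runs automatically on return */
	return 0;
}
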
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index afb53d058d62..2a25d1957ddb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -3,11 +3,12 @@
#ifndef __CXL_MEM_H__
#define __CXL_MEM_H__
#include <uapi/linux/cxl_mem.h>
+#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
-#include <linux/rcuwait.h>
-#include <linux/cxl-event.h>
#include <linux/node.h>
+#include <cxl/event.h>
+#include <cxl/mailbox.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -397,11 +398,13 @@ enum cxl_devtype {
* struct cxl_dpa_perf - DPA performance property entry
* @dpa_range: range for DPA address
* @coord: QoS performance data (i.e. latency, bandwidth)
+ * @cdat_coord: raw QoS performance data from CDAT
* @qos_class: QoS Class cookies
*/
struct cxl_dpa_perf {
struct range dpa_range;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
@@ -424,6 +427,7 @@ struct cxl_dpa_perf {
* @ram_res: Active Volatile memory capacity configuration
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
+ * @cxl_mbox: CXL mailbox context
*/
struct cxl_dev_state {
struct device *dev;
@@ -438,8 +442,14 @@ struct cxl_dev_state {
struct resource ram_res;
u64 serial;
enum cxl_devtype type;
+ struct cxl_mailbox cxl_mbox;
};
+static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
+{
+ return dev_get_drvdata(cxl_mbox->host);
+}
+
/**
* struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
*
@@ -448,11 +458,8 @@ struct cxl_dev_state {
* the functionality related to that like Identify Memory Device and Get
* Partition Info
* @cxlds: Core driver state common across Type-2 and Type-3 devices
- * @payload_size: Size of space for payload
- * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
* (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
- * @mbox_mutex: Mutex to synchronize mailbox access.
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
@@ -470,17 +477,13 @@ struct cxl_dev_state {
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
- * @mbox_wait: RCU wait for mbox send completely
- * @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
struct cxl_memdev_state {
struct cxl_dev_state cxlds;
- size_t payload_size;
size_t lsa_size;
- struct mutex mbox_mutex; /* Protects device mailbox and firmware */
char firmware_version[0x10];
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
@@ -500,10 +503,6 @@ struct cxl_memdev_state {
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
-
- struct rcuwait mbox_wait;
- int (*mbox_send)(struct cxl_memdev_state *mds,
- struct cxl_mbox_cmd *cmd);
};
static inline struct cxl_memdev_state *
@@ -814,7 +813,7 @@ enum {
CXL_PMEM_SEC_PASS_USER,
};
-int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
+int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd);
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
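
With the mailbox state embedded in struct cxl_dev_state as cxl_mbox, callers derive the mailbox context from the device state instead of passing cxl_memdev_state around, and cxl_internal_send_cmd() now takes the mailbox directly. A sketch of the new calling convention, mirroring the security.c conversions later in this series:

static int freeze_example(struct cxl_memdev *cxlmd)
{
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
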
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 7de232eaeb17..a9fd5cd5a0d2 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -109,7 +109,6 @@ static int cxl_mem_probe(struct device *dev)
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *endpoint_parent;
- struct cxl_port *parent_port;
struct cxl_dport *dport;
struct dentry *dentry;
int rc;
@@ -146,7 +145,8 @@ static int cxl_mem_probe(struct device *dev)
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd, &dport);
+ struct cxl_port *parent_port __free(put_cxl_port) =
+ cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -166,22 +166,19 @@ static int cxl_mem_probe(struct device *dev)
else
endpoint_parent = &parent_port->dev;
- cxl_setup_parent_dport(dev, dport);
+ cxl_dport_init_ras_reporting(dport, dev);
- device_lock(endpoint_parent);
- if (!endpoint_parent->driver) {
- dev_err(dev, "CXL port topology %s not enabled\n",
- dev_name(endpoint_parent));
- rc = -ENXIO;
- goto unlock;
- }
+ scoped_guard(device, endpoint_parent) {
+ if (!endpoint_parent->driver) {
+ dev_err(dev, "CXL port topology %s not enabled\n",
+ dev_name(endpoint_parent));
+ return -ENXIO;
+ }
- rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
-unlock:
- device_unlock(endpoint_parent);
- put_device(&parent_port->dev);
- if (rc)
- return rc;
+ rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
+ if (rc)
+ return rc;
+ }
/*
* The kernel may be operating out of CXL memory on this device,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4be35dc22202..37164174b5fb 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
+#include <cxl/mailbox.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
@@ -124,6 +125,7 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
u16 opcode;
struct cxl_dev_id *dev_id = id;
struct cxl_dev_state *cxlds = dev_id->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
if (!cxl_mbox_background_complete(cxlds))
@@ -132,13 +134,13 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
if (opcode == CXL_MBOX_OP_SANITIZE) {
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
} else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
- rcuwait_wake_up(&mds->mbox_wait);
+ rcuwait_wake_up(&cxl_mbox->mbox_wait);
}
return IRQ_HANDLED;
@@ -152,8 +154,9 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
struct cxl_memdev_state *mds =
container_of(work, typeof(*mds), security.poll_dwork.work);
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (cxl_mbox_background_complete(cxlds)) {
mds->security.poll_tmo_secs = 0;
if (mds->security.sanitize_node)
@@ -167,12 +170,12 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
mds->security.poll_tmo_secs = min(15 * 60, timeout);
schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
}
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
}
/**
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @mds: The memory device driver data
+ * @cxl_mbox: CXL mailbox context
* @mbox_cmd: Command to send to the memory device.
*
* Context: Any context. Expects mbox_mutex to be held.
@@ -192,17 +195,18 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
* not need to coordinate with each other. The driver only uses the primary
* mailbox.
*/
-static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
+static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd)
{
- struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_dev_state *cxlds = mbox_to_cxlds(cxl_mbox);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
- lockdep_assert_held(&mds->mbox_mutex);
+ lockdep_assert_held(&cxl_mbox->mbox_mutex);
/*
* Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -315,10 +319,10 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
timeout = mbox_cmd->poll_interval_ms;
for (i = 0; i < mbox_cmd->poll_count; i++) {
- if (rcuwait_wait_event_timeout(&mds->mbox_wait,
- cxl_mbox_background_complete(cxlds),
- TASK_UNINTERRUPTIBLE,
- msecs_to_jiffies(timeout)) > 0)
+ if (rcuwait_wait_event_timeout(&cxl_mbox->mbox_wait,
+ cxl_mbox_background_complete(cxlds),
+ TASK_UNINTERRUPTIBLE,
+ msecs_to_jiffies(timeout)) > 0)
break;
}
@@ -360,7 +364,7 @@ success:
*/
size_t n;
- n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
+ n = min3(mbox_cmd->size_out, cxl_mbox->payload_size, out_len);
memcpy_fromio(mbox_cmd->payload_out, payload, n);
mbox_cmd->size_out = n;
} else {
@@ -370,14 +374,14 @@ success:
return 0;
}
-static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
+static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd)
{
int rc;
- mutex_lock_io(&mds->mbox_mutex);
- rc = __cxl_pci_mbox_send_cmd(mds, cmd);
- mutex_unlock(&mds->mbox_mutex);
+ mutex_lock_io(&cxl_mbox->mbox_mutex);
+ rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
return rc;
}
@@ -385,6 +389,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
struct device *dev = cxlds->dev;
unsigned long timeout;
@@ -417,8 +422,8 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
return -ETIMEDOUT;
}
- mds->mbox_send = cxl_pci_mbox_send;
- mds->payload_size =
+ cxl_mbox->mbox_send = cxl_pci_mbox_send;
+ cxl_mbox->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
/*
@@ -428,16 +433,15 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
* there's no point in going forward. If the size is too large, there's
 * no harm in soft limiting it.
*/
- mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
- if (mds->payload_size < 256) {
+ cxl_mbox->payload_size = min_t(size_t, cxl_mbox->payload_size, SZ_1M);
+ if (cxl_mbox->payload_size < 256) {
dev_err(dev, "Mailbox is too small (%zub)",
- mds->payload_size);
+ cxl_mbox->payload_size);
return -ENXIO;
}
- dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
+ dev_dbg(dev, "Mailbox payload sized %zu", cxl_mbox->payload_size);
- rcuwait_init(&mds->mbox_wait);
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
@@ -473,7 +477,6 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
struct cxl_register_map *map)
{
- struct cxl_port *port;
struct cxl_dport *dport;
resource_size_t component_reg_phys;
@@ -482,14 +485,12 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
.resource = CXL_RESOURCE_NONE,
};
- port = cxl_pci_find_port(pdev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
if (!port)
return -EPROBE_DEFER;
component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);
-
- put_device(&port->dev);
-
if (component_reg_phys == CXL_RESOURCE_NONE)
return -ENXIO;
@@ -578,9 +579,10 @@ static void free_event_buf(void *buf)
*/
static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_get_event_payload *buf;
- buf = kvmalloc(mds->payload_size, GFP_KERNEL);
+ buf = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mds->event.buf = buf;
@@ -653,6 +655,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
.payload_out = policy,
@@ -660,7 +663,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
};
int rc;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
dev_err(mds->cxlds.dev,
"Failed to get event interrupt policy : %d", rc);
@@ -671,6 +674,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -687,7 +691,7 @@ static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
.size_in = sizeof(*policy),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
rc);
@@ -786,6 +790,23 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return 0;
}
+static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ /*
+ * Fail the init if there's no mailbox. For a type3 this is out of spec.
+ */
+ if (!cxlds->reg_map.device_map.mbox.valid)
+ return -ENODEV;
+
+ rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
@@ -846,6 +867,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
+ rc = cxl_pci_type3_init_mailbox(cxlds);
+ if (rc)
+ return rc;
+
rc = cxl_await_media_ready(cxlds);
if (rc == 0)
cxlds->media_ready = true;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 4ef93da22335..a6538a5f5c9f 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -102,13 +102,15 @@ static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
if (sizeof(*cmd) > buf_len)
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size){
.config_size = mds->lsa_size,
.max_xfer =
- mds->payload_size - sizeof(struct cxl_mbox_set_lsa),
+ cxl_mbox->payload_size - sizeof(struct cxl_mbox_set_lsa),
};
return 0;
@@ -118,6 +120,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_lsa get_lsa;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -139,7 +142,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
.payload_out = cmd->out_buf,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
cmd->status = 0;
return rc;
@@ -149,6 +152,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_set_lsa *set_lsa;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -175,7 +179,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
.size_in = struct_size(set_lsa, data, cmd->in_length),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* Set "firmware" status (4-packed bytes at the end of the input
@@ -233,15 +237,13 @@ static int detach_nvdimm(struct device *dev, void *data)
if (!is_cxl_nvdimm(dev))
return 0;
- device_lock(dev);
- if (!dev->driver)
- goto out;
-
- cxl_nvd = to_cxl_nvdimm(dev);
- if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
- release = true;
-out:
- device_unlock(dev);
+ scoped_guard(device, dev) {
+ if (dev->driver) {
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
+ release = true;
+ }
+ }
if (release)
device_release_driver(dev);
return 0;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index d7d5d982ce69..861dde65768f 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
+ rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
if (rc < 0)
return rc;
diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c
index 21856a3f408e..452d1a9b9148 100644
--- a/drivers/cxl/security.c
+++ b/drivers/cxl/security.c
@@ -14,6 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long security_flags = 0;
struct cxl_get_security_output {
@@ -29,7 +30,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
.payload_out = &out,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return 0;
@@ -70,7 +71,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_set_pass set_pass;
@@ -87,7 +88,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
.payload_in = &set_pass,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -96,7 +97,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_disable_pass dis_pass;
struct cxl_mbox_cmd mbox_cmd;
@@ -112,7 +113,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
.payload_in = &dis_pass,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -131,12 +132,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
@@ -144,7 +145,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
u8 pass[NVDIMM_PASSPHRASE_LEN];
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -156,7 +157,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
.payload_in = pass,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -169,7 +170,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_pass_erase erase;
int rc;
@@ -185,7 +186,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
.payload_in = &erase,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 4ee337e78c23..995427afe077 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -17,14 +17,15 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_dma.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/platform_data/dma-ep93xx.h>
-
#include "dmaengine.h"
/* M2P registers */
@@ -104,6 +105,31 @@
#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32
+/*
+ * M2P channels.
+ *
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
+ */
+#define EP93XX_DMA_I2S1 0
+#define EP93XX_DMA_I2S2 1
+#define EP93XX_DMA_AAC1 2
+#define EP93XX_DMA_AAC2 3
+#define EP93XX_DMA_AAC3 4
+#define EP93XX_DMA_I2S3 5
+#define EP93XX_DMA_UART1 6
+#define EP93XX_DMA_UART2 7
+#define EP93XX_DMA_UART3 8
+#define EP93XX_DMA_IRDA 9
+/* M2M channels */
+#define EP93XX_DMA_SSP 10
+#define EP93XX_DMA_IDE 11
+
+enum ep93xx_dma_type {
+ M2P_DMA,
+ M2M_DMA,
+};
+
struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
@@ -129,11 +155,17 @@ struct ep93xx_dma_desc {
struct list_head node;
};
+struct ep93xx_dma_chan_cfg {
+ u8 port;
+ enum dma_transfer_direction dir;
+};
+
/**
* struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
* @chan: dmaengine API channel
* @edma: pointer to the engine device
* @regs: memory mapped registers
+ * @dma_cfg: channel number, direction
* @irq: interrupt number of the channel
* @clk: clock used by this channel
* @tasklet: channel specific tasklet used for callbacks
@@ -157,14 +189,12 @@ struct ep93xx_dma_desc {
* descriptor in the chain. When a descriptor is moved to the @active queue,
* the first and chained descriptors are flattened into a single list.
*
- * @chan.private holds pointer to &struct ep93xx_dma_data which contains
- * necessary channel configuration information. For memcpy channels this must
- * be %NULL.
*/
struct ep93xx_dma_chan {
struct dma_chan chan;
const struct ep93xx_dma_engine *edma;
void __iomem *regs;
+ struct ep93xx_dma_chan_cfg dma_cfg;
int irq;
struct clk *clk;
struct tasklet_struct tasklet;
@@ -216,6 +246,11 @@ struct ep93xx_dma_engine {
struct ep93xx_dma_chan channels[] __counted_by(num_channels);
};
+struct ep93xx_edma_data {
+ u32 id;
+ size_t num_channels;
+};
+
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
return &edmac->chan.dev->device;
@@ -226,6 +261,31 @@ static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
return container_of(chan, struct ep93xx_dma_chan, chan);
}
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+ if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
+ return true;
+
+ return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
+
+/*
+ * ep93xx_dma_chan_direction - returns the direction the channel can be used in
+ *
+ * This function can be used in filter functions to find out whether the
+ * channel supports a given DMA direction. Only M2P channels have such a
+ * limitation; for M2M channels the direction is configurable.
+ */
+static inline enum dma_transfer_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+ if (!ep93xx_dma_chan_is_m2p(chan))
+ return DMA_TRANS_NONE;
+
+ /* even channels are for TX, odd for RX */
+ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+}
+
/**
* ep93xx_dma_set_active - set new active descriptor chain
* @edmac: channel
@@ -318,10 +378,9 @@ static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_data *data = edmac->chan.private;
u32 control;
- writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+ writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);
control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
| M2P_CONTROL_ENABLE;
@@ -458,16 +517,15 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
- const struct ep93xx_dma_data *data = edmac->chan.private;
u32 control = 0;
- if (!data) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* This is memcpy channel, nothing to configure */
writel(control, edmac->regs + M2M_CONTROL);
return 0;
}
- switch (data->port) {
+ switch (edmac->dma_cfg.port) {
case EP93XX_DMA_SSP:
/*
* This was found via experimenting - anything less than 5
@@ -477,7 +535,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_MEM_TO_DEV) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -493,7 +551,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
 * from the EP93xx User's Guide and might not be correct.
*/
- if (data->direction == DMA_MEM_TO_DEV) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -548,7 +606,6 @@ static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_data *data = edmac->chan.private;
u32 control = readl(edmac->regs + M2M_CONTROL);
/*
@@ -574,7 +631,7 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_ENABLE;
writel(control, edmac->regs + M2M_CONTROL);
- if (!data) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/*
* For memcpy channels the software trigger must be asserted
* in order to start the memcpy operation.
@@ -636,7 +693,7 @@ static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
*/
if (ep93xx_dma_advance_active(edmac)) {
m2m_fill_desc(edmac);
- if (done && !edmac->chan.private) {
+ if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* Software trigger for memcpy channel */
control = readl(edmac->regs + M2M_CONTROL);
control |= M2M_CONTROL_START;
@@ -867,25 +924,22 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- struct ep93xx_dma_data *data = chan->private;
const char *name = dma_chan_name(chan);
int ret, i;
/* Sanity check the channel parameters */
if (!edmac->edma->m2m) {
- if (!data)
- return -EINVAL;
- if (data->port < EP93XX_DMA_I2S1 ||
- data->port > EP93XX_DMA_IRDA)
+ if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 ||
+ edmac->dma_cfg.port > EP93XX_DMA_IRDA)
return -EINVAL;
- if (data->direction != ep93xx_dma_chan_direction(chan))
+ if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
return -EINVAL;
} else {
- if (data) {
- switch (data->port) {
+ if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
+ switch (edmac->dma_cfg.port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (!is_slave_direction(data->direction))
+ if (!is_slave_direction(edmac->dma_cfg.dir))
return -EINVAL;
break;
default:
@@ -894,9 +948,6 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
}
}
- if (data && data->name)
- name = data->name;
-
ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
@@ -1315,36 +1366,53 @@ static void ep93xx_dma_issue_pending(struct dma_chan *chan)
ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
-static int __init ep93xx_dma_probe(struct platform_device *pdev)
+static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
{
- struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct ep93xx_edma_data *data;
+ struct device *dev = &pdev->dev;
struct ep93xx_dma_engine *edma;
struct dma_device *dma_dev;
- int ret, i;
+ char dma_clk_name[5];
+ int i;
- edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
+ data = device_get_match_data(dev);
+ if (!data)
+ return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));
+
+ edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
+ GFP_KERNEL);
if (!edma)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+ edma->m2m = data->id;
+ edma->num_channels = data->num_channels;
dma_dev = &edma->dma_dev;
- edma->m2m = platform_get_device_id(pdev)->driver_data;
- edma->num_channels = pdata->num_channels;
INIT_LIST_HEAD(&dma_dev->channels);
- for (i = 0; i < pdata->num_channels; i++) {
- const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+ for (i = 0; i < edma->num_channels; i++) {
struct ep93xx_dma_chan *edmac = &edma->channels[i];
edmac->chan.device = dma_dev;
- edmac->regs = cdata->base;
- edmac->irq = cdata->irq;
+ edmac->regs = devm_platform_ioremap_resource(pdev, i);
+ if (IS_ERR(edmac->regs))
+ return edmac->regs;
+
+ edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
+ if (edmac->irq < 0)
+ return ERR_PTR(edmac->irq);
+
edmac->edma = edma;
- edmac->clk = clk_get(NULL, cdata->name);
+ if (edma->m2m)
+ snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
+ else
+ snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
+
+ edmac->clk = devm_clk_get(dev, dma_clk_name);
if (IS_ERR(edmac->clk)) {
- dev_warn(&pdev->dev, "failed to get clock for %s\n",
- cdata->name);
- continue;
+ dev_err_probe(dev, PTR_ERR(edmac->clk),
+ "no %s clock found\n", dma_clk_name);
+ return ERR_CAST(edmac->clk);
}
spin_lock_init(&edmac->lock);
@@ -1357,6 +1425,90 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
&dma_dev->channels);
}
+ return edma;
+}
+
+static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_chan_cfg *cfg = filter_param;
+
+ if (cfg->dir != ep93xx_dma_chan_direction(chan))
+ return false;
+
+ echan->dma_cfg = *cfg;
+ return true;
+}
+
+static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = edma->dma_dev.cap_mask;
+ struct ep93xx_dma_chan_cfg dma_cfg;
+ u8 port = dma_spec->args[0];
+ u8 direction = dma_spec->args[1];
+
+ if (port > EP93XX_DMA_IRDA)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ dma_cfg.port = port;
+ dma_cfg.dir = direction;
+
+ return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
+}
+
+static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_chan_cfg *cfg = filter_param;
+
+ echan->dma_cfg = *cfg;
+
+ return true;
+}
+
+static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = edma->dma_dev.cap_mask;
+ struct ep93xx_dma_chan_cfg dma_cfg;
+ u8 port = dma_spec->args[0];
+ u8 direction = dma_spec->args[1];
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ switch (port) {
+ case EP93XX_DMA_SSP:
+ case EP93XX_DMA_IDE:
+ break;
+ default:
+ return NULL;
+ }
+
+ dma_cfg.port = port;
+ dma_cfg.dir = direction;
+
+ return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
+}
+
+static int ep93xx_dma_probe(struct platform_device *pdev)
+{
+ struct ep93xx_dma_engine *edma;
+ struct dma_device *dma_dev;
+ int ret;
+
+ edma = ep93xx_dma_of_probe(pdev);
+ if (IS_ERR(edma))
+ return PTR_ERR(edma);
+
+ dma_dev = &edma->dma_dev;
+
dma_cap_zero(dma_dev->cap_mask);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
@@ -1393,21 +1545,46 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
}
ret = dma_async_device_register(dma_dev);
- if (unlikely(ret)) {
- for (i = 0; i < edma->num_channels; i++) {
- struct ep93xx_dma_chan *edmac = &edma->channels[i];
- if (!IS_ERR_OR_NULL(edmac->clk))
- clk_put(edmac->clk);
- }
- kfree(edma);
+ if (ret)
+ return ret;
+
+ if (edma->m2m) {
+ ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
+ edma);
} else {
- dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
- edma->m2m ? "M" : "P");
+ ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
+ edma);
}
+ if (ret)
+ goto err_dma_unregister;
+
+ dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(dma_dev);
return ret;
}
+static const struct ep93xx_edma_data edma_m2p = {
+ .id = M2P_DMA,
+ .num_channels = 10,
+};
+
+static const struct ep93xx_edma_data edma_m2m = {
+ .id = M2M_DMA,
+ .num_channels = 2,
+};
+
+static const struct of_device_id ep93xx_dma_of_ids[] = {
+ { .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
+ { .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);
+
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
{ "ep93xx-dma-m2p", 0 },
{ "ep93xx-dma-m2m", 1 },
@@ -1417,15 +1594,13 @@ static const struct platform_device_id ep93xx_dma_driver_ids[] = {
static struct platform_driver ep93xx_dma_driver = {
.driver = {
.name = "ep93xx-dma",
+ .of_match_table = ep93xx_dma_of_ids,
},
.id_table = ep93xx_dma_driver_ids,
+ .probe = ep93xx_dma_probe,
};
-static int __init ep93xx_dma_module_init(void)
-{
- return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
-}
-subsys_initcall(ep93xx_dma_module_init);
+module_platform_driver(ep93xx_dma_driver);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
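
With the driver converted to device tree probing, consumers obtain channels through the standard dmaengine/OF lookup instead of a platform-data filter function: the two xlate callbacks above translate the DT cells (peripheral port and transfer direction) into the ep93xx_dma_chan_cfg stored on the channel. A hedged consumer-side sketch, assuming the consumer node carries an "rx" entry in dma-names:

#include <linux/dmaengine.h>

static int request_rx_example(struct device *dev, struct dma_chan **chan_out)
{
	struct dma_chan *chan;

	/* resolved through the of_xlate callbacks registered by this driver */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	*chan_out = chan;
	return 0;
}
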
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 3da94b382292..a6f6d467aacf 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -75,6 +75,17 @@ config EXTCON_INTEL_MRFLD
Say Y here to enable extcon support for charger detection / control
on the Intel Merrifield Basin Cove PMIC.
+config EXTCON_LC824206XA
+ tristate "LC824206XA extcon Support"
+ depends on I2C
+ depends on POWER_SUPPLY
+ help
	  Say Y here to enable support for the ON Semiconductor LC824206XA
	  microUSB port accessory detector and switch chip. The LC824206XA is
	  fully controlled using I2C and enables USB data, stereo and mono
	  audio, video, microphone and UART data to use a common connector
	  port.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index f779adb5e4c7..0d6d23faf748 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o
+obj-$(CONFIG_EXTCON_LC824206XA) += extcon-lc824206xa.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
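
The new driver added below is an extcon provider; consumers see the detected cable types through the core extcon API, either by polling the state or by registering a notifier. A generic consumer-side sketch (hypothetical helper, not part of this patch):

#include <linux/extcon.h>
#include <linux/notifier.h>

static int usb_host_example(struct extcon_dev *edev, struct notifier_block *nb)
{
	int state;

	/* 1 if a USB host cable (ID pin grounded) is currently reported, 0 otherwise */
	state = extcon_get_state(edev, EXTCON_USB_HOST);
	if (state < 0)
		return state;

	/* get notified about subsequent EXTCON_USB_HOST changes */
	return extcon_register_notifier(edev, EXTCON_USB_HOST, nb);
}
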
diff --git a/drivers/extcon/extcon-lc824206xa.c b/drivers/extcon/extcon-lc824206xa.c
new file mode 100644
index 000000000000..56938748aea8
--- /dev/null
+++ b/drivers/extcon/extcon-lc824206xa.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ON Semiconductor LC824206XA Micro USB Switch driver
+ *
+ * Copyright (c) 2024 Hans de Goede <hansg@kernel.org>
+ *
+ * ON Semiconductor has an "Advance Information" datasheet available
+ * (ENA2222-D.PDF), but no full datasheet. So there is no documentation
+ * available for the registers.
+ *
+ * This driver is based on the register info from the extcon-fsa9285.c driver,
+ * from the Lollipop Android sources for the Lenovo Yoga Tablet 2 (Pro)
+ * 830 / 1050 / 1380 models. Note that, despite the name, this is actually a driver
+ * for the LC824206XA, not the FSA9285. The Android sources can be downloaded
+ * from Lenovo's support page for these tablets, filename:
+ * yoga_tab_2_osc_android_to_lollipop_201505.rar.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+/*
+ * Register defines. As mentioned above, there is no datasheet with register
+ * info, so this may not be 100% accurate.
+ */
+#define REG00 0x00
+#define REG00_INIT_VALUE 0x01
+
+#define REG_STATUS 0x01
+#define STATUS_OVP BIT(0)
+#define STATUS_DATA_SHORT BIT(1)
+#define STATUS_VBUS_PRESENT BIT(2)
+#define STATUS_USB_ID GENMASK(7, 3)
+#define STATUS_USB_ID_GND 0x80
+#define STATUS_USB_ID_ACA 0xf0
+#define STATUS_USB_ID_FLOAT 0xf8
+
+/*
+ * This controls the DP/DM muxes + other switches,
+ * meaning of individual bits is unknown.
+ */
+#define REG_SWITCH_CONTROL 0x02
+#define SWITCH_STEREO_MIC 0xc8
+#define SWITCH_USB_HOST 0xec
+#define SWITCH_DISCONNECTED 0xf8
+#define SWITCH_USB_DEVICE 0xfc
+
+/* 5 bits? ADC 0x10 GND, 0x1a-0x1f ACA, 0x1f float */
+#define REG_ID_PIN_ADC_VALUE 0x03
+
+/* Masks for all 3 interrupt registers */
+#define INTR_ID_PIN_CHANGE BIT(0)
+#define INTR_VBUS_CHANGE BIT(1)
+/* Both of these get set after a continuous mode ADC conversion */
+#define INTR_ID_PIN_ADC_INT1 BIT(2)
+#define INTR_ID_PIN_ADC_INT2 BIT(3)
+/* Charger type available in reg 0x09 */
+#define INTR_CHARGER_DET_DONE BIT(4)
+#define INTR_OVP BIT(5)
+
+/* There are 7 interrupt sources, bit 6 use is unknown (OCP?) */
+#define INTR_ALL GENMASK(6, 0)
+
+/* Unmask interrupts this driver cares about */
+#define INTR_MASK \
+ (INTR_ALL & ~(INTR_ID_PIN_CHANGE | INTR_VBUS_CHANGE | INTR_CHARGER_DET_DONE))
+
+/* Active (event happened and not cleared yet) interrupts */
+#define REG_INTR_STATUS 0x04
+
+/*
+ * Writing a 1 to a bit here clears it in INTR_STATUS. These bits do NOT
+ * auto-reset to 0, so these must be set to 0 manually after clearing.
+ */
+#define REG_INTR_CLEAR 0x05
+
+/* Interrupts whose bit is set to 1 here will not raise the HW IRQ */
+#define REG_INTR_MASK 0x06
+
+/* ID pin ADC control, meaning of individual bits is unknown */
+#define REG_ID_PIN_ADC_CTRL 0x07
+#define ID_PIN_ADC_AUTO 0x40
+#define ID_PIN_ADC_CONTINUOUS 0x44
+
+#define REG_CHARGER_DET 0x08
+#define CHARGER_DET_ON BIT(0)
+#define CHARGER_DET_CDP_ON BIT(1)
+#define CHARGER_DET_CDP_VAL BIT(2)
+
+#define REG_CHARGER_TYPE 0x09
+#define CHARGER_TYPE_UNKNOWN 0x00
+#define CHARGER_TYPE_DCP 0x01
+#define CHARGER_TYPE_SDP_OR_CDP 0x04
+#define CHARGER_TYPE_QC 0x06
+
+#define REG10 0x10
+#define REG10_INIT_VALUE 0x00
+
+struct lc824206xa_data {
+ struct work_struct work;
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+ struct power_supply *psy;
+ struct regulator *vbus_boost;
+ unsigned int usb_type;
+ unsigned int cable;
+ unsigned int previous_cable;
+ u8 switch_control;
+ u8 previous_switch_control;
+ bool vbus_ok;
+ bool vbus_boost_enabled;
+ bool fastcharge_over_miclr;
+};
+
+static const unsigned int lc824206xa_cables[] = {
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_NONE,
+};
+
+/* read/write reg helpers to add error logging to smbus byte functions */
+static int lc824206xa_read_reg(struct lc824206xa_data *data, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, reg);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error %d reading reg 0x%02x\n", ret, reg);
+
+ return ret;
+}
+
+static int lc824206xa_write_reg(struct lc824206xa_data *data, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, reg, val);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error %d writing reg 0x%02x\n", ret, reg);
+
+ return ret;
+}
+
+static int lc824206xa_get_id(struct lc824206xa_data *data)
+{
+ int ret;
+
+ ret = lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ ret = lc824206xa_read_reg(data, REG_ID_PIN_ADC_VALUE);
+
+ lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO);
+
+ return ret;
+}
+
+static void lc824206xa_set_vbus_boost(struct lc824206xa_data *data, bool enable)
+{
+ int ret;
+
+ if (data->vbus_boost_enabled == enable)
+ return;
+
+ if (enable)
+ ret = regulator_enable(data->vbus_boost);
+ else
+ ret = regulator_disable(data->vbus_boost);
+
+ if (ret == 0)
+ data->vbus_boost_enabled = enable;
+ else
+ dev_err(&data->client->dev, "Error updating Vbus boost regulator: %d\n", ret);
+}
+
+static void lc824206xa_charger_detect(struct lc824206xa_data *data)
+{
+ int charger_type, ret;
+
+ charger_type = lc824206xa_read_reg(data, REG_CHARGER_TYPE);
+ if (charger_type < 0)
+ return;
+
+ dev_dbg(&data->client->dev, "charger type 0x%02x\n", charger_type);
+
+ switch (charger_type) {
+ case CHARGER_TYPE_UNKNOWN:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ /* Treat as SDP */
+ data->cable = EXTCON_CHG_USB_SDP;
+ data->switch_control = SWITCH_USB_DEVICE;
+ break;
+ case CHARGER_TYPE_SDP_OR_CDP:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ data->cable = EXTCON_CHG_USB_SDP;
+ data->switch_control = SWITCH_USB_DEVICE;
+
+ ret = lc824206xa_write_reg(data, REG_CHARGER_DET,
+ CHARGER_DET_CDP_ON | CHARGER_DET_ON);
+ if (ret < 0)
+ break;
+
+ msleep(100);
+ ret = lc824206xa_read_reg(data, REG_CHARGER_DET);
+ if (ret >= 0 && (ret & CHARGER_DET_CDP_VAL)) {
+ data->usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ data->cable = EXTCON_CHG_USB_CDP;
+ }
+
+ lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON);
+ break;
+ case CHARGER_TYPE_DCP:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ data->cable = EXTCON_CHG_USB_DCP;
+ if (data->fastcharge_over_miclr)
+ data->switch_control = SWITCH_STEREO_MIC;
+ else
+ data->switch_control = SWITCH_DISCONNECTED;
+ break;
+ case CHARGER_TYPE_QC:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ data->cable = EXTCON_CHG_USB_DCP;
+ data->switch_control = SWITCH_DISCONNECTED;
+ break;
+ default:
+ dev_warn(&data->client->dev, "Unknown charger type: 0x%02x\n", charger_type);
+ break;
+ }
+}
+
+static void lc824206xa_work(struct work_struct *work)
+{
+ struct lc824206xa_data *data = container_of(work, struct lc824206xa_data, work);
+ bool vbus_boost_enable = false;
+ int status, id;
+
+ status = lc824206xa_read_reg(data, REG_STATUS);
+ if (status < 0)
+ return;
+
+ dev_dbg(&data->client->dev, "status 0x%02x\n", status);
+
+ data->vbus_ok = (status & (STATUS_VBUS_PRESENT | STATUS_OVP)) == STATUS_VBUS_PRESENT;
+
+ /* Read id pin ADC if necessary */
+ switch (status & STATUS_USB_ID) {
+ case STATUS_USB_ID_GND:
+ case STATUS_USB_ID_FLOAT:
+ break;
+ default:
+ /* Happens when the connector is inserted slowly, log at dbg level */
+ dev_dbg(&data->client->dev, "Unknown status 0x%02x\n", status);
+ fallthrough;
+ case STATUS_USB_ID_ACA:
+ id = lc824206xa_get_id(data);
+ dev_dbg(&data->client->dev, "RID 0x%02x\n", id);
+ switch (id) {
+ case 0x10:
+ status = STATUS_USB_ID_GND;
+ break;
+ case 0x18 ... 0x1e:
+ status = STATUS_USB_ID_ACA;
+ break;
+ case 0x1f:
+ status = STATUS_USB_ID_FLOAT;
+ break;
+ default:
+ dev_warn(&data->client->dev, "Unknown RID 0x%02x\n", id);
+ return;
+ }
+ }
+
+ /* Check for out of spec OTG charging hubs, treat as ACA */
+ if ((status & STATUS_USB_ID) == STATUS_USB_ID_GND &&
+ data->vbus_ok && !data->vbus_boost_enabled) {
+ dev_info(&data->client->dev, "Out of spec USB host adapter with Vbus present, not enabling 5V output\n");
+ status = STATUS_USB_ID_ACA;
+ }
+
+ switch (status & STATUS_USB_ID) {
+ case STATUS_USB_ID_ACA:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_ACA;
+ data->cable = EXTCON_CHG_USB_ACA;
+ data->switch_control = SWITCH_USB_HOST;
+ break;
+ case STATUS_USB_ID_GND:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ data->cable = EXTCON_USB_HOST;
+ data->switch_control = SWITCH_USB_HOST;
+ vbus_boost_enable = true;
+ break;
+ case STATUS_USB_ID_FLOAT:
+ /* When fast charging with Vbus > 5V, OVP will be set */
+ if (data->fastcharge_over_miclr &&
+ data->switch_control == SWITCH_STEREO_MIC &&
+ (status & STATUS_OVP)) {
+ data->cable = EXTCON_CHG_USB_FAST;
+ break;
+ }
+
+ if (data->vbus_ok) {
+ lc824206xa_charger_detect(data);
+ } else {
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ data->cable = EXTCON_NONE;
+ data->switch_control = SWITCH_DISCONNECTED;
+ }
+ break;
+ }
+
+ lc824206xa_set_vbus_boost(data, vbus_boost_enable);
+
+ if (data->switch_control != data->previous_switch_control) {
+ lc824206xa_write_reg(data, REG_SWITCH_CONTROL, data->switch_control);
+ data->previous_switch_control = data->switch_control;
+ }
+
+ if (data->cable != data->previous_cable) {
+ extcon_set_state_sync(data->edev, data->previous_cable, false);
+ extcon_set_state_sync(data->edev, data->cable, true);
+ data->previous_cable = data->cable;
+ }
+
+ power_supply_changed(data->psy);
+}
+
+static irqreturn_t lc824206xa_irq(int irq, void *_data)
+{
+ struct lc824206xa_data *data = _data;
+ int intr_status;
+
+ intr_status = lc824206xa_read_reg(data, REG_INTR_STATUS);
+ if (intr_status < 0)
+ intr_status = INTR_ALL; /* Should never happen, clear all */
+
+ dev_dbg(&data->client->dev, "interrupt 0x%02x\n", intr_status);
+
+ lc824206xa_write_reg(data, REG_INTR_CLEAR, intr_status);
+ lc824206xa_write_reg(data, REG_INTR_CLEAR, 0);
+
+ schedule_work(&data->work);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Newer charger (power_supply) drivers expect the max input current to be
+ * provided by a parent power_supply device for the charger chip.
+ */
+static int lc824206xa_psy_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lc824206xa_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = data->vbus_ok && !data->vbus_boost_enabled;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = data->usb_type;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ switch (data->usb_type) {
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ case POWER_SUPPLY_USB_TYPE_ACA:
+ val->intval = 2000000;
+ break;
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ val->intval = 1500000;
+ break;
+ default:
+ val->intval = 500000;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_property lc824206xa_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static const struct power_supply_desc lc824206xa_psy_desc = {
+ .name = "lc824206xa-charger-detect",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
+ .properties = lc824206xa_psy_props,
+ .num_properties = ARRAY_SIZE(lc824206xa_psy_props),
+ .get_property = lc824206xa_psy_get_prop,
+};
+
+static int lc824206xa_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = { };
+ struct device *dev = &client->dev;
+ struct lc824206xa_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ INIT_WORK(&data->work, lc824206xa_work);
+ data->cable = EXTCON_NONE;
+ data->previous_cable = EXTCON_NONE;
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ /* Some designs use a custom fast-charge protocol over the mic L/R inputs */
+ data->fastcharge_over_miclr =
+ device_property_read_bool(dev, "onnn,enable-miclr-for-dcp");
+
+ data->vbus_boost = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(data->vbus_boost))
+ return dev_err_probe(dev, PTR_ERR(data->vbus_boost),
+ "getting regulator\n");
+
+ /* Init */
+ ret = lc824206xa_write_reg(data, REG00, REG00_INIT_VALUE);
+ ret |= lc824206xa_write_reg(data, REG10, REG10_INIT_VALUE);
+ msleep(100);
+ ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, INTR_ALL);
+ ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, 0);
+ ret |= lc824206xa_write_reg(data, REG_INTR_MASK, INTR_MASK);
+ ret |= lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO);
+ ret |= lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON);
+ if (ret)
+ return -EIO;
+
+ /* Initialize extcon device */
+ data->edev = devm_extcon_dev_allocate(dev, lc824206xa_cables);
+ if (IS_ERR(data->edev))
+ return PTR_ERR(data->edev);
+
+ ret = devm_extcon_dev_register(dev, data->edev);
+ if (ret)
+ return dev_err_probe(dev, ret, "registering extcon device\n");
+
+ psy_cfg.drv_data = data;
+ data->psy = devm_power_supply_register(dev, &lc824206xa_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power supply\n");
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, lc824206xa_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+
+ /* Sync initial state */
+ schedule_work(&data->work);
+ return 0;
+}
+
+static const struct i2c_device_id lc824206xa_i2c_ids[] = {
+ { "lc824206xa" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lc824206xa_i2c_ids);
+
+static struct i2c_driver lc824206xa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = lc824206xa_probe,
+ .id_table = lc824206xa_i2c_ids,
+};
+
+module_i2c_driver(lc824206xa_driver);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("LC824206XA Micro USB Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 518eaa073b2b..b360dca2c69e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1911,7 +1911,6 @@ static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 69c15135371c..88c5c4ff4bb6 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2886,7 +2886,6 @@ static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
static const struct file_operations fops_reset_counts = {
.owner = THIS_MODULE,
.open = simple_open,
- .llseek = no_llseek,
.write = reset_all_on_write,
};
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 130d13e9cd6b..9e89a6a763da 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -950,7 +950,6 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -960,7 +959,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -977,7 +975,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1001,7 +998,6 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1025,7 +1021,6 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 97bafb5f7038..0c17bdd388e1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -309,7 +309,6 @@ static const struct file_operations efi_capsule_fops = {
.open = efi_capsule_open,
.write = efi_capsule_write,
.release = efi_capsule_release,
- .llseek = no_llseek,
};
static struct miscdevice efi_capsule_misc = {
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 7d2cdd9e2227..b69e68ef3f02 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -434,12 +434,17 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
- /* Fatal errors call __ghes_panic() before AER handler prints this */
- if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
- (gdata->error_severity & CPER_SEV_FATAL)) {
+ /*
+ * Print all valid AER info. Record may be from BERT (boot-time) or GHES (run-time).
+ *
+ * Fatal errors call __ghes_panic() before AER handler prints this.
+ */
+ if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
struct aer_capability_regs *aer;
aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n",
+ pfx, aer->cor_status, aer->cor_mask);
printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
pfx, aer->uncor_status, aer->uncor_mask);
printk("%saer_uncor_severity: 0x%08x\n",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index fdf07dd6f459..70490bf2697b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -349,7 +349,7 @@ static void __init efi_debugfs_init(void)
int i = 0;
efi_debugfs = debugfs_create_dir("efi", NULL);
- if (IS_ERR_OR_NULL(efi_debugfs))
+ if (IS_ERR(efi_debugfs))
return;
for_each_efi_memory_desc(md) {
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index df3182f2e63a..1fd6823248ab 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -96,7 +96,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
}
/* Allocate space for the logs and copy them. */
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 47d67bb0a516..9e2628728aad 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -750,7 +750,6 @@ static const struct file_operations efi_test_fops = {
.unlocked_ioctl = efi_test_ioctl,
.open = efi_test_open,
.release = efi_test_close,
- .llseek = no_llseek,
};
static struct miscdevice efi_test_dev = {
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 5f43dfa22f79..85c525745b31 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -452,7 +452,7 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
}
/* kobj_type: ties together all properties required to register an entry */
-static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
.default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 525ebdc7ded5..f3bc0d427825 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -386,7 +386,6 @@ static const struct file_operations do_sign_fops = {
.open = rwtm_debug_open,
.read = do_sign_read,
.write = do_sign_write,
- .llseek = no_llseek,
};
static void rwtm_debugfs_release(void *root)
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 723ea0ad3f09..b08b4bb8f650 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -301,16 +301,17 @@ static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id)
static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv)
{
- int timeout, ret = 0;
+ int ret = 0;
+ long time_left;
socfpga_fpga_disable_irqs(priv);
init_completion(&priv->status_complete);
socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->status_complete,
msecs_to_jiffies(10));
- if (timeout == 0)
+ if (time_left == 0)
ret = -ETIMEDOUT;
socfpga_fpga_disable_irqs(priv);
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
index 2f7a24f23808..b9ab29809e96 100644
--- a/drivers/fpga/tests/fpga-bridge-test.c
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -23,6 +23,13 @@ struct bridge_ctx {
struct bridge_stats stats;
};
+/*
+ * Wrapper to avoid a cast warning when passing the action function directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
static int op_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct bridge_stats *stats = bridge->priv;
@@ -50,6 +57,7 @@ static const struct fpga_bridge_ops fake_bridge_ops = {
static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name)
{
struct bridge_ctx *ctx;
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -61,13 +69,10 @@ static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *d
&ctx->stats);
KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
- return ctx;
-}
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
-static void unregister_test_bridge(struct kunit *test, struct bridge_ctx *ctx)
-{
- fpga_bridge_unregister(ctx->bridge);
- kunit_device_unregister(test, ctx->dev);
+ return ctx;
}
static void fpga_bridge_test_get(struct kunit *test)
@@ -141,8 +146,6 @@ static void fpga_bridge_test_get_put_list(struct kunit *test)
fpga_bridges_put(&bridge_list);
KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));
-
- unregister_test_bridge(test, ctx_1);
}
static int fpga_bridge_test_init(struct kunit *test)
@@ -152,11 +155,6 @@ static int fpga_bridge_test_init(struct kunit *test)
return 0;
}
-static void fpga_bridge_test_exit(struct kunit *test)
-{
- unregister_test_bridge(test, test->priv);
-}
-
static struct kunit_case fpga_bridge_test_cases[] = {
KUNIT_CASE(fpga_bridge_test_get),
KUNIT_CASE(fpga_bridge_test_toggle),
@@ -167,7 +165,6 @@ static struct kunit_case fpga_bridge_test_cases[] = {
static struct kunit_suite fpga_bridge_suite = {
.name = "fpga_bridge",
.init = fpga_bridge_test_init,
- .exit = fpga_bridge_test_exit,
.test_cases = fpga_bridge_test_cases,
};
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 125b3a4d43c6..9cb37aefbac4 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -44,6 +44,16 @@ struct mgr_ctx {
struct mgr_stats stats;
};
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
/**
* init_test_buffer() - Allocate and initialize a test image in a buffer.
* @test: KUnit test context object.
@@ -257,6 +267,9 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
KUNIT_ASSERT_EQ(test, ret, 0);
sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ctx->img_info->sgt = sgt;
ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
@@ -273,13 +286,12 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2);
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
-
- sg_free_table(ctx->img_info->sgt);
}
static int fpga_mgr_test_init(struct kunit *test)
{
struct mgr_ctx *ctx;
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -294,19 +306,14 @@ static int fpga_mgr_test_init(struct kunit *test)
ctx->img_info = fpga_image_info_alloc(ctx->dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info);
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, ctx->img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
test->priv = ctx;
return 0;
}
-static void fpga_mgr_test_exit(struct kunit *test)
-{
- struct mgr_ctx *ctx = test->priv;
-
- fpga_image_info_free(ctx->img_info);
- kunit_device_unregister(test, ctx->dev);
-}
-
static struct kunit_case fpga_mgr_test_cases[] = {
KUNIT_CASE(fpga_mgr_test_get),
KUNIT_CASE(fpga_mgr_test_lock),
@@ -318,7 +325,6 @@ static struct kunit_case fpga_mgr_test_cases[] = {
static struct kunit_suite fpga_mgr_suite = {
.name = "fpga_mgr",
.init = fpga_mgr_test_init,
- .exit = fpga_mgr_test_exit,
.test_cases = fpga_mgr_test_cases,
};
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index bcf0651df261..6a108cafded8 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -35,6 +35,19 @@ struct test_ctx {
struct mgr_stats mgr_stats;
};
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_region_unregister_wrapper, fpga_region_unregister,
+ struct fpga_region *);
+
static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
struct mgr_stats *stats = mgr->priv;
@@ -111,6 +124,9 @@ static void fpga_region_test_program_fpga(struct kunit *test)
img_info = fpga_image_info_alloc(ctx->mgr_dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
img_info->buf = img_buf;
img_info->count = sizeof(img_buf);
@@ -130,8 +146,6 @@ static void fpga_region_test_program_fpga(struct kunit *test)
KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);
fpga_bridges_put(&ctx->region->bridge_list);
-
- fpga_image_info_free(img_info);
}
/*
@@ -144,6 +158,7 @@ static int fpga_region_test_init(struct kunit *test)
{
struct test_ctx *ctx;
struct fpga_region_info region_info = { 0 };
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -164,6 +179,9 @@ static int fpga_region_test_init(struct kunit *test)
ctx->bridge_stats.enable = true;
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev");
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev);
@@ -174,24 +192,14 @@ static int fpga_region_test_init(struct kunit *test)
ctx->region = fpga_region_register_full(ctx->region_dev, &region_info);
KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));
+ ret = kunit_add_action_or_reset(test, fpga_region_unregister_wrapper, ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
test->priv = ctx;
return 0;
}
-static void fpga_region_test_exit(struct kunit *test)
-{
- struct test_ctx *ctx = test->priv;
-
- fpga_region_unregister(ctx->region);
- kunit_device_unregister(test, ctx->region_dev);
-
- fpga_bridge_unregister(ctx->bridge);
- kunit_device_unregister(test, ctx->bridge_dev);
-
- kunit_device_unregister(test, ctx->mgr_dev);
-}
-
static struct kunit_case fpga_region_test_cases[] = {
KUNIT_CASE(fpga_region_test_class_find),
KUNIT_CASE(fpga_region_test_program_fpga),
@@ -199,9 +207,8 @@ static struct kunit_case fpga_region_test_cases[] = {
};
static struct kunit_suite fpga_region_suite = {
- .name = "fpga_mgr",
+ .name = "fpga_region",
.init = fpga_region_test_init,
- .exit = fpga_region_test_exit,
.test_cases = fpga_region_test_cases,
};
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 0ac93183d201..4db3d80e10b0 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -387,7 +387,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
const char *why;
int err;
u32 intr_status;
- unsigned long timeout;
+ unsigned long time_left;
unsigned long flags;
struct scatterlist *sg;
int i;
@@ -427,8 +427,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
zynq_step_dma(priv);
spin_unlock_irqrestore(&priv->dma_lock, flags);
- timeout = wait_for_completion_timeout(&priv->dma_done,
- msecs_to_jiffies(DMA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&priv->dma_done,
+ msecs_to_jiffies(DMA_TIMEOUT_MS));
spin_lock_irqsave(&priv->dma_lock, flags);
zynq_fpga_set_irq(priv, 0);
@@ -452,7 +452,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
if (priv->cur_sg ||
!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- if (timeout == 0)
+ if (time_left == 0)
why = "DMA timed out";
else
why = "DMA did not complete";
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 48f2ee0f78c4..883ef86ad3fc 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -206,7 +206,6 @@ static const struct file_operations gnss_fops = {
.read = gnss_read,
.write = gnss_write,
.poll = gnss_poll,
- .llseek = no_llseek,
};
static struct class *gnss_class;
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 6cedf46efec6..ab798c848215 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
@@ -19,29 +20,8 @@
#include <linux/bitops.h>
#include <linux/seq_file.h>
-#define EP93XX_GPIO_F_INT_STATUS 0x5c
-#define EP93XX_GPIO_A_INT_STATUS 0xa0
-#define EP93XX_GPIO_B_INT_STATUS 0xbc
-
-/* Maximum value for gpio line identifiers */
-#define EP93XX_GPIO_LINE_MAX 63
-
-/* Number of GPIO chips in EP93XX */
-#define EP93XX_GPIO_CHIP_NUM 8
-
-/* Maximum value for irq capable line identifiers */
-#define EP93XX_GPIO_LINE_MAX_IRQ 23
-
-#define EP93XX_GPIO_A_IRQ_BASE 64
-#define EP93XX_GPIO_B_IRQ_BASE 72
-/*
- * Static mapping of GPIO bank F IRQS:
- * F0..F7 (16..24) to irq 80..87.
- */
-#define EP93XX_GPIO_F_IRQ_BASE 80
-
struct ep93xx_gpio_irq_chip {
- u8 irq_offset;
+ void __iomem *base;
u8 int_unmasked;
u8 int_enabled;
u8 int_type1;
@@ -50,15 +30,11 @@ struct ep93xx_gpio_irq_chip {
};
struct ep93xx_gpio_chip {
+ void __iomem *base;
struct gpio_chip gc;
struct ep93xx_gpio_irq_chip *eic;
};
-struct ep93xx_gpio {
- void __iomem *base;
- struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
-};
-
#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
@@ -79,25 +55,23 @@ static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc
#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
- struct ep93xx_gpio_irq_chip *eic)
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio_irq_chip *eic)
{
- writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
+ writeb_relaxed(0, eic->base + EP93XX_INT_EN_OFFSET);
writeb_relaxed(eic->int_type2,
- epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
+ eic->base + EP93XX_INT_TYPE2_OFFSET);
writeb_relaxed(eic->int_type1,
- epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
+ eic->base + EP93XX_INT_TYPE1_OFFSET);
writeb_relaxed(eic->int_unmasked & eic->int_enabled,
- epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
+ eic->base + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
@@ -106,53 +80,43 @@ static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
else
eic->int_debounce &= ~port_mask;
- writeb(eic->int_debounce,
- epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
+ writeb(eic->int_debounce, eic->base + EP93XX_INT_DEBOUNCE_OFFSET);
}
-static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
+static u32 ep93xx_gpio_ab_irq_handler(struct gpio_chip *gc)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
unsigned long stat;
int offset;
- chained_irq_enter(irqchip, desc);
-
- /*
- * Dispatch the IRQs to the irqdomain of each A and B
- * gpiochip irqdomains depending on what has fired.
- * The tricky part is that the IRQ line is shared
- * between bank A and B and each has their own gpiochip.
- */
- stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
+ stat = readb(eic->base + EP93XX_INT_STATUS_OFFSET);
for_each_set_bit(offset, &stat, 8)
- generic_handle_domain_irq(epg->gc[0].gc.irq.domain,
- offset);
+ generic_handle_domain_irq(gc->irq.domain, offset);
- stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
- for_each_set_bit(offset, &stat, 8)
- generic_handle_domain_irq(epg->gc[1].gc.irq.domain,
- offset);
+ return stat;
+}
- chained_irq_exit(irqchip, desc);
+static irqreturn_t ep93xx_ab_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_RETVAL(ep93xx_gpio_ab_irq_handler(dev_id));
}
static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
{
- /*
- * map discontiguous hw irq range to continuous sw irq range:
- *
- * IRQ_EP93XX_GPIO{0..7}MUX -> EP93XX_GPIO_LINE_F{0..7}
- */
struct irq_chip *irqchip = irq_desc_get_chip(desc);
- unsigned int irq = irq_desc_get_irq(desc);
- int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
- int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct gpio_irq_chip *gic = &gc->irq;
+ unsigned int parent = irq_desc_get_irq(desc);
+ unsigned int i;
chained_irq_enter(irqchip, desc);
- generic_handle_irq(gpio_irq);
+ for (i = 0; i < gic->num_parents; i++)
+ if (gic->parents[i] == parent)
+ break;
+
+ if (i < gic->num_parents)
+ generic_handle_domain_irq(gc->irq.domain, i);
+
chained_irq_exit(irqchip, desc);
}
@@ -160,54 +124,53 @@ static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port_mask = BIT(d->irq & 7);
+ int port_mask = BIT(irqd_to_hwirq(d));
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
eic->int_type2 ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
}
- writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
+ writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port_mask = BIT(d->irq & 7);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int port_mask = BIT(hwirq);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
eic->int_type2 ^= port_mask; /* switch edge direction */
eic->int_unmasked &= ~port_mask;
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
- writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+ writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET);
+ gpiochip_disable_irq(gc, hwirq);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- eic->int_unmasked &= ~BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, eic);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+ eic->int_unmasked &= ~BIT(hwirq);
+ ep93xx_gpio_update_int_params(eic);
+ gpiochip_disable_irq(gc, hwirq);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- eic->int_unmasked |= BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, eic);
+ gpiochip_enable_irq(gc, hwirq);
+ eic->int_unmasked |= BIT(hwirq);
+ ep93xx_gpio_update_int_params(eic);
}
/*
@@ -219,12 +182,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int offset = d->irq & 7;
- int port_mask = BIT(offset);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int port_mask = BIT(hwirq);
irq_flow_handler_t handler;
- gc->direction_input(gc, offset);
+ gc->direction_input(gc, hwirq);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -250,7 +212,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_BOTH:
eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
- if (gc->get(gc, offset))
+ if (gc->get(gc, hwirq))
eic->int_type2 &= ~port_mask; /* falling */
else
eic->int_type2 |= port_mask; /* rising */
@@ -264,51 +226,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
eic->int_enabled |= port_mask;
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
return 0;
}
-/*************************************************************************
- * gpiolib interface for EP93xx on-chip GPIOs
- *************************************************************************/
-struct ep93xx_gpio_bank {
- const char *label;
- int data;
- int dir;
- int irq;
- int base;
- bool has_irq;
- bool has_hierarchical_irq;
- unsigned int irq_base;
-};
-
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
- { \
- .label = _label, \
- .data = _data, \
- .dir = _dir, \
- .irq = _irq, \
- .base = _base, \
- .has_irq = _has_irq, \
- .has_hierarchical_irq = _has_hier, \
- .irq_base = _irq_base, \
- }
-
-static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
- /* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, EP93XX_GPIO_A_IRQ_BASE),
- /* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, EP93XX_GPIO_B_IRQ_BASE),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
- /* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, EP93XX_GPIO_F_IRQ_BASE),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
-};
-
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
@@ -342,115 +264,112 @@ static const struct irq_chip gpio_eic_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
- struct platform_device *pdev,
- struct ep93xx_gpio *epg,
- struct ep93xx_gpio_bank *bank)
+static int ep93xx_setup_irqs(struct platform_device *pdev,
+ struct ep93xx_gpio_chip *egc)
{
- void __iomem *data = epg->base + bank->data;
- void __iomem *dir = epg->base + bank->dir;
struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
- struct gpio_irq_chip *girq;
- int err;
-
- err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0);
- if (err)
- return err;
-
- gc->label = bank->label;
- gc->base = bank->base;
-
- girq = &gc->irq;
- if (bank->has_irq || bank->has_hierarchical_irq) {
- gc->set_config = ep93xx_gpio_set_config;
- egc->eic = devm_kcalloc(dev, 1,
- sizeof(*egc->eic),
- GFP_KERNEL);
- if (!egc->eic)
- return -ENOMEM;
- egc->eic->irq_offset = bank->irq;
- gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
- }
+ struct gpio_irq_chip *girq = &gc->irq;
+ int ret, irq, i;
+ void __iomem *intr;
- if (bank->has_irq) {
- int ab_parent_irq = platform_get_irq(pdev, 0);
-
- girq->parent_handler = ep93xx_gpio_ab_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
- girq->parents[0] = ab_parent_irq;
- girq->first = bank->irq_base;
- }
+ intr = devm_platform_ioremap_resource_byname(pdev, "intr");
+ if (IS_ERR(intr))
+ return PTR_ERR(intr);
+
+ gc->set_config = ep93xx_gpio_set_config;
+ egc->eic = devm_kzalloc(dev, sizeof(*egc->eic), GFP_KERNEL);
+ if (!egc->eic)
+ return -ENOMEM;
+
+ egc->eic->base = intr;
+ gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
+ girq->num_parents = platform_irq_count(pdev);
+ if (girq->num_parents == 0)
+ return -EINVAL;
+
+ girq->parents = devm_kcalloc(dev, girq->num_parents, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
- /* Only bank F has especially funky IRQ handling */
- if (bank->has_hierarchical_irq) {
- int gpio_irq;
- int i;
+ if (girq->num_parents == 1) { /* A/B irqchips */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
- /*
- * FIXME: convert this to use hierarchical IRQ support!
- * this requires fixing the root irqchip to be hierarchical.
- */
+ ret = devm_request_irq(dev, irq, ep93xx_ab_irq_handler,
+ IRQF_SHARED, gc->label, gc);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ: %d\n", irq);
+
+ girq->parents[0] = irq;
+ } else { /* F irqchip */
girq->parent_handler = ep93xx_gpio_f_irq_handler;
- girq->num_parents = 8;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- /* Pick resources 1..8 for these IRQs */
+
for (i = 0; i < girq->num_parents; i++) {
- girq->parents[i] = platform_get_irq(pdev, i + 1);
- gpio_irq = bank->irq_base + i;
- irq_set_chip_data(gpio_irq, &epg->gc[5]);
- irq_set_chip_and_handler(gpio_irq,
- girq->chip,
- handle_level_irq);
- irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ continue;
+
+ girq->parents[i] = irq;
}
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
- girq->first = bank->irq_base;
+
+ girq->map = girq->parents;
}
- return devm_gpiochip_add_data(dev, gc, epg);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* TODO: replace with handle_bad_irq() once we are fully hierarchical */
+ girq->handler = handle_simple_irq;
+
+ return 0;
}
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
- struct ep93xx_gpio *epg;
- int i;
-
- epg = devm_kzalloc(&pdev->dev, sizeof(*epg), GFP_KERNEL);
- if (!epg)
+ struct ep93xx_gpio_chip *egc;
+ struct gpio_chip *gc;
+ void __iomem *data;
+ void __iomem *dir;
+ int ret;
+
+ egc = devm_kzalloc(&pdev->dev, sizeof(*egc), GFP_KERNEL);
+ if (!egc)
return -ENOMEM;
- epg->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(epg->base))
- return PTR_ERR(epg->base);
-
- for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct ep93xx_gpio_chip *gc = &epg->gc[i];
- struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
-
- if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
- dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
- bank->label);
+ data = devm_platform_ioremap_resource_byname(pdev, "data");
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ dir = devm_platform_ioremap_resource_byname(pdev, "dir");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ gc = &egc->gc;
+ ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
+
+ gc->label = dev_name(&pdev->dev);
+ if (platform_irq_count(pdev) > 0) {
+ dev_dbg(&pdev->dev, "setting up irqs for %s\n", dev_name(&pdev->dev));
+ ret = ep93xx_setup_irqs(pdev, egc);
+ if (ret)
+ dev_err_probe(&pdev->dev, ret, "setup irqs failed");
}
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, egc);
}
+static const struct of_device_id ep93xx_gpio_match[] = {
+ { .compatible = "cirrus,ep9301-gpio" },
+ { /* sentinel */ }
+};
+
static struct platform_driver ep93xx_gpio_driver = {
.driver = {
.name = "gpio-ep93xx",
+ .of_match_table = ep93xx_gpio_match,
},
.probe = ep93xx_gpio_probe,
};
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 455eecf6380e..d39c6618bade 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -347,7 +347,6 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
.open = gpio_mockup_debugfs_open,
.read = gpio_mockup_debugfs_read,
.write = gpio_mockup_debugfs_write,
- .llseek = no_llseek,
.release = single_release,
};
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index aed6d1f6cfc3..07e0d7180579 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -217,7 +217,6 @@ static const struct file_operations fops_trigger = {
.owner = THIS_MODULE,
.open = trigger_open,
.write = trigger_write,
- .llseek = no_llseek,
.release = single_release,
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 5aac59de0d76..78c9d9ed3d68 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2842,7 +2842,6 @@ static const struct file_operations gpio_fileops = {
.poll = lineinfo_watch_poll,
.read = lineinfo_watch_read,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gpio_ioctl_compat,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dcd59040c449..9b1e0ede05a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1083,10 +1083,6 @@ struct amdgpu_device {
struct amdgpu_virt virt;
- /* link all shadow bo */
- struct list_head shadow_list;
- struct mutex shadow_list_lock;
-
/* record hw reset is performed */
bool has_hw_reset;
u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 57bda66e85ef..2ca127173135 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -511,7 +511,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h
return -EINVAL;
}
- /* udpate aca bank to aca source error_cache first */
+ /* update aca bank to aca source error_cache first */
ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1254a43ec96b..3bc0cbf45bc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -950,28 +950,30 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
- int *wave_cnt, int *vmid, uint32_t inst)
+ struct kfd_cu_occupancy *queue_cnt, uint32_t inst)
{
int pipe_idx;
int queue_slot;
unsigned int reg_val;
-
+ unsigned int wave_cnt;
/*
* Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
* parameters to read out waves in flight. Get VMID if there are
* non-zero waves in flight.
*/
- *vmid = 0xFF;
- *wave_cnt = 0;
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
- soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
- reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
- queue_slot);
- *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
- if (*wave_cnt != 0)
- *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
- CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst));
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot);
+ wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+ if (wave_cnt != 0) {
+ queue_cnt->wave_cnt += wave_cnt;
+ queue_cnt->doorbell_off =
+ (RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) &
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ }
}
/**
@@ -981,9 +983,8 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* or more queues running and submitting waves to compute units.
*
* @adev: Handle of device from which to get number of waves in flight
- * @pasid: Identifies the process for which this query call is invoked
- * @pasid_wave_cnt: Output parameter updated with number of waves in flight that
- * belong to process with given pasid
+ * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset
+ * for comparison later.
* @max_waves_per_cu: Output parameter updated with maximum number of waves
* possible per Compute Unit
* @inst: xcc's instance number on a multi-XCC setup
@@ -1011,34 +1012,28 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* number of waves that are in flight for the queue at specified index. The
* index ranges from 0 to 7.
*
- * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
- * of the wave(s).
+ * If non-zero waves are in flight, store the corresponding doorbell offset
+ * of the queue, along with the wave count.
*
- * Determine if VMID from above step maps to pasid provided as parameter. If
- * it matches agrregate the wave count. That the VMID will not match pasid is
- * a normal condition i.e. a device is expected to support multiple queues
- * from multiple proceses.
+ * Determine if the queue belongs to the process by comparing the doorbell
+ * offset against the process's queues. If it matches, aggregate the wave
+ * count for the process.
*
* Reading registers referenced above involves programming GRBM appropriately
*/
-void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst)
{
int qidx;
- int vmid;
int se_idx;
- int sh_idx;
int se_cnt;
- int sh_cnt;
- int wave_cnt;
int queue_map;
- int pasid_tmp;
int max_queue_cnt;
- int vmid_wave_cnt = 0;
DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);
lock_spi_csq_mutexes(adev);
- soc15_grbm_select(adev, 1, 0, 0, 0, inst);
+ soc15_grbm_select(adev, 1, 0, 0, 0, GET_INST(GC, inst));
/*
* Iterate through the shader engines and arrays of the device
@@ -1048,51 +1043,38 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
AMDGPU_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
- sh_cnt = adev->gfx.config.max_sh_per_se;
se_cnt = adev->gfx.config.max_shader_engines;
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
- for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+ amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst);
+ queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS);
+
+ /*
+ * Assumption: queue map encodes following schema: four
+ * pipes per each micro-engine, with each pipe mapping
+ * eight queues. This schema is true for GFX9 devices
+ * and must be verified for newer device families
+ */
+ for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+ /* Skip queues that are not associated with
+ * compute functions
+ */
+ if (!test_bit(qidx, cp_queue_bitmap))
+ continue;
- amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
- queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
+ if (!(queue_map & (1 << qidx)))
+ continue;
- /*
- * Assumption: queue map encodes following schema: four
- * pipes per each micro-engine, with each pipe mapping
- * eight queues. This schema is true for GFX9 devices
- * and must be verified for newer device families
- */
- for (qidx = 0; qidx < max_queue_cnt; qidx++) {
-
- /* Skip qeueus that are not associated with
- * compute functions
- */
- if (!test_bit(qidx, cp_queue_bitmap))
- continue;
-
- if (!(queue_map & (1 << qidx)))
- continue;
-
- /* Get number of waves in flight and aggregate them */
- get_wave_count(adev, qidx, &wave_cnt, &vmid,
- inst);
- if (wave_cnt != 0) {
- pasid_tmp =
- RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
- mmIH_VMID_0_LUT) + vmid);
- if (pasid_tmp == pasid)
- vmid_wave_cnt += wave_cnt;
- }
- }
+ /* Get number of waves in flight and aggregate them */
+ get_wave_count(adev, qidx, &cu_occupancy[qidx],
+ inst);
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
- soc15_grbm_select(adev, 0, 0, 0, 0, inst);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
unlock_spi_csq_mutexes(adev);
/* Update the output parameters and return */
- *pasid_wave_cnt = vmid_wave_cnt;
*max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
adev->gfx.cu_info.max_waves_per_simd;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 988c50ac3be0..b6a91a552aa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -52,8 +52,9 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
-void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst);
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst);
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
uint32_t inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 4afef5b46c7d..ce5ca304dba9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1499,7 +1499,7 @@ static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
}
}
- ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ ret = amdgpu_bo_pin(bo, domain);
if (ret)
pr_err("Error in Pinning BO to domain: %d\n", domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 42e64bce661e..45affc02548c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -87,8 +87,9 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
+ * For SR-IOV, the vbios image is also put in VRAM in the VF.
*/
-static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
+static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
resource_size_t vram_base;
@@ -284,10 +285,6 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
acpi_status status;
bool found = false;
- /* ATRM is for the discrete card only */
- if (adev->flags & AMD_IS_APU)
- return false;
-
/* ATRM is for on-platform devices only */
if (dev_is_removable(&adev->pdev->dev))
return false;
@@ -343,11 +340,8 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return igp_read_bios_from_vram(adev);
- else
- return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
- false : amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
@@ -414,7 +408,36 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
#endif
-bool amdgpu_get_bios(struct amdgpu_device *adev)
+static bool amdgpu_get_bios_apu(struct amdgpu_device *adev)
+{
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
+
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
+ return false;
+
+success:
+ return true;
+}
+
+static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
@@ -426,7 +449,8 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- if (igp_read_bios_from_vram(adev)) {
+ /* this is required for SR-IOV */
+ if (amdgpu_read_bios_from_vram(adev)) {
dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
}
@@ -455,10 +479,24 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
return false;
success:
- adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
return true;
}
+bool amdgpu_get_bios(struct amdgpu_device *adev)
+{
+ bool found;
+
+ if (adev->flags & AMD_IS_APU)
+ found = amdgpu_get_bios_apu(adev);
+ else
+ found = amdgpu_get_bios_dgpu(adev);
+
+ if (found)
+ adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
+
+ return found;
+}
+
/* helper function for soc15 and onwards to read bios from rom */
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f4628412dac4..c2394c8b4d6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4107,9 +4107,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->mm_stats.lock);
spin_lock_init(&adev->wb.lock);
- INIT_LIST_HEAD(&adev->shadow_list);
- mutex_init(&adev->shadow_list_lock);
-
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
@@ -5030,80 +5027,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_recover_vram - Recover some VRAM contents
- *
- * @adev: amdgpu_device pointer
- *
- * Restores the contents of VRAM buffers from the shadows in GTT. Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- *
- * Returns:
- * 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
-{
- struct dma_fence *fence = NULL, *next = NULL;
- struct amdgpu_bo *shadow;
- struct amdgpu_bo_vm *vmbo;
- long r = 1, tmo;
-
- if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(8000);
- else
- tmo = msecs_to_jiffies(100);
-
- dev_info(adev->dev, "recover vram bo from shadow start\n");
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
- /* If vm is compute context or adev is APU, shadow will be NULL */
- if (!vmbo->shadow)
- continue;
- shadow = vmbo->shadow;
-
- /* No need to recover an evicted BO */
- if (!shadow->tbo.resource ||
- shadow->tbo.resource->mem_type != TTM_PL_TT ||
- shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
- continue;
-
- r = amdgpu_bo_restore_shadow(shadow, &next);
- if (r)
- break;
-
- if (fence) {
- tmo = dma_fence_wait_timeout(fence, false, tmo);
- dma_fence_put(fence);
- fence = next;
- if (tmo == 0) {
- r = -ETIMEDOUT;
- break;
- } else if (tmo < 0) {
- r = tmo;
- break;
- }
- } else {
- fence = next;
- }
- }
- mutex_unlock(&adev->shadow_list_lock);
-
- if (fence)
- tmo = dma_fence_wait_timeout(fence, false, tmo);
- dma_fence_put(fence);
-
- if (r < 0 || tmo <= 0) {
- dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
- return -EIO;
- }
-
- dev_info(adev->dev, "recover vram bo from shadow done\n");
- return 0;
-}
-
-
-/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu_device pointer
@@ -5165,12 +5088,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
amdgpu_inc_vram_lost(adev);
- r = amdgpu_device_recover_vram(adev);
- }
- if (r)
- return r;
/* need to be called during full access so we can't do it later like
* bare-metal does.
@@ -5569,9 +5488,7 @@ out:
}
}
- if (!r)
- r = amdgpu_device_recover_vram(tmp_adev);
- else
+ if (r)
tmp_adev->asic_reset_res = r;
}
@@ -6189,7 +6106,7 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
p2p_addressable = !(adev->gmc.aper_base & address_mask ||
aper_limit & address_mask);
}
- return is_large_bar && p2p_access && p2p_addressable;
+ return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
#else
return false;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 092ec11258cd..b119d27271c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -233,6 +233,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
}
if (!adev->enable_virtual_display) {
+ new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(new_abo,
amdgpu_display_supported_domains(adev, new_abo->flags));
if (unlikely(r != 0)) {
@@ -1474,7 +1475,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
- connector->display_info.is_hdmi &&
+ connector && connector->display_info.is_hdmi &&
amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
@@ -1759,6 +1760,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f57411ed2dc2..81d9877c8735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -117,9 +117,10 @@
* - 3.56.0 - Update IB start address and size alignment for decode and encode
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
+ * - 3.59.0 - Cleared VRAM
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 58
+#define KMS_DRIVER_MINOR 59
#define KMS_DRIVER_PATCHLEVEL 0
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0e617dff8765..1a5df8b94661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -43,8 +43,6 @@
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
-
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -87,11 +85,11 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = {
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
- struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (robj) {
- amdgpu_hmm_unregister(robj);
- amdgpu_bo_unref(&robj);
+ if (aobj) {
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
}
@@ -126,7 +124,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bo = &ubo->bo;
*obj = &bo->tbo.base;
- (*obj)->funcs = &amdgpu_gem_object_funcs;
return 0;
}
@@ -295,7 +292,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
return drm_gem_ttm_mmap(obj, vma);
}
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.free = amdgpu_gem_object_free,
.open = amdgpu_gem_object_open,
.close = amdgpu_gem_object_close,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index f30264782ba2..3a8f57900a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -33,6 +33,8 @@
#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
+extern const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ad6bf5d4e0a9..16f2605ac50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -107,8 +107,11 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
/*
* Do the coredump immediately after a job timeout to get a very
* close dump/snapshot/representation of GPU's current error status
+ * Skip it for SRIOV, since VF FLR will be triggered by the host driver
+ * before job timeout
*/
- amdgpu_job_core_dump(adev, job);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e32161f6b67a..44819cdba7fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -77,24 +77,6 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_destroy(tbo);
}
-static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
- struct amdgpu_bo_vm *vmbo;
-
- bo = shadow_bo->parent;
- vmbo = to_amdgpu_bo_vm(bo);
- /* in case amdgpu_device_recover_vram got NULL of bo->parent */
- if (!list_empty(&vmbo->shadow_list)) {
- mutex_lock(&adev->shadow_list_lock);
- list_del_init(&vmbo->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
-
- amdgpu_bo_destroy(tbo);
-}
-
/**
* amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* @bo: buffer object to be checked
@@ -108,8 +90,7 @@ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &amdgpu_bo_destroy ||
- bo->destroy == &amdgpu_bo_user_destroy ||
- bo->destroy == &amdgpu_bo_vm_destroy)
+ bo->destroy == &amdgpu_bo_user_destroy)
return true;
return false;
@@ -583,6 +564,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
@@ -723,52 +705,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
- *
- * @vmbo: BO that will be inserted into the shadow list
- *
- * Insert a BO to the shadow list.
- */
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
-
- mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
- vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
- vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
- mutex_unlock(&adev->shadow_list_lock);
-}
-
-/**
- * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
- *
- * @shadow: &amdgpu_bo shadow to be restored
- * @fence: dma_fence associated with the operation
- *
- * Copies a buffer object's shadow content back to the object.
- * This is used for recovering a buffer from its shadow in case of a gpu
- * reset where vram context may be lost.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
-
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t shadow_addr, parent_addr;
-
- shadow_addr = amdgpu_bo_gpu_offset(shadow);
- parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
-
- return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
- amdgpu_bo_size(shadow), NULL, fence,
- true, false, 0);
-}
-
-/**
* amdgpu_bo_kmap - map an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be mapped
* @ptr: kernel virtual address to be returned
@@ -851,7 +787,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_get(&bo->tbo);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
@@ -863,40 +799,30 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
*/
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
/**
- * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be pinned
* @domain: domain to be pinned to
- * @min_offset: the start of requested address range
- * @max_offset: the end of requested address range
*
- * Pins the buffer object according to requested domain and address range. If
- * the memory is unbound gart memory, binds the pages into gart table. Adjusts
- * pin_count and pin_size accordingly.
+ * Pins the buffer object according to requested domain. If the memory is
+ * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
+ * pin_size accordingly.
*
* Pinning means to lock pages in memory along with keeping them at a fixed
* offset. It is required when a buffer can not be moved, for example, when
* a display buffer is being scanned out.
*
- * Compared with amdgpu_bo_pin(), this function gives more flexibility on
- * where to pin a buffer if there are specific restrictions on where a buffer
- * must be located.
- *
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset)
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
@@ -905,9 +831,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
- if (WARN_ON_ONCE(min_offset > max_offset))
- return -EINVAL;
-
/* Check domain to be pinned to against preferred domains */
if (bo->preferred_domains & domain)
domain = bo->preferred_domains & domain;
@@ -933,14 +856,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
ttm_bo_pin(&bo->tbo);
-
- if (max_offset != 0) {
- u64 domain_start = amdgpu_ttm_domain_start(adev,
- mem_type);
- WARN_ON_ONCE(max_offset <
- (amdgpu_bo_gpu_offset(bo) - domain_start));
- }
-
return 0;
}
@@ -957,17 +872,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- unsigned int fpfn, lpfn;
-
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
-
- if (fpfn > bo->placements[i].fpfn)
- bo->placements[i].fpfn = fpfn;
- if (!bo->placements[i].lpfn ||
- (lpfn && lpfn < bo->placements[i].lpfn))
- bo->placements[i].lpfn = lpfn;
-
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
bo->placements[i].mem_type == TTM_PL_VRAM)
bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
@@ -994,24 +898,6 @@ error:
}
/**
- * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
- * @bo: &amdgpu_bo buffer object to be pinned
- * @domain: domain to be pinned to
- *
- * A simple wrapper to amdgpu_bo_pin_restricted().
- * Provides a simpler API for buffers that do not have any strict restrictions
- * on where a buffer must be located.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
-{
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
-}
-
-/**
* amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be unpinned
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d7e27957013f..717e47b46167 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -136,8 +136,6 @@ struct amdgpu_bo_user {
struct amdgpu_bo_vm {
struct amdgpu_bo bo;
- struct amdgpu_bo *shadow;
- struct list_head shadow_list;
struct amdgpu_vm_bo_base entries[];
};
@@ -275,22 +273,6 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
-/**
- * amdgpu_bo_shadowed - check if the BO is shadowed
- *
- * @bo: BO to be tested.
- *
- * Returns:
- * NULL if not shadowed or else return a BO pointer.
- */
-static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
-{
- if (bo->tbo.type == ttm_bo_type_kernel)
- return to_amdgpu_bo_vm(bo)->shadow;
-
- return NULL;
-}
-
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
@@ -322,8 +304,6 @@ void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
@@ -349,9 +329,6 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats);
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
-int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
- struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 189574d53ebd..0b28b2cf1517 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2853,7 +2853,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
if (ret)
return ret;
- /* Start rlc autoload after psp recieved all the gfx firmware */
+ /* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
@@ -3425,9 +3425,11 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
- int err = 0;
+ const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
+ int fw_index, fw_bin_count, start_index = 0;
+ const struct psp_fw_bin_desc *fw_bin;
uint8_t *ucode_array_start_addr;
- int fw_index = 0;
+ int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
if (err)
@@ -3478,15 +3480,30 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
case 2:
sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
- if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
+ fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
+
+ if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
err = -EINVAL;
goto out;
}
- for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
- err = parse_sos_bin_descriptor(psp,
- &sos_hdr_v2_0->psp_fw_bin[fw_index],
+ if (sos_hdr_v2_0->header.header_version_minor == 1) {
+ sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
+
+ fw_bin = sos_hdr_v2_1->psp_fw_bin;
+
+ if (psp_is_aux_sos_load_required(psp))
+ start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+ else
+ fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+
+ } else {
+ fw_bin = sos_hdr_v2_0->psp_fw_bin;
+ }
+
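+ /* For illustration (assumed values): with a v2_1 header, fw_bin_count = 6 and
+ * psp_aux_fw_bin_index = 4, the aux path below walks descriptors 4..5 while
+ * the non-aux path walks descriptors 0..1.
+ */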
+ for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
+ err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
sos_hdr_v2_0);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 74a96516c913..e8abbbcb4326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -138,6 +138,7 @@ struct psp_funcs {
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
+ bool (*is_aux_sos_load_required)(struct psp_context *psp);
};
struct ta_funcs {
@@ -464,6 +465,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->fatal_error_recovery_quirk ? \
(psp)->funcs->fatal_error_recovery_quirk((psp)) : 0)
+#define psp_is_aux_sos_load_required(psp) \
+ ((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 61a2f386d9fb..1a1395c5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -882,7 +882,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret)
return ret;
- /* gfx block ras dsiable cmd must send to ras-ta */
+ /* gfx block ras disable cmd must send to ras-ta */
if (head->block == AMDGPU_RAS_BLOCK__GFX)
con->features |= BIT(head->block);
@@ -3468,6 +3468,11 @@ init_ras_enabled_flag:
/* aca is disabled by default */
adev->aca.is_enabled = false;
+
+ /* the bad page feature is not applicable to this specific app platform */
+ if (adev->gmc.is_app_apu &&
+ amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
+ amdgpu_bad_page_threshold = 0;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index aab8077e5098..f28f6b4ba765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -58,7 +58,7 @@
#define EEPROM_I2C_MADDR_4 0x40000
/*
- * The 2 macros bellow represent the actual size in bytes that
+ * The 2 macros below represent the actual size in bytes that
* those entities occupy in the EEPROM memory.
* RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which
* uses uint64 to store 6b fields such as retired_page.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index bdf1ef825d89..c586ab4c911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
+/**
+ * amdgpu_sync_kfd - sync to KFD fences
+ *
+ * @sync: sync object to add KFD fences to
+ * @resv: reservation object with KFD fences
+ *
+ * Extract all KFD fences and add them to the sync object.
+ */
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f;
+ int r = 0;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+ void *fence_owner = amdgpu_sync_get_owner(f);
+
+ if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
+ continue;
+
+ r = amdgpu_sync_fence(sync, f);
+ if (r)
+ break;
+ }
+ dma_resv_iter_end(&cursor);
+
+ return r;
+}
+
/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index cf1e9e858efd..e3272dce798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -51,6 +51,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b8bc7fa8c375..74adb983ab03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1970,7 +1970,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
- /* Initiailize doorbell pool on PCI BAR */
+ /* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
DRM_ERROR("Failed initializing doorbell heap.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 5bc37acd3981..4e23419b92d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -136,6 +136,14 @@ struct psp_firmware_header_v2_0 {
struct psp_fw_bin_desc psp_fw_bin[];
};
+/* version_major=2, version_minor=1 */
+struct psp_firmware_header_v2_1 {
+ struct common_firmware_header header;
+ uint32_t psp_fw_bin_count;
+ uint32_t psp_aux_fw_bin_index;
+ struct psp_fw_bin_desc psp_fw_bin[];
+};
+
/* version_major=1, version_minor=0 */
struct ta_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -426,6 +434,7 @@ union amdgpu_firmware_header {
struct psp_firmware_header_v1_1 psp_v1_1;
struct psp_firmware_header_v1_3 psp_v1_3;
struct psp_firmware_header_v2_0 psp_v2_0;
+ struct psp_firmware_header_v2_0 psp_v2_1;
struct ta_firmware_header_v1_0 ta;
struct ta_firmware_header_v2_0 ta_v2_0;
struct gfx_firmware_header_v1_0 gfx;
@@ -447,7 +456,7 @@ union amdgpu_firmware_header {
uint8_t raw[0x100];
};
-#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc))
+#define UCODE_MAX_PSP_PACKAGING (((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) * 2)
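+/* The limit is doubled here presumably because a v2_1 SOS image can pack an
+ * auxiliary set of binaries alongside the regular ones (see the
+ * psp_aux_fw_bin_index handling in psp_init_sos_microcode).
+ */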
/*
* fw loading support
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index e5f508d34ed8..d4c2afafbb73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -338,6 +338,7 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2452dfa6314f..6005280f5f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -465,7 +465,6 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
struct amdgpu_vm_bo_base *bo_base;
- struct amdgpu_bo *shadow;
struct amdgpu_bo *bo;
int r;
@@ -486,16 +485,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
- shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
return r;
- if (shadow) {
- r = validate(param, shadow);
- if (r)
- return r;
- }
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
@@ -1176,6 +1169,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
AMDGPU_SYNC_EQ_OWNER, vm);
if (r)
goto error_free;
+ if (bo) {
+ r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
+ if (r)
+ goto error_free;
+ }
+
} else {
struct drm_gem_object *obj = &bo->tbo.base;
@@ -2149,10 +2148,6 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
{
struct amdgpu_vm_bo_base *bo_base;
- /* shadow bo doesn't have bo base, its validation needs its parent */
- if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
- bo = bo->parent;
-
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
@@ -2482,7 +2477,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
root_bo = amdgpu_bo_ref(&root->bo);
r = amdgpu_bo_reserve(root_bo, true);
if (r) {
- amdgpu_bo_unref(&root->shadow);
amdgpu_bo_unref(&root_bo);
goto error_free_delayed;
}
@@ -2575,11 +2569,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->last_update = dma_fence_get_stub();
vm->is_compute_context = true;
- /* Free the shadow bo for compute VM */
- amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
-
- goto unreserve_bo;
-
unreserve_bo:
amdgpu_bo_unreserve(vm->root.bo);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index a076f43097e4..f78a0434a48f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -383,14 +383,6 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- if (vmbo->shadow) {
- struct amdgpu_bo *shadow = vmbo->shadow;
-
- r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
- if (r)
- return r;
- }
-
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
@@ -448,10 +440,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int32_t xcp_id)
{
struct amdgpu_bo_param bp;
- struct amdgpu_bo *bo;
- struct dma_resv *resv;
unsigned int num_entries;
- int r;
memset(&bp, 0, sizeof(bp));
@@ -484,42 +473,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (vm->root.bo)
bp.resv = vm->root.bo->tbo.base.resv;
- r = amdgpu_bo_create_vm(adev, &bp, vmbo);
- if (r)
- return r;
-
- bo = &(*vmbo)->bo;
- if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
- (*vmbo)->shadow = NULL;
- return 0;
- }
-
- if (!bp.resv)
- WARN_ON(dma_resv_lock(bo->tbo.base.resv,
- NULL));
- resv = bp.resv;
- memset(&bp, 0, sizeof(bp));
- bp.size = amdgpu_vm_pt_size(adev, level);
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.base.resv;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- bp.xcp_id_plus1 = xcp_id + 1;
-
- r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
-
- if (!resv)
- dma_resv_unlock(bo->tbo.base.resv);
-
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- amdgpu_bo_add_to_shadow_list(*vmbo);
-
- return 0;
+ return amdgpu_bo_create_vm(adev, &bp, vmbo);
}
/**
@@ -569,7 +523,6 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
return 0;
error_free_pt:
- amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt_bo);
return r;
}
@@ -581,17 +534,10 @@ error_free_pt:
*/
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
- struct amdgpu_bo *shadow;
-
if (!entry->bo)
return;
entry->bo->vm_bo = NULL;
- shadow = amdgpu_bo_shadowed(entry->bo);
- if (shadow) {
- ttm_bo_set_bulk_move(&shadow->tbo, NULL);
- amdgpu_bo_unref(&shadow);
- }
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
spin_lock(&entry->vm->status_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 4772fba33285..46d9fb433ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -35,16 +35,7 @@
*/
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
- int r;
-
- r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
- if (r)
- return r;
-
- if (table->shadow)
- r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
-
- return r;
+ return amdgpu_ttm_alloc_gart(&table->bo.tbo);
}
/* Allocate a new job for @count PTE updates */
@@ -265,17 +256,13 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (!p->pages_addr) {
/* set page commands needed */
- if (vmbo->shadow)
- amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
- count, incr, flags);
amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
incr, flags);
return 0;
}
/* copy commands needed */
- ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
- (vmbo->shadow ? 2 : 1);
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
/* for padding */
ndw -= 7;
@@ -290,8 +277,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
pte[i] |= flags;
}
- if (vmbo->shadow)
- amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
pe += nptes * 8;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 90138bc5f03d..32775260556f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -180,6 +180,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
#define for_each_xcp(xcp_mgr, xcp, i) \
for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
- xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
+ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 26e2188101e7..5e8833e4fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -94,8 +94,6 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (aqua_vanjaram_xcp_vcn_shared(adev))
- inst_mask = 1 << (inst_idx * 2);
break;
default:
DRM_ERROR("Not support ring type %d!", ring->funcs->type);
@@ -105,6 +103,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
break;
@@ -394,38 +394,31 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
struct amdgpu_xcp_ip *ip)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_sdma, num_vcn, num_shared_vcn, num_xcp;
int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
- int num_sdma, num_vcn;
num_sdma = adev->sdma.num_instances;
num_vcn = adev->vcn.num_vcn_inst;
+ num_shared_vcn = 1;
+
+ num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;
switch (xcp_mgr->mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma;
- num_vcn_xcp = num_vcn;
- break;
case AMDGPU_DPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 2;
- num_vcn_xcp = num_vcn / 2;
- break;
case AMDGPU_TPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 3;
- num_vcn_xcp = num_vcn / 3;
- break;
case AMDGPU_QPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 4;
- num_vcn_xcp = num_vcn / 4;
- break;
case AMDGPU_CPX_PARTITION_MODE:
- num_sdma_xcp = 2;
- num_vcn_xcp = num_vcn ? 1 : 0;
+ num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
+ num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
break;
default:
return -EINVAL;
}
- num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+ if (num_vcn && num_xcp > num_vcn)
+ num_shared_vcn = num_xcp / num_vcn;
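+ /* e.g. in CPX mode with 8 partitions sharing 4 VCN instances, each VCN
+ * instance is shared by 8 / 4 = 2 partitions and the instance mask below
+ * is derived from xcp_id / num_shared_vcn.
+ */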
switch (ip_id) {
case AMDGPU_XCP_GFXHUB:
@@ -441,7 +434,8 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
break;
case AMDGPU_XCP_VCN:
- ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
+ ip->inst_mask =
+ XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
/* TODO : Assign IP funcs */
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 742adbc460c9..70c1399f738d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1881,6 +1881,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2401,6 +2402,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 8d46ebadfa46..f154c24499c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1931,6 +1931,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2485,6 +2486,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index f08dc6a3886f..a7fcb135827f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1861,6 +1861,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2321,6 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a6a3adf2ae13..77ac3f114d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1828,6 +1828,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2320,6 +2321,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index d1357c01eb39..47b47d21f464 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -202,12 +202,16 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};
-static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
};
+static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000),
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -3495,10 +3499,14 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_12_0,
+ (const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+
if (adev->rev_id == 0)
soc15_program_register_sequence(adev,
- golden_settings_gc_12_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+ golden_settings_gc_12_0_rev0,
+ (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0));
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 408e5600bb61..c100845409f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1701,7 +1701,15 @@ static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
- (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[xcc_id].ring.sched.ready = false;
}
udelay(50);
@@ -2240,6 +2248,8 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
if (r)
return r;
+ } else {
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
@@ -2299,12 +2309,6 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
- int xcc_id)
-{
- gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
-}
-
static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
{
if (amdgpu_gfx_disable_kcq(adev, xcc_id))
@@ -2336,7 +2340,7 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
}
gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
- gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
static int gfx_v9_4_3_hw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 6c1891889c4d..d4f72e47ae9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -153,7 +153,7 @@ static void imu_v11_0_setup(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val);
}
- //disble imu Rtavfs, SmsRepair, DfllBTC, and ClkB
+ //disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB
imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10);
imu_reg_val |= 0x10007;
WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index ee91ff9e52a2..231a3d490ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -161,7 +161,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
- signed long timeout = 3000000; /* 3000 ms */
+ signed long timeout = 2100000; /* 2100 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring[0];
struct MES_API_STATUS *api_status;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e499b2857a01..8d27421689c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -146,7 +146,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
- signed long timeout = 3000000; /* 3000 ms */
+ signed long timeout = 2100000; /* 2100 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring[pipe];
spinlock_t *ring_lock = &mes->ring_lock[pipe];
@@ -479,6 +479,11 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
union MESAPI__MISC misc_pkt;
int pipe;
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
memset(&misc_pkt, 0, sizeof(misc_pkt));
misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -513,6 +518,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
break;
case MES_MISC_OP_SET_SHADER_DEBUGGER:
+ pipe = AMDGPU_MES_SCHED_PIPE;
misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
misc_pkt.set_shader_debugger.process_context_addr =
input->set_shader_debugger.process_context_addr;
@@ -530,11 +536,6 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
return -EINVAL;
}
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
@@ -608,6 +609,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.disable_mes_log = 1;
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+ mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index fa479dfa1ec1..739fce4fa8fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -365,7 +365,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
} else {
- /* Disbale ASPM L1 */
+ /* Disable ASPM L1 */
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
/* Disable ASPM TxL0s */
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 1251ee38a676..51e470e8d67d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -81,6 +81,8 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+#define regMP1_PUB_SCRATCH0 0x3b10090
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -807,6 +809,20 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
}
}
+static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 pmfw_ver;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
+ return false;
+
+ /* load 4e version of sos if pmfw version less than 85.115.0 */
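+ /* 85.115.0 packs into 0x557300 (0x55 = 85, 0x73 = 115, 0x00 = 0), which is
+ * what the comparison below checks against the scratch register value.
+ */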
+ pmfw_ver = RREG32(regMP1_PUB_SCRATCH0 / 4);
+
+ return (pmfw_ver < 0x557300);
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -830,6 +846,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
+ .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa637541da58..e65194fe94af 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -710,7 +710,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
if (ring->use_pollmem) {
- /*wptr polling is not enogh fast, directly clean the wptr register */
+ /* wptr polling is not fast enough, directly clean the wptr register */
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index cfd8e183ad50..a8763496aed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1080,13 +1080,16 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib,
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_CPV(1);
+
ib->ptr[ib->length_dw++] = bytes - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = 0;
}
@@ -1744,7 +1747,7 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
- .copy_pte_num_dw = 7,
+ .copy_pte_num_dw = 8,
.copy_pte = sdma_v7_0_vm_copy_pte,
.write_pte = sdma_v7_0_vm_write_pte,
.set_pte_pde = sdma_v7_0_vm_set_pte_pde,
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
index e4e30b9d481b..c04fdd2d5b38 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
@@ -60,7 +60,7 @@ static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *f
{
u32 data;
- /* CGTT_ROM_CLK_CTRL0 is not availabe for APUs */
+ /* CGTT_ROM_CLK_CTRL0 is not available for APUs */
if (adev->flags & AMD_IS_APU)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index b0c3678cfb31..fd4c3d4f8387 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -250,13 +250,6 @@ static void soc24_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
-static void soc24_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
-{
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
const struct amdgpu_ip_block_version soc24_common_ip_block = {
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
@@ -454,6 +447,11 @@ static int soc24_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_get_irq(adev);
+ /* Enable selfring doorbell aperture late because doorbell BAR
+ * aperture will change if BAR resize succeeds in gmc sw_init.
+ */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
return 0;
}
@@ -491,7 +489,7 @@ static int soc24_common_hw_init(void *handle)
adev->df.funcs->hw_init(adev);
/* enable the doorbell aperture */
- soc24_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -500,8 +498,13 @@ static int soc24_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc24_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+ * separately in hw_fini because soc24_enable_doorbell_aperture
+ * has been removed and there is no need to delay disabling
+ * selfring doorbell.
+ */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_put_irq(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index b1fd226b7efb..9d4f5352a62c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -1395,170 +1395,6 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job)
-{
- struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
-
- /* if VCN0 is harvested, we can't support AV1 */
- if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
- return -EINVAL;
-
- scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
- [AMDGPU_RING_PRIO_0].sched;
- drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
- return 0;
-}
-
-static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
- uint64_t addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_va_mapping *map;
- uint32_t *msg, num_buffers;
- struct amdgpu_bo *bo;
- uint64_t start, end;
- unsigned int i;
- void *ptr;
- int r;
-
- addr &= AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
- if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
- return r;
- }
-
- start = map->start * AMDGPU_GPU_PAGE_SIZE;
- end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
- if (addr & 0x7) {
- DRM_ERROR("VCN messages must be 8 byte aligned!\n");
- return -EINVAL;
- }
-
- bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r) {
- DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, &ptr);
- if (r) {
- DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
- return r;
- }
-
- msg = ptr + addr - start;
-
- /* Check length */
- if (msg[1] > end - addr) {
- r = -EINVAL;
- goto out;
- }
-
- if (msg[3] != RDECODE_MSG_CREATE)
- goto out;
-
- num_buffers = msg[2];
- for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
- uint32_t offset, size, *create;
-
- if (msg[0] != RDECODE_MESSAGE_CREATE)
- continue;
-
- offset = msg[1];
- size = msg[2];
-
- if (offset + size > end) {
- r = -EINVAL;
- goto out;
- }
-
- create = ptr + addr + offset - start;
-
- /* H264, HEVC and VP9 can run on any instance */
- if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
- continue;
-
- r = vcn_v4_0_5_limit_sched(p, job);
- if (r)
- goto out;
- }
-
-out:
- amdgpu_bo_kunmap(bo);
- return r;
-}
-
-#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
-#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
-
-#define RADEON_VCN_ENGINE_INFO (0x30000001)
-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
-
-#define RENCODE_ENCODE_STANDARD_AV1 2
-#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
-
-/* return the offset in ib if id is found, -1 otherwise
- * to speed up the searching we only search upto max_offset
- */
-static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
-{
- int i;
-
- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
- if (ib->ptr[i + 1] == id)
- return i;
- }
- return -1;
-}
-
-static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- struct amdgpu_ring *ring = amdgpu_job_ring(job);
- struct amdgpu_vcn_decode_buffer *decode_buffer;
- uint64_t addr;
- uint32_t val;
- int idx;
-
- /* The first instance can decode anything */
- if (!ring->me)
- return 0;
-
- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
- idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
- if (idx < 0) /* engine info is missing */
- return 0;
-
- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_5_dec_msg(p, job, addr);
- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
- idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
- return vcn_v4_0_5_limit_sched(p, job);
- }
- return 0;
-}
-
static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1566,7 +1402,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.get_rptr = vcn_v4_0_5_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_5_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_5_unified_ring_set_wptr,
- .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 71b465f8d83e..648f40091aa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -3540,6 +3540,30 @@ int debug_refresh_runlist(struct device_queue_manager *dqm)
return debug_map_and_unlock(dqm);
}
+bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ int doorbell_off, u32 *queue_format)
+{
+ struct queue *q;
+ bool r = false;
+
+ if (!queue_format)
+ return r;
+
+ dqm_lock(dqm);
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.doorbell_off == doorbell_off) {
+ *queue_format = q->properties.format;
+ r = true;
+ goto out;
+ }
+ }
+
+out:
+ dqm_unlock(dqm);
+ return r;
+}
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 08b40826ad1e..09ab36f8e8c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -324,6 +324,9 @@ void set_queue_snapshot_entry(struct queue *q,
int debug_lock_and_unmap(struct device_queue_manager *dqm);
int debug_map_and_unlock(struct device_queue_manager *dqm);
int debug_refresh_runlist(struct device_queue_manager *dqm);
+bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ int doorbell_off, u32 *queue_format);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index bb8cbfc39b90..37b69fe0ede3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -306,23 +306,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
- int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- /* gfxhub */
- if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
- hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
- node_id);
- if (hub_inst < 0)
- hub_inst = 0;
- }
-
- /* mmhub */
- if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
- hub_inst = node_id / 4;
-
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index d163d92a692f..2b72d5b4949b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -341,6 +341,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
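+ /* amdgpu_sdma_phase_quantum is the existing SDMA phase-quantum module
+ * parameter; here it is applied as the per-queue context quantum.
+ */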
+ m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
+ << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
+ & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;
+
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a902950cc060..d07acf1b2f93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -270,6 +270,11 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
+ int i;
+ struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES];
+ u32 queue_format;
+
+ memset(cu_occupancy, 0x0, sizeof(cu_occupancy));
pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
dev = pdd->dev;
@@ -287,8 +292,29 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
/* Collect wave count from device if it supports */
wave_cnt = 0;
max_waves_per_cu = 0;
- dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
- &max_waves_per_cu, 0);
+
+ /*
+ * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
+ * For AQL queues, because of cooperative dispatch we multiply the wave count
+ * by the number of XCCs in the partition to get the total wave count across
+ * all XCCs in the partition.
+ * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as it is.
+ */
+ dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
+ &max_waves_per_cu, ffs(dev->xcc_mask) - 1);
+
+ for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
+ if (cu_occupancy[i].wave_cnt != 0 &&
+ kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
+ cu_occupancy[i].doorbell_off,
+ &queue_format)) {
+ if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
+ wave_cnt += cu_occupancy[i].wave_cnt;
+ else
+ wave_cnt += (NUM_XCC(dev->xcc_mask) *
+ cu_occupancy[i].wave_cnt);
+ }
+ }
/* Translate wave count to number of compute units */
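+ /* Round up so that a partially occupied CU still counts as occupied. */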
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index b439d4d0bd84..01b960b15274 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -517,7 +517,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval)
goto err_destroy_queue;
- kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
@@ -527,6 +526,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+ kfd_procfs_del_queue(pqn->q);
kfd_queue_release_buffers(pdd, &pqn->q->properties);
pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0cff66735cfe..6e79028c5d78 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -808,6 +808,20 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
}
/**
+ * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * HPD sense changes can occur during low power states and need to be
+ * reported by firmware to the driver.
+ */
+static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+}
+
+/**
* register_dmub_notify_callback - Sets callback for DMUB notify
* @adev: amdgpu_device pointer
* @type: Type of dmub notification
@@ -1757,25 +1771,41 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
static enum dmub_ips_disable_type dm_get_default_ips_mode(
struct amdgpu_device *adev)
{
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
- (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
- return DMUB_IPS_ENABLE;
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 5, 0):
+ /*
+ * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+ * cause a hard hang. A fix exists for newer PMFW.
+ *
+ * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+ * IPS state in all cases, except for s0ix and all displays off (DPMS),
+ * where IPS2 is allowed.
+ *
+ * When checking pmfw version, use the major and minor only.
+ */
+ if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
+ /*
+ * Other DCN35 ASICs have residency issues with IPS2
+ * when idle.
+ * We want them to use IPS2 only in display-off cases.
+ */
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ break;
+ case IP_VERSION(3, 5, 1):
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ break;
+ default:
+ /* ASICs older than DCN35 do not have IPSs */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
+ ret = DMUB_IPS_DISABLE_ALL;
+ break;
+ }
- /* ASICs older than DCN35 do not have IPSs */
- return DMUB_IPS_DISABLE_ALL;
+ return ret;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
@@ -3808,6 +3838,12 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ dmub_hpd_sense_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ return -EINVAL;
+ }
}
list_for_each_entry(connector,
@@ -4449,6 +4485,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
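+/* Backlight caps whose input-signal span is narrower than this ((255 - 12) / 2 = 121) are treated as invalid below. */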
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
@@ -4463,6 +4500,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
return;
amdgpu_acpi_get_backlight_caps(&caps);
+
+ /* validate the firmware value is sane */
+ if (caps.caps_valid) {
+ int spread = caps.max_input_signal - caps.min_input_signal;
+
+ if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps.min_input_signal < 0 ||
+ spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ spread < AMDGPU_DM_MIN_SPREAD) {
+ DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
+ caps.min_input_signal, caps.max_input_signal);
+ caps.caps_valid = false;
+ }
+ }
+
if (caps.caps_valid) {
dm->backlight_caps[bl_idx].caps_valid = true;
if (caps.aux_support)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2d7755e2b6c3..15d4690c74d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 6
+#define AMDGPU_DMUB_NOTIFICATION_MAX 7
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index c0c61c03984c..83a31b97e96b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1147,7 +1147,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
- dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
+ dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
@@ -1681,7 +1681,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
{
struct dc_dsc_policy dsc_policy = {0};
- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
+ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp * 16,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 25f63b2e7a8e..495e3cd70426 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -961,6 +961,7 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
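+ /* Scanout from VRAM needs a physically contiguous buffer, so request it before pinning. */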
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 08c494a7a21b..0d5fefb0f591 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -114,6 +114,7 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
domain = amdgpu_display_supported_domains(adev, rbo->flags);
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index e47e9db062f4..681799468487 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -569,7 +569,7 @@ static void calculate_bandwidth(
break;
}
data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
- /*clamp the partitions to the maxium number supported by the lb*/
+ /* clamp the partitions to the maximum number supported by the lb */
if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
data->lb_partitions_max[i] = bw_int_to_fixed(10);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f770828df149..0e243f4344d0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -59,6 +59,7 @@ int clk_mgr_helper_get_active_display_cnt(
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
+ const struct dc_stream_status *stream_status = &context->stream_status[i];
/* Don't count SubVP phantom pipes as part of active
* display count
@@ -66,13 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
- if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+ if (!stream->dpms_off || (stream_status && stream_status->plane_count))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 97164b5585a8..b46a3afe48ca 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -1222,6 +1222,12 @@ void dcn35_clk_mgr_construct(
ctx->dc->debug.disable_dpp_power_gate = false;
ctx->dc->debug.disable_hubp_power_gate = false;
ctx->dc->debug.disable_dsc_power_gate = false;
+
+ /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */
+ if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE &&
+ ctx->dce_version == DCN_VERSION_3_5 &&
+ ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00))
+ ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
} else {
/*let's reset the config control flag*/
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ae788154896c..5c39390ecbd5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1767,7 +1767,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
- if (!se->funcs->dp_get_pixel_format)
+ if (!se || !se->funcs->dp_get_pixel_format)
return false;
if (!se->funcs->dp_get_pixel_format(
@@ -2376,7 +2376,7 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
+static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
@@ -2455,7 +2455,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
/* todo: below are HW dependent, we should add a hook to
* DCE/N resource and validated there.
*/
- if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (!dc->debug.skip_full_updated_if_possible) {
/* swizzled mode requires RQ to be setup properly,
* thus need to run DML to calculate RQ settings
*/
@@ -2547,7 +2547,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(u);
+ type = get_plane_info_update_type(dc, u);
elevate_update_type(&overall_type, type);
type = get_scaling_info_update_type(dc, u);
@@ -2596,6 +2596,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
elevate_update_type(&overall_type, UPDATE_TYPE_MED);
}
+ if (u->sdr_white_level_nits)
+ if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ update_flags->bits.sdr_white_level_nits = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+
if (u->cm2_params) {
if ((u->cm2_params->component_settings.shaper_3dlut_setting
!= u->surface->mcm_shaper_3dlut_setting)
@@ -2876,6 +2882,10 @@ static void copy_surface_update_to_plane(
surface->hdr_mult =
srf_update->hdr_mult;
+ if (srf_update->sdr_white_level_nits)
+ surface->sdr_white_level_nits =
+ srf_update->sdr_white_level_nits;
+
if (srf_update->blend_tf)
memcpy(&surface->blend_tf, srf_update->blend_tf,
sizeof(surface->blend_tf));
@@ -4679,6 +4689,8 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].scaling_info ||
(srf_updates[i].hdr_mult.value &&
srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
+ (srf_updates[i].sdr_white_level_nits &&
+ srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
@@ -5744,6 +5756,27 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_set_tps_notification - Submits TPS notification
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @tps: [in] requested training pattern (TPS)
+ *
+ * Submits set_tps_notification command to dmub via inbox message
+ */
+void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.set_tps_notification.header.type = DMUB_CMD__DPIA;
+ cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION;
+ cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_tps_notification.tps_notification.tps = tps;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/**
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
*
* @dc: [in] dc structure
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4c94dd38be4b..3992ad73165b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.299"
+#define DC_VER "3.2.301"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -462,6 +462,7 @@ struct dc_config {
bool support_edp0_on_dp1;
unsigned int enable_fpo_flicker_detection;
bool disable_hbr_audio_dp2;
+ bool consolidated_dpia_dp_lt;
};
enum visual_confirm {
@@ -762,7 +763,8 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t reserved:26;
+ uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -1056,6 +1058,9 @@ struct dc_debug_options {
unsigned int force_lls;
bool notify_dpia_hr_bw;
bool enable_ips_visual_confirm;
+ unsigned int sharpen_policy;
+ unsigned int scale_to_sharpness_policy;
+ bool skip_full_updated_if_possible;
};
@@ -1269,6 +1274,7 @@ union surface_update_flags {
uint32_t tmz_changed:1;
uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */
uint32_t full_update:1;
+ uint32_t sdr_white_level_nits:1;
} bits;
uint32_t raw;
@@ -1351,6 +1357,7 @@ struct dc_plane_state {
bool adaptive_sharpness_en;
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
+ unsigned int sdr_white_level_nits;
};
struct dc_plane_info {
@@ -1508,6 +1515,7 @@ struct dc_surface_update {
*/
struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
+ unsigned int sdr_white_level_nits;
};
/*
@@ -2520,6 +2528,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps);
+
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 519c3df78ee5..41bd95e9177a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -969,6 +969,14 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dpcd_max_uncompressed_pixel_rate_cap {
+ struct {
+ uint16_t max_uncompressed_pixel_rate_cap :15;
+ uint16_t valid :1;
+ } bits;
+ uint8_t raw[2];
+};
+
union dp_fec_capability1 {
struct {
uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1;
@@ -1170,6 +1178,7 @@ struct dpcd_caps {
struct dc_lttpr_caps lttpr_caps;
struct adaptive_sync_caps adaptive_sync_caps;
struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+ union dpcd_max_uncompressed_pixel_rate_cap max_uncompressed_pixel_rate_cap;
union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
union dp_main_line_channel_coding_cap channel_coding_cap;
@@ -1340,6 +1349,9 @@ struct dp_trace {
#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
#endif
+#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
+#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c
+#endif
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index fe3078b8789e..9014c2409817 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -59,6 +59,7 @@ struct dc_dsc_config_options {
uint32_t max_target_bpp_limit_override_x16;
uint32_t slice_height_granularity;
uint32_t dsc_force_odm_hslice_override;
+ bool force_dsc_when_not_needed;
};
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
@@ -100,7 +101,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
*/
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
- struct dc_dsc_policy *policy);
+ struct dc_dsc_policy *policy,
+ const enum dc_link_encoding_format link_encoding);
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index cd6de93eb91c..603552dbd771 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -186,19 +186,17 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
+
+ spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy;
+ spl_in->debug.scale_to_sharpness_policy =
+ (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy;
+
/* Check if the stream is in fullscreen and if it's HDR.
* Use this to determine sharpness levels.
*/
spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
- spl_in->hdr_multx100 = 0;
- if (spl_in->is_hdr_on) {
- spl_in->hdr_multx100 = (uint32_t)dc_fixpt_floor(dc_fixpt_mul(plane_state->hdr_mult,
- dc_fixpt_from_int(100)));
- /* Disable sharpness for HDR Mult > 6.0 */
- if (spl_in->hdr_multx100 > 600)
- spl_in->adaptive_sharpness.enable = false;
- }
+ spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits;
}
/// @brief Translate SPL output parameters to pipe context
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index e7019c95ba79..4fce64a030b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
-
- if (req128_c && log2_swath_height_c > 0)
- log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index ae5251041728..3fa9a5da02f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
-
- if (req128_c && log2_swath_height_c > 0)
- log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 0b132ce1d2cd..2b275e680379 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1924,15 +1924,6 @@ static unsigned int CalculateVMAndRowBytes(
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeightPTEs = 1;
- *PixelPTEReqHeight = MacroTileHeight;
- *PixelPTEReqWidth = 8 * *MacroTileWidth;
- *PTERequestSize = 64;
- if (ScanDirection != dm_vert)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 547dfcc80fde..d851c081e376 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -8926,7 +8926,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
// The prefetch scheduling should only be calculated once as per AllowForPStateChangeOrStutterInVBlank requirement
// If the AllowForPStateChangeOrStutterInVBlank requirement is not strict (i.e. only try those power saving feature
- // if possible, then will try to program for the best power saving features in order of diffculty (dram, fclk, stutter)
+ // if possible, then will try to program for the best power saving features in order of difficulty (dram, fclk, stutter)
s->iteration = 0;
s->MaxTotalRDBandwidth = 0;
s->AllPrefetchModeTested = false;
@@ -9977,7 +9977,7 @@ void dml_core_get_row_heights(
dml_print("DML_DLG: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
- // just suppluy with enough parameters to calculate meta and dte
+ // just supply with enough parameters to calculate meta and dte
CalculateVMAndRowBytes(
0, // dml_bool_t ViewportStationary,
1, // dml_bool_t DCCEnable,
@@ -10110,7 +10110,7 @@ dml_bool_t dml_mode_support(
/// Note: In this function, it is assumed that DCFCLK, SOCCLK freq are the state values, and mode_program will just use the DML calculated DPPCLK and DISPCLK
/// @param mode_lib mode_lib data struct that house all the input/output/bbox and calculation values.
/// @param state_idx Power state idx chosen
-/// @param display_cfg Display Congiuration
+/// @param display_cfg Display Configuration
/// @param call_standalone Calling mode_programming without calling mode support. Some of the "support" struct member will be pre-calculated before doing mode programming
/// TODO: Add clk_cfg input, could be useful for standalone mode
dml_bool_t dml_mode_programming(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index b0d9aed0f265..8697eac1e1f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -858,7 +858,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->immediate_flip = plane_state->flip_immediate;
- plane->composition.rect_out_height_spans_vactive = plane_state->dst_rect.height >= stream->timing.v_addressable;
+ plane->composition.rect_out_height_spans_vactive =
+ plane_state->dst_rect.height >= stream->timing.v_addressable &&
+ stream->dst.height >= stream->timing.v_addressable;
}
//TODO : Could be possibly moved to a common helper layer.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index d63558ee3135..1cf9015e854a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -940,9 +940,11 @@ static void build_synchronized_timing_groups(
/* find synchronizable timing groups */
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
- &display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0 &&
- display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
+ &display_config->display_config.stream_descriptors[j].timing,
+ sizeof(struct dml2_timing_cfg)) == 0 &&
+ display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder &&
+ (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match
+ display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index a1727e5bf024..ebd5df1a36e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -668,6 +668,7 @@ static bool decide_dsc_bandwidth_range(
*/
static bool decide_dsc_target_bpp_x16(
const struct dc_dsc_policy *policy,
+ const struct dc_dsc_config_options *options,
const struct dsc_enc_caps *dsc_common_caps,
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
@@ -682,7 +683,7 @@ static bool decide_dsc_target_bpp_x16(
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
- if (policy->enable_dsc_when_not_needed)
+ if (policy->enable_dsc_when_not_needed || options->force_dsc_when_not_needed)
/* enable max bpp even if dsc is not needed */
*target_bpp_x16 = range.max_target_bpp_x16;
} else if (target_bandwidth_kbps >= range.max_kbps) {
@@ -882,7 +883,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
+ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -1080,6 +1081,7 @@ static bool setup_dsc_config(
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
&policy,
+ options,
&dsc_common_caps,
target_bandwidth_kbps,
timing,
@@ -1171,7 +1173,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
- struct dc_dsc_policy *policy)
+ struct dc_dsc_policy *policy,
+ const enum dc_link_encoding_format link_encoding)
{
uint32_t bpc = 0;
@@ -1235,10 +1238,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16;
/* enable DSC when not needed, default false */
- if (dsc_policy_enable_dsc_when_not_needed)
- policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
- else
- policy->enable_dsc_when_not_needed = false;
+ policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
}
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
@@ -1267,4 +1267,5 @@ void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_
options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine;
options->max_target_bpp_limit_override_x16 = 0;
options->slice_height_granularity = 1;
+ options->force_dsc_when_not_needed = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6293173ba2b9..5eb3da8d5206 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -545,6 +545,7 @@ static void hubbub35_init(struct hubbub *hubbub)
DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256);
+ memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index d52ce58c6a98..4fbed0298adf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -57,6 +57,7 @@
#include "panel_cntl.h"
#include "dc_state_priv.h"
#include "dpcd_defs.h"
+#include "dsc.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -1823,6 +1824,48 @@ static void get_edp_links_with_sink(
}
}
+static void clean_up_dsc_blocks(struct dc *dc)
+{
+ struct display_stream_compressor *dsc = NULL;
+ struct timing_generator *tg = NULL;
+ struct stream_encoder *se = NULL;
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+ int i;
+
+ if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
+ dc->ctx->dce_version != DCN_VERSION_3_51)
+ return;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct dcn_dsc_state s = {0};
+
+ dsc = dc->res_pool->dscs[i];
+ dsc->funcs->dsc_read_state(dsc, &s);
+ if (s.dsc_fw_en) {
+ /* disable DSC in OPTC */
+ if (i < dc->res_pool->timing_generator_count) {
+ tg = dc->res_pool->timing_generators[i];
+ tg->funcs->set_dsc_config(tg, OPTC_DSC_DISABLED, 0, 0);
+ }
+ /* disable DSC in stream encoder */
+ if (i < dc->res_pool->stream_enc_count) {
+ se = dc->res_pool->stream_enc[i];
+ se->funcs->dp_set_dsc_config(se, OPTC_DSC_DISABLED, 0, 0);
+ se->funcs->dp_set_dsc_pps_info_packet(se, false, NULL, true);
+ }
+ /* disable DSC block */
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
+ dsc->funcs->dsc_disable(dsc);
+
+ /* power down DSC */
+ if (pg_cntl != NULL)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, dsc->inst, false);
+ }
+ }
+}
+
/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
@@ -1927,6 +1970,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
power_down_all_hw_blocks(dc);
+
+ /* DSC could be enabled on eDP during VBIOS post.
+ * Clean up the DSC blocks if eDP is connected but not active.
+ */
+ if (edp_link_with_sink && (edp_stream_num == 0))
+ clean_up_dsc_blocks(dc);
+
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
@@ -2046,13 +2096,20 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
* as well.
*/
for (i = 0; i < num_pipes; i++) {
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
-
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ /* dc_state_destruct() might null the stream resources, so fetch tg
+ * here first to avoid a race condition. The lifetime of the pointee
+ * itself (the timing_generator object) is not a problem here.
+ */
+ struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
+
+ if ((tg != NULL) && tg->funcs) {
+ if (tg->funcs->set_drr)
+ tg->funcs->set_drr(tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (tg->funcs->set_static_screen_control)
+ tg->funcs->set_static_screen_control(
+ tg, event_triggers, num_frames);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 42c52284a868..bded33575493 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -455,7 +455,7 @@ bool dcn30_mmhubbub_warmup(
struct mcif_wb *mcif_wb;
struct mcif_warmup_params warmup_params = {0};
unsigned int i, i_buf;
- /*make sure there is no active DWB eanbled */
+ /* make sure there is no active DWB enabled */
for (i = 0; i < num_dwb; i++) {
dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index a36e11606f90..2e8c9f738259 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1032,6 +1032,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 479fd3e89e5a..bd309dbdf7b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -334,7 +334,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 46fb3649bc86..6499807af72a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -50,8 +50,31 @@ static void update_dpia_stream_allocation_table(struct dc_link *link,
DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
status, mst_alloc_slots, prev_mst_slots_in_use);
- ASSERT(link_enc);
- link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+ if (link_enc)
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
+
+static void set_dio_dpia_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE)
+ return;
+
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!link_enc)
+ return;
+
+ link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+static void set_dio_dpia_lane_settings(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
}
static const struct link_hwss dpia_link_hwss = {
@@ -65,8 +88,8 @@ static const struct link_hwss dpia_link_hwss = {
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
- .set_dp_lane_settings = set_dio_dp_lane_settings,
+ .set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
+ .set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 1aed55b0ab6a..60f15a9ba7a5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -287,6 +287,13 @@ static bool dp_validate_mode_timing(
req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
max_bw = dp_link_bandwidth_kbps(link, link_setting);
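+ /* pix_clk_100hz is in units of 100 Hz; the DPCD cap is scaled by 10000 here, i.e. it is treated as MHz. */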
+ bool is_max_uncompressed_pixel_rate_exceeded = link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.valid &&
+ timing->pix_clk_100hz > link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.max_uncompressed_pixel_rate_cap * 10000;
+
+ if (is_max_uncompressed_pixel_rate_exceeded && !timing->flags.DSC) {
+ return false;
+ }
+
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
* initial link training (to get
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 34a618a7278b..d78c8ec4de79 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1942,6 +1942,11 @@ static bool retrieve_link_cap(struct dc_link *link)
DC_LOG_DP2("\tFEC aggregated error counters are supported");
}
+ core_link_read_dpcd(link,
+ DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP,
+ link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
+ sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 988999c44475..27b881f947e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -515,6 +515,41 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
+bool dp_check_interlane_aligned(union lane_align_status_updated align_status,
+ struct dc_link *link,
+ uint8_t retries)
+{
+ /* Take into account the corner case for DP 1.4a LL Compliance CTS, as USB4
+ * has to share encoders, unlike DP and USB-C.
+ */
+ return (dp_is_interlane_aligned(align_status) ||
+ (link->skip_fallback_on_link_loss && retries));
+}
+
+uint32_t dp_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t offset,
+ uint8_t retries)
+{
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
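+ /* For the first EQ read on the DPRX hop in non-transparent mode, wait at least the USB4 clock sync time. */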
+ if (offset == 0 && retries == 1 && lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ return max(lt_settings->eq_pattern_time, (uint32_t) DPIA_CLK_SYNC_DELAY);
+ else
+ return dpia_get_eq_aux_rd_interval(link, lt_settings, offset);
+ } else if (is_repeater(lt_settings, offset))
+ return dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ else
+ return lt_settings->eq_pattern_time;
+}
+
+bool dp_check_dpcd_reqeust_status(const struct dc_link *link,
+ enum dc_status status)
+{
+ return (status != DC_OK && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA);
+}
+
enum link_training_result dp_check_link_loss_status(
struct dc_link *link,
const struct link_training_settings *link_training_setting)
@@ -973,13 +1008,17 @@ void repeater_training_done(struct dc_link *link, uint32_t offset)
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
-static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
+static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
{
+ enum dc_status status;
uint8_t sink_status = 0;
uint8_t i;
/* clear training pattern set */
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+ status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
@@ -990,6 +1029,8 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding
fsleep(1000);
}
}
+
+ return LINK_TRAINING_SUCCESS;
}
enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -1013,17 +1054,18 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
return status;
}
-void dpcd_set_training_pattern(
+enum dc_status dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern)
{
+ enum dc_status status;
union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dp_training_pattern_to_dpcd_training_pattern(
link, training_pattern);
- core_link_write_dpcd(
+ status = core_link_write_dpcd(
link,
DP_TRAINING_PATTERN_SET,
&dpcd_pattern.raw,
@@ -1033,6 +1075,8 @@ void dpcd_set_training_pattern(
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+ return status;
}
enum dc_status dpcd_set_link_settings(
@@ -1185,6 +1229,13 @@ void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ dpia_set_tps_notification(
+ link,
+ lt_settings,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET,
+ offset);
+
if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
@@ -1455,7 +1506,8 @@ static enum link_training_result dp_transition_to_video_idle(
*/
if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
msleep(5);
- status = dp_check_link_loss_status(link, lt_settings);
+ if (!link->skip_fallback_on_link_loss)
+ status = dp_check_link_loss_status(link, lt_settings);
}
return status;
}
@@ -1521,7 +1573,9 @@ enum link_training_result dp_perform_link_training(
ASSERT(0);
/* exit training mode */
- dpcd_exit_training_mode(link, encoding);
+ if ((dpcd_exit_training_mode(link, encoding) != LINK_TRAINING_SUCCESS || status == LINK_TRAINING_ABORT) &&
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ dpia_training_abort(link, &lt_settings, 0);
/* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
@@ -1599,8 +1653,7 @@ bool perform_link_training_with_retries(
dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
- /** @todo Consolidate USB4 DP and DPx.x training. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (!link->dc->config.consolidated_dpia_dp_lt && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dpia_perform_link_training(
link,
&pipe_ctx->link_res,
@@ -1629,8 +1682,17 @@ bool perform_link_training_with_retries(
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
+ // Update the verified link settings to the current ones,
+ // because DPIA LT might fall back to a lower link setting.
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+ }
return true;
+ }
}
fail_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 851bd17317a0..0b18aa35c33c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -55,7 +55,7 @@ void dp_set_hw_test_pattern(
uint8_t *custom_pattern,
uint32_t custom_pattern_size);
-void dpcd_set_training_pattern(
+enum dc_status dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern);
@@ -182,4 +182,18 @@ uint32_t dp_translate_training_aux_read_interval(
uint8_t dp_get_nibble_at_index(const uint8_t *buf,
uint32_t index);
+
+bool dp_check_interlane_aligned(union lane_align_status_updated align_status,
+ struct dc_link *link,
+ uint8_t retries);
+
+uint32_t dp_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t offset,
+ uint8_t retries);
+
+bool dp_check_dpcd_reqeust_status(const struct dc_link *link,
+ enum dc_status status);
+
#endif /* __DC_LINK_DP_TRAINING_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 2b4c15b0b407..3bdce32a85e3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -157,6 +157,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
+ enum dc_status status;
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
@@ -216,7 +217,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- dp_get_lane_status_and_lane_adjust(
+ status = dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
@@ -224,6 +225,9 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
dpcd_lane_adjust,
offset);
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
+
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
@@ -273,6 +277,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
+ enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
uint32_t wait_time_microsec;
@@ -308,12 +313,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
dpcd_set_lane_settings(link, lt_settings, offset);
/* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- if (is_repeater(lt_settings, offset))
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ wait_time_microsec = dp_get_eq_aux_rd_interval(link, lt_settings, offset, retries_ch_eq);
dp_wait_for_training_aux_rd_interval(
link,
@@ -322,7 +322,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- dp_get_lane_status_and_lane_adjust(
+ status = dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
@@ -330,6 +330,9 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
dpcd_lane_adjust,
offset);
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
+
/* 5. check CR done*/
if (!dp_is_cr_done(lane_count, dpcd_lane_status))
return dpcd_lane_status[0].bits.CR_DONE_0 ?
@@ -339,7 +342,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
/* 6. check CHEQ done*/
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated))
+ dp_check_interlane_aligned(dpcd_lane_status_updated, link, retries_ch_eq))
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index cd1975c03f38..39e4b7dc9588 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -43,9 +43,6 @@
#define DC_LOGGER \
link->ctx->logger
-/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
-#define DPIA_CLK_SYNC_DELAY 16000
-
/* Extend interval between training status checks for manual testing. */
#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
@@ -566,28 +563,6 @@ static enum link_training_result dpia_training_cr_phase(
return result;
}
-/* Return status read interval during equalization phase. */
-static uint32_t dpia_get_eq_aux_rd_interval(
- const struct dc_link *link,
- const struct link_training_settings *lt_settings,
- uint32_t hop)
-{
- uint32_t wait_time_microsec;
-
- if (hop == DPRX)
- wait_time_microsec = lt_settings->eq_pattern_time;
- else
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
-
- /* Check debug option for extending aux read interval. */
- if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
- wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
-
- return wait_time_microsec;
-}
-
/* Execute equalization phase of link training for specified hop in display
* path in non-transparent mode:
* - driver issues both DPCD and SET_CONFIG transactions.
@@ -936,6 +911,22 @@ static enum link_training_result dpia_training_end(
return result;
}
+/* Return status read interval during equalization phase. */
+uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ /* Check debug option for extending aux read interval. */
+ if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
+ return DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
+ else if (hop == DPRX)
+ return lt_settings->eq_pattern_time;
+ else
+ return dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
+}
+
/* When aborting training of specified hop in display path, clean up by:
* - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET.
* - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
@@ -943,7 +934,7 @@ static enum link_training_result dpia_training_end(
* @param link DPIA link being trained.
* @param hop Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(
+void dpia_training_abort(
struct dc_link *link,
struct link_training_settings *lt_settings,
uint32_t hop)
@@ -968,7 +959,26 @@ static void dpia_training_abort(
core_link_write_dpcd(link, dpcd_tps_offset, &data, 1);
core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1);
core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1);
- core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+
+ if (!link->dc->config.consolidated_dpia_dp_lt)
+ core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+}
+
+void dpia_set_tps_notification(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint8_t pattern,
+ uint32_t hop)
+{
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+
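+ /* Only notify DMUB in non-transparent LTTPR mode and when a real training pattern (not video idle) is set. */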
+ if (lt_settings->lttpr_mode != LTTPR_MODE_NON_TRANSPARENT || pattern == DPCD_TRAINING_PATTERN_VIDEOIDLE)
+ return;
+
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ if (hop != repeater_cnt)
+ dc_process_dmub_dpia_set_tps_notification(link->ctx->dc, link->link_index, pattern);
}
enum link_training_result dpia_perform_link_training(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
index b39fb9faf1c2..9f4eceb494c2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -28,6 +28,9 @@
#define __DC_LINK_DP_TRAINING_DPIA_H__
#include "link_dp_training.h"
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
* DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
@@ -38,4 +41,20 @@ enum link_training_result dpia_perform_link_training(
const struct dc_link_settings *link_setting,
bool skip_video_pattern);
+void dpia_training_abort(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop);
+
+uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop);
+
+void dpia_set_tps_notification(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint8_t pattern,
+ uint32_t offset);
+
#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 46ad684fe192..893a9d9ee870 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -2155,6 +2155,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0)
dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 4c5e722baa3a..da9101b83e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.hdmichar = true,
.dpstream = true,
.symclk32_se = true,
- .symclk32_le = true,
+ .symclk32_le = false,
.symclk_fe = true,
.physymclk = false,
.dpiasymclk = true,
@@ -2133,6 +2133,7 @@ static bool dcn351_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0)
dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 15f7eda903e6..014e8a296f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -813,6 +813,14 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
return skip_easf;
}
+/* Check if video is in fullscreen mode */
+static bool spl_is_video_fullscreen(struct spl_in *spl_in)
+{
+ if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen)
+ return true;
+ return false;
+}
+
static bool spl_get_isharp_en(struct spl_in *spl_in,
struct spl_scratch *spl_scratch)
{
@@ -820,6 +828,7 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
int vratio = 0;
int hratio = 0;
struct spl_taps taps = spl_scratch->scl_data.taps;
+ bool fullscreen = spl_is_video_fullscreen(spl_in);
/* Return if adaptive sharpness is disabled */
if (spl_in->adaptive_sharpness.enable == false)
@@ -835,9 +844,18 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
// Scaling is up to 1:1 (no scaling) or upscaling
/*
- * Apply sharpness to all RGB surfaces and to
- * NV12/P010 surfaces
+ * Apply sharpness to RGB and YUV (NV12/P010)
+ * surfaces based on policy setting
*/
+ if (!spl_is_yuv420(spl_in->basic_in.format) &&
+ (spl_in->debug.sharpen_policy == SHARPEN_YUV))
+ return enable_isharp;
+ else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
+ (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
+ return enable_isharp;
+ else if (!spl_in->is_fullscreen &&
+ spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL)
+ return enable_isharp;
/*
* Apply sharpness if supports horizontal taps 4,6 AND
@@ -1155,14 +1173,19 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *sp
}
/* Calculate C0-C3 coefficients based on HDR_mult */
-static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t hdr_multx100)
+static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t sdr_white_level_nits)
{
struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult;
struct spl_fixed31_32 c0_calc, c1_calc, c2_calc;
struct spl_custom_float_format fmt;
+ uint32_t hdr_multx100_int;
- SPL_ASSERT(hdr_multx100);
- hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100, 100LL);
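+	/* Derive the HDR multiplier (x100) from the SDR white level in nits; 80 nits maps to 1.0x */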
+ if ((sdr_white_level_nits >= 80) && (sdr_white_level_nits <= 480))
+ hdr_multx100_int = sdr_white_level_nits * 100 / 80;
+ else
+ hdr_multx100_int = 100; /* default for 80 nits otherwise */
+
+ hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100_int, 100LL);
c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL);
c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL);
c2_mult = spl_fixpt_from_fraction(722LL, 10000LL);
@@ -1191,7 +1214,7 @@ static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint3
static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v,
bool enable_easf_h, enum linear_light_scaling lls_pref,
enum spl_pixel_format format, enum system_setup setup,
- uint32_t hdr_multx100)
+ uint32_t sdr_white_level_nits)
{
struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
if (enable_easf_v) {
@@ -1499,7 +1522,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
dscl_prog_data->easf_ltonl_en = 1; // Linear input
if ((setup == HDR_L) && (spl_is_rgb8(format))) {
/* Calculate C0-C3 coefficients based on HDR multiplier */
- spl_calculate_c0_c3_hdr(dscl_prog_data, hdr_multx100);
+ spl_calculate_c0_c3_hdr(dscl_prog_data, sdr_white_level_nits);
} else { // HDR_L ( DWM ) and SDR_L
dscl_prog_data->easf_matrix_c0 =
0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720)
@@ -1557,7 +1580,7 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
struct adaptive_sharpness adp_sharpness, bool enable_isharp,
enum linear_light_scaling lls_pref, enum spl_pixel_format format,
const struct spl_scaler_data *data, struct spl_fixed31_32 ratio,
- enum system_setup setup)
+ enum system_setup setup, enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
/* Turn off sharpener if not required */
if (!enable_isharp) {
@@ -1565,6 +1588,11 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
return;
}
+ spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness,
+ scale_to_sharpness_policy);
+ dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
+ dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
+
dscl_prog_data->isharp_en = 1; // ISHARP_EN
// Set ISHARP_NOISEDET_MODE if htaps = 6-tap
if (data->taps.h_taps == 6) {
@@ -1662,11 +1690,6 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
}
-
- spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness);
- dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
- dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
-
// Program the nldelta soft clip values
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->isharp_nldelta_sclip.enable_p = 0; /* ISHARP_NLDELTA_SCLIP_EN_P */
@@ -1750,7 +1773,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Set EASF
spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref,
- spl_in->basic_in.format, setup, spl_in->hdr_multx100);
+ spl_in->basic_in.format, setup, spl_in->sdr_white_level_nits);
// Set iSHARP
vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert);
@@ -1761,7 +1784,8 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz;
spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp,
- spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup);
+ spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup,
+ spl_in->debug.scale_to_sharpness_policy);
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
index 33712f50d303..e0572252c640 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
@@ -500,6 +500,15 @@ struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] =
},
};
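+/* Scale ratio thresholds (numerator/denominator pairs) and the matching sharpness level-down adjustment */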
+struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = {
+ {1125, 1000, 0},
+ {11, 10, 1},
+ {1075, 1000, 2},
+ {105, 100, 3},
+ {1025, 1000, 4},
+ {1, 1, 5},
+};
+
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
return filter_isharp_1D_lut_0;
@@ -541,19 +550,72 @@ uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
return filter_isharp_bs_3tap_64p_s1_12;
}
-static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level, enum system_setup setup,
- struct spl_sharpness_range sharpness_range)
+static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio)
+{
+ int j;
+ struct spl_fixed31_32 ratio_level;
+ struct scale_ratio_to_sharpness_level_adj *lookup_ptr;
+ unsigned int sharpness_level_down_adj;
+
+ /*
+ * Adjust sharpness level based on current scaling ratio
+ *
+ * We have 5 discrete scaling ratios which we will use to adjust the
+ * sharpness level down by 1 as we pass each ratio. The ratios
+ * are
+ *
+ * 1.125 upscale and higher - no adj
+ * 1.100 - under 1.125 - adj level down 1
+ * 1.075 - under 1.100 - adj level down 2
+ * 1.050 - under 1.075 - adj level down 3
+ * 1.025 - under 1.050 - adj level down 4
+ * 1.000 - under 1.025 - adj level down 5
+ *
+ */
+ j = 0;
+ sharpness_level_down_adj = 0;
+ lookup_ptr = sharpness_level_adj;
+ while (j < NUM_SHARPNESS_ADJ_LEVELS) {
+ ratio_level = spl_fixpt_from_fraction(lookup_ptr->ratio_numer,
+ lookup_ptr->ratio_denom);
+ if (ratio.value >= ratio_level.value) {
+ sharpness_level_down_adj = lookup_ptr->level_down_adj;
+ break;
+ }
+ lookup_ptr++;
+ j++;
+ }
+ return sharpness_level_down_adj;
+}
+
+static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio,
+ int discrete_sharpness_level, enum system_setup setup,
+ struct spl_sharpness_range sharpness_range,
+ enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
unsigned int sharpness_level = 0;
+ unsigned int sharpness_level_down_adj = 0;
int min_sharpness, max_sharpness, mid_sharpness;
+ /*
+ * Adjust sharpness level if policy requires we adjust it based on
+ * scale ratio. Based on scale ratio, we may adjust the sharpness
+	 * level down by a certain number of steps. We never select an
+	 * effective sharpness of 0, so the lowest discrete level is 1 when
+	 * min_sharpness is 0 and 0 otherwise.
+	 *
+	 * If the policy is not required, this code may be removed at a
+	 * later date.
+ */
switch (setup) {
case HDR_L:
min_sharpness = sharpness_range.hdr_rgb_min;
max_sharpness = sharpness_range.hdr_rgb_max;
mid_sharpness = sharpness_range.hdr_rgb_mid;
+ if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
break;
case HDR_NL:
/* currently no use case, use Non-linear SDR values for now */
@@ -561,15 +623,26 @@ static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level,
min_sharpness = sharpness_range.sdr_yuv_min;
max_sharpness = sharpness_range.sdr_yuv_max;
mid_sharpness = sharpness_range.sdr_yuv_mid;
+ if (scale_to_sharpness_policy >= SCALE_TO_SHARPNESS_ADJ_YUV)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
break;
case SDR_L:
default:
min_sharpness = sharpness_range.sdr_rgb_min;
max_sharpness = sharpness_range.sdr_rgb_max;
mid_sharpness = sharpness_range.sdr_rgb_mid;
+ if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
break;
}
+ if ((min_sharpness == 0) && (sharpness_level_down_adj >= discrete_sharpness_level))
+ discrete_sharpness_level = 1;
+ else if (sharpness_level_down_adj >= discrete_sharpness_level)
+ discrete_sharpness_level = 0;
+ else
+ discrete_sharpness_level -= sharpness_level_down_adj;
+
int lower_half_step_size = (mid_sharpness - min_sharpness) / 5;
int upper_half_step_size = (max_sharpness - mid_sharpness) / 5;
@@ -584,7 +657,7 @@ static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level,
}
void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
- struct adaptive_sharpness sharpness)
+ struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst;
struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level;
@@ -594,8 +667,9 @@ void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, en
uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE];
/* Custom sharpnessX1000 value */
- unsigned int sharpnessX1000 = spl_calculate_sharpness_level(sharpness.sharpness_level,
- setup, sharpness.sharpness_range);
+ unsigned int sharpnessX1000 = spl_calculate_sharpness_level(ratio,
+ sharpness.sharpness_level, setup,
+ sharpness.sharpness_range, scale_to_sharpness_policy);
sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000);
/*
@@ -606,7 +680,6 @@ void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, en
(filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000))
return;
-
/*
* Calculate LUT_128_gained with this equation:
*
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
index fe0b12571f2c..afcc66206ca2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
@@ -20,11 +20,11 @@ uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
-struct scale_ratio_to_sharpness_level_lookup {
+#define NUM_SHARPNESS_ADJ_LEVELS 6
+struct scale_ratio_to_sharpness_level_adj {
unsigned int ratio_numer;
unsigned int ratio_denom;
- unsigned int sharpness_numer;
- unsigned int sharpness_denom;
+ unsigned int level_down_adj; /* adjust sharpness level down */
};
struct isharp_1D_lut_pregen {
@@ -45,6 +45,7 @@ void spl_init_blur_scale_coeffs(void);
void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data);
-void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, struct adaptive_sharpness sharpness);
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
+ struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy);
uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 85b19ebe2c57..2a74ff5fdfdb 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -487,6 +487,17 @@ enum linear_light_scaling { // convert it in translation logic
LLS_PREF_YES,
LLS_PREF_NO
};
+enum sharpen_policy {
+ SHARPEN_ALWAYS = 0,
+ SHARPEN_YUV = 1,
+ SHARPEN_RGB_FULLSCREEN_YUV = 2,
+ SHARPEN_FULLSCREEN_ALL = 3
+};
+enum scale_to_sharpness_policy {
+ NO_SCALE_TO_SHARPNESS_ADJ = 0,
+ SCALE_TO_SHARPNESS_ADJ_YUV = 1,
+ SCALE_TO_SHARPNESS_ADJ_ALL = 2
+};
struct spl_funcs {
void (*spl_calc_lb_num_partitions)
(bool alpha_en,
@@ -499,6 +510,8 @@ struct spl_funcs {
struct spl_debug {
int visual_confirm_base_offset;
int visual_confirm_dpp_offset;
+ enum sharpen_policy sharpen_policy;
+ enum scale_to_sharpness_policy scale_to_sharpness_policy;
};
struct spl_in {
@@ -518,7 +531,7 @@ struct spl_in {
bool is_hdr_on;
int h_active;
int v_active;
- int hdr_multx100;
+ int sdr_white_level_nits;
};
// end of SPL inputs
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index cd70453aeae0..fe5b6f7a3eb1 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -300,6 +300,7 @@ struct dmub_srv_hw_params {
enum dmub_ips_disable_type disable_ips;
bool disallow_phy_access;
bool disable_sldo_opt;
+ bool enable_non_transparent_setconfig;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e20c220aa8b4..ebcf68bfae2b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -682,7 +682,7 @@ union dmub_fw_boot_options {
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved0: 1;
+	uint32_t enable_non_transparent_setconfig: 1; /* 1 if dpia uses conventional dp lt flow */
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
@@ -1308,6 +1308,7 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
+ DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3,
};
/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
@@ -2139,6 +2140,24 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SET_TPS_NOTIFICATION command.
+ */
+struct dmub_cmd_tps_notification_data {
+ uint8_t instance; /* DPIA instance */
+ uint8_t tps; /* requested training pattern */
+ uint8_t reserved1;
+ uint8_t reserved2;
+};
+
+/**
+ * DMUB command structure for SET_TPS_NOTIFICATION command.
+ */
+struct dmub_rb_cmd_set_tps_notification {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_tps_notification_data tps_notification; /* set tps_notification data */
+};
+
+/**
* DMUB command structure for DPIA HPD int enable control.
*/
struct dmub_rb_cmd_dpia_hpd_int_enable {
@@ -5305,6 +5324,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots;
/**
+ * Definition of a DMUB_CMD__DPIA_SET_TPS_NOTIFICATION command.
+ */
+ struct dmub_rb_cmd_set_tps_notification set_tps_notification;
+ /**
* Definition of a DMUB_CMD__EDID_CEA command.
*/
struct dmub_rb_cmd_edid_cea edid_cea;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 746696b6f09a..2ccad79053c5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -425,6 +425,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.ips_disable = params->disable_ips;
boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
+ boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index a40e6590215a..bbd259cea4f4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -134,7 +134,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
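+	/* Add half the divisor before dividing so v_total rounds to nearest instead of truncating */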
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total), 1000000);
+ stream->timing.h_total) + 500000, 1000000);
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 745fd052840d..3f91926a50e9 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,7 +85,7 @@ enum amd_apu_flags {
* @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
* @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
* @AMD_IP_BLOCK_TYPE_VPE: Video Processing Engine
-* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Schduler for Multimedia
+* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Scheduler for Multimedia
* @AMD_IP_BLOCK_TYPE_ISP: Image Signal Processor
* @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types
*/
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 7744ca3ef4b1..e3e635a31b8a 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -71,6 +71,11 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
+struct kfd_cu_occupancy {
+ u32 wave_cnt;
+ u32 doorbell_off;
+};
+
/**
* enum kfd_sched_policy
*
@@ -313,8 +318,9 @@ struct kfd2kgd_calls {
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
- void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
- int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
+ void (*get_cu_occupancy)(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
uint32_t inst);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 0b3c2f54a343..822c6425d90e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xC
+#define SMU_METRICS_TABLE_VERSION 0xD
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -227,6 +227,10 @@ typedef struct __attribute__((packed, aligned(4))) {
// PCIE LINK Speed and width
uint32_t PCIeLinkSpeed;
uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a887ab945dfa..1d024b122b0c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2569,10 +2569,14 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- return smu_cmn_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
workload_mask,
NULL);
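+	/* Cache the workload mask only once the SMU firmware has accepted it */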
+ if (!ret)
+ smu->workload_mask = workload_mask;
+
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9974c9f8135e..55ed6247eb61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2107,8 +2107,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_v13_0_6_request_i2c_xfer(smu, req);
- if (r)
- goto fail;
+ if (r) {
+ /* Retry once, in case of an i2c collision */
+ r = smu_v13_0_6_request_i2c_xfer(smu, req);
+ if (r)
+ goto fail;
+ }
for (c = i = 0; i < num_msgs; i++) {
if (!(msg[i].flags & I2C_M_RD)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 7bc95c404377..b891a5e0a396 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2501,8 +2501,11 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
+
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ else
+ smu->workload_mask = (1 << workload_type);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 43820d7d2c54..5899d01fa73d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1861,10 +1861,14 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- return smu_cmn_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
1 << workload_type,
NULL);
+ if (!ret)
+ smu->workload_mask = 1 << workload_type;
+
+ return ret;
}
static int smu_v14_0_2_baco_enter(struct smu_context *smu)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 07e493d14d0c..ad1dc638c83b 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -103,7 +103,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
- * .llseek = no_llseek,
* .mmap = drm_gem_mmap,
* };
*
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 00fbe9f8c03a..b1c294236cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -916,7 +916,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
* instead of a specific AUX_IO_<port> reference without powering up any
* extra wells.
*/
- if (intel_encoder_can_psr(&dig_port->base))
+ if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state))
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
else if (DISPLAY_VER(i915) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a1fcedfd404b..90fa73575feb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -531,6 +531,10 @@ static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
/* The values must be in increasing order */
+ static const int bmg_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
+ 810000, 1000000, 1350000,
+ };
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
810000, 1000000, 2000000,
@@ -561,8 +565,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
intel_dp->source_rates || intel_dp->num_source_rates);
if (DISPLAY_VER(dev_priv) >= 14) {
- source_rates = mtl_rates;
- size = ARRAY_SIZE(mtl_rates);
+ if (IS_BATTLEMAGE(dev_priv)) {
+ source_rates = bmg_rates;
+ size = ARRAY_SIZE(bmg_rates);
+ } else {
+ source_rates = mtl_rates;
+ size = ARRAY_SIZE(mtl_rates);
+ }
max_rate = mtl_max_source_rate(intel_dp);
} else if (DISPLAY_VER(dev_priv) >= 11) {
source_rates = icl_rates;
@@ -4058,6 +4067,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
drm_dp_is_branch(intel_dp->dpcd));
intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
/*
* Read the eDP display control registers.
*
@@ -4171,6 +4183,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
intel_dp_update_sink_caps(intel_dp);
}
@@ -6922,9 +6937,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- intel_dp->colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
-
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 1f83b3b67ea6..136a0d6ca970 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -203,6 +203,25 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder)
return false;
}
+bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
+ * the output is enabled. For non-eDP outputs the main link is always
+ * on, hence it doesn't require the HW initiated AUX wake-up signaling used
+ * for eDP.
+ *
+ * TODO:
+ * - Consider leaving AUX IO disabled for eDP / PR as well, in case
+ * the ALPM with main-link off mode is not enabled.
+ * - Leave AUX IO enabled for DP / PR, once support for ALPM with
+ * main-link off mode is added for it and this mode gets enabled.
+ */
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ intel_encoder_can_psr(encoder);
+}
+
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -2784,13 +2803,6 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
EDP_PSR_STATUS_STATE_MASK, 50);
}
-static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
-{
- return intel_dp_is_edp(intel_dp) ?
- _psr2_ready_for_pipe_update_locked(intel_dp) :
- _psr1_ready_for_pipe_update_locked(intel_dp);
-}
-
/**
* intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
* @new_crtc_state: new CRTC state
@@ -2813,12 +2825,10 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
lockdep_assert_held(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled)
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
continue;
- if (intel_dp->psr.panel_replay_enabled)
- ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
- else if (intel_dp->psr.sel_update_enabled)
+ if (intel_dp->psr.sel_update_enabled)
ret = _psr2_ready_for_pipe_update_locked(intel_dp);
else
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 4e09c10908e4..6eb5f15f674f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -25,6 +25,8 @@ struct intel_plane_state;
(intel_dp)->psr.source_panel_replay_support)
bool intel_encoder_can_psr(struct intel_encoder *encoder);
+bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 025a79fe5920..2406cda75b7b 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3751,7 +3751,6 @@ static int i915_perf_release(struct inode *inode, struct file *file)
static const struct file_operations fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.release = i915_perf_release,
.poll = i915_perf_poll,
.read = i915_perf_read,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 3d3da79fec2a..d3c7889aaf26 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -192,7 +192,6 @@ static const struct file_operations perf_debugfs_fops = {
.owner = THIS_MODULE,
.open = perf_open,
.read = perf_read,
- .llseek = no_llseek,
.release = perf_release,
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index ca44fd291c5b..39138e190cb9 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -227,7 +227,6 @@ static const struct file_operations rd_debugfs_fops = {
.owner = THIS_MODULE,
.open = rd_open,
.read = rd_read,
- .llseek = no_llseek,
.release = rd_release,
};
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index a13e0b3a169e..ef777dbdf4ec 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -65,7 +65,8 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
u32 size = drm_suballoc_size(bb->bo);
- bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
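+	/* Only append MI_BATCH_BUFFER_END if the batch is not already terminated */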
+ if (bb->len == 0 || bb->cs[bb->len - 1] != MI_BATCH_BUFFER_END)
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 06911e9a3bf5..f379df3a12bf 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -2320,6 +2320,20 @@ void xe_bo_put_commit(struct llist_head *deferred)
drm_gem_object_free(&bo->ttm.base.refcount);
}
+void xe_bo_put(struct xe_bo *bo)
+{
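+	/* Annotate the locks the final put may acquire so lockdep can catch unsafe callers */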
+ might_sleep();
+ if (bo) {
+#ifdef CONFIG_PROC_FS
+ if (bo->client)
+ might_lock(&bo->client->bos_lock);
+#endif
+ if (bo->ggtt_node && bo->ggtt_node->ggtt)
+ might_lock(&bo->ggtt_node->ggtt->lock);
+ drm_gem_object_put(&bo->ttm.base);
+ }
+}
+
/**
* xe_bo_dumb_create - Create a dumb bo as backing for a fb
* @file_priv: ...
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index dbfb3209615d..6e4be52306df 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -126,11 +126,7 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
return bo;
}
-static inline void xe_bo_put(struct xe_bo *bo)
-{
- if (bo)
- drm_gem_object_put(&bo->ttm.base);
-}
+void xe_bo_put(struct xe_bo *bo);
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 95a05c5bc897..c4add8b38bbd 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -168,15 +168,10 @@ static void bo_meminfo(struct xe_bo *bo,
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
u64 sz = bo->size;
- u32 mem_type;
+ u32 mem_type = bo->ttm.resource->mem_type;
xe_bo_assert_held(bo);
- if (bo->placement.placement)
- mem_type = bo->placement.placement->mem_type;
- else
- mem_type = XE_PL_TT;
-
if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
stats[mem_type].shared += sz;
else
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 730eec07795e..00af059a8971 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -212,6 +212,12 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
* TODO: Change to read lock? Using write lock for simplicity.
*/
down_write(&vm->lock);
+
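+	/* The VM may have been closed while the fault was queued; bail out early */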
+ if (xe_vm_is_closed(vm)) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
vma = lookup_vma(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index c3e6b51f7a09..42116b167c98 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -18,8 +18,10 @@
*/
#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
-#define GUC_SUBMIT_VER(guc) MAKE_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY])
-#define GUC_FIRMWARE_VER(guc) MAKE_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE])
+#define GUC_SUBMIT_VER(guc) \
+ MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY])
+#define GUC_FIRMWARE_VER(guc) \
+ MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE])
struct drm_printer;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 0369cc016f6a..eae38a49ee8e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1263,7 +1263,6 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations xe_oa_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.release = xe_oa_release,
.poll = xe_oa_poll,
.read = xe_oa_read,
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 5bcd59190353..80ba2fc78837 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -182,6 +182,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
offset = offset_hi << 32; /* HW view bits 39:32 */
offset |= offset_lo << 6; /* HW view bits 31:6 */
offset *= num_enabled; /* convert to SW view */
+ offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */
/* We don't expect any holes */
xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index ab81ceceb337..c3f056d28b01 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -21,6 +21,8 @@ config GREYBUS_BEAGLEPLAY
tristate "Greybus BeaglePlay driver"
depends on SERIAL_DEV_BUS
select CRC_CCITT
+ select FW_LOADER
+ select FW_UPLOAD
help
Select this option if you have a BeaglePlay where CC1352
co-processor acts as Greybus SVC.
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 33f8fad70260..3a1ade84737c 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -6,21 +6,19 @@
* Copyright (c) 2023 BeagleBoard.org Foundation
*/
-#include <linux/gfp.h>
+#include <asm-generic/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/gpio/consumer.h>
+#include <linux/firmware.h>
#include <linux/greybus.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/printk.h>
#include <linux/serdev.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/greybus/hd.h>
-#include <linux/init.h>
-#include <linux/device.h>
#include <linux/crc-ccitt.h>
#include <linux/circ_buf.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
+
+#define CC1352_FIRMWARE_SIZE (704 * 1024)
+#define CC1352_BOOTLOADER_TIMEOUT 2000
+#define CC1352_BOOTLOADER_ACK 0xcc
+#define CC1352_BOOTLOADER_NACK 0x33
#define RX_HDLC_PAYLOAD 256
#define CRC_LEN 2
@@ -57,6 +55,17 @@
* @rx_buffer_len: length of receive buffer filled.
* @rx_buffer: hdlc frame receive buffer
* @rx_in_esc: hdlc rx flag to indicate ESC frame
+ *
+ * @fwl: underlying firmware upload device
+ * @bootloader_backdoor_gpio: cc1352p7 boot gpio
+ * @rst_gpio: cc1352p7 reset gpio
+ * @flashing_mode: flag to indicate that flashing is currently in progress
+ * @fwl_ack_com: completion to signal an Ack/Nack
+ * @fwl_ack: Ack/Nack byte received
+ * @fwl_cmd_response_com: completion to signal a bootloader command response
+ * @fwl_cmd_response: bootloader command response data
+ * @fwl_crc32: crc32 of firmware to flash
+ * @fwl_reset_addr: flag to indicate if we need to send COMMAND_DOWNLOAD again
*/
struct gb_beagleplay {
struct serdev_device *sd;
@@ -72,6 +81,17 @@ struct gb_beagleplay {
u16 rx_buffer_len;
bool rx_in_esc;
u8 rx_buffer[MAX_RX_HDLC];
+
+ struct fw_upload *fwl;
+ struct gpio_desc *bootloader_backdoor_gpio;
+ struct gpio_desc *rst_gpio;
+ bool flashing_mode;
+ struct completion fwl_ack_com;
+ u8 fwl_ack;
+ struct completion fwl_cmd_response_com;
+ u32 fwl_cmd_response;
+ u32 fwl_crc32;
+ bool fwl_reset_addr;
};
/**
@@ -100,6 +120,87 @@ struct hdlc_greybus_frame {
u8 payload[];
} __packed;
+/**
+ * enum cc1352_bootloader_cmd: CC1352 Bootloader Commands
+ *
+ * @COMMAND_DOWNLOAD: Prepares flash programming
+ * @COMMAND_GET_STATUS: Returns the status of the last command that was issued
+ * @COMMAND_SEND_DATA: Transfers data and programs flash
+ * @COMMAND_RESET: Performs a system reset
+ * @COMMAND_CRC32: Calculates CRC32 over a specified memory area
+ * @COMMAND_BANK_ERASE: Performs an erase of all of the customer-accessible
+ * flash sectors not protected by FCFG1 and CCFG
+ * writeprotect bits.
+ *
+ * CC1352 Bootloader serial bus commands
+ */
+enum cc1352_bootloader_cmd {
+ COMMAND_DOWNLOAD = 0x21,
+ COMMAND_GET_STATUS = 0x23,
+ COMMAND_SEND_DATA = 0x24,
+ COMMAND_RESET = 0x25,
+ COMMAND_CRC32 = 0x27,
+ COMMAND_BANK_ERASE = 0x2c,
+};
+
+/**
+ * enum cc1352_bootloader_status: CC1352 Bootloader COMMAND_GET_STATUS response
+ *
+ * @COMMAND_RET_SUCCESS: Status for successful command
+ * @COMMAND_RET_UNKNOWN_CMD: Status for unknown command
+ * @COMMAND_RET_INVALID_CMD: Status for invalid command (in other words,
+ * incorrect packet size)
+ * @COMMAND_RET_INVALID_ADR: Status for invalid input address
+ * @COMMAND_RET_FLASH_FAIL: Status for failing flash erase or program operation
+ */
+enum cc1352_bootloader_status {
+ COMMAND_RET_SUCCESS = 0x40,
+ COMMAND_RET_UNKNOWN_CMD = 0x41,
+ COMMAND_RET_INVALID_CMD = 0x42,
+ COMMAND_RET_INVALID_ADR = 0x43,
+ COMMAND_RET_FLASH_FAIL = 0x44,
+};
+
+/**
+ * struct cc1352_bootloader_packet: CC1352 Bootloader Request Packet
+ *
+ * @len: length of packet + optional request data
+ * @checksum: 8-bit checksum excluding len
+ * @cmd: bootloader command
+ */
+struct cc1352_bootloader_packet {
+ u8 len;
+ u8 checksum;
+ u8 cmd;
+} __packed;
+
+#define CC1352_BOOTLOADER_PKT_MAX_SIZE \
+ (U8_MAX - sizeof(struct cc1352_bootloader_packet))
+
+/**
+ * struct cc1352_bootloader_download_cmd_data: CC1352 Bootloader COMMAND_DOWNLOAD request data
+ *
+ * @addr: address to start programming data into
+ * @size: size of data that will be sent
+ */
+struct cc1352_bootloader_download_cmd_data {
+ __be32 addr;
+ __be32 size;
+} __packed;
+
+/**
+ * struct cc1352_bootloader_crc32_cmd_data: CC1352 Bootloader COMMAND_CRC32 request data
+ *
+ * @addr: address where crc32 calculation starts
+ * @size: number of bytes comprised by crc32 calculation
+ * @read_repeat: number of read repeats for each data location
+ */
+struct cc1352_bootloader_crc32_cmd_data {
+ __be32 addr;
+ __be32 size;
+ __be32 read_repeat;
+} __packed;
+
static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len)
{
struct hdlc_greybus_frame *gb_frame = (struct hdlc_greybus_frame *)buf;
@@ -331,11 +432,135 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
flush_work(&bg->tx_work);
}
+/**
+ * csum8: Calculate 8-bit checksum on data
+ *
+ * @data: bytes to calculate 8-bit checksum of
+ * @size: number of bytes
+ * @base: starting value for checksum
+ */
+static u8 csum8(const u8 *data, size_t size, u8 base)
+{
+ size_t i;
+ u8 sum = base;
+
+ for (i = 0; i < size; ++i)
+ sum += data[i];
+
+ return sum;
+}
+
+static void cc1352_bootloader_send_ack(struct gb_beagleplay *bg)
+{
+ static const u8 ack[] = { 0x00, CC1352_BOOTLOADER_ACK };
+
+ serdev_device_write_buf(bg->sd, ack, sizeof(ack));
+}
+
+static void cc1352_bootloader_send_nack(struct gb_beagleplay *bg)
+{
+ static const u8 nack[] = { 0x00, CC1352_BOOTLOADER_NACK };
+
+ serdev_device_write_buf(bg->sd, nack, sizeof(nack));
+}
+
+/**
+ * cc1352_bootloader_pkt_rx: Process a CC1352 Bootloader Packet
+ *
+ * @bg: beagleplay greybus driver
+ * @data: packet buffer
+ * @count: packet buffer size
+ *
+ * @return: number of bytes processed
+ *
+ * Here are the steps to successfully receive a packet from cc1352 bootloader
+ * according to the docs:
+ * 1. Wait for nonzero data to be returned from the device. This is important
+ * as the device may send zero bytes between a sent and a received data
+ * packet. The first nonzero byte received is the size of the packet that is
+ * being received.
+ * 2. Read the next byte, which is the checksum for the packet.
+ * 3. Read the data bytes from the device. During the data phase, packet size
+ * minus 2 bytes is sent.
+ * 4. Calculate the checksum of the data bytes and verify it matches the
+ * checksum received in the packet.
+ * 5. Send an acknowledge byte or a not-acknowledge byte to the device to
+ * indicate the successful or unsuccessful reception of the packet.
+ */
+static int cc1352_bootloader_pkt_rx(struct gb_beagleplay *bg, const u8 *data,
+ size_t count)
+{
+ bool is_valid = false;
+
+ switch (data[0]) {
+ /* Skip 0x00 bytes. */
+ case 0x00:
+ return 1;
+ case CC1352_BOOTLOADER_ACK:
+ case CC1352_BOOTLOADER_NACK:
+ WRITE_ONCE(bg->fwl_ack, data[0]);
+ complete(&bg->fwl_ack_com);
+ return 1;
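+	/* Size-3 packet: single response byte whose checksum is the byte itself */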
+ case 3:
+ if (count < 3)
+ return 0;
+ is_valid = data[1] == data[2];
+ WRITE_ONCE(bg->fwl_cmd_response, (u32)data[2]);
+ break;
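+	/* Size-6 packet: 32-bit big-endian response (e.g. CRC32), checksummed over its four bytes */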
+ case 6:
+ if (count < 6)
+ return 0;
+ is_valid = csum8(&data[2], sizeof(__be32), 0) == data[1];
+ WRITE_ONCE(bg->fwl_cmd_response, get_unaligned_be32(&data[2]));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (is_valid) {
+ cc1352_bootloader_send_ack(bg);
+ complete(&bg->fwl_cmd_response_com);
+ } else {
+ dev_warn(&bg->sd->dev,
+ "Dropping bootloader packet with invalid checksum");
+ cc1352_bootloader_send_nack(bg);
+ }
+
+ return data[0];
+}
+
+static size_t cc1352_bootloader_rx(struct gb_beagleplay *bg, const u8 *data,
+ size_t count)
+{
+ int ret;
+ size_t off = 0;
+
+ memcpy(bg->rx_buffer + bg->rx_buffer_len, data, count);
+ bg->rx_buffer_len += count;
+
+ do {
+ ret = cc1352_bootloader_pkt_rx(bg, bg->rx_buffer + off,
+ bg->rx_buffer_len - off);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Invalid Packet");
+ off += ret;
+ } while (ret > 0 && off < count);
+
+ bg->rx_buffer_len -= off;
+ memmove(bg->rx_buffer, bg->rx_buffer + off, bg->rx_buffer_len);
+
+ return count;
+}
+
static size_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
size_t count)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
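+	/* While the CC1352 is being flashed, incoming bytes are bootloader packets rather than HDLC frames */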
+ if (READ_ONCE(bg->flashing_mode))
+ return cc1352_bootloader_rx(bg, data, count);
+
return hdlc_rx(bg, data, count);
}
@@ -343,7 +568,8 @@ static void gb_tty_wakeup(struct serdev_device *serdev)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
- schedule_work(&bg->tx_work);
+ if (!READ_ONCE(bg->flashing_mode))
+ schedule_work(&bg->tx_work);
}
static struct serdev_device_ops gb_beagleplay_ops = {
@@ -412,6 +638,195 @@ static void gb_beagleplay_stop_svc(struct gb_beagleplay *bg)
hdlc_tx_frames(bg, ADDRESS_CONTROL, 0x03, &payload, 1);
}
+static int cc1352_bootloader_wait_for_ack(struct gb_beagleplay *bg)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_ack_com, msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire ack semaphore");
+
+ switch (READ_ONCE(bg->fwl_ack)) {
+ case CC1352_BOOTLOADER_ACK:
+ return 0;
+ case CC1352_BOOTLOADER_NACK:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cc1352_bootloader_sync(struct gb_beagleplay *bg)
+{
+ static const u8 sync_bytes[] = { 0x55, 0x55 };
+
+ serdev_device_write_buf(bg->sd, sync_bytes, sizeof(sync_bytes));
+ return cc1352_bootloader_wait_for_ack(bg);
+}
+
+static int cc1352_bootloader_get_status(struct gb_beagleplay *bg)
+{
+ int ret;
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_GET_STATUS,
+ .cmd = COMMAND_GET_STATUS
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_cmd_response_com,
+ msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire last status semaphore");
+
+ switch (READ_ONCE(bg->fwl_cmd_response)) {
+ case COMMAND_RET_SUCCESS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cc1352_bootloader_erase(struct gb_beagleplay *bg)
+{
+ int ret;
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_BANK_ERASE,
+ .cmd = COMMAND_BANK_ERASE
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ return cc1352_bootloader_get_status(bg);
+}
+
+static int cc1352_bootloader_reset(struct gb_beagleplay *bg)
+{
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_RESET,
+ .cmd = COMMAND_RESET
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+
+ return cc1352_bootloader_wait_for_ack(bg);
+}
+
+/**
+ * cc1352_bootloader_empty_pkt: Calculate the number of empty bytes in the current packet
+ *
+ * @data: packet bytes array to check
+ * @size: number of bytes in array
+ */
+static size_t cc1352_bootloader_empty_pkt(const u8 *data, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size && data[i] == 0xff; ++i)
+ continue;
+
+ return i;
+}
+
+static int cc1352_bootloader_crc32(struct gb_beagleplay *bg, u32 *crc32)
+{
+ int ret;
+ static const struct cc1352_bootloader_crc32_cmd_data cmd_data = {
+ .addr = 0, .size = cpu_to_be32(704 * 1024), .read_repeat = 0
+ };
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + sizeof(cmd_data),
+ .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data),
+ COMMAND_CRC32),
+ .cmd = COMMAND_CRC32
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data,
+ sizeof(cmd_data));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_cmd_response_com,
+ msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire last status semaphore");
+
+ *crc32 = READ_ONCE(bg->fwl_cmd_response);
+
+ return 0;
+}
+
+static int cc1352_bootloader_download(struct gb_beagleplay *bg, u32 size,
+ u32 addr)
+{
+ int ret;
+ const struct cc1352_bootloader_download_cmd_data cmd_data = {
+ .addr = cpu_to_be32(addr),
+ .size = cpu_to_be32(size),
+ };
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + sizeof(cmd_data),
+ .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data),
+ COMMAND_DOWNLOAD),
+ .cmd = COMMAND_DOWNLOAD
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data,
+ sizeof(cmd_data));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ return cc1352_bootloader_get_status(bg);
+}
+
+static int cc1352_bootloader_send_data(struct gb_beagleplay *bg, const u8 *data,
+ size_t size)
+{
+ int ret, rem = min(size, CC1352_BOOTLOADER_PKT_MAX_SIZE);
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + rem,
+ .checksum = csum8(data, rem, COMMAND_SEND_DATA),
+ .cmd = COMMAND_SEND_DATA
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, data, rem);
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = cc1352_bootloader_get_status(bg);
+ if (ret < 0)
+ return ret;
+
+ return rem;
+}
+
static void gb_greybus_deinit(struct gb_beagleplay *bg)
{
gb_hd_del(bg->gb_hd);
@@ -442,6 +857,157 @@ free_gb_hd:
return ret;
}
+static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload,
+ const u8 *data, u32 size)
+{
+ int ret;
+ u32 curr_crc32;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ dev_info(&bg->sd->dev, "CC1352 Start Flashing...");
+
+ if (size != CC1352_FIRMWARE_SIZE)
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+
+ /* Might involve network calls */
+ gb_greybus_deinit(bg);
+ msleep(5 * MSEC_PER_SEC);
+
+ gb_beagleplay_stop_svc(bg);
+ msleep(200);
+ flush_work(&bg->tx_work);
+
+ serdev_device_wait_until_sent(bg->sd, CC1352_BOOTLOADER_TIMEOUT);
+
+ WRITE_ONCE(bg->flashing_mode, true);
+
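+	/* Sequence the reset and bootloader-backdoor GPIOs so the CC1352 comes up in its serial bootloader rather than the application image */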
+ gpiod_direction_output(bg->bootloader_backdoor_gpio, 0);
+ gpiod_direction_output(bg->rst_gpio, 0);
+ msleep(200);
+
+ gpiod_set_value(bg->rst_gpio, 1);
+ msleep(200);
+
+ gpiod_set_value(bg->bootloader_backdoor_gpio, 1);
+ msleep(200);
+
+ gpiod_direction_input(bg->bootloader_backdoor_gpio);
+ gpiod_direction_input(bg->rst_gpio);
+
+ ret = cc1352_bootloader_sync(bg);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to sync");
+
+ ret = cc1352_bootloader_crc32(bg, &curr_crc32);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to fetch crc32");
+
+ bg->fwl_crc32 = crc32(0xffffffff, data, size) ^ 0xffffffff;
+
+ /* Check if attempting to reflash same firmware */
+ if (bg->fwl_crc32 == curr_crc32) {
+ dev_warn(&bg->sd->dev, "Skipping reflashing same image");
+ cc1352_bootloader_reset(bg);
+ WRITE_ONCE(bg->flashing_mode, false);
+ msleep(200);
+ gb_greybus_init(bg);
+ gb_beagleplay_start_svc(bg);
+ return FW_UPLOAD_ERR_FW_INVALID;
+ }
+
+ ret = cc1352_bootloader_erase(bg);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to erase");
+
+ bg->fwl_reset_addr = true;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void cc1352_cleanup(struct fw_upload *fw_upload)
+{
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ WRITE_ONCE(bg->flashing_mode, false);
+}
+
+static enum fw_upload_err cc1352_write(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset, u32 size,
+ u32 *written)
+{
+ int ret;
+ size_t empty_bytes;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ /* Skip 0xff packets. Significant performance improvement */
+ empty_bytes = cc1352_bootloader_empty_pkt(data + offset, size);
+ if (empty_bytes >= CC1352_BOOTLOADER_PKT_MAX_SIZE) {
+ bg->fwl_reset_addr = true;
+ *written = empty_bytes;
+ return FW_UPLOAD_ERR_NONE;
+ }
+
+ if (bg->fwl_reset_addr) {
+ ret = cc1352_bootloader_download(bg, size, offset);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev,
+ FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to send download cmd");
+
+ bg->fwl_reset_addr = false;
+ }
+
+ ret = cc1352_bootloader_send_data(bg, data + offset, size);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to flash firmware");
+ *written = ret;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err cc1352_poll_complete(struct fw_upload *fw_upload)
+{
+ u32 curr_crc32;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ if (cc1352_bootloader_crc32(bg, &curr_crc32) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to fetch crc32");
+
+ if (bg->fwl_crc32 != curr_crc32)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_FW_INVALID,
+ "Invalid CRC32");
+
+ if (cc1352_bootloader_reset(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to reset");
+
+ dev_info(&bg->sd->dev, "CC1352 Flashing Successful");
+ WRITE_ONCE(bg->flashing_mode, false);
+ msleep(200);
+
+ if (gb_greybus_init(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR,
+ "Failed to initialize greybus");
+
+ gb_beagleplay_start_svc(bg);
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void cc1352_cancel(struct fw_upload *fw_upload)
+{
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ dev_info(&bg->sd->dev, "CC1352 Bootloader Cancel");
+
+ cc1352_bootloader_reset(bg);
+}
+
static void gb_serdev_deinit(struct gb_beagleplay *bg)
{
serdev_device_close(bg->sd);
@@ -463,6 +1029,65 @@ static int gb_serdev_init(struct gb_beagleplay *bg)
return 0;
}
+static const struct fw_upload_ops cc1352_bootloader_ops = {
+ .prepare = cc1352_prepare,
+ .write = cc1352_write,
+ .poll_complete = cc1352_poll_complete,
+ .cancel = cc1352_cancel,
+ .cleanup = cc1352_cleanup
+};
+
+static int gb_fw_init(struct gb_beagleplay *bg)
+{
+ int ret;
+ struct fw_upload *fwl;
+ struct gpio_desc *desc;
+
+ bg->fwl = NULL;
+ bg->bootloader_backdoor_gpio = NULL;
+ bg->rst_gpio = NULL;
+ bg->flashing_mode = false;
+ bg->fwl_cmd_response = 0;
+ bg->fwl_ack = 0;
+ init_completion(&bg->fwl_ack_com);
+ init_completion(&bg->fwl_cmd_response_com);
+
+ desc = devm_gpiod_get(&bg->sd->dev, "bootloader-backdoor", GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ bg->bootloader_backdoor_gpio = desc;
+
+ desc = devm_gpiod_get(&bg->sd->dev, "reset", GPIOD_IN);
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
+ goto free_boot;
+ }
+ bg->rst_gpio = desc;
+
+ fwl = firmware_upload_register(THIS_MODULE, &bg->sd->dev, "cc1352p7",
+ &cc1352_bootloader_ops, bg);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ goto free_reset;
+ }
+ bg->fwl = fwl;
+
+ return 0;
+
+free_reset:
+ devm_gpiod_put(&bg->sd->dev, bg->rst_gpio);
+ bg->rst_gpio = NULL;
+free_boot:
+ devm_gpiod_put(&bg->sd->dev, bg->bootloader_backdoor_gpio);
+ bg->bootloader_backdoor_gpio = NULL;
+ return ret;
+}
+
+static void gb_fw_deinit(struct gb_beagleplay *bg)
+{
+ firmware_upload_unregister(bg->fwl);
+}
+
static int gb_beagleplay_probe(struct serdev_device *serdev)
{
int ret = 0;
@@ -481,14 +1106,20 @@ static int gb_beagleplay_probe(struct serdev_device *serdev)
if (ret)
goto free_serdev;
- ret = gb_greybus_init(bg);
+ ret = gb_fw_init(bg);
if (ret)
goto free_hdlc;
+ ret = gb_greybus_init(bg);
+ if (ret)
+ goto free_fw;
+
gb_beagleplay_start_svc(bg);
return 0;
+free_fw:
+ gb_fw_deinit(bg);
free_hdlc:
hdlc_deinit(bg);
free_serdev:
@@ -500,6 +1131,7 @@ static void gb_beagleplay_remove(struct serdev_device *serdev)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
+ gb_fw_deinit(bg);
gb_greybus_deinit(bg);
gb_beagleplay_stop_svc(bg);
hdlc_deinit(bg);
diff --git a/drivers/hid/hid-goodix-spi.c b/drivers/hid/hid-goodix-spi.c
index de655f745d3f..0e59663814dd 100644
--- a/drivers/hid/hid-goodix-spi.c
+++ b/drivers/hid/hid-goodix-spi.c
@@ -786,14 +786,6 @@ static const struct acpi_device_id goodix_spi_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, goodix_spi_acpi_match);
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id goodix_spi_of_match[] = {
- { .compatible = "goodix,gt7986u", },
- { }
-};
-MODULE_DEVICE_TABLE(of, goodix_spi_of_match);
-#endif
-
static const struct spi_device_id goodix_spi_ids[] = {
{ "gt7986u" },
{ },
@@ -804,7 +796,6 @@ static struct spi_driver goodix_spi_driver = {
.driver = {
.name = "goodix-spi-hid",
.acpi_match_table = ACPI_PTR(goodix_spi_acpi_match),
- .of_match_table = of_match_ptr(goodix_spi_of_match),
.pm = pm_sleep_ptr(&goodix_spi_pm_ops),
},
.probe = goodix_spi_probe,
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index a54c7995b9be..21a70420151e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -803,7 +803,6 @@ static const struct file_operations uhid_fops = {
.read = uhid_char_read,
.write = uhid_char_write,
.poll = uhid_char_poll,
- .llseek = no_llseek,
};
static struct miscdevice uhid_misc = {
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 3751c1e3eddd..1dc7e24fe4c5 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -783,7 +783,6 @@ static const struct file_operations atk_debugfs_ggrp_fops = {
.read = atk_debugfs_ggrp_read,
.open = atk_debugfs_ggrp_open,
.release = atk_debugfs_ggrp_release,
- .llseek = no_llseek,
};
static void atk_debugfs_init(struct atk_data *data)
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 1811f84d835e..a303959879ef 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -948,7 +948,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_release,
.write = watchdog_write,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0acf6bd0227f..67728f60333f 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1451,7 +1451,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 9fc6f6b863e0..ea38ecf26fcb 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
+u32 coresight_get_sink_id(struct coresight_device *csdev)
+{
+ if (!csdev->ea)
+ return 0;
+
+ /*
+ * See function etm_perf_add_symlink_sink() to know where
+ * this comes from.
+ */
+ return (u32) (unsigned long) csdev->ea->var;
+}
+
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
- unsigned long hash;
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
-
- if (!csdev->ea)
- return 0;
- /*
- * See function etm_perf_add_symlink_sink() to know where
- * this comes from.
- */
- hash = (unsigned long)csdev->ea->var;
-
- if ((u32)hash == *(u32 *)data)
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if (coresight_get_sink_id(csdev) == *(u32 *)data)
return 1;
}
@@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev)
struct coresight_device *csdev = to_coresight_device(dev);
fwnode_handle_put(csdev->dev.fwnode);
+ free_percpu(csdev->perf_sink_id_map.cpu_map);
kfree(csdev);
}
@@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ spin_lock_init(&csdev->perf_sink_id_map.lock);
+ csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
+ if (!csdev->perf_sink_id_map.cpu_map) {
+ kfree(csdev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ }
/*
* Make sure the device registration and the connection fixup
* are synchronised, so that we don't see uninitialised devices
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index ccef04f27f12..d0ae10bf6128 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev,
struct cti_drvdata *drvdata)
{
int rc = 0;
- struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct fwnode_handle *child = NULL;
- if (IS_ERR_OR_NULL(fwnode))
+ if (IS_ERR_OR_NULL(dev_fwnode(dev)))
return -EINVAL;
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
- rc = cti_plat_create_connection(dev, drvdata,
- child);
+ rc = cti_plat_create_connection(dev, drvdata, child);
if (rc != 0)
break;
}
- fwnode_handle_put(child);
return rc;
}
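
The hunk above swaps the manual fwnode_for_each_child_node() loop, and the fwnode_handle_put() that had to follow it, for device_for_each_child_node_scoped(), which declares the child node inside the loop and drops its reference automatically on every exit path. A minimal sketch of the same pattern, built around a hypothetical per-node helper handle_node() that is not part of this patch:

#include <linux/device.h>
#include <linux/property.h>

static int handle_node(struct fwnode_handle *node);	/* hypothetical per-node work */

static int walk_children(struct device *dev)
{
	int rc = 0;

	/* 'child' is declared by the macro and put automatically, even on break */
	device_for_each_child_node_scoped(dev, child) {
		rc = handle_node(child);
		if (rc)
			break;		/* no fwnode_handle_put() needed here */
	}

	return rc;
}
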
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index ac70c0b491be..bb85fa663ffc 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev,
- struct perf_event *event, enum cs_mode mode)
+ struct perf_event *event, enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *id_map)
{
+ if (!coresight_take_mode(csdev, mode))
+ return -EBUSY;
+
dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0;
@@ -31,6 +35,7 @@ static int dummy_source_enable(struct coresight_device *csdev,
static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event)
{
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 7edd3f1d0d46..aea9ac9c4bd0 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -652,7 +652,6 @@ static const struct file_operations etb_fops = {
.open = etb_open,
.read = etb_read,
.release = etb_release,
- .llseek = no_llseek,
};
static struct attribute *coresight_etb_mgmt_attrs[] = {
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c0c60e6a1703..ad6a8f4b70b6 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -229,15 +229,24 @@ static void free_event_data(struct work_struct *work)
struct list_head **ppath;
ppath = etm_event_cpu_path_ptr(event_data, cpu);
- if (!(IS_ERR_OR_NULL(*ppath)))
+ if (!(IS_ERR_OR_NULL(*ppath))) {
+ struct coresight_device *sink = coresight_get_sink(*ppath);
+
+ /*
+ * Mark perf event as done for trace id allocator, but don't call
+ * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
+ * never free trace IDs to ensure that the ID associated with a CPU
+ * cannot change while that session or any other concurrent session is active. Instead,
+ * a refcount is used so that the last event to call
+ * coresight_trace_id_perf_stop() frees all IDs.
+ */
+ coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
+
coresight_release_path(*ppath);
+ }
*ppath = NULL;
- coresight_trace_id_put_cpu_id(cpu);
}
- /* mark perf event as done for trace id allocator */
- coresight_trace_id_perf_stop();
-
free_percpu(event_data->path);
kfree(event_data);
}
@@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id);
}
- /* tell the trace ID allocator that a perf event is starting up */
- coresight_trace_id_perf_start();
-
/* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) {
@@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/* ensure we can allocate a trace ID for this CPU */
- trace_id = coresight_trace_id_get_cpu_id(cpu);
+ trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
cpumask_clear_cpu(cpu, mask);
coresight_release_path(path);
continue;
}
+ coresight_trace_id_perf_start(&sink->perf_sink_id_map);
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
@@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
u64 hw_id;
+ u8 trace_id;
if (!csdev)
goto fail;
@@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
- if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
+ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
+ &sink->perf_sink_id_map))
goto fail_disable_path;
/*
@@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags)
*/
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
- hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
- CS_AUX_HW_ID_CURR_VERSION);
- hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
- coresight_trace_id_read_cpu_id(cpu));
+
+ trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
+
+ hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
+ CS_AUX_HW_ID_MAJOR_VERSION);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
+ CS_AUX_HW_ID_MINOR_VERSION);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
+
perf_report_aux_output_id(event, hw_id);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index bebbadee2ceb..744531158d6b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -62,7 +62,6 @@ struct etm_event_data {
struct list_head * __percpu *path;
};
-#if IS_ENABLED(CONFIG_CORESIGHT)
int etm_perf_symlink(struct coresight_device *csdev, bool link);
int etm_perf_add_symlink_sink(struct coresight_device *csdev);
void etm_perf_del_symlink_sink(struct coresight_device *csdev);
@@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc);
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc);
-#else
-static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
-{ return -EINVAL; }
-int etm_perf_add_symlink_sink(struct coresight_device *csdev)
-{ return -EINVAL; }
-void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
-static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
-{
- return NULL;
-}
-int etm_perf_add_symlink_cscfg(struct device *dev,
- struct cscfg_config_desc *config_desc)
-{ return -EINVAL; }
-void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {}
-
-#endif /* CONFIG_CORESIGHT */
-
int __init etm_perf_init(void);
void etm_perf_exit(void);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 8b362605d242..c103f4c70f5d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata)
}
static int etm_enable_perf(struct coresight_device *csdev,
- struct perf_event *event)
+ struct perf_event *event,
+ struct coresight_trace_id_map *id_map)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
@@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
- trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -553,7 +554,7 @@ unlock_enable_sysfs:
}
static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm_enable_perf(csdev, event);
+ ret = etm_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index bf01f01964cf..66d44a404ad0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -752,7 +752,8 @@ out:
}
static int etm4_enable_perf(struct coresight_device *csdev,
- struct perf_event *event)
+ struct perf_event *event,
+ struct coresight_trace_id_map *id_map)
{
int ret = 0, trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
- trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -837,7 +838,7 @@ unlock_sysfs_enable:
}
static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
@@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm4_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm4_enable_perf(csdev, event);
+ ret = etm4_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 61a46d3bdcc8..05f891ca6b5c 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig,
struct coresight_device *target);
void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn);
+u32 coresight_get_sink_id(struct coresight_device *csdev);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 117dbb484543..cb3e04755c99 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
}
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *trace_id)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index 1e67cc7758d7..a01c9e54e2ed 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
/*
* Use IDR to map the hash of the source's device name
@@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev,
*/
lockdep_assert_held(&coresight_mutex);
if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
- ret = source_ops(csdev)->enable(csdev, data, mode);
+ ret = source_ops(csdev)->enable(csdev, data, mode, NULL);
if (ret)
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index b54562f392f3..3a482fd2cb22 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -220,7 +220,6 @@ static const struct file_operations tmc_fops = {
.open = tmc_open,
.read = tmc_read,
.release = tmc_release,
- .llseek = no_llseek,
};
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index e75428fa1592..a48bb85d0e7f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -36,7 +36,8 @@ struct etr_buf_hw {
* etr_perf_buffer - Perf buffer used for ETR
* @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
- * @pid - The PID this etr_perf_buffer belongs to.
+ * @pid - The PID of the session owner that this etr_perf_buffer
+ * belongs to.
* @snapshot - Perf session mode
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
@@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
+ kfree(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table);
if (rc) {
tmc_free_sg_table(sg_table);
- kfree(sg_table);
return ERR_PTR(rc);
}
@@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
- /* Get a handle on the pid of the process to monitor */
+ /* Get a handle on the pid of the session owner */
pid = etr_perf->pid;
/* Do not proceed if this device is associated with another session */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index c77763b49de0..2671926be62a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -171,8 +171,9 @@ struct etr_buf {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @pid: Process ID of the process being monitored by the session
- * that is using this component.
+ * @pid: Process ID of the process that owns the session that is using
+ * this component. For example, this would be the pid of the Perf
+ * process.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 0726f8842552..b7d99e91ab84 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
}
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *id_map)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
return -EBUSY;
}
+ if (!coresight_take_mode(csdev, mode)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EBUSY;
+ }
+
__tpdm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
@@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev,
}
__tpdm_disable(drvdata);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index af5b4ef59cea..d98e12cb30ec 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -3,6 +3,7 @@
* Copyright (c) 2022, Linaro Limited, All rights reserved.
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
@@ -11,18 +12,12 @@
#include "coresight-trace-id.h"
-/* Default trace ID map. Used on systems that don't require per sink mappings */
-static struct coresight_trace_id_map id_map_default;
-
-/* maintain a record of the mapping of IDs and pending releases per cpu */
-static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
-static cpumask_t cpu_id_release_pending;
-
-/* perf session active counter */
-static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
-
-/* lock to protect id_map and cpu data */
-static DEFINE_SPINLOCK(id_map_lock);
+/* Default trace ID map. Used in sysfs mode and for system sources */
+static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
+static struct coresight_trace_id_map id_map_default = {
+ .cpu_map = &id_map_default_cpu_ids,
+ .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
+};
/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
@@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
{
pr_debug("%s id_map::\n", func_name);
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
- pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
#endif
/* unlocked read of current trace ID value for given CPU */
-static int _coresight_trace_id_read_cpu_id(int cpu)
+static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
- return atomic_read(&per_cpu(cpu_id, cpu));
+ return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}
/* look for next available odd ID, return 0 if none found */
@@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
clear_bit(id, id_map->used_ids);
}
-static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
-{
- if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
- return;
- set_bit(id, id_map->pend_rel_ids);
-}
-
/*
- * release all pending IDs for all current maps & clear CPU associations
- *
- * This currently operates on the default id map, but may be extended to
- * operate on all registered id maps if per sink id maps are used.
+ * Release all IDs and clear CPU associations.
*/
-static void coresight_trace_id_release_all_pending(void)
+static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
- struct coresight_trace_id_map *id_map = &id_map_default;
unsigned long flags;
- int cpu, bit;
+ int cpu;
- spin_lock_irqsave(&id_map_lock, flags);
- for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
- clear_bit(bit, id_map->used_ids);
- clear_bit(bit, id_map->pend_rel_ids);
- }
- for_each_cpu(cpu, &cpu_id_release_pending) {
- atomic_set(&per_cpu(cpu_id, cpu), 0);
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- }
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
+ bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
+ for_each_possible_cpu(cpu)
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id)
- goto get_cpu_id_clr_pend;
+ goto get_cpu_id_out_unlock;
/*
* Find a new ID.
@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
goto get_cpu_id_out_unlock;
/* allocate the new id to the cpu */
- atomic_set(&per_cpu(cpu_id, cpu), id);
-
-get_cpu_id_clr_pend:
- /* we are (re)using this ID - so ensure it is not marked for release */
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- clear_bit(id, id_map->pend_rel_ids);
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_out_unlock:
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
return id;
}
-static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (!id)
return;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
- if (atomic_read(&perf_cs_etm_session_active)) {
- /* set release at pending if perf still active */
- coresight_trace_id_set_pend_rel(id, id_map);
- cpumask_set_cpu(cpu, &cpu_id_release_pending);
- } else {
- /* otherwise clear id */
- coresight_trace_id_free(id, id_map);
- atomic_set(&per_cpu(cpu_id, cpu), 0);
- }
+ coresight_trace_id_free(id, id_map);
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
}
@@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
/* prefer odd IDs for system components to avoid legacy CPU IDs */
id = coresight_trace_id_alloc_new_id(id_map, 0, true);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{
unsigned long flags;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
int coresight_trace_id_get_cpu_id(int cpu)
{
- return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
+ return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_get_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);
+
void coresight_trace_id_put_cpu_id(int cpu)
{
- coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
+ _coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ _coresight_trace_id_put_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);
+
int coresight_trace_id_read_cpu_id(int cpu)
{
- return _coresight_trace_id_read_cpu_id(cpu);
+ return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_read_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
+
int coresight_trace_id_get_system_id(void)
{
return coresight_trace_id_map_get_system_id(&id_map_default);
@@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id)
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
-void coresight_trace_id_perf_start(void)
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
- atomic_inc(&perf_cs_etm_session_active);
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ atomic_inc(&id_map->perf_cs_etm_session_active);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
-void coresight_trace_id_perf_stop(void)
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
- if (!atomic_dec_return(&perf_cs_etm_session_active))
- coresight_trace_id_release_all_pending();
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
+ coresight_trace_id_release_all(id_map);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 3797777d367e..9aae50a553ca 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -17,9 +17,10 @@
* released when done.
*
* In order to ensure that a consistent cpu / ID matching is maintained
- * throughout a perf cs_etm event session - a session in progress flag will
- * be maintained, and released IDs not cleared until the perf session is
- * complete. This allows the same CPU to be re-allocated its prior ID.
+ * throughout a perf cs_etm event session - a session in progress flag will be
+ * maintained for each sink, and IDs are cleared when all the perf sessions
+ * complete. This allows the same CPU to be re-allocated its prior ID when
+ * events are scheduled in and out.
*
*
* Trace ID maps will be created and initialised to prevent architecturally
@@ -32,10 +33,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
-
-/* architecturally we have 128 IDs some of which are reserved */
-#define CORESIGHT_TRACE_IDS_MAX 128
-
/* ID 0 is reserved */
#define CORESIGHT_TRACE_ID_RES_0 0
@@ -47,23 +44,6 @@
((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
/**
- * Trace ID map.
- *
- * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
- * Initialised so that the reserved IDs are permanently marked as
- * in use.
- * @pend_rel_ids: CPU IDs that have been released by the trace source but not
- * yet marked as available, to allow re-allocation to the same
- * CPU during a perf session.
- */
-struct coresight_trace_id_map {
- DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
- DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
-};
-
-/* Allocate and release IDs for a single default trace ID map */
-
-/**
* Read and optionally allocate a CoreSight trace ID and associate with a CPU.
*
* Function will read the current trace ID for the associated CPU,
@@ -79,19 +59,27 @@ struct coresight_trace_id_map {
int coresight_trace_id_get_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_get_cpu_id() that takes the ID map to operate
+ * on as an explicit argument.
+ */
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Release an allocated trace ID associated with the CPU.
*
- * This will release the CoreSight trace ID associated with the CPU,
- * unless a perf session is in operation.
- *
- * If a perf session is in operation then the ID will be marked as pending
- * release.
+ * This will release the CoreSight trace ID associated with the CPU.
*
* @cpu: The CPU index to release the associated trace ID.
*/
void coresight_trace_id_put_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_put_cpu_id() that takes the ID map to operate
+ * on as an explicit argument.
+ */
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Read the current allocated CoreSight Trace ID value for the CPU.
*
* Fast read of the current value that does not allocate if no ID allocated
@@ -112,6 +100,12 @@ void coresight_trace_id_put_cpu_id(int cpu);
int coresight_trace_id_read_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_read_cpu_id() that takes the ID map to operate
+ * on as an explicit argument.
+ */
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Allocate a CoreSight trace ID for a system component.
*
* Unconditionally allocates a Trace ID, without associating the ID with a CPU.
@@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
/**
* Notify the Trace ID allocator that a perf session is starting.
*
- * Increase the perf session reference count - called by perf when setting up
- * a trace event.
+ * Increase the perf session reference count - called by perf when setting up a
+ * trace event.
*
- * This reference count is used by the ID allocator to ensure that trace IDs
- * associated with a CPU cannot change or be released during a perf session.
+ * Perf sessions never free trace IDs to ensure that the ID associated with a
+ * CPU cannot change while that session or any other concurrent session is active. Instead,
+ * this refcount is used so that the last event to finish always frees all IDs.
*/
-void coresight_trace_id_perf_start(void);
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
/**
* Notify the ID allocator that a perf session is stopping.
*
- * Decrease the perf session reference count.
- * if this causes the count to go to zero, then all Trace IDs marked as pending
- * release, will be released.
+ * Decrease the perf session reference count. If this causes the count to go to
+ * zero, then all Trace IDs will be released.
*/
-void coresight_trace_id_perf_stop(void);
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
#endif /* _CORESIGHT_TRACE_ID_H */
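
Across these trace-id changes the bookkeeping moves from one global map to one map per sink, and the release rule described in the comments is "last session out frees everything": each perf session increments the map's refcount when it starts, and only the decrement that reaches zero clears the whole ID map. A minimal, self-contained C sketch of that refcount idea, assuming nothing beyond the standard library (an illustration, not the kernel implementation):

#include <stdatomic.h>
#include <string.h>

struct id_map {
	atomic_int sessions_active;	/* counts live perf sessions on this sink */
	unsigned char used_ids[128];	/* stands in for the used_ids bitmap */
};

static void session_start(struct id_map *m)
{
	atomic_fetch_add(&m->sessions_active, 1);
}

static void session_stop(struct id_map *m)
{
	/* Only the last session to stop releases every allocated ID at once. */
	if (atomic_fetch_sub(&m->sessions_active, 1) == 1)
		memset(m->used_ids, 0, sizeof(m->used_ids));
}
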
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index f9ebf20c91e6..ef7f560f0ffa 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -163,7 +163,6 @@ static const struct file_operations smb_fops = {
.open = smb_open,
.read = smb_read,
.release = smb_release,
- .llseek = no_llseek,
};
static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index be63d5b8f193..66123d684ac9 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1677,7 +1677,6 @@ static const struct file_operations intel_th_msc_fops = {
.release = intel_th_msc_release,
.read = intel_th_msc_read,
.mmap = intel_th_msc_mmap,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index ccf39a80dc4f..cdba4e875b28 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -839,7 +839,6 @@ static const struct file_operations stm_fops = {
.mmap = stm_char_mmap,
.unlocked_ioctl = stm_char_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static void stm_device_release(struct device *dev)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 53f18b351f53..6b3ba7e5723a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -782,6 +782,7 @@ config I2C_JZ4780
config I2C_KEBA
tristate "KEBA I2C controller support"
depends on HAS_IOMEM
+ depends on KEBA_CP500 || COMPILE_TEST
select AUXILIARY_BUS
help
This driver supports the I2C controller found in KEBA system FPGA
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 080204182bb5..f31d352d98b5 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -523,6 +523,7 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
+ struct i2c_timings *t = &dev->timings;
unsigned int raw_intr_stats;
unsigned int enable;
int timeout = 100;
@@ -535,6 +536,19 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
if (abort_needed) {
+ if (!(enable & DW_IC_ENABLE_ENABLE)) {
+ regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
+ /*
+ * Wait 10 times the signaling period of the highest I2C
+ * transfer supported by the driver (for 400KHz this is
+ * 25us) to ensure the I2C ENABLE bit is already set
+ * as described in the DesignWare I2C databook.
+ */
+ fsleep(DIV_ROUND_CLOSEST_ULL(10 * MICRO, t->bus_freq_hz));
+ /* Set ENABLE bit before setting ABORT */
+ enable |= DW_IC_ENABLE_ENABLE;
+ }
+
regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
!(enable & DW_IC_ENABLE_ABORT), 10,
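
For concreteness, the fsleep() in the hunk above waits ten signaling periods of the configured bus clock: with MICRO = 1000000, a 400 kHz bus gives DIV_ROUND_CLOSEST_ULL(10 * 1000000, 400000) = 25 us, matching the figure in the comment, and a 100 kHz bus would wait 100 us. A tiny stand-alone check of that arithmetic (plain C, not driver code):

/* ten signaling periods of the bus clock, rounded to the nearest microsecond */
static unsigned long ten_periods_us(unsigned long bus_freq_hz)
{
	return (10UL * 1000000UL + bus_freq_hz / 2) / bus_freq_hz;
}

/* ten_periods_us(400000) == 25, ten_periods_us(100000) == 100 */
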
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 1ac2afd03a0a..8e8854ec9882 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -108,6 +108,7 @@
DW_IC_INTR_RX_UNDER | \
DW_IC_INTR_RD_REQ)
+#define DW_IC_ENABLE_ENABLE BIT(0)
#define DW_IC_ENABLE_ABORT BIT(1)
#define DW_IC_STATUS_ACTIVITY BIT(0)
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e46f1b22c360..e8ac9a7bf0b3 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -271,6 +271,34 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
+/*
+ * This function waits for the controller to be idle before disabling I2C.
+ * When the controller is not in the IDLE state, the MST_ACTIVITY bit
+ * (IC_STATUS[5]) is set.
+ *
+ * Values:
+ * 0x1 (ACTIVE): Controller not idle
+ * 0x0 (IDLE): Controller is idle
+ *
+ * The function is called after completing the current transfer.
+ *
+ * Returns:
+ * False when the controller is in the IDLE state.
+ * True when the controller is in the ACTIVE state.
+ */
+static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
+{
+ u32 status;
+
+ regmap_read(dev->map, DW_IC_STATUS, &status);
+ if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
+ return false;
+
+ return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
+ !(status & DW_IC_STATUS_MASTER_ACTIVITY),
+ 1100, 20000) != 0;
+}
+
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
u32 val;
@@ -807,6 +835,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
/*
+ * This happens rarely (~1:500) and is hard to reproduce. A debug trace
+ * showed that IC_STATUS had a value of 0x23 when STOP_DET occurred;
+ * disabling IC_ENABLE.ENABLE immediately at that point can result in
+ * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check that the
+ * controller is no longer ACTIVE before disabling I2C.
+ */
+ if (i2c_dw_is_controller_active(dev))
+ dev_err(dev->dev, "controller active\n");
+
+ /*
* We must disable the adapter before returning and signaling the end
* of the current transfer. Otherwise the hardware might continue
* generating interrupts which in turn causes a race condition with
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 4eccbcd0fbfc..bbb9062669e4 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -550,12 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
if (IS_ERR(pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
"failed to get and enable clock\n");
- i2c->pclkrate = clk_get_rate(pclk);
+ if (pclk)
+ i2c->pclkrate = clk_get_rate(pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4c89aad02451..1d68177241a6 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1337,8 +1337,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return 0;
err_pm_disable:
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return ret;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4fb212b7f39..61f7c4003d2f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -637,7 +637,6 @@ static int i2cdev_release(struct inode *inode, struct file *file)
static const struct file_operations i2cdev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = i2cdev_read,
.write = i2cdev_write,
.unlocked_ioctl = i2cdev_ioctl,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9457e34b9e32..67aebfe0fed6 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -121,6 +121,12 @@ static unsigned int mwait_substates __initdata;
#define CPUIDLE_FLAG_INIT_XSTATE BIT(17)
/*
+ * Ignore the sub-state when matching mwait hints between the ACPI _CST and
+ * custom tables.
+ */
+#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -1043,7 +1049,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.name = "C6",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
- CPUIDLE_FLAG_INIT_XSTATE,
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
.enter = &intel_idle,
@@ -1052,7 +1059,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.name = "C6P",
.desc = "MWAIT 0x21",
.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
- CPUIDLE_FLAG_INIT_XSTATE,
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
.enter = &intel_idle,
@@ -1354,7 +1362,8 @@ static struct cpuidle_state srf_cstates[] __initdata = {
{
.name = "C6S",
.desc = "MWAIT 0x22",
- .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
.enter = &intel_idle,
@@ -1362,7 +1371,8 @@ static struct cpuidle_state srf_cstates[] __initdata = {
{
.name = "C6SP",
.desc = "MWAIT 0x23",
- .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
.enter = &intel_idle,
@@ -1744,7 +1754,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
}
}
-static bool __init intel_idle_off_by_default(u32 mwait_hint)
+static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
int cstate, limit;
@@ -1761,7 +1771,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
* the interesting states are ACPI_CSTATE_FFH.
*/
for (cstate = 1; cstate < limit; cstate++) {
- if (acpi_state_table.states[cstate].address == mwait_hint)
+ u32 acpi_hint = acpi_state_table.states[cstate].address;
+ u32 table_hint = mwait_hint;
+
+ if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
+ acpi_hint &= ~MWAIT_SUBSTATE_MASK;
+ table_hint &= ~MWAIT_SUBSTATE_MASK;
+ }
+
+ if (acpi_hint == table_hint)
return false;
}
return true;
@@ -1771,7 +1789,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
-static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
+{
+ return false;
+}
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
@@ -2098,7 +2119,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
- intel_idle_off_by_default(mwait_hint) &&
+ intel_idle_off_by_default(state->flags, mwait_hint) &&
!(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
state->flags |= CPUIDLE_FLAG_OFF;
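
With CPUIDLE_FLAG_PARTIAL_HINT_MATCH set, only the C-state nibble of the MWAIT hint is compared when checking the custom table against ACPI _CST: MWAIT_SUBSTATE_MASK is 0xf, so a table hint of 0x21 (C6P) and an ACPI hint of 0x20 both reduce to 0x20 and count as a match. A small stand-alone illustration (plain C; the mask value is the one defined in the x86 mwait header):

#define MWAIT_SUBSTATE_MASK 0xf	/* low nibble of the hint is the sub-state */

static int hints_match(unsigned int acpi_hint, unsigned int table_hint, int partial)
{
	if (partial) {
		acpi_hint &= ~MWAIT_SUBSTATE_MASK;
		table_hint &= ~MWAIT_SUBSTATE_MASK;
	}
	return acpi_hint == table_hint;
}

/* hints_match(0x20, 0x21, 1) == 1, hints_match(0x20, 0x21, 0) == 0 */
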
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 80b57d3ee3a7..516c1a8e4d56 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -177,6 +177,33 @@ config ADXL372_I2C
To compile this driver as a module, choose M here: the
module will be called adxl372_i2c.
+config ADXL380
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config ADXL380_SPI
+ tristate "Analog Devices ADXL380 3-Axis Accelerometer SPI Driver"
+ depends on SPI
+ select ADXL380
+ select REGMAP_SPI
+ help
+ Say yes here to add support for the Analog Devices ADXL380 triaxial
+ acceleration sensor.
+ To compile this driver as a module, choose M here: the
+ module will be called adxl380_spi.
+
+config ADXL380_I2C
+ tristate "Analog Devices ADXL380 3-Axis Accelerometer I2C Driver"
+ depends on I2C
+ select ADXL380
+ select REGMAP_I2C
+ help
+ Say yes here to add support for the Analog Devices ADXL380 triaxial
+ acceleration sensor.
+ To compile this driver as a module, choose M here: the
+ module will be called adxl380_i2c.
+
config BMA180
tristate "Bosch BMA023/BMA1x0/BMA250 3-Axis Accelerometer Driver"
depends on I2C && INPUT_BMA150=n
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index db90532ba24a..ca8569e25aba 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -21,6 +21,9 @@ obj-$(CONFIG_ADXL367_SPI) += adxl367_spi.o
obj-$(CONFIG_ADXL372) += adxl372.o
obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o
obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
+obj-$(CONFIG_ADXL380) += adxl380.o
+obj-$(CONFIG_ADXL380_I2C) += adxl380_i2c.o
+obj-$(CONFIG_ADXL380_SPI) += adxl380_spi.o
obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMA400) += bma400_core.o
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 5cf4828a5eb5..1c046e96aef9 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1220,7 +1220,7 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
return ret;
st->fifo_set_size = bitmap_weight(active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
return 0;
}
diff --git a/drivers/iio/accel/adxl367_spi.c b/drivers/iio/accel/adxl367_spi.c
index 118c894015a5..b70117265791 100644
--- a/drivers/iio/accel/adxl367_spi.c
+++ b/drivers/iio/accel/adxl367_spi.c
@@ -72,7 +72,7 @@ static int adxl367_write(void *context, const void *val_buf, size_t val_size)
return spi_sync(st->spi, &st->reg_write_msg);
}
-static struct regmap_bus adxl367_spi_regmap_bus = {
+static const struct regmap_bus adxl367_spi_regmap_bus = {
.read = adxl367_read,
.write = adxl367_write,
};
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index c4193286eb05..ef8dd557877b 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -1050,7 +1050,7 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
st->fifo_format = adxl372_axis_lookup_table[i].fifo_format;
st->fifo_axis_mask = adxl372_axis_lookup_table[i].bits;
st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
/* Configure the FIFO to store sets of impact event peak. */
if (st->peak_fifo_mode_en) {
diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
new file mode 100644
index 000000000000..98863e22bb6b
--- /dev/null
+++ b/drivers/iio/accel/adxl380.c
@@ -0,0 +1,1905 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer core driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/regulator/consumer.h>
+
+#include "adxl380.h"
+
+#define ADXL380_ID_VAL 380
+#define ADXL382_ID_VAL 382
+
+#define ADXL380_DEVID_AD_REG 0x00
+#define ADLX380_PART_ID_REG 0x02
+
+#define ADXL380_X_DATA_H_REG 0x15
+#define ADXL380_Y_DATA_H_REG 0x17
+#define ADXL380_Z_DATA_H_REG 0x19
+#define ADXL380_T_DATA_H_REG 0x1B
+
+#define ADXL380_MISC_0_REG 0x20
+#define ADXL380_XL382_MSK BIT(7)
+
+#define ADXL380_MISC_1_REG 0x21
+
+#define ADXL380_X_DSM_OFFSET_REG 0x4D
+
+#define ADXL380_ACT_INACT_CTL_REG 0x37
+#define ADXL380_INACT_EN_MSK BIT(2)
+#define ADXL380_ACT_EN_MSK BIT(0)
+
+#define ADXL380_SNSR_AXIS_EN_REG 0x38
+#define ADXL380_ACT_INACT_AXIS_EN_MSK GENMASK(2, 0)
+
+#define ADXL380_THRESH_ACT_H_REG 0x39
+#define ADXL380_TIME_ACT_H_REG 0x3B
+#define ADXL380_THRESH_INACT_H_REG 0x3E
+#define ADXL380_TIME_INACT_H_REG 0x40
+#define ADXL380_THRESH_MAX GENMASK(12, 0)
+#define ADXL380_TIME_MAX GENMASK(24, 0)
+
+#define ADXL380_FIFO_CONFIG_0_REG 0x30
+#define ADXL380_FIFO_SAMPLES_8_MSK BIT(0)
+#define ADXL380_FIFO_MODE_MSK GENMASK(5, 4)
+
+#define ADXL380_FIFO_DISABLED 0
+#define ADXL380_FIFO_NORMAL 1
+#define ADXL380_FIFO_STREAMED 2
+#define ADXL380_FIFO_TRIGGERED 3
+
+#define ADXL380_FIFO_CONFIG_1_REG 0x31
+#define ADXL380_FIFO_STATUS_0_REG 0x1E
+
+#define ADXL380_TAP_THRESH_REG 0x43
+#define ADXL380_TAP_DUR_REG 0x44
+#define ADXL380_TAP_LATENT_REG 0x45
+#define ADXL380_TAP_WINDOW_REG 0x46
+#define ADXL380_TAP_TIME_MAX GENMASK(7, 0)
+
+#define ADXL380_TAP_CFG_REG 0x47
+#define ADXL380_TAP_AXIS_MSK GENMASK(1, 0)
+
+#define ADXL380_TRIG_CFG_REG 0x49
+#define ADXL380_TRIG_CFG_DEC_2X_MSK BIT(7)
+#define ADXL380_TRIG_CFG_SINC_RATE_MSK BIT(6)
+
+#define ADXL380_FILTER_REG 0x50
+#define ADXL380_FILTER_EQ_FILT_MSK BIT(6)
+#define ADXL380_FILTER_LPF_MODE_MSK GENMASK(5, 4)
+#define ADXL380_FILTER_HPF_PATH_MSK BIT(3)
+#define ADXL380_FILTER_HPF_CORNER_MSK GENMASK(2, 0)
+
+#define ADXL380_OP_MODE_REG 0x26
+#define ADXL380_OP_MODE_RANGE_MSK GENMASK(7, 6)
+#define ADXL380_OP_MODE_MSK GENMASK(3, 0)
+#define ADXL380_OP_MODE_STANDBY 0
+#define ADXL380_OP_MODE_HEART_SOUND 1
+#define ADXL380_OP_MODE_ULP 2
+#define ADXL380_OP_MODE_VLP 3
+#define ADXL380_OP_MODE_LP 4
+#define ADXL380_OP_MODE_LP_ULP 6
+#define ADXL380_OP_MODE_LP_VLP 7
+#define ADXL380_OP_MODE_RBW 8
+#define ADXL380_OP_MODE_RBW_ULP 10
+#define ADXL380_OP_MODE_RBW_VLP 11
+#define ADXL380_OP_MODE_HP 12
+#define ADXL380_OP_MODE_HP_ULP 14
+#define ADXL380_OP_MODE_HP_VLP 15
+
+#define ADXL380_OP_MODE_4G_RANGE 0
+#define ADXL382_OP_MODE_15G_RANGE 0
+#define ADXL380_OP_MODE_8G_RANGE 1
+#define ADXL382_OP_MODE_30G_RANGE 1
+#define ADXL380_OP_MODE_16G_RANGE 2
+#define ADXL382_OP_MODE_60G_RANGE 2
+
+#define ADXL380_DIG_EN_REG 0x27
+#define ADXL380_CHAN_EN_MSK(chan) BIT(4 + (chan))
+#define ADXL380_FIFO_EN_MSK BIT(3)
+
+#define ADXL380_INT0_MAP0_REG 0x2B
+#define ADXL380_INT1_MAP0_REG 0x2D
+#define ADXL380_INT_MAP0_INACT_INT0_MSK BIT(6)
+#define ADXL380_INT_MAP0_ACT_INT0_MSK BIT(5)
+#define ADXL380_INT_MAP0_FIFO_WM_INT0_MSK BIT(3)
+
+#define ADXL380_INT0_MAP1_REG 0x2C
+#define ADXL380_INT1_MAP1_REG 0x2E
+#define ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK BIT(1)
+#define ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK BIT(0)
+
+#define ADXL380_INT0_REG 0x5D
+#define ADXL380_INT0_POL_MSK BIT(7)
+
+#define ADXL380_RESET_REG 0x2A
+#define ADXL380_FIFO_DATA 0x1D
+
+#define ADXL380_DEVID_AD_VAL 0xAD
+#define ADXL380_RESET_CODE 0x52
+
+#define ADXL380_STATUS_0_REG 0x11
+#define ADXL380_STATUS_0_FIFO_FULL_MSK BIT(1)
+#define ADXL380_STATUS_0_FIFO_WM_MSK BIT(3)
+
+#define ADXL380_STATUS_1_INACT_MSK BIT(6)
+#define ADXL380_STATUS_1_ACT_MSK BIT(5)
+#define ADXL380_STATUS_1_DOUBLE_TAP_MSK BIT(1)
+#define ADXL380_STATUS_1_SINGLE_TAP_MSK BIT(0)
+
+#define ADXL380_FIFO_SAMPLES 315UL
+
+enum adxl380_channels {
+ ADXL380_ACCEL_X,
+ ADXL380_ACCEL_Y,
+ ADXL380_ACCEL_Z,
+ ADXL380_TEMP,
+ ADXL380_CH_NUM
+};
+
+enum adxl380_axis {
+ ADXL380_X_AXIS,
+ ADXL380_Y_AXIS,
+ ADXL380_Z_AXIS,
+};
+
+enum adxl380_activity_type {
+ ADXL380_ACTIVITY,
+ ADXL380_INACTIVITY,
+};
+
+enum adxl380_tap_type {
+ ADXL380_SINGLE_TAP,
+ ADXL380_DOUBLE_TAP,
+};
+
+enum adxl380_tap_time_type {
+ ADXL380_TAP_TIME_LATENT,
+ ADXL380_TAP_TIME_WINDOW,
+};
+
+static const int adxl380_range_scale_factor_tbl[] = { 1, 2, 4 };
+
+const struct adxl380_chip_info adxl380_chip_info = {
+ .name = "adxl380",
+ .chip_id = ADXL380_ID_VAL,
+ .scale_tbl = {
+ [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 },
+ [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 },
+ [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 },
+ },
+ .samp_freq_tbl = { 8000, 16000, 32000 },
+ /*
+ * The datasheet defines an intercept of 470 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 470,
+
+};
+EXPORT_SYMBOL_NS_GPL(adxl380_chip_info, IIO_ADXL380);
+
+const struct adxl380_chip_info adxl382_chip_info = {
+ .name = "adxl382",
+ .chip_id = ADXL382_ID_VAL,
+ .scale_tbl = {
+ [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 },
+ [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 },
+ [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 },
+ },
+ .samp_freq_tbl = { 16000, 32000, 64000 },
+ /*
+ * The datasheet defines an intercept of 570 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 570,
+};
+EXPORT_SYMBOL_NS_GPL(adxl382_chip_info, IIO_ADXL380);
+
+static const unsigned int adxl380_th_reg_high_addr[2] = {
+ [ADXL380_ACTIVITY] = ADXL380_THRESH_ACT_H_REG,
+ [ADXL380_INACTIVITY] = ADXL380_THRESH_INACT_H_REG,
+};
+
+static const unsigned int adxl380_time_reg_high_addr[2] = {
+ [ADXL380_ACTIVITY] = ADXL380_TIME_ACT_H_REG,
+ [ADXL380_INACTIVITY] = ADXL380_TIME_INACT_H_REG,
+};
+
+static const unsigned int adxl380_tap_time_reg[2] = {
+ [ADXL380_TAP_TIME_LATENT] = ADXL380_TAP_LATENT_REG,
+ [ADXL380_TAP_TIME_WINDOW] = ADXL380_TAP_WINDOW_REG,
+};
+
+struct adxl380_state {
+ struct regmap *regmap;
+ struct device *dev;
+ const struct adxl380_chip_info *chip_info;
+ /*
+ * Synchronize access to members of driver state, and ensure atomicity
+ * of consecutive regmap operations.
+ */
+ struct mutex lock;
+ enum adxl380_axis tap_axis_en;
+ u8 range;
+ u8 odr;
+ u8 fifo_set_size;
+ u8 transf_buf[3];
+ u16 watermark;
+ u32 act_time_ms;
+ u32 act_threshold;
+ u32 inact_time_ms;
+ u32 inact_threshold;
+ u32 tap_latent_us;
+ u32 tap_window_us;
+ u32 tap_duration_us;
+ u32 tap_threshold;
+ int irq;
+ int int_map[2];
+ int lpf_tbl[4];
+ int hpf_tbl[7][2];
+
+ __be16 fifo_buf[ADXL380_FIFO_SAMPLES] __aligned(IIO_DMA_MINALIGN);
+};
+
+bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg)
+{
+ return reg == ADXL380_FIFO_DATA;
+}
+EXPORT_SYMBOL_NS_GPL(adxl380_readable_noinc_reg, IIO_ADXL380);
+
+static int adxl380_set_measure_en(struct adxl380_state *st, bool en)
+{
+ int ret;
+ unsigned int act_inact_ctl;
+ u8 op_mode = ADXL380_OP_MODE_STANDBY;
+
+ if (en) {
+ ret = regmap_read(st->regmap, ADXL380_ACT_INACT_CTL_REG, &act_inact_ctl);
+ if (ret)
+ return ret;
+
+ /* Activity/inactivity detection is available only in VLP/ULP mode */
+ if (FIELD_GET(ADXL380_ACT_EN_MSK, act_inact_ctl) ||
+ FIELD_GET(ADXL380_INACT_EN_MSK, act_inact_ctl))
+ op_mode = ADXL380_OP_MODE_VLP;
+ else
+ op_mode = ADXL380_OP_MODE_HP;
+ }
+
+ return regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG,
+ ADXL380_OP_MODE_MSK,
+ FIELD_PREP(ADXL380_OP_MODE_MSK, op_mode));
+}
+
+static void adxl380_scale_act_inact_thresholds(struct adxl380_state *st,
+ u8 old_range,
+ u8 new_range)
+{
+ st->act_threshold = mult_frac(st->act_threshold,
+ adxl380_range_scale_factor_tbl[old_range],
+ adxl380_range_scale_factor_tbl[new_range]);
+ st->inact_threshold = mult_frac(st->inact_threshold,
+ adxl380_range_scale_factor_tbl[old_range],
+ adxl380_range_scale_factor_tbl[new_range]);
+}
+
+static int adxl380_write_act_inact_threshold(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ unsigned int th)
+{
+ int ret;
+ u8 reg = adxl380_th_reg_high_addr[act];
+
+ if (th > ADXL380_THRESH_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(st->regmap, reg + 1, th & GENMASK(7, 0));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, reg, GENMASK(2, 0), th >> 8);
+ if (ret)
+ return ret;
+
+ if (act == ADXL380_ACTIVITY)
+ st->act_threshold = th;
+ else
+ st->inact_threshold = th;
+
+ return 0;
+}
+
+static int adxl380_set_act_inact_threshold(struct iio_dev *indio_dev,
+ enum adxl380_activity_type act,
+ u16 th)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_threshold(st, act, th);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_set_tap_threshold_value(struct iio_dev *indio_dev, u8 th)
+{
+ int ret;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_TAP_THRESH_REG, th);
+ if (ret)
+ return ret;
+
+ st->tap_threshold = th;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int _adxl380_write_tap_time_us(struct adxl380_state *st,
+ enum adxl380_tap_time_type tap_time_type,
+ u32 us)
+{
+ u8 reg = adxl380_tap_time_reg[tap_time_type];
+ unsigned int reg_val;
+ int ret;
+
+ /* Both the tap window and tap latent time registers use a scale factor of 1250 us / LSB */
+ reg_val = DIV_ROUND_CLOSEST(us, 1250);
+ if (reg_val > ADXL380_TAP_TIME_MAX)
+ reg_val = ADXL380_TAP_TIME_MAX;
+
+ ret = regmap_write(st->regmap, reg, reg_val);
+ if (ret)
+ return ret;
+
+ if (tap_time_type == ADXL380_TAP_TIME_WINDOW)
+ st->tap_window_us = us;
+ else
+ st->tap_latent_us = us;
+
+ return 0;
+}
+
+static int adxl380_write_tap_time_us(struct adxl380_state *st,
+ enum adxl380_tap_time_type tap_time_type, u32 us)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = _adxl380_write_tap_time_us(st, tap_time_type, us);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_tap_dur_us(struct iio_dev *indio_dev, u32 us)
+{
+ int ret;
+ unsigned int reg_val;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ /* 625us per code is the scale factor of TAP_DUR register */
+ reg_val = DIV_ROUND_CLOSEST(us, 625);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_TAP_DUR_REG, reg_val);
+ if (ret)
+ return ret;
+
+ /* Cache the value so the maxtomin_time attribute reflects what was set. */
+ st->tap_duration_us = us;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_read_chn(struct adxl380_state *st, u8 addr)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_bulk_read(st->regmap, addr, &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ return get_unaligned_be16(st->transf_buf);
+}
+
+static int adxl380_get_odr(struct adxl380_state *st, int *odr)
+{
+ int ret;
+ unsigned int trig_cfg, odr_idx;
+
+ ret = regmap_read(st->regmap, ADXL380_TRIG_CFG_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
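+ /*
+ * The ODR index combines SINC_RATE (high bit) and DEC_2X (low bit),
+ * mirroring the encoding used in adxl380_set_odr().
+ */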
+ odr_idx = (FIELD_GET(ADXL380_TRIG_CFG_SINC_RATE_MSK, trig_cfg) << 1) |
+ (FIELD_GET(ADXL380_TRIG_CFG_DEC_2X_MSK, trig_cfg) & 1);
+
+ *odr = st->chip_info->samp_freq_tbl[odr_idx];
+
+ return 0;
+}
+
+static const int adxl380_lpf_div[] = {
+ 1, 4, 8, 16,
+};
+
+static int adxl380_fill_lpf_tbl(struct adxl380_state *st)
+{
+ int ret, i;
+ int odr;
+
+ ret = adxl380_get_odr(st, &odr);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(st->lpf_tbl); i++)
+ st->lpf_tbl[i] = DIV_ROUND_CLOSEST(odr, adxl380_lpf_div[i]);
+
+ return 0;
+}
+
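+/*
+ * HPF corner multipliers, presumably taken from the datasheet: each entry
+ * expresses the corner frequency as a fraction of the ODR scaled by 1e8, so
+ * adxl380_fill_hpf_tbl() below computes corner = odr_hz * multiplier / 1e8 Hz.
+ */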
+static const int adxl380_hpf_mul[] = {
+ 0, 247000, 62084, 15545, 3862, 954, 238,
+};
+
+static int adxl380_fill_hpf_tbl(struct adxl380_state *st)
+{
+ int i, ret, odr_hz;
+ u32 multiplier;
+ u64 div, rem, odr;
+
+ ret = adxl380_get_odr(st, &odr_hz);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(adxl380_hpf_mul); i++) {
+ odr = mul_u64_u32_shr(odr_hz, MEGA, 0);
+ multiplier = adxl380_hpf_mul[i];
+ div = div64_u64_rem(mul_u64_u32_shr(odr, multiplier, 0),
+ TERA * 100, &rem);
+
+ st->hpf_tbl[i][0] = div;
+ st->hpf_tbl[i][1] = div_u64(rem, MEGA * 100);
+ }
+
+ return 0;
+}
+
+static int adxl380_set_odr(struct adxl380_state *st, u8 odr)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG,
+ ADXL380_TRIG_CFG_DEC_2X_MSK,
+ FIELD_PREP(ADXL380_TRIG_CFG_DEC_2X_MSK, odr & 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG,
+ ADXL380_TRIG_CFG_SINC_RATE_MSK,
+ FIELD_PREP(ADXL380_TRIG_CFG_SINC_RATE_MSK, odr >> 1));
+ if (ret)
+ return ret;
+
+ ret = adxl380_set_measure_en(st, true);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_lpf_tbl(st);
+ if (ret)
+ return ret;
+
+ return adxl380_fill_hpf_tbl(st);
+}
+
+static int adxl380_find_match_1d_tbl(const int *array, unsigned int size,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (val == array[i])
+ return i;
+ }
+
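+ /* No exact match was found, fall back to the last table entry. */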
+ return size - 1;
+}
+
+static int adxl380_find_match_2d_tbl(const int (*freq_tbl)[2], int n, int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (freq_tbl[i][0] == val && freq_tbl[i][1] == val2)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int adxl380_get_lpf(struct adxl380_state *st, int *lpf)
+{
+ int ret;
+ unsigned int trig_cfg, lpf_idx;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
+ lpf_idx = FIELD_GET(ADXL380_FILTER_LPF_MODE_MSK, trig_cfg);
+
+ *lpf = st->lpf_tbl[lpf_idx];
+
+ return 0;
+}
+
+static int adxl380_set_lpf(struct adxl380_state *st, u8 lpf)
+{
+ int ret;
+ u8 eq_bypass = 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ if (lpf)
+ eq_bypass = 1;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_EQ_FILT_MSK,
+ FIELD_PREP(ADXL380_FILTER_EQ_FILT_MSK, eq_bypass));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_LPF_MODE_MSK,
+ FIELD_PREP(ADXL380_FILTER_LPF_MODE_MSK, lpf));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_get_hpf(struct adxl380_state *st, int *hpf_int, int *hpf_frac)
+{
+ int ret;
+ unsigned int trig_cfg, hpf_idx;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
+ hpf_idx = FIELD_GET(ADXL380_FILTER_HPF_CORNER_MSK, trig_cfg);
+
+ *hpf_int = st->hpf_tbl[hpf_idx][0];
+ *hpf_frac = st->hpf_tbl[hpf_idx][1];
+
+ return 0;
+}
+
+static int adxl380_set_hpf(struct adxl380_state *st, u8 hpf)
+{
+ int ret;
+ u8 hpf_path = 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ if (hpf)
+ hpf_path = 1;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_HPF_PATH_MSK,
+ FIELD_PREP(ADXL380_FILTER_HPF_PATH_MSK, hpf_path));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_HPF_CORNER_MSK,
+ FIELD_PREP(ADXL380_FILTER_HPF_CORNER_MSK, hpf));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int _adxl380_set_act_inact_time_ms(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ u32 ms)
+{
+ u8 reg = adxl380_time_reg_high_addr[act];
+ unsigned int reg_val;
+ int ret;
+
+ /* 500us per code is the scale factor of TIME_ACT / TIME_INACT registers */
+ reg_val = min(DIV_ROUND_CLOSEST(ms * 1000, 500), ADXL380_TIME_MAX);
+
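+ /*
+ * TIME_ACT / TIME_INACT are 24-bit values spread over three consecutive
+ * registers, written MSB first.
+ */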
+ put_unaligned_be24(reg_val, &st->transf_buf[0]);
+
+ ret = regmap_bulk_write(st->regmap, reg, st->transf_buf, sizeof(st->transf_buf));
+ if (ret)
+ return ret;
+
+ if (act == ADXL380_ACTIVITY)
+ st->act_time_ms = ms;
+ else
+ st->inact_time_ms = ms;
+
+ return 0;
+}
+
+static int adxl380_set_act_inact_time_ms(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ u32 ms)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = _adxl380_set_act_inact_time_ms(st, act, ms);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_set_range(struct adxl380_state *st, u8 range)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG,
+ ADXL380_OP_MODE_RANGE_MSK,
+ FIELD_PREP(ADXL380_OP_MODE_RANGE_MSK, range));
+ if (ret)
+ return ret;
+
+ adxl380_scale_act_inact_thresholds(st, st->range, range);
+
+ /* Activity thresholds depend on range */
+ ret = adxl380_write_act_inact_threshold(st, ADXL380_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_threshold(st, ADXL380_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
+
+ st->range = range;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_act_inact_en(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool en)
+{
+ if (type == ADXL380_ACTIVITY)
+ return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG,
+ ADXL380_ACT_EN_MSK,
+ FIELD_PREP(ADXL380_ACT_EN_MSK, en));
+
+ return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG,
+ ADXL380_INACT_EN_MSK,
+ FIELD_PREP(ADXL380_INACT_EN_MSK, en));
+}
+
+static int adxl380_read_act_inact_int(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool *en)
+{
+ int ret;
+ unsigned int reg_val;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, st->int_map[0], &reg_val);
+ if (ret)
+ return ret;
+
+ if (type == ADXL380_ACTIVITY)
+ *en = FIELD_GET(ADXL380_INT_MAP0_ACT_INT0_MSK, reg_val);
+ else
+ *en = FIELD_GET(ADXL380_INT_MAP0_INACT_INT0_MSK, reg_val);
+
+ return 0;
+}
+
+static int adxl380_write_act_inact_int(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ bool en)
+{
+ if (act == ADXL380_ACTIVITY)
+ return regmap_update_bits(st->regmap, st->int_map[0],
+ ADXL380_INT_MAP0_ACT_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_ACT_INT0_MSK, en));
+
+ return regmap_update_bits(st->regmap, st->int_map[0],
+ ADXL380_INT_MAP0_INACT_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_INACT_INT0_MSK, en));
+}
+
+static int adxl380_act_inact_config(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool en)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_en(st, type, en);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_int(st, type, en);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_tap_axis(struct adxl380_state *st,
+ enum adxl380_axis axis)
+{
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TAP_CFG_REG,
+ ADXL380_TAP_AXIS_MSK,
+ FIELD_PREP(ADXL380_TAP_AXIS_MSK, axis));
+ if (ret)
+ return ret;
+
+ st->tap_axis_en = axis;
+
+ return 0;
+}
+
+static int adxl380_read_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool *en)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(st->regmap, st->int_map[1], &reg_val);
+ if (ret)
+ return ret;
+
+ if (type == ADXL380_SINGLE_TAP)
+ *en = FIELD_GET(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, reg_val);
+ else
+ *en = FIELD_GET(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, reg_val);
+
+ return 0;
+}
+
+static int adxl380_write_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool en)
+{
+ if (type == ADXL380_SINGLE_TAP)
+ return regmap_update_bits(st->regmap, st->int_map[1],
+ ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, en));
+
+ return regmap_update_bits(st->regmap, st->int_map[1],
+ ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, en));
+}
+
+static int adxl380_tap_config(struct adxl380_state *st,
+ enum adxl380_axis axis,
+ enum adxl380_tap_type type,
+ bool en)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_tap_axis(st, axis);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_tap_int(st, type, en);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_set_fifo_samples(struct adxl380_state *st)
+{
+ int ret;
+ u16 fifo_samples = st->watermark * st->fifo_set_size;
+
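+ /*
+ * The FIFO sample count is a 9-bit value: bit 8 goes into FIFO_CONFIG_0
+ * and bits 7:0 into FIFO_CONFIG_1.
+ */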
+ ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG,
+ ADXL380_FIFO_SAMPLES_8_MSK,
+ FIELD_PREP(ADXL380_FIFO_SAMPLES_8_MSK,
+ (fifo_samples & BIT(8))));
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, ADXL380_FIFO_CONFIG_1_REG,
+ fifo_samples & 0xFF);
+}
+
+static int adxl380_get_status(struct adxl380_state *st, u8 *status0, u8 *status1)
+{
+ int ret;
+
+ /* STATUS0, STATUS1 are adjacent regs */
+ ret = regmap_bulk_read(st->regmap, ADXL380_STATUS_0_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ *status0 = st->transf_buf[0];
+ *status1 = st->transf_buf[1];
+
+ return 0;
+}
+
+static int adxl380_get_fifo_entries(struct adxl380_state *st, u16 *fifo_entries)
+{
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap, ADXL380_FIFO_STATUS_0_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
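+ /*
+ * The entry count is 9 bits wide: FIFO_STATUS_0 holds bits 7:0 and bit 0
+ * of FIFO_STATUS_1 supplies bit 8.
+ */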
+ *fifo_entries = st->transf_buf[0] | ((BIT(0) & st->transf_buf[1]) << 8);
+
+ return 0;
+}
+
+static void adxl380_push_event(struct iio_dev *indio_dev, s64 timestamp,
+ u8 status1)
+{
+ if (FIELD_GET(ADXL380_STATUS_1_ACT_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+
+ if (FIELD_GET(ADXL380_STATUS_1_INACT_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ timestamp);
+
+ if (FIELD_GET(ADXL380_STATUS_1_SINGLE_TAP_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE, IIO_EV_DIR_SINGLETAP),
+ timestamp);
+
+ if (FIELD_GET(ADXL380_STATUS_1_DOUBLE_TAP_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE, IIO_EV_DIR_DOUBLETAP),
+ timestamp);
+}
+
+static irqreturn_t adxl380_irq_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct adxl380_state *st = iio_priv(indio_dev);
+ u8 status0, status1;
+ u16 fifo_entries;
+ int i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_get_status(st, &status0, &status1);
+ if (ret)
+ return IRQ_HANDLED;
+
+ adxl380_push_event(indio_dev, iio_get_time_ns(indio_dev), status1);
+
+ if (!FIELD_GET(ADXL380_STATUS_0_FIFO_WM_MSK, status0))
+ return IRQ_HANDLED;
+
+ ret = adxl380_get_fifo_entries(st, &fifo_entries);
+ if (ret)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < fifo_entries; i += st->fifo_set_size) {
+ ret = regmap_noinc_read(st->regmap, ADXL380_FIFO_DATA,
+ &st->fifo_buf[i],
+ 2 * st->fifo_set_size);
+ if (ret)
+ return IRQ_HANDLED;
+ iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int adxl380_write_calibbias_value(struct adxl380_state *st,
+ unsigned long chan_addr,
+ s8 calibbias)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, calibbias);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_read_calibbias_value(struct adxl380_state *st,
+ unsigned long chan_addr,
+ int *calibbias)
+{
+ int ret;
+ unsigned int reg_val;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, &reg_val);
+ if (ret)
+ return ret;
+
+ *calibbias = sign_extend32(reg_val, 7);
+
+ return 0;
+}
+
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ADXL380_FIFO_SAMPLES);
+}
+
+static ssize_t adxl380_get_fifo_watermark(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", st->watermark);
+}
+
+static ssize_t adxl380_get_fifo_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(st->regmap, ADXL380_DIG_EN_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n",
+ FIELD_GET(ADXL380_FIFO_EN_MSK, reg_val));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
+ adxl380_get_fifo_watermark, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+ adxl380_get_fifo_enabled, NULL, 0);
+
+static const struct iio_dev_attr *adxl380_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_watermark_min,
+ &iio_dev_attr_hwfifo_watermark_max,
+ &iio_dev_attr_hwfifo_watermark,
+ &iio_dev_attr_hwfifo_enabled,
+ NULL
+};
+
+static int adxl380_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap,
+ st->int_map[0],
+ ADXL380_INT_MAP0_FIFO_WM_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 1));
+ if (ret)
+ return ret;
+
+ for_each_clear_bit(i, indio_dev->active_scan_mask, ADXL380_CH_NUM) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+ 0 << (4 + i));
+ if (ret)
+ return ret;
+ }
+
+ st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev));
+
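+ /*
+ * The watermark is expressed in sample sets; clamp it so the total
+ * number of samples still fits in the hardware FIFO.
+ */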
+ if ((st->watermark * st->fifo_set_size) > ADXL380_FIFO_SAMPLES)
+ st->watermark = (ADXL380_FIFO_SAMPLES / st->fifo_set_size);
+
+ ret = adxl380_set_fifo_samples(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK,
+ FIELD_PREP(ADXL380_FIFO_EN_MSK, 1));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret, i;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap,
+ st->int_map[0],
+ ADXL380_INT_MAP0_FIFO_WM_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 0));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+ 1 << (4 + i));
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK,
+ FIELD_PREP(ADXL380_FIFO_EN_MSK, 0));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static const struct iio_buffer_setup_ops adxl380_buffer_ops = {
+ .postenable = adxl380_buffer_postenable,
+ .predisable = adxl380_buffer_predisable,
+};
+
+static int adxl380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adxl380_read_chn(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ scoped_guard(mutex, &st->lock) {
+ *val = st->chip_info->scale_tbl[st->range][0];
+ *val2 = st->chip_info->scale_tbl[st->range][1];
+ }
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ /* 10.2 LSB / Degree Celsius */
+ *val = 10000;
+ *val2 = 102;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = st->chip_info->temp_offset;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ ret = adxl380_read_calibbias_value(st, chan->scan_index, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adxl380_get_odr(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = adxl380_get_lpf(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ ret = adxl380_get_hpf(st, val, val2);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static int adxl380_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)st->chip_info->scale_tbl;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = ARRAY_SIZE(st->chip_info->scale_tbl) * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)st->chip_info->samp_freq_tbl;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->chip_info->samp_freq_tbl);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)st->lpf_tbl;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->lpf_tbl);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)st->hpf_tbl;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->hpf_tbl) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int odr_index, lpf_index, hpf_index, range_index;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ odr_index = adxl380_find_match_1d_tbl(st->chip_info->samp_freq_tbl,
+ ARRAY_SIZE(st->chip_info->samp_freq_tbl),
+ val);
+ return adxl380_set_odr(st, odr_index);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adxl380_write_calibbias_value(st, chan->scan_index, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ lpf_index = adxl380_find_match_1d_tbl(st->lpf_tbl,
+ ARRAY_SIZE(st->lpf_tbl),
+ val);
+ return adxl380_set_lpf(st, lpf_index);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ hpf_index = adxl380_find_match_2d_tbl(st->hpf_tbl,
+ ARRAY_SIZE(st->hpf_tbl),
+ val, val2);
+ if (hpf_index < 0)
+ return hpf_index;
+ return adxl380_set_hpf(st, hpf_index);
+ case IIO_CHAN_INFO_SCALE:
+ range_index = adxl380_find_match_2d_tbl(st->chip_info->scale_tbl,
+ ARRAY_SIZE(st->chip_info->scale_tbl),
+ val, val2);
+ if (range_index < 0)
+ return range_index;
+ return adxl380_set_range(st, range_index);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int adxl380_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+ bool int_en;
+ bool tap_axis_en = false;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ tap_axis_en = st->tap_axis_en == ADXL380_X_AXIS;
+ break;
+ case IIO_MOD_Y:
+ tap_axis_en = st->tap_axis_en == ADXL380_Y_AXIS;
+ break;
+ case IIO_MOD_Z:
+ tap_axis_en = st->tap_axis_en == ADXL380_Z_AXIS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = adxl380_read_act_inact_int(st, ADXL380_ACTIVITY, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ case IIO_EV_DIR_FALLING:
+ ret = adxl380_read_act_inact_int(st, ADXL380_INACTIVITY, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ case IIO_EV_DIR_SINGLETAP:
+ ret = adxl380_read_tap_int(st, ADXL380_SINGLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en && tap_axis_en;
+ case IIO_EV_DIR_DOUBLETAP:
+ ret = adxl380_read_tap_int(st, ADXL380_DOUBLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en && tap_axis_en;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ enum adxl380_axis axis;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ axis = ADXL380_X_AXIS;
+ break;
+ case IIO_MOD_Y:
+ axis = ADXL380_Y_AXIS;
+ break;
+ case IIO_MOD_Z:
+ axis = ADXL380_Z_AXIS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_act_inact_config(st, ADXL380_ACTIVITY, state);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_act_inact_config(st, ADXL380_INACTIVITY, state);
+ case IIO_EV_DIR_SINGLETAP:
+ return adxl380_tap_config(st, axis, ADXL380_SINGLE_TAP, state);
+ case IIO_EV_DIR_DOUBLETAP:
+ return adxl380_tap_config(st, axis, ADXL380_DOUBLE_TAP, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ switch (info) {
+ case IIO_EV_INFO_VALUE: {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = st->act_threshold;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ *val = st->inact_threshold;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = st->act_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ *val = st->inact_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = st->tap_threshold;
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ *val = st->tap_window_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ *val = st->tap_latent_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ u32 val_ms, val_us;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_set_act_inact_threshold(indio_dev,
+ ADXL380_ACTIVITY, val);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_set_act_inact_threshold(indio_dev,
+ ADXL380_INACTIVITY, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ val_ms = val * 1000 + DIV_ROUND_UP(val2, 1000);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_set_act_inact_time_ms(st,
+ ADXL380_ACTIVITY, val_ms);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_set_act_inact_time_ms(st,
+ ADXL380_INACTIVITY, val_ms);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return adxl380_set_tap_threshold_value(indio_dev, val);
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ val_us = val * 1000000 + val2;
+ return adxl380_write_tap_time_us(st,
+ ADXL380_TAP_TIME_WINDOW,
+ val_us);
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ val_us = val * 1000000 + val2;
+ return adxl380_write_tap_time_us(st,
+ ADXL380_TAP_TIME_LATENT,
+ val_us);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int vals[2];
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ vals[0] = st->tap_duration_us;
+ vals[1] = MICRO;
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, vals);
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret, val_int, val_fract_us;
+
+ guard(mutex)(&st->lock);
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract_us);
+ if (ret)
+ return ret;
+
+ /* maximum value is 255 * 625 us = 0.159375 seconds */
+ if (val_int || val_fract_us > 159375 || val_fract_us < 0)
+ return -EINVAL;
+
+ ret = adxl380_write_tap_dur_us(indio_dev, val_fract_us);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_maxtomin_time, 0);
+
+static struct attribute *adxl380_event_attributes[] = {
+ &iio_dev_attr_in_accel_gesture_tap_maxtomin_time.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adxl380_event_attribute_group = {
+ .attrs = adxl380_event_attributes,
+};
+
+static int adxl380_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static int adxl380_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ st->watermark = min(val, ADXL380_FIFO_SAMPLES);
+
+ return 0;
+}
+
+static const struct iio_info adxl380_info = {
+ .read_raw = adxl380_read_raw,
+ .read_avail = &adxl380_read_avail,
+ .write_raw = adxl380_write_raw,
+ .write_raw_get_fmt = adxl380_write_raw_get_fmt,
+ .read_event_config = adxl380_read_event_config,
+ .write_event_config = adxl380_write_event_config,
+ .read_event_value = adxl380_read_event_value,
+ .write_event_value = adxl380_write_event_value,
+ .event_attrs = &adxl380_event_attribute_group,
+ .debugfs_reg_access = &adxl380_reg_access,
+ .hwfifo_set_watermark = adxl380_set_watermark,
+};
+
+static const struct iio_event_spec adxl380_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
+#define ADXL380_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .event_spec = adxl380_events, \
+ .num_event_specs = ARRAY_SIZE(adxl380_events) \
+}
+
+static const struct iio_chan_spec adxl380_channels[] = {
+ ADXL380_ACCEL_CHANNEL(0, ADXL380_X_DATA_H_REG, X),
+ ADXL380_ACCEL_CHANNEL(1, ADXL380_Y_DATA_H_REG, Y),
+ ADXL380_ACCEL_CHANNEL(2, ADXL380_Z_DATA_H_REG, Z),
+ {
+ .type = IIO_TEMP,
+ .address = ADXL380_T_DATA_H_REG,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ },
+ },
+};
+
+static int adxl380_config_irq(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ unsigned long irq_flag;
+ struct irq_data *desc;
+ u32 irq_type;
+ u8 polarity;
+ int ret;
+
+ st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT0");
+ if (st->irq > 0) {
+ st->int_map[0] = ADXL380_INT0_MAP0_REG;
+ st->int_map[1] = ADXL380_INT0_MAP1_REG;
+ } else {
+ st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT1");
+ if (st->irq < 0)
+ return dev_err_probe(st->dev, -ENODEV,
+ "no interrupt name specified\n");
+ st->int_map[0] = ADXL380_INT1_MAP0_REG;
+ st->int_map[1] = ADXL380_INT1_MAP1_REG;
+ }
+
+ desc = irq_get_irq_data(st->irq);
+ if (!desc)
+ return dev_err_probe(st->dev, -EINVAL, "Could not find IRQ %d\n", st->irq);
+
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQ_TYPE_LEVEL_HIGH) {
+ polarity = 0;
+ irq_flag = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ } else if (irq_type == IRQ_TYPE_LEVEL_LOW) {
+ polarity = 1;
+ irq_flag = IRQF_TRIGGER_LOW | IRQF_ONESHOT;
+ } else {
+ return dev_err_probe(st->dev, -EINVAL,
+ "Invalid interrupt 0x%x. Only level interrupts supported\n",
+ irq_type);
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_INT0_REG,
+ ADXL380_INT0_POL_MSK,
+ FIELD_PREP(ADXL380_INT0_POL_MSK, polarity));
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(st->dev, st->irq, NULL,
+ adxl380_irq_handler, irq_flag,
+ indio_dev->name, indio_dev);
+}
+
+static int adxl380_setup(struct iio_dev *indio_dev)
+{
+ unsigned int reg_val;
+ u16 part_id, chip_id;
+ int ret, i;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ ret = regmap_read(st->regmap, ADXL380_DEVID_AD_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ if (reg_val != ADXL380_DEVID_AD_VAL)
+ dev_warn(st->dev, "Unknown chip id %x\n", reg_val);
+
+ ret = regmap_bulk_read(st->regmap, ADLX380_PART_ID_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ part_id = get_unaligned_be16(st->transf_buf);
+ part_id >>= 4;
+
+ if (part_id != ADXL380_ID_VAL)
+ dev_warn(st->dev, "Unknown part id %x\n", part_id);
+
+ ret = regmap_read(st->regmap, ADXL380_MISC_0_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ /* Bit to differentiate between ADXL380/382. */
+ if (reg_val & ADXL380_XL382_MSK)
+ chip_id = ADXL382_ID_VAL;
+ else
+ chip_id = ADXL380_ID_VAL;
+
+ if (chip_id != st->chip_info->chip_id)
+ dev_warn(st->dev, "Unknown chip id %x\n", chip_id);
+
+ ret = regmap_write(st->regmap, ADXL380_RESET_REG, ADXL380_RESET_CODE);
+ if (ret)
+ return ret;
+
+ /*
+ * A latency of approximately 0.5 ms is required after a soft reset,
+ * as stated in the REG_RESET register description.
+ */
+ fsleep(500);
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+ 1 << (4 + i));
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG,
+ ADXL380_FIFO_MODE_MSK,
+ FIELD_PREP(ADXL380_FIFO_MODE_MSK, ADXL380_FIFO_STREAMED));
+ if (ret)
+ return ret;
+
+ /* Select all three axes for activity/inactivity detection. */
+ ret = regmap_update_bits(st->regmap, ADXL380_SNSR_AXIS_EN_REG,
+ ADXL380_ACT_INACT_AXIS_EN_MSK,
+ FIELD_PREP(ADXL380_ACT_INACT_AXIS_EN_MSK,
+ ADXL380_ACT_INACT_AXIS_EN_MSK));
+ if (ret)
+ return ret;
+
+ ret = adxl380_config_irq(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_lpf_tbl(st);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_hpf_tbl(st);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+int adxl380_probe(struct device *dev, struct regmap *regmap,
+ const struct adxl380_chip_info *chip_info)
+{
+ struct iio_dev *indio_dev;
+ struct adxl380_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->dev = dev;
+ st->regmap = regmap;
+ st->chip_info = chip_info;
+
+ mutex_init(&st->lock);
+
+ indio_dev->channels = adxl380_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxl380_channels);
+ indio_dev->name = chip_info->name;
+ indio_dev->info = &adxl380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_regulator_get_enable(dev, "vddio");
+ if (ret)
+ return dev_err_probe(st->dev, ret,
+ "Failed to get vddio regulator\n");
+
+ ret = devm_regulator_get_enable(st->dev, "vsupply");
+ if (ret)
+ return dev_err_probe(st->dev, ret,
+ "Failed to get vsupply regulator\n");
+
+ ret = adxl380_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev,
+ &adxl380_buffer_ops,
+ adxl380_fifo_attributes);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS_GPL(adxl380_probe, IIO_ADXL380);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/adxl380.h b/drivers/iio/accel/adxl380.h
new file mode 100644
index 000000000000..a683625d897a
--- /dev/null
+++ b/drivers/iio/accel/adxl380.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ADXL380 3-Axis Digital Accelerometer
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#ifndef _ADXL380_H_
+#define _ADXL380_H_
+
+struct adxl380_chip_info {
+ const char *name;
+ const int scale_tbl[3][2];
+ const int samp_freq_tbl[3];
+ const int temp_offset;
+ const u16 chip_id;
+};
+
+extern const struct adxl380_chip_info adxl380_chip_info;
+extern const struct adxl380_chip_info adxl382_chip_info;
+
+int adxl380_probe(struct device *dev, struct regmap *regmap,
+ const struct adxl380_chip_info *chip_info);
+bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg);
+
+#endif /* _ADXL380_H_ */
diff --git a/drivers/iio/accel/adxl380_i2c.c b/drivers/iio/accel/adxl380_i2c.c
new file mode 100644
index 000000000000..1dc1e77be815
--- /dev/null
+++ b/drivers/iio/accel/adxl380_i2c.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer I2C driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adxl380.h"
+
+static const struct regmap_config adxl380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .readable_noinc_reg = adxl380_readable_noinc_reg,
+};
+
+static int adxl380_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+ const struct adxl380_chip_info *chip_data;
+
+ chip_data = i2c_get_match_data(client);
+
+ regmap = devm_regmap_init_i2c(client, &adxl380_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adxl380_probe(&client->dev, regmap, chip_data);
+}
+
+static const struct i2c_device_id adxl380_i2c_id[] = {
+ { "adxl380", (kernel_ulong_t)&adxl380_chip_info },
+ { "adxl382", (kernel_ulong_t)&adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adxl380_i2c_id);
+
+static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl380", .data = &adxl380_chip_info },
+ { .compatible = "adi,adxl382", .data = &adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl380_of_match);
+
+static struct i2c_driver adxl380_i2c_driver = {
+ .driver = {
+ .name = "adxl380_i2c",
+ .of_match_table = adxl380_of_match,
+ },
+ .probe = adxl380_i2c_probe,
+ .id_table = adxl380_i2c_id,
+};
+
+module_i2c_driver(adxl380_i2c_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_ADXL380);
diff --git a/drivers/iio/accel/adxl380_spi.c b/drivers/iio/accel/adxl380_spi.c
new file mode 100644
index 000000000000..e7b5778cb6cf
--- /dev/null
+++ b/drivers/iio/accel/adxl380_spi.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer SPI driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "adxl380.h"
+
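+/*
+ * The command byte carries the 7-bit register address in its upper bits;
+ * the padded LSB presumably doubles as the read/write flag (set for reads),
+ * matching read_flag_mask below.
+ */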
+static const struct regmap_config adxl380_spi_regmap_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+ .readable_noinc_reg = adxl380_readable_noinc_reg,
+};
+
+static int adxl380_spi_probe(struct spi_device *spi)
+{
+ const struct adxl380_chip_info *chip_data;
+ struct regmap *regmap;
+
+ chip_data = spi_get_device_match_data(spi);
+
+ regmap = devm_regmap_init_spi(spi, &adxl380_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adxl380_probe(&spi->dev, regmap, chip_data);
+}
+
+static const struct spi_device_id adxl380_spi_id[] = {
+ { "adxl380", (kernel_ulong_t)&adxl380_chip_info },
+ { "adxl382", (kernel_ulong_t)&adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adxl380_spi_id);
+
+static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl380", .data = &adxl380_chip_info },
+ { .compatible = "adi,adxl382", .data = &adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl380_of_match);
+
+static struct spi_driver adxl380_spi_driver = {
+ .driver = {
+ .name = "adxl380_spi",
+ .of_match_table = adxl380_of_match,
+ },
+ .probe = adxl380_spi_probe,
+ .id_table = adxl380_spi_id,
+};
+
+module_spi_driver(adxl380_spi_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_ADXL380);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 6581772cb0c4..2445a0f7bc2b 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -876,8 +876,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index e90e2f01550a..89db242f06e0 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -795,21 +796,19 @@ static int bma400_enable_steps(struct bma400_data *data, int val)
static int bma400_get_steps_reg(struct bma400_data *data, int *val)
{
- u8 *steps_raw;
int ret;
- steps_raw = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL);
+ u8 *steps_raw __free(kfree) = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL);
if (!steps_raw)
return -ENOMEM;
ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG,
steps_raw, BMA400_STEP_RAW_LEN);
- if (ret) {
- kfree(steps_raw);
+ if (ret)
return ret;
- }
+
*val = get_unaligned_le24(steps_raw);
- kfree(steps_raw);
+
return IIO_VAL_INT;
}
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index ec13c044b304..765d8c4a4c4d 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -53,7 +53,7 @@ static int bma400_regmap_spi_write(void *context, const void *data,
return spi_write(spi, data, count);
}
-static struct regmap_bus bma400_regmap_bus = {
+static const struct regmap_bus bma400_regmap_bus = {
.read = bma400_regmap_spi_read,
.write = bma400_regmap_spi_write,
.read_flag_mask = BIT(7),
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index ae0cd48a3e29..0f32c1e92b4d 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -10,9 +10,9 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -387,7 +387,7 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
struct iio_mount_matrix *orientation)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct acpi_device *adev = ACPI_COMPANION(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
char *name, *alt_name, *label;
if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
@@ -398,9 +398,9 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
label = "accel-display";
}
- if (acpi_has_method(adev->handle, "ROTM")) {
+ if (acpi_has_method(handle, "ROTM")) {
name = "ROTM";
- } else if (acpi_has_method(adev->handle, alt_name)) {
+ } else if (acpi_has_method(handle, alt_name)) {
name = alt_name;
indio_dev->label = label;
} else {
@@ -514,7 +514,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
*/
irq_info = bmc150_accel_interrupts_int1;
if (data->type == BOSCH_BMC156 ||
- irq == of_irq_get_byname(dev->of_node, "INT2"))
+ irq == fwnode_irq_get_byname(dev_fwnode(dev), "INT2"))
irq_info = bmc150_accel_interrupts_int2;
for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
@@ -1007,8 +1007,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
int j, bit;
j = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, bit)
memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
sizeof(data->scan.channels[0]));
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 7b419a7b2478..df1adc059aa9 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -36,7 +36,7 @@ static int bmi088_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, addr, sizeof(addr), val, val_size);
}
-static struct regmap_bus bmi088_regmap_bus = {
+static const struct regmap_bus bmi088_regmap_bus = {
.write = bmi088_regmap_spi_write,
.read = bmi088_regmap_spi_read,
};
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 0f403342b1fc..f7e4dc02b34d 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -62,7 +62,7 @@ static int cros_ec_accel_legacy_read_cmd(struct iio_dev *indio_dev,
return ret;
}
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
*data = st->resp->dump.sensor[sensor_num].data[i] *
st->sign[i];
data++;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index d25e31613413..acadabec4df7 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -966,8 +966,7 @@ static int fxls8962af_fifo_flush(struct iio_dev *indio_dev)
int j, bit;
j = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
sizeof(data->scan.channels[0]));
}
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 8280d2bef0a3..b76df8816323 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -173,6 +173,7 @@ enum kx_chipset {
KXCJ91008,
KXTJ21009,
KXTF9,
+ KX0221020,
KX0231025,
KX_MAX_CHIPS /* this must be last */
};
@@ -580,8 +581,8 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
return ret;
}
- /* On KX023, route all used interrupts to INT1 for now */
- if (data->chipset == KX0231025 && data->client->irq > 0) {
+ /* On KX023 and KX022, route all used interrupts to INT1 for now */
+ if ((data->chipset == KX0231025 || data->chipset == KX0221020) && data->client->irq > 0) {
ret = i2c_smbus_write_byte_data(data->client, KX023_REG_INC4,
KX023_REG_INC4_DRDY1 |
KX023_REG_INC4_WUFI1);
@@ -1507,6 +1508,7 @@ static int kxcjk1013_probe(struct i2c_client *client)
case KXTF9:
data->regs = &kxtf9_regs;
break;
+ case KX0221020:
case KX0231025:
data->regs = &kx0231025_regs;
break;
@@ -1712,6 +1714,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
{"kxtf9", KXTF9},
+ {"kx022-1020", KX0221020},
{"kx023-1025", KX0231025},
{"SMO8500", KXCJ91008},
{}
@@ -1724,6 +1727,7 @@ static const struct of_device_id kxcjk1013_of_match[] = {
{ .compatible = "kionix,kxcj91008", },
{ .compatible = "kionix,kxtj21009", },
{ .compatible = "kionix,kxtf9", },
+ { .compatible = "kionix,kx022-1020", },
{ .compatible = "kionix,kx023-1025", },
{ }
};
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 4cdbf5424a53..57025354c7cd 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -900,8 +900,7 @@ static irqreturn_t msa311_buffer_thread(int irq, void *p)
mutex_lock(&msa311->lock);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
chan = &msa311_channels[bit];
err = msa311_get_axis(msa311, chan, &axis);
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index 306482b70fad..fca77d660625 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -494,8 +494,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
int bit, ret, val, i = 0;
s16 *channels = (s16 *)data->buffer;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
if (ret) {
dev_err_ratelimited(&data->spi->dev,
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index b3534d5751b9..abead190254b 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -448,8 +448,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
goto err;
}
} else {
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = stk8312_read_accel(data, bit);
if (ret < 0) {
mutex_unlock(&data->lock);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 6d3c7f444d21..a32a77324e92 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -330,8 +330,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
goto err;
}
} else {
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = stk8ba50_read_accel(data,
stk8ba50_channel_table[bit]);
if (ret < 0)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f60fe85a30d5..97ece1a4b7e3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,18 @@ config AD_SIGMA_DELTA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+config AD4000
+ tristate "Analog Devices AD4000 ADC Driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD4000 high speed
+ SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4000.
+
config AD4130
tristate "Analog Device AD4130 ADC Driver"
depends on SPI
@@ -36,6 +48,17 @@ config AD4130
To compile this driver as a module, choose M here: the module will be
called ad4130.
+config AD4695
+ tristate "Analog Device AD4695 ADC Driver"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices AD4695 and similar
+ analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4695.
+
config AD7091R
tristate
@@ -991,6 +1014,19 @@ config NPCM_ADC
This driver can also be built as a module. If so, the module
will be called npcm_adc.
+config PAC1921
+ tristate "Microchip Technology PAC1921 driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Microchip Technology's PAC1921
+ High-Side Power/Current Monitor with Analog Output.
+
+ This driver can also be built as a module. If so, the module
+ will be called pac1921.
+
config PAC1934
tristate "Microchip Technology PAC1934 driver"
depends on I2C
@@ -1156,6 +1192,16 @@ config SC27XX_ADC
This driver can also be built as a module. If so, the module
will be called sc27xx_adc.
+config SOPHGO_CV1800B_ADC
+ tristate "Sophgo CV1800B SARADC"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Say yes here to build support for the SARADC integrated inside
+ the Sophgo CV1800B SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called sophgo_cv1800b_adc.
+
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1171,6 +1217,7 @@ config SD_ADC_MODULATOR
tristate "Generic sigma delta modulator"
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_BACKEND
help
Select this option to enables sigma delta modulator. This driver can
support generic sigma delta modulators.
@@ -1225,6 +1272,7 @@ config STM32_DFSDM_ADC
select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
select IIO_TRIGGERED_BUFFER
+ select IIO_BACKEND
help
Select this option to support ADCSigma delta modulator for
STMicroelectronics STM32 digital filter for sigma delta converter.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d370e066544e..7b91cd98c0e0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,7 +6,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD4000) += ad4000.o
obj-$(CONFIG_AD4130) += ad4130.o
+obj-$(CONFIG_AD4695) += ad4695.o
obj-$(CONFIG_AD7091R) += ad7091r-base.o
obj-$(CONFIG_AD7091R5) += ad7091r5.o
obj-$(CONFIG_AD7091R8) += ad7091r8.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
+obj-$(CONFIG_PAC1921) += pac1921.o
obj-$(CONFIG_PAC1934) += pac1934.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
@@ -105,6 +108,7 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
+obj-$(CONFIG_SOPHGO_CV1800B_ADC) += sophgo-cv1800b-adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
new file mode 100644
index 000000000000..6ea491245084
--- /dev/null
+++ b/drivers/iio/adc/ad4000.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD4000 SPI ADC driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+#include <linux/util_macros.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define AD4000_READ_COMMAND 0x54
+#define AD4000_WRITE_COMMAND 0x14
+
+#define AD4000_CONFIG_REG_DEFAULT 0xE1
+
+/* AD4000 Configuration Register programmable bits */
+#define AD4000_CFG_SPAN_COMP BIT(3) /* Input span compression */
+#define AD4000_CFG_HIGHZ BIT(2) /* High impedance mode */
+
+#define AD4000_SCALE_OPTIONS 2
+
+#define AD4000_TQUIET1_NS 190
+#define AD4000_TQUIET2_NS 60
+#define AD4000_TCONV_NS 320
+
+#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .differential = 1, \
+ .channel = 0, \
+ .channel2 = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _real_bits, \
+ .storagebits = _storage_bits, \
+ .shift = _storage_bits - _real_bits, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+ __AD4000_DIFF_CHANNEL((_sign), (_real_bits), \
+ ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+
+#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _real_bits, \
+ .storagebits = _storage_bits, \
+ .shift = _storage_bits - _real_bits, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+ __AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \
+ ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+
+static const char * const ad4000_power_supplies[] = {
+ "vdd", "vio"
+};
+
+enum ad4000_sdi {
+ AD4000_SDI_MOSI,
+ AD4000_SDI_VIO,
+ AD4000_SDI_CS,
+ AD4000_SDI_GND,
+};
+
+/* maps adi,sdi-pin property value to enum */
+static const char * const ad4000_sdi_pin[] = {
+ [AD4000_SDI_MOSI] = "sdi",
+ [AD4000_SDI_VIO] = "high",
+ [AD4000_SDI_CS] = "cs",
+ [AD4000_SDI_GND] = "low",
+};
+
+/* Gains stored as fractions of 1000 so they can be expressed by integers. */
+static const int ad4000_gains[] = {
+ 454, 909, 1000, 1900,
+};
+
+struct ad4000_chip_info {
+ const char *dev_name;
+ struct iio_chan_spec chan_spec;
+ struct iio_chan_spec reg_access_chan_spec;
+ bool has_hardware_gain;
+};
+
+static const struct ad4000_chip_info ad4000_chip_info = {
+ .dev_name = "ad4000",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4001_chip_info = {
+ .dev_name = "ad4001",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4002_chip_info = {
+ .dev_name = "ad4002",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4003_chip_info = {
+ .dev_name = "ad4003",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4004_chip_info = {
+ .dev_name = "ad4004",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4005_chip_info = {
+ .dev_name = "ad4005",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4006_chip_info = {
+ .dev_name = "ad4006",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4007_chip_info = {
+ .dev_name = "ad4007",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4008_chip_info = {
+ .dev_name = "ad4008",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4010_chip_info = {
+ .dev_name = "ad4010",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4011_chip_info = {
+ .dev_name = "ad4011",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4020_chip_info = {
+ .dev_name = "ad4020",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info ad4021_chip_info = {
+ .dev_name = "ad4021",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info ad4022_chip_info = {
+ .dev_name = "ad4022",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info adaq4001_chip_info = {
+ .dev_name = "adaq4001",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .has_hardware_gain = true,
+};
+
+static const struct ad4000_chip_info adaq4003_chip_info = {
+ .dev_name = "adaq4003",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .has_hardware_gain = true,
+};
+
+struct ad4000_state {
+ struct spi_device *spi;
+ struct gpio_desc *cnv_gpio;
+ struct spi_transfer xfers[2];
+ struct spi_message msg;
+ struct mutex lock; /* Protect read modify write cycle */
+ int vref_mv;
+ enum ad4000_sdi sdi_pin;
+ bool span_comp;
+ u16 gain_milli;
+ int scale_tbl[AD4000_SCALE_OPTIONS][2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the transfer buffers
+ * to live in their own cache lines.
+ */
+ struct {
+ union {
+ __be16 sample_buf16;
+ __be32 sample_buf32;
+ } data;
+ s64 timestamp __aligned(8);
+ } scan __aligned(IIO_DMA_MINALIGN);
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+};
+
+static void ad4000_fill_scale_tbl(struct ad4000_state *st,
+ struct iio_chan_spec const *chan)
+{
+ int val, tmp0, tmp1;
+ int scale_bits;
+ u64 tmp2;
+
+ /*
+ * ADCs that output two's complement code have one less bit to express
+ * voltage magnitude.
+ */
+ if (chan->scan_type.sign == 's')
+ scale_bits = chan->scan_type.realbits - 1;
+ else
+ scale_bits = chan->scan_type.realbits;
+
+ /*
+ * The gain is stored as a fraction of 1000 and, as we need to
+ * divide vref_mv by the gain, we invert the gain/1000 fraction.
+ * Also multiply by an extra MILLI to preserve precision.
+ * Thus, we have MILLI * MILLI equals MICRO as fraction numerator.
+ */
+ val = mult_frac(st->vref_mv, MICRO, st->gain_milli);
+
+ /* Would multiply by NANO here but we multiplied by extra MILLI */
+ tmp2 = shift_right((u64)val * MICRO, scale_bits);
+ tmp0 = div_s64_rem(tmp2, NANO, &tmp1);
+
+ /* Store scale for when span compression is disabled */
+ st->scale_tbl[0][0] = tmp0; /* Integer part */
+ st->scale_tbl[0][1] = abs(tmp1); /* Fractional part */
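+
+ /*
+ * For example, with vref_mv = 5000, gain_milli = 1000, and a 16-bit
+ * signed channel (scale_bits = 15): val = 5000000 and
+ * tmp2 = 5000000 * MICRO >> 15 = 152587890, so the stored scale is
+ * 0.152587890 mV per code (i.e. 5000 mV / 2^15).
+ */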
+
+ /* Store scale for when span compression is enabled */
+ st->scale_tbl[1][0] = tmp0;
+
+ /* The integer part is always zero so don't bother to divide it. */
+ if (chan->differential)
+ st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 4, 5);
+ else
+ st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 9, 10);
+}
+
+static int ad4000_write_reg(struct ad4000_state *st, uint8_t val)
+{
+ st->tx_buf[0] = AD4000_WRITE_COMMAND;
+ st->tx_buf[1] = val;
+ return spi_write(st->spi, st->tx_buf, ARRAY_SIZE(st->tx_buf));
+}
+
+static int ad4000_read_reg(struct ad4000_state *st, unsigned int *val)
+{
+ struct spi_transfer t = {
+ .tx_buf = st->tx_buf,
+ .rx_buf = st->rx_buf,
+ .len = 2,
+ };
+ int ret;
+
+ st->tx_buf[0] = AD4000_READ_COMMAND;
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret < 0)
+ return ret;
+
+ *val = st->rx_buf[1];
+ return ret;
+}
+
+static int ad4000_convert_and_acquire(struct ad4000_state *st)
+{
+ int ret;
+
+ /*
+ * In 4-wire mode, the CNV line is held high for the entire conversion
+ * and acquisition process. In other modes, the CNV GPIO is optional
+ * and, if provided, replaces the controller CS. If the CNV GPIO is not
+ * defined, gpiod_set_value_cansleep() has no effect.
+ */
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ret = spi_sync(st->spi, &st->msg);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+
+ return ret;
+}
+
+static int ad4000_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+ u32 sample;
+ int ret;
+
+ ret = ad4000_convert_and_acquire(st);
+ if (ret < 0)
+ return ret;
+
+ if (chan->scan_type.storagebits > 16)
+ sample = be32_to_cpu(st->scan.data.sample_buf32);
+ else
+ sample = be16_to_cpu(st->scan.data.sample_buf16);
+
+ sample >>= chan->scan_type.shift;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ else
+ *val = sample;
+
+ return IIO_VAL_INT;
+}
+
+static int ad4000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return ad4000_single_conversion(indio_dev, chan, val);
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->scale_tbl[st->span_comp][0];
+ *val2 = st->scale_tbl[st->span_comp][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 0;
+ if (st->span_comp)
+ *val = mult_frac(st->vref_mv, 1, 10);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4000_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_tbl;
+ *length = AD4000_SCALE_OPTIONS * 2;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+ unsigned int reg_val;
+ bool span_comp_en;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+
+ ret = ad4000_read_reg(st, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ span_comp_en = val2 == st->scale_tbl[1][1];
+ reg_val &= ~AD4000_CFG_SPAN_COMP;
+ reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
+
+ ret = ad4000_write_reg(st, reg_val);
+ if (ret < 0)
+ return ret;
+
+ st->span_comp = span_comp_en;
+ return 0;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t ad4000_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4000_convert_and_acquire(st);
+ if (ret < 0)
+ goto err_out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp);
+
+err_out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info ad4000_reg_access_info = {
+ .read_raw = &ad4000_read_raw,
+ .read_avail = &ad4000_read_avail,
+ .write_raw = &ad4000_write_raw,
+ .write_raw_get_fmt = &ad4000_write_raw_get_fmt,
+};
+
+static const struct iio_info ad4000_info = {
+ .read_raw = &ad4000_read_raw,
+};
+
+/*
+ * This prepares the SPI message used to acquire a data sample when the
+ * device connections are in "3-wire" mode, selected when the adi,sdi-pin
+ * device tree property is absent or set to "high". In this connection mode,
+ * the ADC SDI pin is connected to MOSI or to VIO, and the ADC CNV pin is
+ * connected either to an SPI controller CS or to a GPIO.
+ * AD4000 series devices initiate conversions on the rising edge of the CNV
+ * pin.
+ *
+ * If the CNV pin is connected to an SPI controller CS line (which is by
+ * default active low), the ADC readings would have a latency (delay) of one
+ * read. Moreover, since ADC sampling is also done to fill the buffer in
+ * triggered buffer mode, the timestamps of buffer readings would be skewed.
+ * To prevent the read latency and reduce the discrepancy between the time of
+ * the sample read request and the time of the actual sampling by the ADC, do
+ * a preparatory transfer to pulse the CS/CNV line.
+ */
+static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
+ const struct iio_chan_spec *chan)
+{
+ unsigned int cnv_pulse_time = AD4000_TCONV_NS;
+ struct spi_transfer *xfers = st->xfers;
+
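+ /*
+ * xfers[0] carries no data. It is only there so that CS/CNV toggles and
+ * the cs_change delay gives the ADC t_CONV to finish the conversion
+ * before the sample is clocked out in xfers[1].
+ */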
+ xfers[0].cs_change = 1;
+ xfers[0].cs_change_delay.value = cnv_pulse_time;
+ xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ xfers[1].rx_buf = &st->scan.data;
+ xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].delay.value = AD4000_TQUIET2_NS;
+ xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ spi_message_init_with_transfers(&st->msg, st->xfers, 2);
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg);
+}
+
+/*
+ * This prepares the SPI message used to acquire a data sample when the
+ * device connections are in "4-wire" mode, selected when the adi,sdi-pin
+ * device tree property is set to "cs". In this connection mode, the
+ * controller CS pin is connected to the ADC SDI pin and a GPIO is connected
+ * to the ADC CNV pin.
+ * The GPIO connected to the ADC CNV pin is toggled outside of the SPI
+ * transfer.
+ */
+static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
+ const struct iio_chan_spec *chan)
+{
+ unsigned int cnv_to_sdi_time = AD4000_TCONV_NS;
+ struct spi_transfer *xfers = st->xfers;
+
+ /*
+ * Dummy transfer to cause enough delay between CNV going high and SDI
+ * going low.
+ */
+ xfers[0].cs_off = 1;
+ xfers[0].delay.value = cnv_to_sdi_time;
+ xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ xfers[1].rx_buf = &st->scan.data;
+ xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+
+ spi_message_init_with_transfers(&st->msg, st->xfers, 2);
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg);
+}
+
+static int ad4000_config(struct ad4000_state *st)
+{
+ unsigned int reg_val = AD4000_CONFIG_REG_DEFAULT;
+
+ if (device_property_present(&st->spi->dev, "adi,high-z-input"))
+ reg_val |= FIELD_PREP(AD4000_CFG_HIGHZ, 1);
+
+ return ad4000_write_reg(st, reg_val);
+}
+
+static int ad4000_probe(struct spi_device *spi)
+{
+ const struct ad4000_chip_info *chip;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ad4000_state *st;
+ int gain_idx, ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = spi_get_device_match_data(spi);
+ if (!chip)
+ return -EINVAL;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies),
+ ad4000_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable power supplies\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get ref regulator reference\n");
+ st->vref_mv = ret / 1000;
+
+ st->cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_HIGH);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get CNV GPIO");
+
+ ret = device_property_match_property_string(dev, "adi,sdi-pin",
+ ad4000_sdi_pin,
+ ARRAY_SIZE(ad4000_sdi_pin));
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "getting adi,sdi-pin property failed\n");
+
+ /* Default to usual SPI connections if pin properties are not present */
+ st->sdi_pin = ret == -EINVAL ? AD4000_SDI_MOSI : ret;
+ switch (st->sdi_pin) {
+ case AD4000_SDI_MOSI:
+ indio_dev->info = &ad4000_reg_access_info;
+ indio_dev->channels = &chip->reg_access_chan_spec;
+
+ /*
+ * In "3-wire mode", the ADC SDI line must be kept high when
+ * data is not being clocked out of the controller.
+ * Request the SPI controller to make MOSI idle high.
+ */
+ spi->mode |= SPI_MOSI_IDLE_HIGH;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ ret = ad4000_config(st);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to config device\n");
+
+ break;
+ case AD4000_SDI_VIO:
+ indio_dev->info = &ad4000_info;
+ indio_dev->channels = &chip->chan_spec;
+ ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ break;
+ case AD4000_SDI_CS:
+ indio_dev->info = &ad4000_info;
+ indio_dev->channels = &chip->chan_spec;
+ ret = ad4000_prepare_4wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ break;
+ case AD4000_SDI_GND:
+ return dev_err_probe(dev, -EPROTONOSUPPORT,
+ "Unsupported connection mode\n");
+
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unrecognized connection mode\n");
+ }
+
+ indio_dev->name = chip->dev_name;
+ indio_dev->num_channels = 1;
+
+ devm_mutex_init(dev, &st->lock);
+
+ st->gain_milli = 1000;
+ if (chip->has_hardware_gain) {
+ ret = device_property_read_u16(dev, "adi,gain-milli",
+ &st->gain_milli);
+ if (!ret) {
+ /* Match gain value from dt to one of supported gains */
+ gain_idx = find_closest(st->gain_milli, ad4000_gains,
+ ARRAY_SIZE(ad4000_gains));
+ st->gain_milli = ad4000_gains[gain_idx];
+ } else {
+ return dev_err_probe(dev, ret,
+ "Failed to read gain property\n");
+ }
+ }
+
+ ad4000_fill_scale_tbl(st, indio_dev->channels);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad4000_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ad4000_id[] = {
+ { "ad4000", (kernel_ulong_t)&ad4000_chip_info },
+ { "ad4001", (kernel_ulong_t)&ad4001_chip_info },
+ { "ad4002", (kernel_ulong_t)&ad4002_chip_info },
+ { "ad4003", (kernel_ulong_t)&ad4003_chip_info },
+ { "ad4004", (kernel_ulong_t)&ad4004_chip_info },
+ { "ad4005", (kernel_ulong_t)&ad4005_chip_info },
+ { "ad4006", (kernel_ulong_t)&ad4006_chip_info },
+ { "ad4007", (kernel_ulong_t)&ad4007_chip_info },
+ { "ad4008", (kernel_ulong_t)&ad4008_chip_info },
+ { "ad4010", (kernel_ulong_t)&ad4010_chip_info },
+ { "ad4011", (kernel_ulong_t)&ad4011_chip_info },
+ { "ad4020", (kernel_ulong_t)&ad4020_chip_info },
+ { "ad4021", (kernel_ulong_t)&ad4021_chip_info },
+ { "ad4022", (kernel_ulong_t)&ad4022_chip_info },
+ { "adaq4001", (kernel_ulong_t)&adaq4001_chip_info },
+ { "adaq4003", (kernel_ulong_t)&adaq4003_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4000_id);
+
+static const struct of_device_id ad4000_of_match[] = {
+ { .compatible = "adi,ad4000", .data = &ad4000_chip_info },
+ { .compatible = "adi,ad4001", .data = &ad4001_chip_info },
+ { .compatible = "adi,ad4002", .data = &ad4002_chip_info },
+ { .compatible = "adi,ad4003", .data = &ad4003_chip_info },
+ { .compatible = "adi,ad4004", .data = &ad4004_chip_info },
+ { .compatible = "adi,ad4005", .data = &ad4005_chip_info },
+ { .compatible = "adi,ad4006", .data = &ad4006_chip_info },
+ { .compatible = "adi,ad4007", .data = &ad4007_chip_info },
+ { .compatible = "adi,ad4008", .data = &ad4008_chip_info },
+ { .compatible = "adi,ad4010", .data = &ad4010_chip_info },
+ { .compatible = "adi,ad4011", .data = &ad4011_chip_info },
+ { .compatible = "adi,ad4020", .data = &ad4020_chip_info },
+ { .compatible = "adi,ad4021", .data = &ad4021_chip_info },
+ { .compatible = "adi,ad4022", .data = &ad4022_chip_info },
+ { .compatible = "adi,adaq4001", .data = &adaq4001_chip_info },
+ { .compatible = "adi,adaq4003", .data = &adaq4003_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4000_of_match);
+
+static struct spi_driver ad4000_driver = {
+ .driver = {
+ .name = "ad4000",
+ .of_match_table = ad4000_of_match,
+ },
+ .probe = ad4000_probe,
+ .id_table = ad4000_id,
+};
+module_spi_driver(ad4000_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD4000 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
new file mode 100644
index 000000000000..595ec4158e73
--- /dev/null
+++ b/drivers/iio/adc/ad4695.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI ADC driver for Analog Devices Inc. AD4695 and similar chips
+ *
+ * https://www.analog.com/en/products/ad4695.html
+ * https://www.analog.com/en/products/ad4696.html
+ * https://www.analog.com/en/products/ad4697.html
+ * https://www.analog.com/en/products/ad4698.html
+ *
+ * Copyright 2024 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/minmax.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <dt-bindings/iio/adi,ad4695.h>
+
+/* AD4695 registers */
+#define AD4695_REG_SPI_CONFIG_A 0x0000
+#define AD4695_REG_SPI_CONFIG_A_SW_RST (BIT(7) | BIT(0))
+#define AD4695_REG_SPI_CONFIG_A_ADDR_DIR BIT(5)
+#define AD4695_REG_SPI_CONFIG_B 0x0001
+#define AD4695_REG_SPI_CONFIG_B_INST_MODE BIT(7)
+#define AD4695_REG_DEVICE_TYPE 0x0003
+#define AD4695_REG_SCRATCH_PAD 0x000A
+#define AD4695_REG_VENDOR_L 0x000C
+#define AD4695_REG_VENDOR_H 0x000D
+#define AD4695_REG_LOOP_MODE 0x000E
+#define AD4695_REG_SPI_CONFIG_C 0x0010
+#define AD4695_REG_SPI_CONFIG_C_MB_STRICT BIT(7)
+#define AD4695_REG_SPI_STATUS 0x0011
+#define AD4695_REG_STATUS 0x0014
+#define AD4695_REG_ALERT_STATUS1 0x0015
+#define AD4695_REG_ALERT_STATUS2 0x0016
+#define AD4695_REG_CLAMP_STATUS 0x001A
+#define AD4695_REG_SETUP 0x0020
+#define AD4695_REG_SETUP_LDO_EN BIT(4)
+#define AD4695_REG_SETUP_SPI_MODE BIT(2)
+#define AD4695_REG_SETUP_SPI_CYC_CTRL BIT(1)
+#define AD4695_REG_REF_CTRL 0x0021
+#define AD4695_REG_REF_CTRL_OV_MODE BIT(7)
+#define AD4695_REG_REF_CTRL_VREF_SET GENMASK(4, 2)
+#define AD4695_REG_REF_CTRL_REFHIZ_EN BIT(1)
+#define AD4695_REG_REF_CTRL_REFBUF_EN BIT(0)
+#define AD4695_REG_SEQ_CTRL 0x0022
+#define AD4695_REG_SEQ_CTRL_STD_SEQ_EN BIT(7)
+#define AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS GENMASK(6, 0)
+#define AD4695_REG_AC_CTRL 0x0023
+#define AD4695_REG_STD_SEQ_CONFIG 0x0024
+#define AD4695_REG_GPIO_CTRL 0x0026
+#define AD4695_REG_GP_MODE 0x0027
+#define AD4695_REG_TEMP_CTRL 0x0029
+#define AD4695_REG_TEMP_CTRL_TEMP_EN BIT(0)
+#define AD4695_REG_CONFIG_IN(n) (0x0030 | (n))
+#define AD4695_REG_CONFIG_IN_MODE BIT(6)
+#define AD4695_REG_CONFIG_IN_PAIR GENMASK(5, 4)
+#define AD4695_REG_CONFIG_IN_AINHIGHZ_EN BIT(3)
+#define AD4695_REG_UPPER_IN(n) (0x0040 | (2 * (n)))
+#define AD4695_REG_LOWER_IN(n) (0x0060 | (2 * (n)))
+#define AD4695_REG_HYST_IN(n) (0x0080 | (2 * (n)))
+#define AD4695_REG_OFFSET_IN(n) (0x00A0 | (2 * (n)))
+#define AD4695_REG_GAIN_IN(n) (0x00C0 | (2 * (n)))
+#define AD4695_REG_AS_SLOT(n) (0x0100 | (n))
+#define AD4695_REG_AS_SLOT_INX GENMASK(3, 0)
+
+/* Conversion mode commands */
+#define AD4695_CMD_EXIT_CNV_MODE 0x0A
+#define AD4695_CMD_TEMP_CHAN 0x0F
+#define AD4695_CMD_VOLTAGE_CHAN(n) (0x10 | (n))
+
+/* timing specs */
+#define AD4695_T_CONVERT_NS 415
+#define AD4695_T_WAKEUP_HW_MS 3
+#define AD4695_T_WAKEUP_SW_MS 3
+#define AD4695_T_REFBUF_MS 100
+#define AD4695_T_REGCONFIG_NS 20
+#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
+
+/* Max number of voltage input channels. */
+#define AD4695_MAX_CHANNELS 16
+/* Max size of 1 raw sample in bytes. */
+#define AD4695_MAX_CHANNEL_SIZE 2
+
+enum ad4695_in_pair {
+ AD4695_IN_PAIR_REFGND,
+ AD4695_IN_PAIR_COM,
+ AD4695_IN_PAIR_EVEN_ODD,
+};
+
+struct ad4695_chip_info {
+ const char *name;
+ int max_sample_rate;
+ u32 t_acq_ns;
+ u8 num_voltage_inputs;
+};
+
+struct ad4695_channel_config {
+ unsigned int channel;
+ bool highz_en;
+ bool bipolar;
+ enum ad4695_in_pair pin_pairing;
+ unsigned int common_mode_mv;
+};
+
+struct ad4695_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *regmap16;
+ struct gpio_desc *reset_gpio;
+ /* voltage channels plus temperature and timestamp */
+ struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2];
+ struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS];
+ const struct ad4695_chip_info *chip_info;
+ /* Reference voltage. */
+ unsigned int vref_mv;
+ /* Common mode input pin voltage. */
+ unsigned int com_mv;
+ /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */
+ struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2];
+ struct spi_message buf_read_msg;
+ /* Raw conversion data received. */
+ u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
+ sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
+ u16 raw_data;
+ /* Commands to send for single conversion. */
+ u16 cnv_cmd;
+ u8 cnv_cmd2;
+};
+
+static const struct regmap_range ad4695_regmap_rd_ranges[] = {
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B),
+ regmap_reg_range(AD4695_REG_DEVICE_TYPE, AD4695_REG_DEVICE_TYPE),
+ regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD),
+ regmap_reg_range(AD4695_REG_VENDOR_L, AD4695_REG_LOOP_MODE),
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS),
+ regmap_reg_range(AD4695_REG_STATUS, AD4695_REG_ALERT_STATUS2),
+ regmap_reg_range(AD4695_REG_CLAMP_STATUS, AD4695_REG_CLAMP_STATUS),
+ regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL),
+ regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL),
+ regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)),
+ regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)),
+};
+
+static const struct regmap_access_table ad4695_regmap_rd_table = {
+ .yes_ranges = ad4695_regmap_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_rd_ranges),
+};
+
+static const struct regmap_range ad4695_regmap_wr_ranges[] = {
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B),
+ regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD),
+ regmap_reg_range(AD4695_REG_LOOP_MODE, AD4695_REG_LOOP_MODE),
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS),
+ regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL),
+ regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL),
+ regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)),
+ regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)),
+};
+
+static const struct regmap_access_table ad4695_regmap_wr_table = {
+ .yes_ranges = ad4695_regmap_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_wr_ranges),
+};
+
+static const struct regmap_config ad4695_regmap_config = {
+ .name = "ad4695-8",
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = AD4695_REG_AS_SLOT(127),
+ .rd_table = &ad4695_regmap_rd_table,
+ .wr_table = &ad4695_regmap_wr_table,
+ .can_multi_write = true,
+};
+
+static const struct regmap_range ad4695_regmap16_rd_ranges[] = {
+ regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG),
+ regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)),
+};
+
+static const struct regmap_access_table ad4695_regmap16_rd_table = {
+ .yes_ranges = ad4695_regmap16_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_rd_ranges),
+};
+
+static const struct regmap_range ad4695_regmap16_wr_ranges[] = {
+ regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG),
+ regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)),
+};
+
+static const struct regmap_access_table ad4695_regmap16_wr_table = {
+ .yes_ranges = ad4695_regmap16_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_wr_ranges),
+};
+
+static const struct regmap_config ad4695_regmap16_config = {
+ .name = "ad4695-16",
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = AD4695_REG_GAIN_IN(15),
+ .rd_table = &ad4695_regmap16_rd_table,
+ .wr_table = &ad4695_regmap16_wr_table,
+ .can_multi_write = true,
+};
+
+static const struct iio_chan_spec ad4695_channel_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+};
+
+static const struct iio_chan_spec ad4695_temp_channel_template = {
+ .address = AD4695_CMD_TEMP_CHAN,
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+};
+
+static const struct iio_chan_spec ad4695_soft_timestamp_channel_template =
+ IIO_CHAN_SOFT_TIMESTAMP(0);
+
+static const char * const ad4695_power_supplies[] = {
+ "avdd", "vio"
+};
+
+static const struct ad4695_chip_info ad4695_chip_info = {
+ .name = "ad4695",
+ .max_sample_rate = 500 * KILO,
+ .t_acq_ns = 1715,
+ .num_voltage_inputs = 16,
+};
+
+static const struct ad4695_chip_info ad4696_chip_info = {
+ .name = "ad4696",
+ .max_sample_rate = 1 * MEGA,
+ .t_acq_ns = 715,
+ .num_voltage_inputs = 16,
+};
+
+static const struct ad4695_chip_info ad4697_chip_info = {
+ .name = "ad4697",
+ .max_sample_rate = 500 * KILO,
+ .t_acq_ns = 1715,
+ .num_voltage_inputs = 8,
+};
+
+static const struct ad4695_chip_info ad4698_chip_info = {
+ .name = "ad4698",
+ .max_sample_rate = 1 * MEGA,
+ .t_acq_ns = 715,
+ .num_voltage_inputs = 8,
+};
+
+/**
+ * ad4695_set_single_cycle_mode - Set the device in single cycle mode
+ * @st: The AD4695 state
+ * @channel: The first channel to read
+ *
+ * As per the datasheet, to enable single cycle mode, we need to set
+ * STD_SEQ_EN=0, NUM_SLOTS_AS=0 and CYC_CTRL=1 (Table 15). Setting SPI_MODE=1
+ * triggers the first conversion using the channel in AS_SLOT0.
+ *
+ * Context: can sleep, must be called with iio_device_claim_direct held
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_set_single_cycle_mode(struct ad4695_state *st,
+ unsigned int channel)
+{
+ int ret;
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_SEQ_CTRL,
+ AD4695_REG_SEQ_CTRL_STD_SEQ_EN |
+ AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4695_REG_AS_SLOT(0),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, channel));
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_SPI_MODE |
+ AD4695_REG_SETUP_SPI_CYC_CTRL);
+}
+
+/**
+ * ad4695_enter_advanced_sequencer_mode - Put the ADC in advanced sequencer mode
+ * @st: The driver state
+ * @n: The number of slots to use - must be >= 2, <= 128
+ *
+ * As per the datasheet, to enable advanced sequencer, we need to set
+ * STD_SEQ_EN=0, NUM_SLOTS_AS=n-1 and CYC_CTRL=0 (Table 15). Setting SPI_MODE=1
+ * triggers the first conversion using the channel in AS_SLOT0.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
+{
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_SEQ_CTRL,
+ AD4695_REG_SEQ_CTRL_STD_SEQ_EN |
+ AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS,
+ FIELD_PREP(AD4695_REG_SEQ_CTRL_STD_SEQ_EN, 0) |
+ FIELD_PREP(AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS, n - 1));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_SPI_MODE | AD4695_REG_SETUP_SPI_CYC_CTRL,
+ FIELD_PREP(AD4695_REG_SETUP_SPI_MODE, 1) |
+ FIELD_PREP(AD4695_REG_SETUP_SPI_CYC_CTRL, 0));
+}
+
+/**
+ * ad4695_exit_conversion_mode - Exit conversion mode
+ * @st: The AD4695 state
+ *
+ * Sends SPI command to exit conversion mode.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_exit_conversion_mode(struct ad4695_state *st)
+{
+ struct spi_transfer xfer = {
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ };
+
+ /*
+ * Technically, a 5-bit transfer would be enough, but the command is
+ * shifted to the start of the 8-bit word instead for better SPI
+ * controller support.
+ */
+ st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
+{
+ u8 val;
+
+ if (vref_mv >= 2400 && vref_mv <= 2750)
+ val = 0;
+ else if (vref_mv > 2750 && vref_mv <= 3250)
+ val = 1;
+ else if (vref_mv > 3250 && vref_mv <= 3750)
+ val = 2;
+ else if (vref_mv > 3750 && vref_mv <= 4500)
+ val = 3;
+ else if (vref_mv > 4500 && vref_mv <= 5100)
+ val = 4;
+ else
+ return -EINVAL;
+
+ return regmap_update_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_VREF_SET,
+ FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, val));
+}
+
+static int ad4695_write_chn_cfg(struct ad4695_state *st,
+ struct ad4695_channel_config *cfg)
+{
+ u32 mask, val;
+
+ mask = AD4695_REG_CONFIG_IN_MODE;
+ val = FIELD_PREP(AD4695_REG_CONFIG_IN_MODE, cfg->bipolar ? 1 : 0);
+
+ mask |= AD4695_REG_CONFIG_IN_PAIR;
+ val |= FIELD_PREP(AD4695_REG_CONFIG_IN_PAIR, cfg->pin_pairing);
+
+ mask |= AD4695_REG_CONFIG_IN_AINHIGHZ_EN;
+ val |= FIELD_PREP(AD4695_REG_CONFIG_IN_AINHIGHZ_EN,
+ cfg->highz_en ? 1 : 0);
+
+ return regmap_update_bits(st->regmap,
+ AD4695_REG_CONFIG_IN(cfg->channel),
+ mask, val);
+}
+
+static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct spi_transfer *xfer;
+ u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
+ u32 bit, num_xfer, num_slots;
+ u32 temp_en = 0;
+ int ret;
+
+ /*
+ * We are using the advanced sequencer since it is the only way to read
+ * multiple channels that allows individual configuration of each
+ * voltage input channel. Slot 0 in the advanced sequencer is used to
+ * account for the gap between trigger polls - we don't read data from
+ * this slot. Each enabled voltage channel is assigned a slot starting
+ * with slot 1.
+ */
+ num_slots = 1;
+
+ memset(st->buf_read_xfer, 0, sizeof(st->buf_read_xfer));
+
+ /* First xfer is only to trigger conversion of slot 1, so no rx. */
+ xfer = &st->buf_read_xfer[0];
+ xfer->cs_change = 1;
+ xfer->delay.value = st->chip_info->t_acq_ns;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ num_xfer = 1;
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ xfer = &st->buf_read_xfer[num_xfer];
+ xfer->bits_per_word = 16;
+ xfer->rx_buf = &st->buf[(num_xfer - 1) * 2];
+ xfer->len = 2;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ if (bit == temp_chan_bit) {
+ temp_en = 1;
+ } else {
+ ret = regmap_write(st->regmap,
+ AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, bit));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ num_xfer++;
+ }
+
+ /*
+ * The advanced sequencer requires that at least 2 slots are enabled.
+ * Since slot 0 is always used for other purposes, we need only 1
+ * enabled voltage channel to meet this requirement. If the temperature
+ * channel is the only enabled channel, we need to add one more slot
+ * in the sequence but not read from it.
+ */
+ if (num_slots < 2) {
+ /* move last xfer so we can insert one more xfer before it */
+ st->buf_read_xfer[num_xfer] = *xfer;
+ num_xfer++;
+
+ /* modify 2nd to last xfer for extra slot */
+ memset(xfer, 0, sizeof(*xfer));
+ xfer->cs_change = 1;
+ xfer->delay.value = st->chip_info->t_acq_ns;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer++;
+
+ /* and add the extra slot in the sequencer */
+ ret = regmap_write(st->regmap,
+ AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, 0));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ /*
+ * Don't keep CS asserted after last xfer. Also triggers conversion of
+ * slot 0.
+ */
+ xfer->cs_change = 0;
+
+ /*
+ * Temperature channel isn't included in the sequence, but rather
+ * controlled by setting a bit in the TEMP_CTRL register.
+ */
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_TEMP_CTRL,
+ AD4695_REG_TEMP_CTRL_TEMP_EN,
+ FIELD_PREP(AD4695_REG_TEMP_CTRL_TEMP_EN, temp_en));
+ if (ret)
+ return ret;
+
+ spi_message_init_with_transfers(&st->buf_read_msg, st->buf_read_xfer,
+ num_xfer);
+
+ ret = spi_optimize_message(st->spi, &st->buf_read_msg);
+ if (ret)
+ return ret;
+
+ /* This triggers conversion of slot 0. */
+ ret = ad4695_enter_advanced_sequencer_mode(st, num_slots);
+ if (ret)
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return ret;
+}
+
+static int ad4695_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4695_exit_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad4695_buffer_setup_ops = {
+ .preenable = ad4695_buffer_preenable,
+ .postdisable = ad4695_buffer_postdisable,
+};
+
+static irqreturn_t ad4695_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4695_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(st->spi, &st->buf_read_msg);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->buf, pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ad4695_read_one_sample - Read a single sample using single-cycle mode
+ * @st: The AD4695 state
+ * @address: The address of the channel to read
+ *
+ * Upon successful return, the sample will be stored in `st->raw_data`.
+ *
+ * Context: can sleep, must be called with iio_device_claim_direct held
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
+{
+ struct spi_transfer xfer[2] = { };
+ int ret, i = 0;
+
+ ret = ad4695_set_single_cycle_mode(st, address);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting the first channel to the temperature channel isn't supported
+ * in single-cycle mode, so we have to do an extra xfer to read the
+ * temperature.
+ */
+ if (address == AD4695_CMD_TEMP_CHAN) {
+ /* We aren't reading, so we can make this a short xfer. */
+ st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3;
+ xfer[0].tx_buf = &st->cnv_cmd2;
+ xfer[0].len = 1;
+ xfer[0].cs_change = 1;
+ xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ i = 1;
+ }
+
+ /* Then read the result and exit conversion mode. */
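+ /* The 5-bit command occupies the 5 MSBs of the 16-bit word. */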
+ st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11;
+ xfer[i].bits_per_word = 16;
+ xfer[i].tx_buf = &st->cnv_cmd;
+ xfer[i].rx_buf = &st->raw_data;
+ xfer[i].len = 2;
+
+ return spi_sync_transfer(st->spi, xfer, i + 1);
+}
+
+static int ad4695_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct ad4695_channel_config *cfg = &st->channels_cfg[chan->scan_index];
+ u8 realbits = chan->scan_type.realbits;
+ unsigned int reg_val;
+ int ret, tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = ad4695_read_one_sample(st, chan->address);
+ if (ret)
+ return ret;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(st->raw_data, realbits - 1);
+ else
+ *val = st->raw_data;
+
+ return IIO_VAL_INT;
+ }
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* T_scale (°C) = raw * V_REF (mV) / (-1.8 mV/°C * 2^16) */
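+ /* -556 ≈ -1000 / 1.8, so the reported scale is in milli-degrees Celsius */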
+ *val = st->vref_mv * -556;
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (cfg->pin_pairing == AD4695_IN_PAIR_COM)
+ *val = st->com_mv * (1 << realbits) / st->vref_mv;
+ else if (cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD)
+ *val = cfg->common_mode_mv * (1 << realbits) / st->vref_mv;
+ else
+ *val = 0;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* T_offset (°C) = -725 mV / (-1.8 mV/°C) */
+ /* T_offset (raw) = T_offset (°C) * (-1.8 mV/°C) * 2^16 / V_REF (mV) */
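+ /* -47513600 = -725 (mV) * 2^16 */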
+ *val = -47513600;
+ *val2 = st->vref_mv;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ &reg_val);
+ if (ret)
+ return ret;
+
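+ /*
+ * The gain register is unsigned 1.15 fixed point, so 0x8000 reads
+ * back as a gain of 1.0.
+ */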
+ *val = reg_val;
+ *val2 = 15;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ &reg_val);
+ if (ret)
+ return ret;
+
+ tmp = sign_extend32(reg_val, 15);
+
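+ /* The offset register holds a signed value in steps of 1/4 LSB. */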
+ *val = tmp / 4;
+ *val2 = abs(tmp) % 4 * MICRO / 4;
+
+ if (tmp < 0 && *val2) {
+ *val *= -1;
+ *val2 *= -1;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int reg_val;
+
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
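+ /*
+ * Convert the gain in the range [0, 2) to unsigned 1.15 fixed
+ * point (0x8000 = gain of 1.0), clamping out-of-range values.
+ */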
+ if (val < 0 || val2 < 0)
+ reg_val = 0;
+ else if (val > 1)
+ reg_val = U16_MAX;
+ else
+ reg_val = (val * (1 << 16) +
+ mul_u64_u32_div(val2, 1 << 16,
+ MICRO)) / 2;
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
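+ /*
+ * Convert the offset in LSBs to register units of 1/4 LSB,
+ * saturating at the signed 16-bit register limits.
+ */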
+ if (val2 >= 0 && val > S16_MAX / 4)
+ reg_val = S16_MAX;
+ else if ((val2 < 0 ? -val : val) < S16_MIN / 4)
+ reg_val = S16_MIN;
+ else if (val2 < 0)
+ reg_val = clamp_t(int,
+ -(val * 4 + -val2 * 4 / MICRO),
+ S16_MIN, S16_MAX);
+ else if (val < 0)
+ reg_val = clamp_t(int,
+ val * 4 - val2 * 4 / MICRO,
+ S16_MIN, S16_MAX);
+ else
+ reg_val = clamp_t(int,
+ val * 4 + val2 * 4 / MICRO,
+ S16_MIN, S16_MAX);
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+ unreachable();
+}
+
+static int ad4695_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ static const int ad4695_calibscale_available[6] = {
+ /* Range of 0 (inclusive) to 2 (exclusive) */
+ 0, 15, 1, 15, U16_MAX, 15
+ };
+ static const int ad4695_calibbias_available[6] = {
+ /*
+ * The datasheet says FSR/8, which translates to the signed 16-bit
+ * range divided by 4. The step depends on the oversampling ratio,
+ * which is always 1 for now.
+ */
+ S16_MIN / 4, 0, 0, MICRO / 4, S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ };
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_calibscale_available;
+ *type = IIO_VAL_FRACTIONAL_LOG2;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_calibbias_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4695_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ if (readval) {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_rd_table))
+ return regmap_read(st->regmap, reg, readval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_rd_table))
+ return regmap_read(st->regmap16, reg, readval);
+ } else {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_wr_table))
+ return regmap_write(st->regmap, reg, writeval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_wr_table))
+ return regmap_write(st->regmap16, reg, writeval);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad4695_info = {
+ .read_raw = &ad4695_read_raw,
+ .write_raw = &ad4695_write_raw,
+ .read_avail = &ad4695_read_avail,
+ .debugfs_reg_access = &ad4695_debugfs_reg_access,
+};
+
+static int ad4695_parse_channel_cfg(struct ad4695_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct ad4695_channel_config *chan_cfg;
+ struct iio_chan_spec *iio_chan;
+ int ret, i;
+
+ /* populate defaults */
+ for (i = 0; i < st->chip_info->num_voltage_inputs; i++) {
+ chan_cfg = &st->channels_cfg[i];
+ iio_chan = &st->iio_chan[i];
+
+ chan_cfg->highz_en = true;
+ chan_cfg->channel = i;
+
+ *iio_chan = ad4695_channel_template;
+ iio_chan->channel = i;
+ iio_chan->scan_index = i;
+ iio_chan->address = AD4695_CMD_VOLTAGE_CHAN(i);
+ }
+
+ /* modify based on firmware description */
+ device_for_each_child_node_scoped(dev, child) {
+ u32 reg, val;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read reg property (%s)\n",
+ fwnode_get_name(child));
+
+ if (reg >= st->chip_info->num_voltage_inputs)
+ return dev_err_probe(dev, -EINVAL,
+ "reg out of range (%s)\n",
+ fwnode_get_name(child));
+
+ iio_chan = &st->iio_chan[reg];
+ chan_cfg = &st->channels_cfg[reg];
+
+ chan_cfg->highz_en =
+ !fwnode_property_read_bool(child, "adi,no-high-z");
+ chan_cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
+
+ ret = fwnode_property_read_u32(child, "common-mode-channel",
+ &val);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "failed to read common-mode-channel (%s)\n",
+ fwnode_get_name(child));
+
+ if (ret == -EINVAL || val == AD4695_COMMON_MODE_REFGND)
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_REFGND;
+ else if (val == AD4695_COMMON_MODE_COM)
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_COM;
+ else
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_EVEN_ODD;
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD &&
+ val % 2 == 0)
+ return dev_err_probe(dev, -EINVAL,
+ "common-mode-channel must be odd number (%s)\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD &&
+ val != reg + 1)
+ return dev_err_probe(dev, -EINVAL,
+ "common-mode-channel must be next consecutive channel (%s)\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD) {
+ char name[5];
+
+ snprintf(name, sizeof(name), "in%d", reg + 1);
+
+ ret = devm_regulator_get_enable_read_voltage(dev, name);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to get %s voltage (%s)\n",
+ name, fwnode_get_name(child));
+
+ chan_cfg->common_mode_mv = ret / 1000;
+ }
+
+ if (chan_cfg->bipolar &&
+ chan_cfg->pin_pairing == AD4695_IN_PAIR_REFGND)
+ return dev_err_probe(dev, -EINVAL,
+ "bipolar mode is not available for inputs paired with REFGND (%s).\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->bipolar)
+ iio_chan->scan_type.sign = 's';
+
+ ret = ad4695_write_chn_cfg(st, chan_cfg);
+ if (ret)
+ return ret;
+ }
+
+ /* Temperature channel must be next scan index after voltage channels. */
+ st->iio_chan[i] = ad4695_temp_channel_template;
+ st->iio_chan[i].scan_index = i;
+ i++;
+
+ st->iio_chan[i] = ad4695_soft_timestamp_channel_template;
+ st->iio_chan[i].scan_index = i;
+
+ return 0;
+}
+
+static int ad4695_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ad4695_state *st;
+ struct iio_dev *indio_dev;
+ struct gpio_desc *cnv_gpio;
+ bool use_internal_ldo_supply;
+ bool use_internal_ref_buffer;
+ int ret;
+
+ cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(cnv_gpio),
+ "Failed to get CNV GPIO\n");
+
+ /* Driver currently requires CNV pin to be connected to SPI CS */
+ if (cnv_gpio)
+ return dev_err_probe(dev, -ENODEV,
+ "CNV GPIO is not supported\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -EINVAL;
+
+ /* Registers cannot be read at the max allowable speed */
+ spi->max_speed_hz = AD4695_REG_ACCESS_SCLK_HZ;
+
+ st->regmap = devm_regmap_init_spi(spi, &ad4695_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap\n");
+
+ st->regmap16 = devm_regmap_init_spi(spi, &ad4695_regmap16_config);
+ if (IS_ERR(st->regmap16))
+ return dev_err_probe(dev, PTR_ERR(st->regmap16),
+ "Failed to initialize regmap16\n");
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(ad4695_power_supplies),
+ ad4695_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable power supplies\n");
+
+ /* If LDO_IN supply is present, then we are using internal LDO. */
+ ret = devm_regulator_get_enable_optional(dev, "ldo-in");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "Failed to enable LDO_IN supply\n");
+
+ use_internal_ldo_supply = ret == 0;
+
+ if (!use_internal_ldo_supply) {
+ /* Otherwise we need an external VDD supply. */
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to enable VDD supply\n");
+ }
+
+ /* If REFIN supply is given, then we are using internal buffer */
+ ret = devm_regulator_get_enable_read_voltage(dev, "refin");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get REFIN voltage\n");
+
+ if (ret != -ENODEV) {
+ st->vref_mv = ret / 1000;
+ use_internal_ref_buffer = true;
+ } else {
+ /* Otherwise, we need an external reference. */
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get REF voltage\n");
+
+ st->vref_mv = ret / 1000;
+ use_internal_ref_buffer = false;
+ }
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "com");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get COM voltage\n");
+
+ st->com_mv = ret == -ENODEV ? 0 : ret / 1000;
+
+ /*
+ * Reset the device using hardware reset if available or fall back to
+ * software reset.
+ */
+
+ st->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
+
+ if (st->reset_gpio) {
+ gpiod_set_value(st->reset_gpio, 0);
+ msleep(AD4695_T_WAKEUP_HW_MS);
+ } else {
+ ret = regmap_write(st->regmap, AD4695_REG_SPI_CONFIG_A,
+ AD4695_REG_SPI_CONFIG_A_SW_RST);
+ if (ret)
+ return ret;
+
+ msleep(AD4695_T_WAKEUP_SW_MS);
+ }
+
+ /* Needed for regmap16 to be able to work correctly. */
+ ret = regmap_set_bits(st->regmap, AD4695_REG_SPI_CONFIG_A,
+ AD4695_REG_SPI_CONFIG_A_ADDR_DIR);
+ if (ret)
+ return ret;
+
+ /* Disable internal LDO if it isn't needed. */
+ ret = regmap_update_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_LDO_EN,
+ FIELD_PREP(AD4695_REG_SETUP_LDO_EN,
+ use_internal_ldo_supply ? 1 : 0));
+ if (ret)
+ return ret;
+
+ /* configure reference supply */
+
+ if (device_property_present(dev, "adi,no-ref-current-limit")) {
+ ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_OV_MODE);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_present(dev, "adi,no-ref-high-z")) {
+ if (use_internal_ref_buffer)
+ return dev_err_probe(dev, -EINVAL,
+ "Cannot disable high-Z mode for internal reference buffer\n");
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_REFHIZ_EN);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad4695_set_ref_voltage(st, st->vref_mv);
+ if (ret)
+ return ret;
+
+ if (use_internal_ref_buffer) {
+ ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_REFBUF_EN);
+ if (ret)
+ return ret;
+
+ /* Give the capacitor some time to charge up. */
+ msleep(AD4695_T_REFBUF_MS);
+ }
+
+ ret = ad4695_parse_channel_cfg(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->info = &ad4695_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->iio_chan;
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4695_trigger_handler,
+ &ad4695_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ad4695_spi_id_table[] = {
+ { .name = "ad4695", .driver_data = (kernel_ulong_t)&ad4695_chip_info },
+ { .name = "ad4696", .driver_data = (kernel_ulong_t)&ad4696_chip_info },
+ { .name = "ad4697", .driver_data = (kernel_ulong_t)&ad4697_chip_info },
+ { .name = "ad4698", .driver_data = (kernel_ulong_t)&ad4698_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4695_spi_id_table);
+
+static const struct of_device_id ad4695_of_match_table[] = {
+ { .compatible = "adi,ad4695", .data = &ad4695_chip_info, },
+ { .compatible = "adi,ad4696", .data = &ad4696_chip_info, },
+ { .compatible = "adi,ad4697", .data = &ad4697_chip_info, },
+ { .compatible = "adi,ad4698", .data = &ad4698_chip_info, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4695_of_match_table);
+
+static struct spi_driver ad4695_driver = {
+ .driver = {
+ .name = "ad4695",
+ .of_match_table = ad4695_of_match_table,
+ },
+ .probe = ad4695_probe,
+ .id_table = ad4695_spi_id_table,
+};
+module_spi_driver(ad4695_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Analog Devices AD4695 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
index a75837334157..1b59708abf30 100644
--- a/drivers/iio/adc/ad7091r5.c
+++ b/drivers/iio/adc/ad7091r5.c
@@ -112,13 +112,13 @@ static int ad7091r5_i2c_probe(struct i2c_client *i2c)
static const struct of_device_id ad7091r5_dt_ids[] = {
{ .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
static const struct i2c_device_id ad7091r5_i2c_ids[] = {
- {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
- {}
+ { "ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
index 700564305057..c9e014d6a77c 100644
--- a/drivers/iio/adc/ad7091r8.c
+++ b/drivers/iio/adc/ad7091r8.c
@@ -159,7 +159,7 @@ static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg,
return spi_write(spi, &st->tx_buf, 2);
}
-static struct regmap_bus ad7091r8_regmap_bus = {
+static const struct regmap_bus ad7091r8_regmap_bus = {
.reg_read = ad7091r_regmap_bus_reg_read,
.reg_write = ad7091r_regmap_bus_reg_write,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 108e9ccab1ef..a5d91933f505 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -382,8 +382,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
cfg->vref_mv = 2500;
st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK;
st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
- return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL,
- 2, st->adc_control);
+ return 0;
default:
dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
return -EINVAL;
@@ -401,24 +400,17 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
tmp = (cfg->buf_positive << 1) + cfg->buf_negative;
val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) |
- AD7124_CONFIG_IN_BUFF(tmp);
- ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
- if (ret < 0)
- return ret;
+ AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits);
- tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type);
- ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_TYPE_MSK,
- tmp, 3);
- if (ret < 0)
- return ret;
-
- ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_FS_MSK,
- AD7124_FILTER_FS(cfg->odr_sel_bits), 3);
+ ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
if (ret < 0)
return ret;
- return ad7124_spi_write_mask(st, AD7124_CONFIG(cfg->cfg_slot), AD7124_CONFIG_PGA_MSK,
- AD7124_CONFIG_PGA(cfg->pga_bits), 2);
+ tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type) |
+ AD7124_FILTER_FS(cfg->odr_sel_bits);
+ return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot),
+ AD7124_FILTER_TYPE_MSK | AD7124_FILTER_FS_MSK,
+ tmp, 3);
}
static struct ad7124_channel_config *ad7124_pop_config(struct ad7124_state *st)
@@ -907,9 +899,9 @@ static int ad7124_setup(struct ad7124_state *st)
/* Set the power mode */
st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
st->adc_control |= AD7124_ADC_CTRL_PWR(power_mode);
- ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
- if (ret < 0)
- return ret;
+
+ st->adc_control &= ~AD7124_ADC_CTRL_MODE_MSK;
+ st->adc_control |= AD7124_ADC_CTRL_MODE(AD_SD_MODE_IDLE);
mutex_init(&st->cfgs_lock);
INIT_KFIFO(st->live_cfgs_fifo);
@@ -927,6 +919,10 @@ static int ad7124_setup(struct ad7124_state *st)
ad7124_set_channel_odr(st, i, 10);
}
+ ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
+ if (ret < 0)
+ return ret;
+
return ret;
}
@@ -1016,14 +1012,14 @@ static const struct of_device_id ad7124_of_match[] = {
.data = &ad7124_chip_info_tbl[ID_AD7124_4], },
{ .compatible = "adi,ad7124-8",
.data = &ad7124_chip_info_tbl[ID_AD7124_8], },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7124_of_match);
static const struct spi_device_id ad71124_ids[] = {
{ "ad7124-4", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_4] },
{ "ad7124-8", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_8] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad71124_ids);
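
A minimal sketch, not the driver's code, of the pattern the ad7124 hunks above apply: OR the field values and field masks together so the filter type and output data rate land in a single register write instead of two. Register and mask names below are hypothetical, and a regmap-backed device stands in for the driver's own SPI helpers.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_FILTER	0x21
#define EXAMPLE_FILTER_TYPE_MSK	GENMASK(23, 21)
#define EXAMPLE_FILTER_FS_MSK	GENMASK(10, 0)

static int example_write_filter(struct regmap *map, unsigned int type,
				unsigned int fs)
{
	unsigned int val = FIELD_PREP(EXAMPLE_FILTER_TYPE_MSK, type) |
			   FIELD_PREP(EXAMPLE_FILTER_FS_MSK, fs);

	/* one bus transaction instead of two separate masked writes */
	return regmap_update_bits(map, EXAMPLE_REG_FILTER,
				  EXAMPLE_FILTER_TYPE_MSK | EXAMPLE_FILTER_FS_MSK,
				  val);
}
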
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 334ab90991d4..7042ddfdfc03 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -201,6 +202,7 @@ struct ad7192_chip_info {
struct ad7192_state {
const struct ad7192_chip_info *chip_info;
struct clk *mclk;
+ struct clk_hw int_clk_hw;
u16 int_vref_mv;
u32 aincom_mv;
u32 fclk;
@@ -284,7 +286,7 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
&ad7192_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7192_syscalib_mode_enum),
- {}
+ { }
};
static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
@@ -396,25 +398,162 @@ static inline bool ad7192_valid_external_frequency(u32 freq)
freq <= AD7192_EXT_FREQ_MHZ_MAX);
}
-static int ad7192_clock_select(struct ad7192_state *st)
+/*
+ * Position 0 of ad7192_clock_names, xtal, corresponds to clock source
+ * configuration AD7192_CLK_EXT_MCLK1_2 and position 1, mclk, corresponds to
+ * AD7192_CLK_EXT_MCLK2
+ */
+static const char *const ad7192_clock_names[] = {
+ "xtal",
+ "mclk"
+};
+
+static struct ad7192_state *clk_hw_to_ad7192(struct clk_hw *hw)
+{
+ return container_of(hw, struct ad7192_state, int_clk_hw);
+}
+
+static unsigned long ad7192_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return AD7192_INT_FREQ_MHZ;
+}
+
+static int ad7192_clk_output_is_enabled(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+
+ return st->clock_sel == AD7192_CLK_INT_CO;
+}
+
+static int ad7192_clk_prepare(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+ int ret;
+
+ st->mode &= ~AD7192_MODE_CLKSRC_MASK;
+ st->mode |= AD7192_CLK_INT_CO;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ if (ret)
+ return ret;
+
+ st->clock_sel = AD7192_CLK_INT_CO;
+
+ return 0;
+}
+
+static void ad7192_clk_unprepare(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+ int ret;
+
+ st->mode &= ~AD7192_MODE_CLKSRC_MASK;
+ st->mode |= AD7192_CLK_INT;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ if (ret)
+ return;
+
+ st->clock_sel = AD7192_CLK_INT;
+}
+
+static const struct clk_ops ad7192_int_clk_ops = {
+ .recalc_rate = ad7192_clk_recalc_rate,
+ .is_enabled = ad7192_clk_output_is_enabled,
+ .prepare = ad7192_clk_prepare,
+ .unprepare = ad7192_clk_unprepare,
+};
+
+static int ad7192_register_clk_provider(struct ad7192_state *st)
{
struct device *dev = &st->sd.spi->dev;
- unsigned int clock_sel;
+ struct clk_init_data init = {};
+ int ret;
- clock_sel = AD7192_CLK_INT;
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
- /* use internal clock */
- if (!st->mclk) {
- if (device_property_read_bool(dev, "adi,int-clock-output-enable"))
- clock_sel = AD7192_CLK_INT_CO;
- } else {
- if (device_property_read_bool(dev, "adi,clock-xtal"))
- clock_sel = AD7192_CLK_EXT_MCLK1_2;
- else
- clock_sel = AD7192_CLK_EXT_MCLK2;
+ if (!device_property_present(dev, "#clock-cells"))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-clk",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &ad7192_int_clk_ops;
+
+ st->int_clk_hw.init = &init;
+ ret = devm_clk_hw_register(dev, &st->int_clk_hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &st->int_clk_hw);
+}
+
+static int ad7192_clock_setup(struct ad7192_state *st)
+{
+ struct device *dev = &st->sd.spi->dev;
+ int ret;
+
+ /*
+ * The following two if branches are kept for backward compatibility but
+ * the use of the two devicetree properties is highly discouraged. Clock
+ * configuration should be done according to the bindings.
+ */
+
+ if (device_property_read_bool(dev, "adi,int-clock-output-enable")) {
+ st->clock_sel = AD7192_CLK_INT_CO;
+ st->fclk = AD7192_INT_FREQ_MHZ;
+ dev_warn(dev, "Property adi,int-clock-output-enable is deprecated! Check bindings!\n");
+ return 0;
+ }
+
+ if (device_property_read_bool(dev, "adi,clock-xtal")) {
+ st->clock_sel = AD7192_CLK_EXT_MCLK1_2;
+ st->mclk = devm_clk_get_enabled(dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get mclk\n");
+
+ st->fclk = clk_get_rate(st->mclk);
+ if (!ad7192_valid_external_frequency(st->fclk))
+ return dev_err_probe(dev, -EINVAL,
+ "External clock frequency out of bounds\n");
+
+ dev_warn(dev, "Property adi,clock-xtal is deprecated! Check bindings!\n");
+ return 0;
+ }
+
+ ret = device_property_match_property_string(dev, "clock-names",
+ ad7192_clock_names,
+ ARRAY_SIZE(ad7192_clock_names));
+ if (ret < 0) {
+ st->clock_sel = AD7192_CLK_INT;
+ st->fclk = AD7192_INT_FREQ_MHZ;
+
+ ret = ad7192_register_clk_provider(st);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock provider\n");
+ return 0;
}
- return clock_sel;
+ st->clock_sel = AD7192_CLK_EXT_MCLK1_2 + ret;
+
+ st->mclk = devm_clk_get_enabled(dev, ad7192_clock_names[ret]);
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get clock source\n");
+
+ st->fclk = clk_get_rate(st->mclk);
+ if (!ad7192_valid_external_frequency(st->fclk))
+ return dev_err_probe(dev, -EINVAL,
+ "External clock frequency out of bounds\n");
+
+ return 0;
}
static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
@@ -1275,21 +1414,9 @@ static int ad7192_probe(struct spi_device *spi)
if (ret)
return ret;
- st->fclk = AD7192_INT_FREQ_MHZ;
-
- st->mclk = devm_clk_get_optional_enabled(dev, "mclk");
- if (IS_ERR(st->mclk))
- return PTR_ERR(st->mclk);
-
- st->clock_sel = ad7192_clock_select(st);
-
- if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
- st->clock_sel == AD7192_CLK_EXT_MCLK2) {
- st->fclk = clk_get_rate(st->mclk);
- if (!ad7192_valid_external_frequency(st->fclk))
- return dev_err_probe(dev, -EINVAL,
- "External clock frequency out of bounds\n");
- }
+ ret = ad7192_clock_setup(st);
+ if (ret)
+ return ret;
ret = ad7192_setup(indio_dev, dev);
if (ret)
@@ -1304,7 +1431,7 @@ static const struct of_device_id ad7192_of_match[] = {
{ .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] },
{ .compatible = "adi,ad7194", .data = &ad7192_chip_info_tbl[ID_AD7194] },
{ .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad7192_of_match);
@@ -1314,7 +1441,7 @@ static const struct spi_device_id ad7192_ids[] = {
{ "ad7193", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7193] },
{ "ad7194", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7194] },
{ "ad7195", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7195] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7192_ids);
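
For reference, a minimal sketch of the clock-provider pattern ad7192_register_clk_provider() follows above: expose a fixed-rate internal oscillator as a clk_hw so other nodes can consume it through #clock-cells. All names and the 4.92 MHz rate here are illustrative assumptions, not the driver's values.

#include <linux/clk-provider.h>
#include <linux/device.h>

struct example_adc {
	struct clk_hw int_clk_hw;
};

static unsigned long example_clk_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	return 4920000;	/* fixed internal oscillator frequency */
}

static const struct clk_ops example_int_clk_ops = {
	.recalc_rate = example_clk_recalc_rate,
};

static int example_register_clk_provider(struct device *dev,
					  struct example_adc *st)
{
	struct clk_init_data init = {
		.name = "example-int-clk",
		.ops = &example_int_clk_ops,
	};
	int ret;

	st->int_clk_hw.init = &init;
	ret = devm_clk_hw_register(dev, &st->int_clk_hw);
	if (ret)
		return ret;

	/* let consumers reference the clock via #clock-cells = <0> */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
					   &st->int_clk_hw);
}
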
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index bdac020045b4..7949b076fb87 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -123,7 +123,8 @@ static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7266_state *st = iio_priv(indio_dev);
- unsigned int nr = find_first_bit(scan_mask, indio_dev->masklength);
+ unsigned int nr = find_first_bit(scan_mask,
+ iio_get_masklength(indio_dev));
ad7266_select_input(st, nr);
@@ -456,8 +457,8 @@ static int ad7266_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7266_id[] = {
- {"ad7265", 0},
- {"ad7266", 0},
+ { "ad7265", 0 },
+ { "ad7266", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7266_id);
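
A short sketch of the accessor change applied to ad7266 (and ad7298/ad799x later in this series): update_scan_mode callbacks now call iio_get_masklength() instead of touching indio_dev->masklength directly. The callback body below is a hypothetical example, not taken from any of these drivers.

#include <linux/bitmap.h>
#include <linux/iio/iio.h>

static int example_update_scan_mode(struct iio_dev *indio_dev,
				    const unsigned long *scan_mask)
{
	unsigned int len = iio_get_masklength(indio_dev);
	unsigned int first_chan = find_first_bit(scan_mask, len);
	unsigned int scan_count = bitmap_weight(scan_mask, len);

	dev_dbg(&indio_dev->dev, "first channel %u, %u channels enabled\n",
		first_chan, scan_count);

	/* program the input mux / transfer size from these values */
	return 0;
}
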
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index d4a4e15c8244..35aa39fe4bde 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -803,16 +804,16 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned int *channels;
int i, ret;
- channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
+ unsigned int *channels __free(kfree) = kcalloc(st->scan_cnt, sizeof(*channels),
+ GFP_KERNEL);
if (!channels)
return IRQ_HANDLED;
ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
if (ret < 0)
- goto out;
+ return IRQ_HANDLED;
for (i = 0; i < st->scan_cnt; i++) {
unsigned int val;
@@ -852,9 +853,6 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
}
}
-out:
- kfree(channels);
-
return IRQ_HANDLED;
}
@@ -1092,8 +1090,8 @@ static int ad7280_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7280_id[] = {
- {"ad7280a", 0},
- {}
+ { "ad7280a", 0 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7280_id);
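
A minimal sketch of the scope-based cleanup used in ad7280_event_handler() above: the __free(kfree) annotation from <linux/cleanup.h> frees the allocation on every return path, so the explicit "goto out; kfree();" pattern disappears. The function and its arguments are illustrative only.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example_read_all(size_t count)
{
	unsigned int *buf __free(kfree) = kcalloc(count, sizeof(*buf),
						  GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... fill and consume buf; it is kfree()d automatically here ... */
	return 0;
}
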
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index b59b2a51623c..4c7f887adbbf 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -537,14 +537,14 @@ static int ad7291_probe(struct i2c_client *client)
static const struct i2c_device_id ad7291_id[] = {
{ "ad7291" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7291_id);
static const struct of_device_id ad7291_of_match[] = {
{ .compatible = "adi,ad7291" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad7291_of_match);
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index ede80f380911..a398973f313d 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -301,13 +301,13 @@ static int ad7292_probe(struct spi_device *spi)
static const struct spi_device_id ad7292_id_table[] = {
{ "ad7292", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7292_id_table);
static const struct of_device_id ad7292_of_match[] = {
{ .compatible = "adi,ad7292" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7292_of_match);
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index c0430f71f592..b35bd4d9ef81 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -109,7 +109,8 @@ static int ad7298_update_scan_mode(struct iio_dev *indio_dev,
int scan_count;
/* Now compute overall size */
- scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength);
+ scan_count = bitmap_weight(active_scan_mask,
+ iio_get_masklength(indio_dev));
command = AD7298_WRITE | st->ext_ref;
@@ -354,8 +355,8 @@ static const struct acpi_device_id ad7298_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, ad7298_acpi_ids);
static const struct spi_device_id ad7298_id[] = {
- {"ad7298", 0},
- {}
+ { "ad7298", 0 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7298_id);
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 7568cd0a2b32..e8bddfb0d07d 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -8,9 +8,11 @@
* Datasheets of supported parts:
* ad7380/1 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7380-7381.pdf
* ad7383/4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-7384.pdf
+ * ad7386/7/8 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7386-7387-7388.pdf
* ad7380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7380-4.pdf
* ad7381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7381-4.pdf
* ad7383/4-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-4-ad7384-4.pdf
+ * ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf
*/
#include <linux/align.h>
@@ -31,7 +33,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#define MAX_NUM_CHANNELS 4
+#define MAX_NUM_CHANNELS 8
/* 2.5V internal reference voltage */
#define AD7380_INTERNAL_REF_MV 2500
@@ -49,6 +51,8 @@
#define AD7380_REG_ADDR_ALERT_LOW_TH 0x4
#define AD7380_REG_ADDR_ALERT_HIGH_TH 0x5
+#define AD7380_CONFIG1_CH BIT(11)
+#define AD7380_CONFIG1_SEQ BIT(10)
#define AD7380_CONFIG1_OS_MODE BIT(9)
#define AD7380_CONFIG1_OSR GENMASK(8, 6)
#define AD7380_CONFIG1_CRC_W BIT(5)
@@ -80,6 +84,8 @@ struct ad7380_chip_info {
const char *name;
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ unsigned int num_simult_channels;
+ bool has_mux;
const char * const *vcm_supplies;
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
@@ -91,82 +97,151 @@ enum {
AD7380_SCAN_TYPE_RESOLUTION_BOOST,
};
-/* Extended scan types for 14-bit chips. */
-static const struct iio_scan_type ad7380_scan_type_14[] = {
+/* Extended scan types for 12-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_12_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit signed chips. */
+static const struct iio_scan_type ad7380_scan_type_14_s[] = {
[AD7380_SCAN_TYPE_NORMAL] = {
.sign = 's',
.realbits = 14,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
[AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
};
-/* Extended scan types for 16-bit chips. */
-static const struct iio_scan_type ad7380_scan_type_16[] = {
+/* Extended scan types for 14-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_14_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit signed chips. */
+static const struct iio_scan_type ad7380_scan_type_16_s[] = {
[AD7380_SCAN_TYPE_NORMAL] = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
[AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
.sign = 's',
.realbits = 18,
.storagebits = 32,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_16_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
},
};
-#define AD7380_CHANNEL(index, bits, diff) { \
- .type = IIO_VOLTAGE, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- .info_mask_shared_by_type_available = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- .indexed = 1, \
- .differential = (diff), \
- .channel = (diff) ? (2 * (index)) : (index), \
- .channel2 = (diff) ? (2 * (index) + 1) : 0, \
- .scan_index = (index), \
- .has_ext_scan_type = 1, \
- .ext_scan_type = ad7380_scan_type_##bits, \
- .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits),\
+#define AD7380_CHANNEL(index, bits, diff, sign) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .indexed = 1, \
+ .differential = (diff), \
+ .channel = (diff) ? (2 * (index)) : (index), \
+ .channel2 = (diff) ? (2 * (index) + 1) : 0, \
+ .scan_index = (index), \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = ad7380_scan_type_##bits##_##sign, \
+ .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits##_##sign), \
}
-#define DEFINE_AD7380_2_CHANNEL(name, bits, diff) \
+#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff), \
- AD7380_CHANNEL(1, bits, diff), \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
IIO_CHAN_SOFT_TIMESTAMP(2), \
}
-#define DEFINE_AD7380_4_CHANNEL(name, bits, diff) \
+#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff), \
- AD7380_CHANNEL(1, bits, diff), \
- AD7380_CHANNEL(2, bits, diff), \
- AD7380_CHANNEL(3, bits, diff), \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
+#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ AD7380_CHANNEL(4, bits, diff, sign), \
+ AD7380_CHANNEL(5, bits, diff, sign), \
+ AD7380_CHANNEL(6, bits, diff, sign), \
+ AD7380_CHANNEL(7, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
/* fully differential */
-DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1);
-DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1);
-DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1);
-DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1);
+DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1, s);
+DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1, s);
+DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1, s);
+DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1, s);
/* pseudo differential */
-DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0);
-DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0);
-DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0);
-DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0);
+DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0, s);
+DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0, s);
+
+/* Single ended */
+DEFINE_AD7380_4_CHANNEL(ad7386_channels, 16, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7387_channels, 14, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7388_channels, 12, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
static const char * const ad7380_2_channel_vcm_supplies[] = {
"aina", "ainb",
@@ -187,6 +262,60 @@ static const unsigned long ad7380_4_channel_scan_masks[] = {
0
};
+/*
+ * Single ended parts have a 2:1 multiplexer in front of each ADC.
+ *
+ * From an IIO point of view, all inputs are exported, i.e. ad7386/7/8
+ * export 4 channels and ad7386-4/7-4/8-4 export 8 channels.
+ *
+ * Inputs AinX0 of the multiplexers correspond to the first half of the IIO
+ * channels (i.e. 0-1 or 0-3) and inputs AinX1 correspond to the second half
+ * (i.e. 2-3 or 4-7). Example for the AD7386/7/8 (2-channel parts):
+ *
+ * IIO | AD7386/7/8
+ * | +----------------------------
+ * | | _____ ______
+ * | | | | | |
+ * voltage0 | AinA0 --|--->| | | |
+ * | | | mux |----->| ADCA |---
+ * voltage2 | AinA1 --|--->| | | |
 * | | |_____| |______|
+ * | | _____ ______
+ * | | | | | |
+ * voltage1 | AinB0 --|--->| | | |
+ * | | | mux |----->| ADCB |---
+ * voltage3 | AinB1 --|--->| | | |
+ * | | |_____| |______|
+ * | |
+ * | +----------------------------
+ *
+ * Since sampling is simultaneous for either the AinX0 inputs or the AinX1
+ * inputs, we have two separate scan masks.
+ * When sequencer mode is enabled, the chip automatically cycles through the
+ * AinX0 and AinX1 inputs. From an IIO point of view, we can then enable all
+ * channels, at the cost of an extra read, thus dividing the maximum rate by
+ * two.
+ */
+enum {
+ AD7380_SCAN_MASK_CH_0,
+ AD7380_SCAN_MASK_CH_1,
+ AD7380_SCAN_MASK_SEQ,
+};
+
+static const unsigned long ad7380_2x2_channel_scan_masks[] = {
+ [AD7380_SCAN_MASK_CH_0] = GENMASK(1, 0),
+ [AD7380_SCAN_MASK_CH_1] = GENMASK(3, 2),
+ [AD7380_SCAN_MASK_SEQ] = GENMASK(3, 0),
+ 0
+};
+
+static const unsigned long ad7380_2x4_channel_scan_masks[] = {
+ [AD7380_SCAN_MASK_CH_0] = GENMASK(3, 0),
+ [AD7380_SCAN_MASK_CH_1] = GENMASK(7, 4),
+ [AD7380_SCAN_MASK_SEQ] = GENMASK(7, 0),
+ 0
+};
+
static const struct ad7380_timing_specs ad7380_timing = {
.t_csh_ns = 10,
};
@@ -208,6 +337,7 @@ static const struct ad7380_chip_info ad7380_chip_info = {
.name = "ad7380",
.channels = ad7380_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
+ .num_simult_channels = 2,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -216,6 +346,7 @@ static const struct ad7380_chip_info ad7381_chip_info = {
.name = "ad7381",
.channels = ad7381_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
+ .num_simult_channels = 2,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -224,6 +355,7 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.name = "ad7383",
.channels = ad7383_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
+ .num_simult_channels = 2,
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@@ -234,16 +366,48 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.name = "ad7384",
.channels = ad7384_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
+ .num_simult_channels = 2,
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
+static const struct ad7380_chip_info ad7386_chip_info = {
+ .name = "ad7386",
+ .channels = ad7386_channels,
+ .num_channels = ARRAY_SIZE(ad7386_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7387_chip_info = {
+ .name = "ad7387",
+ .channels = ad7387_channels,
+ .num_channels = ARRAY_SIZE(ad7387_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7388_chip_info = {
+ .name = "ad7388",
+ .channels = ad7388_channels,
+ .num_channels = ARRAY_SIZE(ad7388_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
static const struct ad7380_chip_info ad7380_4_chip_info = {
.name = "ad7380-4",
.channels = ad7380_4_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
+ .num_simult_channels = 4,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -252,6 +416,7 @@ static const struct ad7380_chip_info ad7381_4_chip_info = {
.name = "ad7381-4",
.channels = ad7381_4_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
+ .num_simult_channels = 4,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -260,6 +425,7 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.name = "ad7383-4",
.channels = ad7383_4_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
+ .num_simult_channels = 4,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -270,23 +436,58 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.name = "ad7384-4",
.channels = ad7384_4_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
+ .num_simult_channels = 4,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
+static const struct ad7380_chip_info ad7386_4_chip_info = {
+ .name = "ad7386-4",
+ .channels = ad7386_4_channels,
+ .num_channels = ARRAY_SIZE(ad7386_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7387_4_chip_info = {
+ .name = "ad7387-4",
+ .channels = ad7387_4_channels,
+ .num_channels = ARRAY_SIZE(ad7387_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7388_4_chip_info = {
+ .name = "ad7388-4",
+ .channels = ad7388_4_channels,
+ .num_channels = ARRAY_SIZE(ad7388_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
struct ad7380_state {
const struct ad7380_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
unsigned int oversampling_ratio;
bool resolution_boost_enabled;
+ unsigned int ch;
+ bool seq;
unsigned int vref_mv;
unsigned int vcm_mv[MAX_NUM_CHANNELS];
/* xfers, message and buffer for reading sample data */
- struct spi_transfer xfer[2];
- struct spi_message msg;
+ struct spi_transfer normal_xfer[2];
+ struct spi_message normal_msg;
+ struct spi_transfer seq_xfer[4];
+ struct spi_message seq_msg;
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
* to live in their own cache lines.
@@ -379,6 +580,43 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
unreachable();
}
+/*
+ * When switching channel, the ADC requires an additional settling time.
+ * According to the datasheet, data is valid on the third CS low. We already
+ * have an extra toggle before each read (either direct reads or buffered reads)
+ * to sample correct data, so we just add a single CS toggle at the end of the
+ * register write.
+ */
+static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
+{
+ struct spi_transfer xfer = {
+ .delay = {
+ .value = T_CONVERT_NS,
+ .unit = SPI_DELAY_UNIT_NSECS,
+ }
+ };
+ int ret;
+
+ if (st->ch == ch)
+ return 0;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_CH,
+ FIELD_PREP(AD7380_CONFIG1_CH, ch));
+
+ if (ret)
+ return ret;
+
+ st->ch = ch;
+
+ if (st->oversampling_ratio > 1)
+ xfer.delay.value = T_CONVERT_0_NS +
+ T_CONVERT_X_NS * (st->oversampling_ratio - 1);
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
/**
* ad7380_update_xfers - update the SPI transfers base on the current scan type
* @st: device instance specific state
@@ -387,33 +625,47 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
static void ad7380_update_xfers(struct ad7380_state *st,
const struct iio_scan_type *scan_type)
{
- /*
- * First xfer only triggers conversion and has to be long enough for
- * all conversions to complete, which can be multiple conversion in the
- * case of oversampling. Technically T_CONVERT_X_NS is lower for some
- * chips, but we use the maximum value for simplicity for now.
- */
- if (st->oversampling_ratio > 1)
- st->xfer[0].delay.value = T_CONVERT_0_NS + T_CONVERT_X_NS *
- (st->oversampling_ratio - 1);
- else
- st->xfer[0].delay.value = T_CONVERT_NS;
-
- st->xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ struct spi_transfer *xfer = st->seq ? st->seq_xfer : st->normal_xfer;
+ unsigned int t_convert = T_CONVERT_NS;
/*
- * Second xfer reads all channels. Data size depends on if resolution
- * boost is enabled or not.
+ * In the case of oversampling, conversion time is higher than in normal
+ * mode. Technically T_CONVERT_X_NS is lower for some chips, but we use
+ * the maximum value for simplicity for now.
*/
- st->xfer[1].bits_per_word = scan_type->realbits;
- st->xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
- (st->chip_info->num_channels - 1);
+ if (st->oversampling_ratio > 1)
+ t_convert = T_CONVERT_0_NS + T_CONVERT_X_NS *
+ (st->oversampling_ratio - 1);
+
+ if (st->seq) {
+ xfer[0].delay.value = xfer[1].delay.value = t_convert;
+ xfer[0].delay.unit = xfer[1].delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer[2].bits_per_word = xfer[3].bits_per_word =
+ scan_type->realbits;
+ xfer[2].len = xfer[3].len =
+ BITS_TO_BYTES(scan_type->storagebits) *
+ st->chip_info->num_simult_channels;
+ xfer[3].rx_buf = xfer[2].rx_buf + xfer[2].len;
+ /* Additional delay required here when oversampling is enabled */
+ if (st->oversampling_ratio > 1)
+ xfer[2].delay.value = t_convert;
+ else
+ xfer[2].delay.value = 0;
+ xfer[2].delay.unit = SPI_DELAY_UNIT_NSECS;
+ } else {
+ xfer[0].delay.value = t_convert;
+ xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer[1].bits_per_word = scan_type->realbits;
+ xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
+ st->chip_info->num_simult_channels;
+ }
}
static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
+ struct spi_message *msg = &st->normal_msg;
/*
* Currently, we always read all channels at the same time. The scan_type
@@ -423,16 +675,63 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
if (IS_ERR(scan_type))
return PTR_ERR(scan_type);
+ if (st->chip_info->has_mux) {
+ unsigned int index;
+ int ret;
+
+ /*
+ * Depending on the requested scan_mask and the current state,
+ * we need to either change the CH bit or enable sequencer mode
+ * to sample the correct data.
+ * Sequencer mode is enabled if the active mask corresponds to all
+ * IIO channels being enabled. Otherwise, the CH bit is set.
+ */
+ ret = iio_active_scan_mask_index(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
+ if (index == AD7380_SCAN_MASK_SEQ) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 1));
+ if (ret)
+ return ret;
+ msg = &st->seq_msg;
+ st->seq = true;
+ } else {
+ ret = ad7380_set_ch(st, index);
+ if (ret)
+ return ret;
+ }
+
+ }
+
ad7380_update_xfers(st, scan_type);
- return spi_optimize_message(st->spi, &st->msg);
+ return spi_optimize_message(st->spi, msg);
}
static int ad7380_triggered_buffer_postdisable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_message *msg = &st->normal_msg;
+ int ret;
+
+ if (st->seq) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 0));
+ if (ret)
+ return ret;
+
+ msg = &st->seq_msg;
+ st->seq = false;
+ }
- spi_unoptimize_message(&st->msg);
+ spi_unoptimize_message(msg);
return 0;
}
@@ -447,9 +746,10 @@ static irqreturn_t ad7380_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_message *msg = st->seq ? &st->seq_msg : &st->normal_msg;
int ret;
- ret = spi_sync(st->spi, &st->msg);
+ ret = spi_sync(st->spi, msg);
if (ret)
goto out;
@@ -465,20 +765,43 @@ out:
static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index,
const struct iio_scan_type *scan_type, int *val)
{
+ unsigned int index = scan_index;
int ret;
+ if (st->chip_info->has_mux) {
+ unsigned int ch = 0;
+
+ if (index >= st->chip_info->num_simult_channels) {
+ index -= st->chip_info->num_simult_channels;
+ ch = 1;
+ }
+
+ ret = ad7380_set_ch(st, ch);
+ if (ret)
+ return ret;
+ }
+
ad7380_update_xfers(st, scan_type);
- ret = spi_sync(st->spi, &st->msg);
+ ret = spi_sync(st->spi, &st->normal_msg);
if (ret < 0)
return ret;
- if (scan_type->storagebits > 16)
- *val = sign_extend32(*(u32 *)(st->scan_data + 4 * scan_index),
- scan_type->realbits - 1);
- else
- *val = sign_extend32(*(u16 *)(st->scan_data + 2 * scan_index),
- scan_type->realbits - 1);
+ if (scan_type->storagebits > 16) {
+ if (scan_type->sign == 's')
+ *val = sign_extend32(*(u32 *)(st->scan_data + 4 * index),
+ scan_type->realbits - 1);
+ else
+ *val = *(u32 *)(st->scan_data + 4 * index) &
+ GENMASK(scan_type->realbits - 1, 0);
+ } else {
+ if (scan_type->sign == 's')
+ *val = sign_extend32(*(u16 *)(st->scan_data + 2 * index),
+ scan_type->realbits - 1);
+ else
+ *val = *(u16 *)(st->scan_data + 2 * index) &
+ GENMASK(scan_type->realbits - 1, 0);
+ }
return IIO_VAL_INT;
}
@@ -655,6 +978,8 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
/* This is the default value after reset. */
st->oversampling_ratio = 1;
+ st->ch = 0;
+ st->seq = false;
/* SPI 1-wire mode */
return regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG2,
@@ -756,21 +1081,45 @@ static int ad7380_probe(struct spi_device *spi)
"failed to allocate register map\n");
/*
- * Setting up a low latency read for getting sample data. Used for both
- * direct read an triggered buffer. Additional fields will be set up in
- * ad7380_update_xfers() based on the current state of the driver at the
- * time of the read.
+ * Setting up xfer structures for both normal and sequencer mode. These
+ * structs are used for both direct reads and triggered buffers. Additional
+ * fields will be set up in ad7380_update_xfers() based on the current
+ * state of the driver at the time of the read.
+ */
+
+ /*
+ * In normal mode a read is composed of two steps:
+ * - first, toggle CS (no data xfer) to trigger a conversion
+ * - then, read data
*/
+ st->normal_xfer[0].cs_change = 1;
+ st->normal_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->normal_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ st->normal_xfer[1].rx_buf = st->scan_data;
- /* toggle CS (no data xfer) to trigger a conversion */
- st->xfer[0].cs_change = 1;
- st->xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
- st->xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ spi_message_init_with_transfers(&st->normal_msg, st->normal_xfer,
+ ARRAY_SIZE(st->normal_xfer));
+ /*
+ * In sequencer mode a read is composed of four steps:
+ * - CS toggle (no data xfer) to get the right point in the sequence
+ * - CS toggle (no data xfer) to trigger a conversion of AinX0 and
+ * acquisition of AinX1
+ * - 2 data reads, to read AinX0 and AinX1
+ */
+ st->seq_xfer[0].cs_change = 1;
+ st->seq_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ st->seq_xfer[1].cs_change = 1;
+ st->seq_xfer[1].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[1].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- /* then do a second xfer to read the data */
- st->xfer[1].rx_buf = st->scan_data;
+ st->seq_xfer[2].rx_buf = st->scan_data;
+ st->seq_xfer[2].cs_change = 1;
+ st->seq_xfer[2].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[2].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- spi_message_init_with_transfers(&st->msg, st->xfer, ARRAY_SIZE(st->xfer));
+ spi_message_init_with_transfers(&st->seq_msg, st->seq_xfer,
+ ARRAY_SIZE(st->seq_xfer));
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
@@ -798,10 +1147,16 @@ static const struct of_device_id ad7380_of_match_table[] = {
{ .compatible = "adi,ad7381", .data = &ad7381_chip_info },
{ .compatible = "adi,ad7383", .data = &ad7383_chip_info },
{ .compatible = "adi,ad7384", .data = &ad7384_chip_info },
+ { .compatible = "adi,ad7386", .data = &ad7386_chip_info },
+ { .compatible = "adi,ad7387", .data = &ad7387_chip_info },
+ { .compatible = "adi,ad7388", .data = &ad7388_chip_info },
{ .compatible = "adi,ad7380-4", .data = &ad7380_4_chip_info },
{ .compatible = "adi,ad7381-4", .data = &ad7381_4_chip_info },
{ .compatible = "adi,ad7383-4", .data = &ad7383_4_chip_info },
{ .compatible = "adi,ad7384-4", .data = &ad7384_4_chip_info },
+ { .compatible = "adi,ad7386-4", .data = &ad7386_4_chip_info },
+ { .compatible = "adi,ad7387-4", .data = &ad7387_4_chip_info },
+ { .compatible = "adi,ad7388-4", .data = &ad7388_4_chip_info },
{ }
};
@@ -810,10 +1165,16 @@ static const struct spi_device_id ad7380_id_table[] = {
{ "ad7381", (kernel_ulong_t)&ad7381_chip_info },
{ "ad7383", (kernel_ulong_t)&ad7383_chip_info },
{ "ad7384", (kernel_ulong_t)&ad7384_chip_info },
+ { "ad7386", (kernel_ulong_t)&ad7386_chip_info },
+ { "ad7387", (kernel_ulong_t)&ad7387_chip_info },
+ { "ad7388", (kernel_ulong_t)&ad7388_chip_info },
{ "ad7380-4", (kernel_ulong_t)&ad7380_4_chip_info },
{ "ad7381-4", (kernel_ulong_t)&ad7381_4_chip_info },
{ "ad7383-4", (kernel_ulong_t)&ad7383_4_chip_info },
{ "ad7384-4", (kernel_ulong_t)&ad7384_4_chip_info },
+ { "ad7386-4", (kernel_ulong_t)&ad7386_4_chip_info },
+ { "ad7387-4", (kernel_ulong_t)&ad7387_4_chip_info },
+ { "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7380_id_table);
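
A simplified sketch, with assumed register and field names, of how the preenable hook above maps the active scan mask onto the multiplexed parts: the first two mask indices select one mux input through the CH bit, the last index (all channels) enables the on-chip sequencer. iio_active_scan_mask_index() is the helper used in the hunk above.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/iio/iio.h>
#include <linux/regmap.h>

#define EXAMPLE_CONFIG1		0x1
#define EXAMPLE_CONFIG1_CH	BIT(11)
#define EXAMPLE_CONFIG1_SEQ	BIT(10)
#define EXAMPLE_MASK_SEQ	2	/* index of the "all channels" scan mask */

static int example_preenable(struct iio_dev *indio_dev, struct regmap *map)
{
	int index = iio_active_scan_mask_index(indio_dev);

	if (index < 0)
		return index;

	if (index == EXAMPLE_MASK_SEQ)
		return regmap_update_bits(map, EXAMPLE_CONFIG1,
					  EXAMPLE_CONFIG1_SEQ,
					  FIELD_PREP(EXAMPLE_CONFIG1_SEQ, 1));

	/* otherwise select mux input 0 or 1 via the CH bit */
	return regmap_update_bits(map, EXAMPLE_CONFIG1, EXAMPLE_CONFIG1_CH,
				  FIELD_PREP(EXAMPLE_CONFIG1_CH, index));
}
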
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 80aebed47d1f..aeb8e383fe71 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -409,35 +409,35 @@ static int ad7476_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7476_id[] = {
- {"ad7091", ID_AD7091},
- {"ad7091r", ID_AD7091R},
- {"ad7273", ID_AD7273},
- {"ad7274", ID_AD7274},
- {"ad7276", ID_AD7276},
- {"ad7277", ID_AD7277},
- {"ad7278", ID_AD7278},
- {"ad7466", ID_AD7466},
- {"ad7467", ID_AD7467},
- {"ad7468", ID_AD7468},
- {"ad7475", ID_AD7475},
- {"ad7476", ID_AD7466},
- {"ad7476a", ID_AD7466},
- {"ad7477", ID_AD7467},
- {"ad7477a", ID_AD7467},
- {"ad7478", ID_AD7468},
- {"ad7478a", ID_AD7468},
- {"ad7495", ID_AD7495},
- {"ad7910", ID_AD7467},
- {"ad7920", ID_AD7466},
- {"ad7940", ID_AD7940},
- {"adc081s", ID_ADC081S},
- {"adc101s", ID_ADC101S},
- {"adc121s", ID_ADC121S},
- {"ads7866", ID_ADS7866},
- {"ads7867", ID_ADS7867},
- {"ads7868", ID_ADS7868},
- {"ltc2314-14", ID_LTC2314_14},
- {}
+ { "ad7091", ID_AD7091 },
+ { "ad7091r", ID_AD7091R },
+ { "ad7273", ID_AD7273 },
+ { "ad7274", ID_AD7274 },
+ { "ad7276", ID_AD7276},
+ { "ad7277", ID_AD7277 },
+ { "ad7278", ID_AD7278 },
+ { "ad7466", ID_AD7466 },
+ { "ad7467", ID_AD7467 },
+ { "ad7468", ID_AD7468 },
+ { "ad7475", ID_AD7475 },
+ { "ad7476", ID_AD7466 },
+ { "ad7476a", ID_AD7466 },
+ { "ad7477", ID_AD7467 },
+ { "ad7477a", ID_AD7467 },
+ { "ad7478", ID_AD7468 },
+ { "ad7478a", ID_AD7468 },
+ { "ad7495", ID_AD7495 },
+ { "ad7910", ID_AD7467 },
+ { "ad7920", ID_AD7466 },
+ { "ad7940", ID_AD7940 },
+ { "adc081s", ID_ADC081S },
+ { "adc101s", ID_ADC101S },
+ { "adc121s", ID_ADC121S },
+ { "ads7866", ID_ADS7866 },
+ { "ads7867", ID_ADS7867 },
+ { "ads7868", ID_ADS7868 },
+ { "ltc2314-14", ID_LTC2314_14 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index c321c6ef48df..9b457472d49c 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -70,19 +70,17 @@ static int ad7606_reg_access(struct iio_dev *indio_dev,
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
+
if (readval) {
ret = st->bops->reg_read(st, reg);
if (ret < 0)
- goto err_unlock;
+ return ret;
*readval = ret;
- ret = 0;
+ return 0;
} else {
- ret = st->bops->reg_write(st, reg, writeval);
+ return st->bops->reg_write(st, reg, writeval);
}
-err_unlock:
- mutex_unlock(&st->lock);
- return ret;
}
static int ad7606_read_samples(struct ad7606_state *st)
@@ -100,19 +98,19 @@ static irqreturn_t ad7606_trigger_handler(int irq, void *p)
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad7606_read_samples(st);
- if (ret == 0)
- iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns(indio_dev));
+ if (ret)
+ goto error_ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns(indio_dev));
+error_ret:
iio_trigger_notify_done(indio_dev->trig);
/* The rising edge of the CONVST signal starts a new conversion. */
gpiod_set_value(st->gpio_convst, 1);
- mutex_unlock(&st->lock);
-
return IRQ_HANDLED;
}
@@ -212,9 +210,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
struct ad7606_state *st = iio_priv(indio_dev);
DECLARE_BITMAP(values, 3);
- values[0] = val;
+ values[0] = val & GENMASK(2, 0);
- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
st->gpio_os->info, values);
/* AD7616 requires a reset to update value */
@@ -233,19 +231,17 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
struct ad7606_state *st = iio_priv(indio_dev);
int i, ret, ch = 0;
+ guard(mutex)(&st->lock);
+
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->lock);
i = find_closest(val2, st->scale_avail, st->num_scales);
if (st->sw_mode_en)
ch = chan->address;
ret = st->write_scale(indio_dev, ch, i);
- if (ret < 0) {
- mutex_unlock(&st->lock);
+ if (ret < 0)
return ret;
- }
st->range[ch] = i;
- mutex_unlock(&st->lock);
return 0;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -253,14 +249,9 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
i = find_closest(val, st->oversampling_avail,
st->num_os_ratios);
- mutex_lock(&st->lock);
ret = st->write_os(indio_dev, i);
- if (ret < 0) {
- mutex_unlock(&st->lock);
+ if (ret < 0)
return ret;
- }
- st->oversampling = st->oversampling_avail[i];
- mutex_unlock(&st->lock);
return 0;
default:
@@ -419,7 +410,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
return PTR_ERR(st->gpio_range);
st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_standby))
return PTR_ERR(st->gpio_standby);
@@ -662,7 +653,7 @@ static int ad7606_suspend(struct device *dev)
if (st->gpio_standby) {
gpiod_set_value(st->gpio_range, 1);
- gpiod_set_value(st->gpio_standby, 0);
+ gpiod_set_value(st->gpio_standby, 1);
}
return 0;
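
A minimal sketch of the guard(mutex) conversion applied to ad7606 above: the guard from <linux/cleanup.h> releases the lock automatically on every return, removing the unlock-before-return boilerplate. The names and the read_reg callback are illustrative assumptions.

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int example_locked_read(struct mutex *lock,
			       int (*read_reg)(unsigned int reg),
			       unsigned int reg, unsigned int *val)
{
	int ret;

	guard(mutex)(lock);

	ret = read_reg(reg);
	if (ret < 0)
		return ret;	/* mutex released automatically */

	*val = ret;
	return 0;		/* ...and here */
}
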
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 6bc587b20f05..02d8c309304e 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -125,7 +125,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-4" },
{ .compatible = "adi,ad7606-6" },
{ .compatible = "adi,ad7606-8" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7606_of_match);
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 263a778bcf25..62ec12195307 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long os[3] = {1};
+ DECLARE_BITMAP(os, 3);
+ bitmap_fill(os, 3);
/*
* Software mode is enabled when all three oversampling
* pins are set to high. If oversampling gpios are defined
@@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
* otherwise, they must be hardwired to VDD
*/
if (st->gpio_os) {
- gpiod_set_array_value(ARRAY_SIZE(os),
+ gpiod_set_array_value(st->gpio_os->ndescs,
st->gpio_os->desc, st->gpio_os->info, os);
}
/* OS of 128 and 256 are available only in software mode */
@@ -333,7 +334,7 @@ static const struct spi_device_id ad7606_id_table[] = {
{ "ad7606-8", ID_AD7606_8 },
{ "ad7606b", ID_AD7606B },
{ "ad7616", ID_AD7616 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7606_id_table);
@@ -344,7 +345,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-8" },
{ .compatible = "adi,ad7606b" },
{ .compatible = "adi,ad7616" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7606_of_match);
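
A small sketch of the bitmap handling change in ad7606B_sw_mode_config() above: the value array passed to gpiod_set_array_value() is a bitmap, so it is built with DECLARE_BITMAP()/bitmap_fill(), and the descriptor count comes from the gpio_descs actually acquired rather than a fixed array size. Function name below is hypothetical.

#include <linux/bitmap.h>
#include <linux/gpio/consumer.h>

static int example_set_all_os_pins(struct gpio_descs *os_gpios)
{
	DECLARE_BITMAP(values, 3);

	bitmap_fill(values, 3);	/* drive all three oversampling pins high */

	return gpiod_set_array_value(os_gpios->ndescs, os_gpios->desc,
				     os_gpios->info, values);
}
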
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index 3079a0872947..4d570383ef02 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -291,13 +291,13 @@ static int ad7766_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7766_id[] = {
- {"ad7766", ID_AD7766},
- {"ad7766-1", ID_AD7766_1},
- {"ad7766-2", ID_AD7766_2},
- {"ad7767", ID_AD7766},
- {"ad7767-1", ID_AD7766_1},
- {"ad7767-2", ID_AD7766_2},
- {}
+ { "ad7766", ID_AD7766 },
+ { "ad7766-1", ID_AD7766_1 },
+ { "ad7766-2", ID_AD7766_2 },
+ { "ad7767", ID_AD7766 },
+ { "ad7767-1", ID_AD7766_1 },
+ { "ad7767-2", ID_AD7766_2 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7766_id);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 70a25949142c..113703fb7245 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -544,13 +544,10 @@ static int ad7768_set_channel_label(struct iio_dev *indio_dev,
{
struct ad7768_state *st = iio_priv(indio_dev);
struct device *device = indio_dev->dev.parent;
- struct fwnode_handle *fwnode;
- struct fwnode_handle *child;
const char *label;
int crt_ch = 0;
- fwnode = dev_fwnode(device);
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(device, child) {
if (fwnode_property_read_u32(child, "reg", &crt_ch))
continue;
@@ -658,7 +655,7 @@ MODULE_DEVICE_TABLE(spi, ad7768_id_table);
static const struct of_device_id ad7768_of_match[] = {
{ .compatible = "adi,ad7768-1" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7768_of_match);
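
A brief sketch of the scoped child-node iterator adopted in ad7768_set_channel_label() above: device_for_each_child_node_scoped() drops each fwnode reference automatically, so no manual fwnode_handle_put() is needed on early exits. The property use below is an illustrative assumption.

#include <linux/device.h>
#include <linux/property.h>
#include <linux/types.h>

static int example_read_channel_labels(struct device *dev)
{
	u32 reg;

	device_for_each_child_node_scoped(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &reg))
			continue;

		/* use "reg" to index per-channel data here */
	}

	return 0;
}
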
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index a813fe04787c..e9b0c577c9cc 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -355,11 +355,11 @@ static int ad7780_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7780_id[] = {
- {"ad7170", ID_AD7170},
- {"ad7171", ID_AD7171},
- {"ad7780", ID_AD7780},
- {"ad7781", ID_AD7781},
- {}
+ { "ad7170", ID_AD7170 },
+ { "ad7171", ID_AD7171 },
+ { "ad7780", ID_AD7780 },
+ { "ad7781", ID_AD7781 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7780_id);
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index d4ad7e0b515a..abebd519cafa 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -824,16 +824,16 @@ static int ad7793_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7793_id[] = {
- {"ad7785", ID_AD7785},
- {"ad7792", ID_AD7792},
- {"ad7793", ID_AD7793},
- {"ad7794", ID_AD7794},
- {"ad7795", ID_AD7795},
- {"ad7796", ID_AD7796},
- {"ad7797", ID_AD7797},
- {"ad7798", ID_AD7798},
- {"ad7799", ID_AD7799},
- {}
+ { "ad7785", ID_AD7785 },
+ { "ad7792", ID_AD7792 },
+ { "ad7793", ID_AD7793 },
+ { "ad7794", ID_AD7794 },
+ { "ad7795", ID_AD7795 },
+ { "ad7796", ID_AD7796 },
+ { "ad7797", ID_AD7797 },
+ { "ad7798", ID_AD7798 },
+ { "ad7799", ID_AD7799 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7793_id);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 965bdc8aa696..6265ce7df703 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -329,8 +329,8 @@ static int ad7887_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7887_id[] = {
- {"ad7887", ID_AD7887},
- {}
+ { "ad7887", ID_AD7887 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7887_id);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 9d6bf6d0927a..09680015a7ab 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -361,14 +361,14 @@ static int ad7923_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7923_id[] = {
- {"ad7904", AD7904},
- {"ad7914", AD7914},
- {"ad7923", AD7924},
- {"ad7924", AD7924},
- {"ad7908", AD7908},
- {"ad7918", AD7918},
- {"ad7928", AD7928},
- {}
+ { "ad7904", AD7904 },
+ { "ad7914", AD7914 },
+ { "ad7923", AD7924 },
+ { "ad7924", AD7924 },
+ { "ad7908", AD7908 },
+ { "ad7918", AD7918 },
+ { "ad7928", AD7928 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7923_id);
@@ -380,7 +380,7 @@ static const struct of_device_id ad7923_of_match[] = {
{ .compatible = "adi,ad7908", },
{ .compatible = "adi,ad7918", },
{ .compatible = "adi,ad7928", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7923_of_match);
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 0f0dcd9ca6b6..0f107e3fc2c8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -237,7 +237,8 @@ static int ad799x_update_scan_mode(struct iio_dev *indio_dev,
if (!st->rx_buf)
return -ENOMEM;
- st->transfer_size = bitmap_weight(scan_mask, indio_dev->masklength) * 2;
+ st->transfer_size = bitmap_weight(scan_mask,
+ iio_get_masklength(indio_dev)) * 2;
switch (st->id) {
case ad7992:
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 41c1b519c573..05fb7a75531f 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -104,7 +105,30 @@
#define AD9467_DEF_OUTPUT_MODE 0x08
#define AD9467_REG_VREF_MASK 0x0F
+/*
+ * Analog Devices AD9643 14-Bit, 170/210/250 MSPS ADC
+ */
+
+#define CHIPID_AD9643 0x82
+#define AD9643_REG_VREF_MASK 0x1F
+
+/*
+ * Analog Devices AD9652 16-bit 310 MSPS ADC
+ */
+
+#define CHIPID_AD9652 0xC1
+#define AD9652_REG_VREF_MASK 0xC0
+
+/*
+ * Analog Devices AD9649 14-bit 20/40/65/80 MSPS ADC
+ */
+
+#define CHIPID_AD9649 0x6F
+#define AD9649_TEST_POINTS 8
+
#define AD9647_MAX_TEST_POINTS 32
+#define AD9467_CAN_INVERT(st) \
+ (!(st)->info->has_dco || (st)->info->has_dco_invert)
struct ad9467_chip_info {
const char *name;
@@ -113,12 +137,23 @@ struct ad9467_chip_info {
unsigned int num_channels;
const unsigned int (*scale_table)[2];
int num_scales;
+ unsigned long test_mask;
+ unsigned int test_mask_len;
unsigned long max_rate;
unsigned int default_output_mode;
unsigned int vref_mask;
unsigned int num_lanes;
+ unsigned int dco_en;
+ unsigned int test_points;
/* data clock output */
bool has_dco;
+ bool has_dco_invert;
+};
+
+struct ad9467_chan_test_mode {
+ struct ad9467_state *st;
+ unsigned int idx;
+ u8 mode;
};
struct ad9467_state {
@@ -126,6 +161,8 @@ struct ad9467_state {
struct iio_backend *back;
struct spi_device *spi;
struct clk *clk;
+ /* used for debugfs */
+ struct ad9467_chan_test_mode *chan_test;
unsigned int output_mode;
unsigned int (*scales)[2];
/*
@@ -138,6 +175,8 @@ struct ad9467_state {
* at the io delay control section.
*/
DECLARE_BITMAP(calib_map, AD9647_MAX_TEST_POINTS * 2);
+ /* number of bits of the map */
+ unsigned int calib_map_size;
struct gpio_desc *pwrdown_gpio;
/* ensure consistent state obtained on multiple related accesses */
struct mutex lock;
@@ -211,6 +250,24 @@ static const unsigned int ad9467_scale_table[][2] = {
{2300, 8}, {2400, 9}, {2500, 10},
};
+static const unsigned int ad9643_scale_table[][2] = {
+ {2087, 0x0F}, {2065, 0x0E}, {2042, 0x0D}, {2020, 0x0C}, {1997, 0x0B},
+ {1975, 0x0A}, {1952, 0x09}, {1930, 0x08}, {1907, 0x07}, {1885, 0x06},
+ {1862, 0x05}, {1840, 0x04}, {1817, 0x03}, {1795, 0x02}, {1772, 0x01},
+ {1750, 0x00}, {1727, 0x1F}, {1704, 0x1E}, {1681, 0x1D}, {1658, 0x1C},
+ {1635, 0x1B}, {1612, 0x1A}, {1589, 0x19}, {1567, 0x18}, {1544, 0x17},
+ {1521, 0x16}, {1498, 0x15}, {1475, 0x14}, {1452, 0x13}, {1429, 0x12},
+ {1406, 0x11}, {1383, 0x10},
+};
+
+static const unsigned int ad9649_scale_table[][2] = {
+ {2000, 0},
+};
+
+static const unsigned int ad9652_scale_table[][2] = {
+ {1250, 0}, {1125, 1}, {1200, 2}, {1250, 3}, {1000, 5},
+};
+
static void __ad9467_get_scale(struct ad9467_state *st, int index,
unsigned int *val, unsigned int *val2)
{
@@ -224,14 +281,14 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
*val2 = tmp % 1000000;
}
-#define AD9467_CHAN(_chan, _si, _bits, _sign) \
+#define AD9467_CHAN(_chan, avai_mask, _si, _bits, _sign) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = avai_mask, \
.scan_index = _si, \
.scan_type = { \
.sign = _sign, \
@@ -241,11 +298,42 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 's'),
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 's'),
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'),
+};
+
+static const struct iio_chan_spec ad9643_channels[] = {
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 14, 's'),
+ AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 14, 's'),
+};
+
+static const struct iio_chan_spec ad9649_channels[] = {
+ AD9467_CHAN(0, 0, 0, 14, 's'),
+};
+
+static const struct iio_chan_spec ad9652_channels[] = {
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'),
+ AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 16, 's'),
+};
+
+static const char * const ad9467_test_modes[] = {
+ [AN877_ADC_TESTMODE_OFF] = "off",
+ [AN877_ADC_TESTMODE_MIDSCALE_SHORT] = "midscale_short",
+ [AN877_ADC_TESTMODE_POS_FULLSCALE] = "pos_fullscale",
+ [AN877_ADC_TESTMODE_NEG_FULLSCALE] = "neg_fullscale",
+ [AN877_ADC_TESTMODE_ALT_CHECKERBOARD] = "checkerboard",
+ [AN877_ADC_TESTMODE_PN23_SEQ] = "prbs23",
+ [AN877_ADC_TESTMODE_PN9_SEQ] = "prbs9",
+ [AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE] = "one_zero_toggle",
+ [AN877_ADC_TESTMODE_USER] = "user",
+ [AN877_ADC_TESTMODE_BIT_TOGGLE] = "bit_toggle",
+ [AN877_ADC_TESTMODE_SYNC] = "sync",
+ [AN877_ADC_TESTMODE_ONE_BIT_HIGH] = "one_bit_high",
+ [AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY] = "mixed_bit_frequency",
+ [AN877_ADC_TESTMODE_RAMP] = "ramp",
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
@@ -256,6 +344,10 @@ static const struct ad9467_chip_info ad9467_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9467_scale_table),
.channels = ad9467_channels,
.num_channels = ARRAY_SIZE(ad9467_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
.default_output_mode = AD9467_DEF_OUTPUT_MODE,
.vref_mask = AD9467_REG_VREF_MASK,
.num_lanes = 8,
@@ -269,6 +361,9 @@ static const struct ad9467_chip_info ad9434_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9434_scale_table),
.channels = ad9434_channels,
.num_channels = ARRAY_SIZE(ad9434_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_USER, AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_USER + 1,
.default_output_mode = AD9434_DEF_OUTPUT_MODE,
.vref_mask = AD9434_REG_VREF_MASK,
.num_lanes = 6,
@@ -282,17 +377,78 @@ static const struct ad9467_chip_info ad9265_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9265_scale_table),
.channels = ad9467_channels,
.num_channels = ARRAY_SIZE(ad9467_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
.default_output_mode = AD9265_DEF_OUTPUT_MODE,
.vref_mask = AD9265_REG_VREF_MASK,
.has_dco = true,
+ .has_dco_invert = true,
+};
+
+static const struct ad9467_chip_info ad9643_chip_tbl = {
+ .name = "ad9643",
+ .id = CHIPID_AD9643,
+ .max_rate = 250000000UL,
+ .scale_table = ad9643_scale_table,
+ .num_scales = ARRAY_SIZE(ad9643_scale_table),
+ .channels = ad9643_channels,
+ .num_channels = ARRAY_SIZE(ad9643_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = BIT(AN877_ADC_TESTMODE_RAMP) |
+ GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY, AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_RAMP + 1,
+ .vref_mask = AD9643_REG_VREF_MASK,
+ .has_dco = true,
+ .has_dco_invert = true,
+ .dco_en = AN877_ADC_DCO_DELAY_ENABLE,
+};
+
+static const struct ad9467_chip_info ad9649_chip_tbl = {
+ .name = "ad9649",
+ .id = CHIPID_AD9649,
+ .max_rate = 80000000UL,
+ .scale_table = ad9649_scale_table,
+ .num_scales = ARRAY_SIZE(ad9649_scale_table),
+ .channels = ad9649_channels,
+ .num_channels = ARRAY_SIZE(ad9649_channels),
+ .test_points = AD9649_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY + 1,
+ .has_dco = true,
+ .has_dco_invert = true,
+ .dco_en = AN877_ADC_DCO_DELAY_ENABLE,
+};
+
+static const struct ad9467_chip_info ad9652_chip_tbl = {
+ .name = "ad9652",
+ .id = CHIPID_AD9652,
+ .max_rate = 310000000UL,
+ .scale_table = ad9652_scale_table,
+ .num_scales = ARRAY_SIZE(ad9652_scale_table),
+ .channels = ad9652_channels,
+ .num_channels = ARRAY_SIZE(ad9652_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
+ .vref_mask = AD9652_REG_VREF_MASK,
+ .has_dco = true,
};
static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
{
const struct ad9467_chip_info *info = st->info;
- unsigned int i, vref_val;
+ unsigned int vref_val;
+ unsigned int i = 0;
int ret;
+ /* nothing to read if we only have one possible scale */
+ if (info->num_scales == 1)
+ goto out_get_scale;
+
ret = ad9467_spi_read(st, AN877_ADC_REG_VREF);
if (ret < 0)
return ret;
@@ -307,6 +463,7 @@ static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
if (i == info->num_scales)
return -ERANGE;
+out_get_scale:
__ad9467_get_scale(st, i, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
@@ -321,6 +478,8 @@ static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
if (val != 0)
return -EINVAL;
+ if (info->num_scales == 1)
+ return -EOPNOTSUPP;
for (i = 0; i < info->num_scales; i++) {
__ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]);
@@ -352,40 +511,96 @@ static int ad9467_outputmode_set(struct ad9467_state *st, unsigned int mode)
AN877_ADC_TRANSFER_SYNC);
}
-static int ad9647_calibrate_prepare(struct ad9467_state *st)
+static int ad9467_testmode_set(struct ad9467_state *st, unsigned int chan,
+ unsigned int test_mode)
+{
+ int ret;
+
+ if (st->info->num_channels > 1) {
+ /* so that the test mode is only applied to one channel */
+ ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX, BIT(chan));
+ if (ret)
+ return ret;
+ }
+
+ ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, test_mode);
+ if (ret)
+ return ret;
+
+ if (st->info->num_channels > 1) {
+ /* go to default state where all channels get write commands */
+ ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX,
+ GENMASK(st->info->num_channels - 1, 0));
+ if (ret)
+ return ret;
+ }
+
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+}
+
+static int ad9467_backend_testmode_on(struct ad9467_state *st,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern)
{
struct iio_backend_data_fmt data = {
.enable = false,
};
- unsigned int c;
int ret;
- ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
- AN877_ADC_TESTMODE_PN9_SEQ);
+ ret = iio_backend_data_format_set(st->back, chan, &data);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_test_pattern_set(st->back, chan, pattern);
+ if (ret)
+ return ret;
+
+ return iio_backend_chan_enable(st->back, chan);
+}
+
+static int ad9467_backend_testmode_off(struct ad9467_state *st,
+ unsigned int chan)
+{
+ struct iio_backend_data_fmt data = {
+ .enable = true,
+ .sign_extend = true,
+ };
+ int ret;
+
+ ret = iio_backend_chan_disable(st->back, chan);
if (ret)
return ret;
- ret = ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
+ ret = iio_backend_test_pattern_set(st->back, chan,
+ IIO_BACKEND_NO_TEST_PATTERN);
if (ret)
return ret;
+ return iio_backend_data_format_set(st->back, chan, &data);
+}
+
+static int ad9647_calibrate_prepare(struct ad9467_state *st)
+{
+ unsigned int c;
+ int ret;
+
ret = ad9467_outputmode_set(st, st->info->default_output_mode);
if (ret)
return ret;
for (c = 0; c < st->info->num_channels; c++) {
- ret = iio_backend_data_format_set(st->back, c, &data);
+ ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_PN9_SEQ);
if (ret)
return ret;
- }
- ret = iio_backend_test_pattern_set(st->back, 0,
- IIO_BACKEND_ADI_PRBS_9A);
- if (ret)
- return ret;
+ ret = ad9467_backend_testmode_on(st, c,
+ IIO_BACKEND_ADI_PRBS_9A);
+ if (ret)
+ return ret;
+ }
- return iio_backend_chan_enable(st->back, 0);
+ return 0;
}
static int ad9647_calibrate_polarity_set(struct ad9467_state *st,
@@ -442,7 +657,7 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val)
if (st->info->has_dco) {
ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_DELAY,
- val);
+ val | st->info->dco_en);
if (ret)
return ret;
@@ -461,57 +676,38 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val)
static int ad9647_calibrate_stop(struct ad9467_state *st)
{
- struct iio_backend_data_fmt data = {
- .sign_extend = true,
- .enable = true,
- };
unsigned int c, mode;
int ret;
- ret = iio_backend_chan_disable(st->back, 0);
- if (ret)
- return ret;
-
- ret = iio_backend_test_pattern_set(st->back, 0,
- IIO_BACKEND_NO_TEST_PATTERN);
- if (ret)
- return ret;
-
for (c = 0; c < st->info->num_channels; c++) {
- ret = iio_backend_data_format_set(st->back, c, &data);
+ ret = ad9467_backend_testmode_off(st, c);
+ if (ret)
+ return ret;
+
+ ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_OFF);
if (ret)
return ret;
}
mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
- ret = ad9467_outputmode_set(st, mode);
- if (ret)
- return ret;
-
- ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
- AN877_ADC_TESTMODE_OFF);
- if (ret)
- return ret;
-
- return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
+ return ad9467_outputmode_set(st, mode);
}
static int ad9467_calibrate(struct ad9467_state *st)
{
- unsigned int point, val, inv_val, cnt, inv_cnt = 0;
+ unsigned int point, val, inv_val, cnt, inv_cnt = 0, c;
/*
* Half of the bitmap is for the inverted signal. The number of test
* points is the same though...
*/
- unsigned int test_points = AD9647_MAX_TEST_POINTS;
+ unsigned int test_points = st->info->test_points;
unsigned long sample_rate = clk_get_rate(st->clk);
struct device *dev = &st->spi->dev;
bool invert = false, stat;
int ret;
/* all points invalid */
- bitmap_fill(st->calib_map, BITS_PER_TYPE(st->calib_map));
+ bitmap_fill(st->calib_map, st->calib_map_size);
ret = ad9647_calibrate_prepare(st);
if (ret)
@@ -521,16 +717,31 @@ retune:
if (ret)
return ret;
- for (point = 0; point < test_points; point++) {
+ for (point = 0; point < st->info->test_points; point++) {
ret = ad9467_calibrate_apply(st, point);
if (ret)
return ret;
- ret = iio_backend_chan_status(st->back, 0, &stat);
- if (ret)
- return ret;
+ for (c = 0; c < st->info->num_channels; c++) {
+ ret = iio_backend_chan_status(st->back, c, &stat);
+ if (ret)
+ return ret;
- __assign_bit(point + invert * test_points, st->calib_map, stat);
+ /*
+ * A point is considered valid only if all channels report no
+ * error. If any channel reports an error, the point is invalid
+ * and we can break out of the loop right away.
+ */
+ if (stat) {
+ dev_dbg(dev, "Invalid point(%u, inv:%u) for CH:%u\n",
+ point, invert, c);
+ break;
+ }
+
+ if (c == st->info->num_channels - 1)
+ __clear_bit(point + invert * test_points,
+ st->calib_map);
+ }
}
if (!invert) {
@@ -541,8 +752,13 @@ retune:
* a row.
*/
if (cnt < 3) {
- invert = true;
- goto retune;
+ if (AD9467_CAN_INVERT(st)) {
+ invert = true;
+ goto retune;
+ }
+
+ if (!cnt)
+ return -EIO;
}
} else {
inv_cnt = ad9467_find_optimal_point(st->calib_map, test_points,
@@ -679,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
return 0;
}
-static const struct iio_info ad9467_info = {
+static struct iio_info ad9467_info = {
.read_raw = ad9467_read_raw,
.write_raw = ad9467_write_raw,
.update_scan_mode = ad9467_update_scan_mode,
@@ -762,12 +978,134 @@ static int ad9467_iio_backend_get(struct ad9467_state *st)
return -ENODEV;
}
+static int ad9467_test_mode_available_show(struct seq_file *s, void *ignored)
+{
+ struct ad9467_state *st = s->private;
+ unsigned int bit;
+
+ for_each_set_bit(bit, &st->info->test_mask, st->info->test_mask_len)
+ seq_printf(s, "%s\n", ad9467_test_modes[bit]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ad9467_test_mode_available);
+
+static ssize_t ad9467_chan_test_mode_read(struct file *file,
+ char __user *userbuf, size_t count,
+ loff_t *ppos)
+{
+ struct ad9467_chan_test_mode *chan = file->private_data;
+ struct ad9467_state *st = chan->st;
+ char buf[128] = {0};
+ size_t len;
+ int ret;
+
+ if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ ||
+ chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test:\n\t",
+ ad9467_test_modes[chan->mode]);
+
+ ret = iio_backend_debugfs_print_chan_status(st->back, chan->idx,
+ buf + len,
+ sizeof(buf) - len);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ } else if (chan->mode == AN877_ADC_TESTMODE_OFF) {
+ len = scnprintf(buf, sizeof(buf), "No test Running...\n");
+ } else {
+ len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test on CH:%u\n",
+ ad9467_test_modes[chan->mode], chan->idx);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static ssize_t ad9467_chan_test_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ad9467_chan_test_mode *chan = file->private_data;
+ struct ad9467_state *st = chan->st;
+ char test_mode[32] = {0};
+ unsigned int mode;
+ int ret;
+
+ ret = simple_write_to_buffer(test_mode, sizeof(test_mode) - 1, ppos,
+ userbuf, count);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(mode, &st->info->test_mask, st->info->test_mask_len) {
+ if (sysfs_streq(test_mode, ad9467_test_modes[mode]))
+ break;
+ }
+
+ if (mode == st->info->test_mask_len)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+
+ if (mode == AN877_ADC_TESTMODE_OFF) {
+ unsigned int out_mode;
+
+ if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ ||
+ chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ ret = ad9467_backend_testmode_off(st, chan->idx);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad9467_testmode_set(st, chan->idx, mode);
+ if (ret)
+ return ret;
+
+ out_mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_outputmode_set(st, out_mode);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad9467_outputmode_set(st, st->info->default_output_mode);
+ if (ret)
+ return ret;
+
+ ret = ad9467_testmode_set(st, chan->idx, mode);
+ if (ret)
+ return ret;
+
+ /* some patterns have a matching monitoring block in the backend */
+ if (mode == AN877_ADC_TESTMODE_PN9_SEQ) {
+ ret = ad9467_backend_testmode_on(st, chan->idx,
+ IIO_BACKEND_ADI_PRBS_9A);
+ if (ret)
+ return ret;
+ } else if (mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ ret = ad9467_backend_testmode_on(st, chan->idx,
+ IIO_BACKEND_ADI_PRBS_23A);
+ if (ret)
+ return ret;
+ }
+ }
+
+ chan->mode = mode;
+
+ return count;
+}
+
+static const struct file_operations ad9467_chan_test_mode_fops = {
+ .open = simple_open,
+ .read = ad9467_chan_test_mode_read,
+ .write = ad9467_chan_test_mode_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
static ssize_t ad9467_dump_calib_table(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ad9467_state *st = file->private_data;
- unsigned int bit, size = BITS_PER_TYPE(st->calib_map);
+ unsigned int bit;
/* +2 for the newline and +1 for the string termination */
unsigned char map[AD9647_MAX_TEST_POINTS * 2 + 3];
ssize_t len = 0;
@@ -776,8 +1114,8 @@ static ssize_t ad9467_dump_calib_table(struct file *file,
if (*ppos)
goto out_read;
- for (bit = 0; bit < size; bit++) {
- if (bit == size / 2)
+ for (bit = 0; bit < st->calib_map_size; bit++) {
+ if (AD9467_CAN_INVERT(st) && bit == st->calib_map_size / 2)
len += scnprintf(map + len, sizeof(map) - len, "\n");
len += scnprintf(map + len, sizeof(map) - len, "%c",
@@ -800,12 +1138,33 @@ static void ad9467_debugfs_init(struct iio_dev *indio_dev)
{
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
struct ad9467_state *st = iio_priv(indio_dev);
+ char attr_name[32];
+ unsigned int chan;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return;
+ st->chan_test = devm_kcalloc(&st->spi->dev, st->info->num_channels,
+ sizeof(*st->chan_test), GFP_KERNEL);
+ if (!st->chan_test)
+ return;
+
debugfs_create_file("calibration_table_dump", 0400, d, st,
&ad9467_calib_table_fops);
+
+ for (chan = 0; chan < st->info->num_channels; chan++) {
+ snprintf(attr_name, sizeof(attr_name), "in_voltage%u_test_mode",
+ chan);
+ st->chan_test[chan].idx = chan;
+ st->chan_test[chan].st = st;
+ debugfs_create_file(attr_name, 0600, d, &st->chan_test[chan],
+ &ad9467_chan_test_mode_fops);
+ }
+
+ debugfs_create_file("in_voltage_test_mode_available", 0400, d, st,
+ &ad9467_test_mode_available_fops);
+
+ iio_backend_debugfs_add(st->back, indio_dev);
}
static int ad9467_probe(struct spi_device *spi)
@@ -826,6 +1185,10 @@ static int ad9467_probe(struct spi_device *spi)
if (!st->info)
return -ENODEV;
+ st->calib_map_size = st->info->test_points;
+ if (AD9467_CAN_INVERT(st))
+ st->calib_map_size *= 2;
+
st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
@@ -850,6 +1213,8 @@ static int ad9467_probe(struct spi_device *spi)
return -ENODEV;
}
+ if (st->info->num_scales > 1)
+ ad9467_info.read_avail = ad9467_read_avail;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
@@ -884,7 +1249,10 @@ static const struct of_device_id ad9467_of_match[] = {
{ .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, },
{ .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, },
{ .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, },
- {}
+ { .compatible = "adi,ad9643", .data = &ad9643_chip_tbl, },
+ { .compatible = "adi,ad9649", .data = &ad9649_chip_tbl, },
+ { .compatible = "adi,ad9652", .data = &ad9652_chip_tbl, },
+ { }
};
MODULE_DEVICE_TABLE(of, ad9467_of_match);
@@ -892,7 +1260,10 @@ static const struct spi_device_id ad9467_ids[] = {
{ "ad9265", (kernel_ulong_t)&ad9265_chip_tbl },
{ "ad9434", (kernel_ulong_t)&ad9434_chip_tbl },
{ "ad9467", (kernel_ulong_t)&ad9467_chip_tbl },
- {}
+ { "ad9643", (kernel_ulong_t)&ad9643_chip_tbl },
+ { "ad9649", (kernel_ulong_t)&ad9649_chip_tbl, },
+ { "ad9652", (kernel_ulong_t)&ad9652_chip_tbl, },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9467_ids);
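For reference, the new in_voltage*_test_mode debugfs write above resolves a mode name by walking only the bits set in the chip's test_mask. A minimal standalone C sketch of that lookup (the helper, the table subset and the mask below are illustrative, not the driver's symbols):

#include <stdio.h>
#include <string.h>

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

/* subset of the driver's test-mode names, in register-value order */
static const char * const test_modes[] = {
	"off", "midscale_short", "pos_fullscale", "neg_fullscale",
	"checkerboard", "prbs23", "prbs9", "one_zero_toggle",
};

/* hypothetical helper: return the mode index for @name, -1 if unsupported */
static int lookup_test_mode(const char *name, unsigned long mask,
			    unsigned int len)
{
	for (unsigned int mode = 0; mode < len; mode++) {
		if (!(mask & (1UL << mode)))
			continue;	/* not advertised by this chip */
		if (strcmp(name, test_modes[mode]) == 0)
			return (int)mode;
	}
	return -1;
}

int main(void)
{
	/* e.g. a chip that supports modes "off" .. "one_zero_toggle" */
	unsigned long test_mask = GENMASK(7, 0);

	printf("prbs9 -> %d\n", lookup_test_mode("prbs9", test_mask, 8));
	printf("ramp  -> %d\n", lookup_test_mode("ramp", test_mask, 8));
	return 0;
}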
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index dcd557e93586..e2bed2d648f2 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -351,7 +351,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
if (sigma_delta->num_slots == 1) {
channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ad_sigma_delta_set_channel(sigma_delta,
indio_dev->channels[channel].address);
if (ret)
@@ -364,7 +364,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
* implementation is mandatory.
*/
slot = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
sigma_delta->slots[slot] = indio_dev->channels[i].address;
slot++;
}
@@ -526,7 +526,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
- return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+ return bitmap_weight(mask, iio_get_masklength(indio_dev)) <= sigma_delta->num_slots;
}
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
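The masklength conversions above (and in several drivers later in this series) boil down to iterating the set bits of the active scan mask. A rough standalone C analogue of that pattern, with simplified stand-ins for the kernel helpers:

#include <stdio.h>

static unsigned int next_set_bit(unsigned long mask, unsigned int from,
				 unsigned int len)
{
	while (from < len && !(mask & (1UL << from)))
		from++;
	return from;
}

/* simplified analogue of iio_for_each_active_channel() */
#define for_each_active_channel(mask, len, bit)			\
	for ((bit) = next_set_bit((mask), 0, (len));		\
	     (bit) < (len);					\
	     (bit) = next_set_bit((mask), (bit) + 1, (len)))

int main(void)
{
	unsigned long active_scan_mask = 0x0b;	/* channels 0, 1 and 3 */
	unsigned int masklength = 8, bit;

	for_each_active_channel(active_scan_mask, masklength, bit)
		printf("push channel %u into the scan buffer\n", bit);
	return 0;
}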
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 21ce7564e83d..5c8c87eb36d1 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -61,6 +61,10 @@
#define ADI_AXI_ADC_REG_CHAN_STATUS(c) (0x0404 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_STAT_PN_MASK GENMASK(2, 1)
+/* out of sync */
+#define ADI_AXI_ADC_CHAN_STAT_PN_OOS BIT(1)
+/* spurious out of sync */
+#define ADI_AXI_ADC_CHAN_STAT_PN_ERR BIT(2)
#define ADI_AXI_ADC_REG_CHAN_CTRL_3(c) (0x0418 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_PN_SEL_MASK GENMASK(19, 16)
@@ -199,17 +203,19 @@ static int axi_adc_test_pattern_set(struct iio_backend *back,
return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
ADI_AXI_ADC_CHAN_PN_SEL_MASK,
FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0));
+ case IIO_BACKEND_ADI_PRBS_23A:
+ return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
+ ADI_AXI_ADC_CHAN_PN_SEL_MASK,
+ FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 1));
default:
return -EINVAL;
}
}
-static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
- bool *error)
+static int axi_adc_read_chan_status(struct adi_axi_adc_state *st, unsigned int chan,
+ unsigned int *status)
{
- struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
- u32 val;
guard(mutex)(&st->lock);
/* reset test bits by setting them */
@@ -221,7 +227,18 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
/* let's give enough time to validate or erroring the incoming pattern */
fsleep(1000);
- ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), &val);
+ return regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
+ status);
+}
+
+static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
+ bool *error)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+ int ret;
+
+ ret = axi_adc_read_chan_status(st, chan, &val);
if (ret)
return ret;
@@ -233,6 +250,30 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
return 0;
}
+static int axi_adc_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+ int ret;
+
+ ret = axi_adc_read_chan_status(st, chan, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * PN_ERR is cleared in case out of sync is set. Hence, no point in
+ * checking both bits.
+ */
+ if (val & ADI_AXI_ADC_CHAN_STAT_PN_OOS)
+ return scnprintf(buf, len, "CH%u: Out of Sync.\n", chan);
+ if (val & ADI_AXI_ADC_CHAN_STAT_PN_ERR)
+ return scnprintf(buf, len, "CH%u: Spurious Out of Sync.\n", chan);
+
+ return scnprintf(buf, len, "CH%u: OK.\n", chan);
+}
+
static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
@@ -267,13 +308,24 @@ static void axi_adc_free_buffer(struct iio_backend *back,
iio_dmaengine_buffer_free(buffer);
}
+static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
static const struct regmap_config axi_adc_regmap_config = {
.val_bits = 32,
.reg_bits = 32,
.reg_stride = 4,
};
-static const struct iio_backend_ops adi_axi_adc_generic = {
+static const struct iio_backend_ops adi_axi_adc_ops = {
.enable = axi_adc_enable,
.disable = axi_adc_disable,
.data_format_set = axi_adc_data_format_set,
@@ -285,6 +337,13 @@ static const struct iio_backend_ops adi_axi_adc_generic = {
.iodelay_set = axi_adc_iodelays_set,
.test_pattern_set = axi_adc_test_pattern_set,
.chan_status = axi_adc_chan_status,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
+ .debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
+};
+
+static const struct iio_backend_info adi_axi_adc_generic = {
+ .name = "axi-adc",
+ .ops = &adi_axi_adc_ops,
};
static int adi_axi_adc_probe(struct platform_device *pdev)
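A standalone decode of the two PN status bits introduced above (bit values and strings mirror the driver; the harness is illustrative). Since PN_ERR is cleared whenever the out-of-sync bit is set, checking OOS first is enough:

#include <stdio.h>

#define PN_OOS	(1u << 1)	/* ADI_AXI_ADC_CHAN_STAT_PN_OOS */
#define PN_ERR	(1u << 2)	/* ADI_AXI_ADC_CHAN_STAT_PN_ERR */

static const char *pn_status(unsigned int val)
{
	if (val & PN_OOS)
		return "Out of Sync";
	if (val & PN_ERR)
		return "Spurious Out of Sync";
	return "OK";
}

int main(void)
{
	unsigned int samples[] = { 0, PN_OOS, PN_ERR, PN_OOS | PN_ERR };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("status 0x%x -> %s\n", samples[i], pn_status(samples[i]));
	return 0;
}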
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 090416c0d622..1d5fd5f534b8 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -555,8 +555,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_property(data->dev->of_node, "aspeed,battery-sensing",
- NULL)) {
+ if (of_property_present(data->dev->of_node, "aspeed,battery-sensing")) {
if (data->model_data->bat_sense_sup) {
data->battery_sensing = 1;
if (readl(data->base + ASPEED_REG_ENGINE_CONTROL) &
@@ -695,7 +694,7 @@ static const struct of_device_id aspeed_adc_matches[] = {
{ .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
{ .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data },
{ .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index eb501e3c86a5..9c39acff17e6 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -268,9 +269,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_chan_spec const *chan;
int i, j = 0;
- for (i = 0; i < idev->masklength; i++) {
- if (!test_bit(i, idev->active_scan_mask))
- continue;
+ iio_for_each_active_channel(idev, i) {
chan = idev->channels + i;
st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
j++;
@@ -543,22 +542,18 @@ static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
int i;
for (i = 0; i < st->caps->trigger_number; i++) {
- char *name = kasprintf(GFP_KERNEL,
- "%s-dev%d-%s",
- idev->name,
- iio_device_id(idev),
- triggers[i].name);
+ char *name __free(kfree) = kasprintf(GFP_KERNEL, "%s-dev%d-%s",
+ idev->name,
+ iio_device_id(idev),
+ triggers[i].name);
if (!name)
return -ENOMEM;
if (strcmp(trigger_name, name) == 0) {
- kfree(name);
if (triggers[i].value == 0)
return -EINVAL;
return triggers[i].value;
}
-
- kfree(name);
}
return -EINVAL;
@@ -1340,7 +1335,7 @@ static const struct of_device_id at91_adc_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps },
{ .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps },
{ .compatible = "atmel,sama5d3-adc", .data = &sama5d3_caps },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
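The kasprintf() conversion above relies on scope-based cleanup, so every early return frees the name automatically. A rough userspace analogue of the mechanism (GCC/Clang cleanup attribute with plain malloc/free instead of __free(kfree); the trigger names are made up for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void free_str(char **p)
{
	free(*p);
}

static int find_trigger(const char *wanted)
{
	static const char * const triggers[] = { "external", "timer" };

	for (unsigned int i = 0; i < 2; i++) {
		/* freed automatically when name goes out of scope */
		char *name __attribute__((cleanup(free_str))) = malloc(32);

		if (!name)
			return -1;
		snprintf(name, 32, "adc-dev0-%s", triggers[i]);

		if (strcmp(wanted, name) == 0)
			return (int)i;	/* early return, no manual free */
	}
	return -1;
}

int main(void)
{
	printf("trigger index: %d\n", find_trigger("adc-dev0-timer"));
	return 0;
}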
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index b487e577befb..d43c8d124a0c 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -5,6 +5,7 @@
* Quentin Schulz <quentin.schulz@free-electrons.com>
*/
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
@@ -30,6 +31,8 @@
#define AXP22X_ADC_EN1_MASK (GENMASK(7, 5) | BIT(0))
+#define AXP717_ADC_EN1_MASK GENMASK(7, 0)
+
#define AXP192_GPIO30_IN_RANGE_GPIO0 BIT(0)
#define AXP192_GPIO30_IN_RANGE_GPIO1 BIT(1)
#define AXP192_GPIO30_IN_RANGE_GPIO2 BIT(2)
@@ -43,6 +46,13 @@
#define AXP22X_ADC_RATE_HZ(x) ((ilog2((x) / 100) << 6) & AXP20X_ADC_RATE_MASK)
+#define AXP717_ADC_DATA_TS 0x00
+#define AXP717_ADC_DATA_TEMP 0x01
+#define AXP717_ADC_DATA_VMID 0x02
+#define AXP717_ADC_DATA_BKUP_BATT 0x03
+
+#define AXP717_ADC_DATA_MASK GENMASK(13, 0)
+
#define AXP813_V_I_ADC_RATE_MASK GENMASK(5, 4)
#define AXP813_ADC_RATE_MASK (AXP20X_ADC_RATE_MASK | AXP813_V_I_ADC_RATE_MASK)
#define AXP813_TS_GPIO0_ADC_RATE_HZ(x) AXP20X_ADC_RATE_HZ(x)
@@ -125,6 +135,20 @@ enum axp22x_adc_channel_i {
AXP22X_BATT_DISCHRG_I,
};
+enum axp717_adc_channel_v {
+ AXP717_BATT_V = 0,
+ AXP717_TS_IN,
+ AXP717_VBUS_V,
+ AXP717_VSYS_V,
+ AXP717_DIE_TEMP_V,
+ AXP717_VMID_V = 6,
+ AXP717_BKUP_BATT_V,
+};
+
+enum axp717_adc_channel_i {
+ AXP717_BATT_CHRG_I = 5,
+};
+
enum axp813_adc_channel_v {
AXP813_TS_IN = 0,
AXP813_GPIO0_V,
@@ -179,6 +203,22 @@ static struct iio_map axp22x_maps[] = {
}, { /* sentinel */ }
};
+static struct iio_map axp717_maps[] = {
+ {
+ .consumer_dev_name = "axp20x-usb-power-supply",
+ .consumer_channel = "vbus_v",
+ .adc_channel_label = "vbus_v",
+ }, {
+ .consumer_dev_name = "axp20x-battery-power-supply",
+ .consumer_channel = "batt_v",
+ .adc_channel_label = "batt_v",
+ }, {
+ .consumer_dev_name = "axp20x-battery-power-supply",
+ .consumer_channel = "batt_chrg_i",
+ .adc_channel_label = "batt_chrg_i",
+ }, { /* sentinel */ }
+};
+
/*
* Channels are mapped by physical system. Their channels share the same index.
* i.e. acin_i is in_current0_raw and acin_v is in_voltage0_raw.
@@ -274,6 +314,29 @@ static const struct iio_chan_spec axp22x_adc_channels[] = {
AXP22X_TS_ADC_H),
};
+/*
+ * Scale and offset are unknown for the temp, ts, batt_chrg_i, vmid_v, and
+ * bkup_batt_v channels. Leave scale and offset undefined for now.
+ */
+static const struct iio_chan_spec axp717_adc_channels[] = {
+ AXP20X_ADC_CHANNEL(AXP717_BATT_V, "batt_v", IIO_VOLTAGE,
+ AXP717_BATT_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_VBUS_V, "vbus_v", IIO_VOLTAGE,
+ AXP717_VBUS_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_VSYS_V, "vsys_v", IIO_VOLTAGE,
+ AXP717_VSYS_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_DIE_TEMP_V, "pmic_temp", IIO_TEMP,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_BATT_CHRG_I, "batt_chrg_i", IIO_CURRENT,
+ AXP717_BATT_CHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP717_VMID_V, "vmid_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_BKUP_BATT_V, "bkup_batt_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+};
+
static const struct iio_chan_spec axp813_adc_channels[] = {
{
.type = IIO_TEMP,
@@ -354,6 +417,51 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
+static int axp717_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+ u8 bulk_reg[2];
+ int ret;
+
+ /*
+ * A generic "ADC data" channel is used for TS, tdie, vmid,
+ * and vbackup. This channel must be both enabled and selected
+ * before it can be read.
+ */
+ switch (chan->channel) {
+ case AXP717_TS_IN:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_TS);
+ break;
+ case AXP717_DIE_TEMP_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_TEMP);
+ break;
+ case AXP717_VMID_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_VMID);
+ break;
+ case AXP717_BKUP_BATT_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_BKUP_BATT);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * All channels are 14 bits wide: the top 2 bits of the high
+ * register are reserved and the remaining bits hold the ADC value.
+ */
+ ret = regmap_bulk_read(info->regmap, chan->address, bulk_reg, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = FIELD_GET(AXP717_ADC_DATA_MASK, get_unaligned_be16(bulk_reg));
+ return IIO_VAL_INT;
+}
+
static int axp813_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -571,6 +679,27 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
}
}
+static int axp717_adc_scale(struct iio_chan_spec const *chan, int *val,
+ int *val2)
+{
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = 1;
+ return IIO_VAL_INT;
+
+ case IIO_CURRENT:
+ *val = 1;
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *val = 100;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp813_adc_scale(struct iio_chan_spec const *chan, int *val,
int *val2)
{
@@ -746,6 +875,22 @@ static int axp22x_read_raw(struct iio_dev *indio_dev,
}
}
+static int axp717_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return axp717_adc_scale(chan, val, val2);
+
+ case IIO_CHAN_INFO_RAW:
+ return axp717_adc_raw(indio_dev, chan, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp813_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -860,6 +1005,10 @@ static const struct iio_info axp22x_adc_iio_info = {
.read_raw = axp22x_read_raw,
};
+static const struct iio_info axp717_adc_iio_info = {
+ .read_raw = axp717_read_raw,
+};
+
static const struct iio_info axp813_adc_iio_info = {
.read_raw = axp813_read_raw,
};
@@ -889,7 +1038,9 @@ struct axp_data {
const struct iio_info *iio_info;
int num_channels;
struct iio_chan_spec const *channels;
+ unsigned long adc_en1;
unsigned long adc_en1_mask;
+ unsigned long adc_en2;
unsigned long adc_en2_mask;
int (*adc_rate)(struct axp20x_adc_iio *info,
int rate);
@@ -910,7 +1061,9 @@ static const struct axp_data axp20x_data = {
.iio_info = &axp20x_adc_iio_info,
.num_channels = ARRAY_SIZE(axp20x_adc_channels),
.channels = axp20x_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP20X_ADC_EN1_MASK,
+ .adc_en2 = AXP20X_ADC_EN2,
.adc_en2_mask = AXP20X_ADC_EN2_MASK,
.adc_rate = axp20x_adc_rate,
.maps = axp20x_maps,
@@ -920,15 +1073,26 @@ static const struct axp_data axp22x_data = {
.iio_info = &axp22x_adc_iio_info,
.num_channels = ARRAY_SIZE(axp22x_adc_channels),
.channels = axp22x_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP22X_ADC_EN1_MASK,
.adc_rate = axp22x_adc_rate,
.maps = axp22x_maps,
};
+static const struct axp_data axp717_data = {
+ .iio_info = &axp717_adc_iio_info,
+ .num_channels = ARRAY_SIZE(axp717_adc_channels),
+ .channels = axp717_adc_channels,
+ .adc_en1 = AXP717_ADC_CH_EN_CONTROL,
+ .adc_en1_mask = AXP717_ADC_EN1_MASK,
+ .maps = axp717_maps,
+};
+
static const struct axp_data axp813_data = {
.iio_info = &axp813_adc_iio_info,
.num_channels = ARRAY_SIZE(axp813_adc_channels),
.channels = axp813_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP22X_ADC_EN1_MASK,
.adc_rate = axp813_adc_rate,
.maps = axp22x_maps,
@@ -938,6 +1102,7 @@ static const struct of_device_id axp20x_adc_of_match[] = {
{ .compatible = "x-powers,axp192-adc", .data = (void *)&axp192_data, },
{ .compatible = "x-powers,axp209-adc", .data = (void *)&axp20x_data, },
{ .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, },
+ { .compatible = "x-powers,axp717-adc", .data = (void *)&axp717_data, },
{ .compatible = "x-powers,axp813-adc", .data = (void *)&axp813_data, },
{ /* sentinel */ }
};
@@ -947,6 +1112,7 @@ static const struct platform_device_id axp20x_adc_id_match[] = {
{ .name = "axp192-adc", .driver_data = (kernel_ulong_t)&axp192_data, },
{ .name = "axp20x-adc", .driver_data = (kernel_ulong_t)&axp20x_data, },
{ .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, },
+ { .name = "axp717-adc", .driver_data = (kernel_ulong_t)&axp717_data, },
{ .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, },
{ /* sentinel */ },
};
@@ -988,14 +1154,16 @@ static int axp20x_probe(struct platform_device *pdev)
indio_dev->channels = info->data->channels;
/* Enable the ADCs on IP */
- regmap_write(info->regmap, AXP20X_ADC_EN1, info->data->adc_en1_mask);
+ regmap_write(info->regmap, info->data->adc_en1,
+ info->data->adc_en1_mask);
if (info->data->adc_en2_mask)
- regmap_set_bits(info->regmap, AXP20X_ADC_EN2,
+ regmap_set_bits(info->regmap, info->data->adc_en2,
info->data->adc_en2_mask);
/* Configure ADCs rate */
- info->data->adc_rate(info, 100);
+ if (info->data->adc_rate)
+ info->data->adc_rate(info, 100);
ret = iio_map_array_register(indio_dev, info->data->maps);
if (ret < 0) {
@@ -1015,10 +1183,10 @@ fail_register:
iio_map_array_unregister(indio_dev);
fail_map:
- regmap_write(info->regmap, AXP20X_ADC_EN1, 0);
+ regmap_write(info->regmap, info->data->adc_en1, 0);
if (info->data->adc_en2_mask)
- regmap_write(info->regmap, AXP20X_ADC_EN2, 0);
+ regmap_write(info->regmap, info->data->adc_en2, 0);
return ret;
}
@@ -1031,10 +1199,10 @@ static void axp20x_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
iio_map_array_unregister(indio_dev);
- regmap_write(info->regmap, AXP20X_ADC_EN1, 0);
+ regmap_write(info->regmap, info->data->adc_en1, 0);
if (info->data->adc_en2_mask)
- regmap_write(info->regmap, AXP20X_ADC_EN2, 0);
+ regmap_write(info->regmap, info->data->adc_en2, 0);
}
static struct platform_driver axp20x_adc_driver = {
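The AXP717 raw read added above combines the two data registers big-endian and keeps the low 14 bits. A standalone sketch of just that arithmetic (the register values are made-up examples):

#include <stdio.h>
#include <stdint.h>

#define ADC_DATA_MASK	0x3fffu			/* GENMASK(13, 0) */

static unsigned int axp717_raw(uint8_t high, uint8_t low)
{
	/* get_unaligned_be16() equivalent for the two register bytes */
	unsigned int val = ((unsigned int)high << 8) | low;

	/* FIELD_GET(AXP717_ADC_DATA_MASK, val): top 2 bits are reserved */
	return val & ADC_DATA_MASK;
}

int main(void)
{
	printf("raw = %u\n", axp717_raw(0xc5, 0x12));	/* -> 1298 */
	return 0;
}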
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index f135cf2362df..8c3acc0cd7e9 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -299,7 +299,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
static const struct platform_device_id axp288_adc_id_table[] = {
{ .name = "axp288_adc" },
- {},
+ { }
};
static struct platform_driver axp288_adc_driver = {
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 6bc149c51414..cdfe304eaa20 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -606,7 +606,7 @@ static void iproc_adc_remove(struct platform_device *pdev)
static const struct of_device_id iproc_adc_of_match[] = {
{.compatible = "brcm,iproc-static-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 4cdddc6e36e9..fa04e0a5f645 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -351,7 +351,7 @@ static int berlin2_adc_probe(struct platform_device *pdev)
static const struct of_device_id berlin2_adc_match[] = {
{ .compatible = "marvell,berlin2-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, berlin2_adc_match);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index a432342348ab..2c51b90b7101 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -157,9 +157,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
i = 0;
sample_invalid = false;
- for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
- indio_dev->masklength) {
-
+ iio_for_each_active_channel(indio_dev, scan_idx) {
channel = indio_dev->channels[scan_idx].channel;
cc10001_adc_start(adc_dev, channel);
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 06cfbbabaf8d..de7252a10047 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -108,7 +108,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
dln2->demux_count = 0;
/* Optimize all 8-channels case */
- if (indio_dev->masklength &&
+ if (iio_get_masklength(indio_dev) &&
(*indio_dev->active_scan_mask & 0xff) == 0xff) {
dln2_adc_add_demux(dln2, 0, 0, 16);
dln2->ts_pad_offset = 0;
@@ -117,9 +117,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
}
/* Build demux table from fixed 8-channels to active_scan_mask */
- for_each_set_bit(out_ind,
- indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, out_ind) {
/* Handle timestamp separately */
if (out_ind == DLN2_ADC_MAX_CHANNELS)
break;
@@ -541,7 +539,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
/* Assign trigger channel based on first enabled channel */
trigger_chan = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
dln2->trigger_chan = trigger_chan;
ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index 971942ce4c66..cc38d5e0608e 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -228,7 +228,7 @@ static void ep93xx_adc_remove(struct platform_device *pdev)
static const struct of_device_id ep93xx_adc_of_ids[] = {
{ .compatible = "cirrus,ep9301-adc" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ep93xx_adc_of_ids);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 78fada4b7b1c..4d00ee8dd14d 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -519,7 +519,7 @@ static const struct of_device_id exynos_adc_match[] = {
.compatible = "samsung,exynos7-adc",
.data = &exynos7_adc_data,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, exynos_adc_match);
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 771fa12bdc02..fb635a756440 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -524,7 +524,7 @@ static int hi8435_probe(struct spi_device *spi)
static const struct of_device_id hi8435_dt_ids[] = {
{ .compatible = "holt,hi8435" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, hi8435_dt_ids);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index b3372ccff7d5..8da0419ecfa3 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -363,10 +363,7 @@ static irqreturn_t hx711_trigger(int irq, void *p)
memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer));
- for (i = 0; i < indio_dev->masklength; i++) {
- if (!test_bit(i, indio_dev->active_scan_mask))
- continue;
-
+ iio_for_each_active_channel(indio_dev, i) {
hx711_data->buffer[j] = hx711_reset_read(hx711_data,
indio_dev->channels[i].channel);
j++;
@@ -555,7 +552,7 @@ static int hx711_probe(struct platform_device *pdev)
static const struct of_device_id of_hx711_match[] = {
{ .compatible = "avia,hx711", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_hx711_match);
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 727e390bd979..48c95e12e791 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -755,8 +755,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
* Single register reads: bulk_read will not work with ina226/219
* as there is no auto-increment of the register pointer.
*/
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
unsigned int val;
ret = regmap_read(chip->regmap,
@@ -1053,12 +1052,12 @@ static void ina2xx_remove(struct i2c_client *client)
}
static const struct i2c_device_id ina2xx_id[] = {
- {"ina219", ina219},
- {"ina220", ina219},
- {"ina226", ina226},
- {"ina230", ina226},
- {"ina231", ina226},
- {}
+ { "ina219", ina219 },
+ { "ina220", ina219 },
+ { "ina226", ina226 },
+ { "ina230", ina226 },
+ { "ina231", ina226 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
@@ -1083,7 +1082,7 @@ static const struct of_device_id ina2xx_of_match[] = {
.compatible = "ti,ina231",
.data = (void *)ina226
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ina2xx_of_match);
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index af70ca760797..1e802c8779a4 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -908,7 +908,7 @@ static const struct of_device_id ingenic_adc_of_match[] = {
{ .compatible = "ingenic,jz4760-adc", .data = &jz4760_adc_soc_data, },
{ .compatible = "ingenic,jz4760b-adc", .data = &jz4760_adc_soc_data, },
{ .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index e34ed7dacd89..43a7bc8158b5 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -217,7 +217,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
static const struct of_device_id lpc32xx_adc_match[] = {
{ .compatible = "nxp,lpc3220-adc" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index 2593fa4322eb..f06dd0b9a858 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -94,7 +94,7 @@ static const struct ltc2497_chip_info ltc2496_info = {
static const struct of_device_id ltc2496_of_match[] = {
{ .compatible = "lltc,ltc2496", .data = &ltc2496_info, },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2496_of_match);
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 6401a7727c31..f010b2fd1202 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -151,7 +151,7 @@ MODULE_DEVICE_TABLE(i2c, ltc2497_id);
static const struct of_device_id ltc2497_of_match[] = {
{ .compatible = "lltc,ltc2497", .data = &ltc2497_info[TYPE_LTC2497] },
{ .compatible = "lltc,ltc2499", .data = &ltc2497_info[TYPE_LTC2499] },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2497_of_match);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 136fcf753837..f5ba4a1b5a7d 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -73,13 +73,13 @@ enum max1027_id {
};
static const struct spi_device_id max1027_id[] = {
- {"max1027", max1027},
- {"max1029", max1029},
- {"max1031", max1031},
- {"max1227", max1227},
- {"max1229", max1229},
- {"max1231", max1231},
- {}
+ { "max1027", max1027 },
+ { "max1029", max1029 },
+ { "max1031", max1031 },
+ { "max1227", max1227 },
+ { "max1229", max1229 },
+ { "max1231", max1231 },
+ { }
};
MODULE_DEVICE_TABLE(spi, max1027_id);
@@ -90,7 +90,7 @@ static const struct of_device_id max1027_adc_dt_ids[] = {
{ .compatible = "maxim,max1227" },
{ .compatible = "maxim,max1229" },
{ .compatible = "maxim,max1231" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index 49e38dca8fe2..2f07437caec3 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -143,8 +143,8 @@ static int max11100_probe(struct spi_device *spi)
}
static const struct of_device_id max11100_ids[] = {
- {.compatible = "maxim,max11100"},
- { },
+ { .compatible = "maxim,max11100" },
+ { }
};
MODULE_DEVICE_TABLE(of, max11100_ids);
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 75ab57d9aef7..3d0a7d0eb7ee 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -174,8 +174,7 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
int ret = max1118_read(indio_dev, scan_chan->channel);
@@ -261,7 +260,7 @@ static const struct spi_device_id max1118_id[] = {
{ "max1117", max1117 },
{ "max1118", max1118 },
{ "max1119", max1119 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, max1118_id);
@@ -269,7 +268,7 @@ static const struct of_device_id max1118_dt_ids[] = {
{ .compatible = "maxim,max1117" },
{ .compatible = "maxim,max1118" },
{ .compatible = "maxim,max1119" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, max1118_dt_ids);
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
index 500bb09ab19b..d62c1a011659 100644
--- a/drivers/iio/adc/max1241.c
+++ b/drivers/iio/adc/max1241.c
@@ -177,12 +177,12 @@ static int max1241_probe(struct spi_device *spi)
static const struct spi_device_id max1241_id[] = {
{ "max1241", max1241 },
- {}
+ { }
};
static const struct of_device_id max1241_dt_ids[] = {
{ .compatible = "maxim,max1241" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, max1241_dt_ids);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index bf4b6dc53fd2..d0c6e94f7204 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -13,6 +13,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
@@ -818,7 +819,6 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
{
- u8 *tx_buf;
int ret, i = 3, j;
unsigned long numelements;
int len;
@@ -850,11 +850,10 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
}
numelements = bitmap_weight(modemask, MAX1363_MAX_CHANNELS);
len = 3 * numelements + 3;
- tx_buf = kmalloc(len, GFP_KERNEL);
- if (!tx_buf) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ u8 *tx_buf __free(kfree) = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
tx_buf[0] = st->configbyte;
tx_buf[1] = st->setupbyte;
tx_buf[2] = (st->monitor_speed << 1);
@@ -893,11 +892,9 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
ret = st->send(st->client, tx_buf, len);
if (ret < 0)
- goto error_ret;
- if (ret != len) {
- ret = -EIO;
- goto error_ret;
- }
+ return ret;
+ if (ret != len)
+ return -EIO;
/*
* Now that we hopefully have sensible thresholds in place it is
@@ -910,18 +907,13 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0;
ret = st->send(st->client, tx_buf, 2);
if (ret < 0)
- goto error_ret;
- if (ret != 2) {
- ret = -EIO;
- goto error_ret;
- }
- ret = 0;
- st->monitor_on = true;
-error_ret:
+ return ret;
+ if (ret != 2)
+ return -EIO;
- kfree(tx_buf);
+ st->monitor_on = true;
- return ret;
+ return 0;
}
/*
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
index 6c2ea2bc52c6..ffec22be2d59 100644
--- a/drivers/iio/adc/max34408.c
+++ b/drivers/iio/adc/max34408.c
@@ -250,14 +250,14 @@ static const struct of_device_id max34408_of_match[] = {
.compatible = "maxim,max34409",
.data = &max34409_model_data,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, max34408_of_match);
static const struct i2c_device_id max34408_id[] = {
{ "max34408", (kernel_ulong_t)&max34408_model_data },
{ "max34409", (kernel_ulong_t)&max34409_model_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, max34408_id);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 76e517b7b1e4..14fe42fc4b7d 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -504,9 +504,9 @@ static int max9611_init(struct max9611_dev *max9611)
}
static const struct of_device_id max9611_of_table[] = {
- {.compatible = "maxim,max9611", .data = "max9611"},
- {.compatible = "maxim,max9612", .data = "max9612"},
- { },
+ { .compatible = "maxim,max9611", .data = "max9611" },
+ { .compatible = "maxim,max9612", .data = "max9612" },
+ { }
};
MODULE_DEVICE_TABLE(of, max9611_of_table);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index da1421bd7b62..57cff3772ebe 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -459,16 +459,6 @@ static int mcp320x_probe(struct spi_device *spi)
}
static const struct of_device_id mcp320x_dt_ids[] = {
- /* NOTE: The use of compatibles with no vendor prefix is deprecated. */
- { .compatible = "mcp3001" },
- { .compatible = "mcp3002" },
- { .compatible = "mcp3004" },
- { .compatible = "mcp3008" },
- { .compatible = "mcp3201" },
- { .compatible = "mcp3202" },
- { .compatible = "mcp3204" },
- { .compatible = "mcp3208" },
- { .compatible = "mcp3301" },
{ .compatible = "microchip,mcp3001" },
{ .compatible = "microchip,mcp3002" },
{ .compatible = "microchip,mcp3004" },
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index d83bed0e63d2..a68f1cd6883e 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -349,8 +349,6 @@ struct mcp3564_chip_info {
* struct mcp3564_state - working data for a ADC device
* @chip_info: chip specific data
* @spi: SPI device structure
- * @vref: the regulator device used as a voltage reference in case
- * external voltage reference is used
* @vref_mv: voltage reference value in miliVolts
* @lock: synchronize access to driver's state members
* @dev_addr: hardware device address
@@ -369,7 +367,6 @@ struct mcp3564_chip_info {
struct mcp3564_state {
const struct mcp3564_chip_info *chip_info;
struct spi_device *spi;
- struct regulator *vref;
unsigned short vref_mv;
struct mutex lock; /* Synchronize access to driver's state members */
u8 dev_addr;
@@ -1085,11 +1082,6 @@ static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
return 0;
}
-static void mcp3564_disable_reg(void *reg)
-{
- regulator_disable(reg);
-}
-
static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc)
{
unsigned int pow = adc->chip_info->resolution - 1;
@@ -1110,7 +1102,7 @@ static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc)
}
}
-static int mcp3564_config(struct iio_dev *indio_dev)
+static int mcp3564_config(struct iio_dev *indio_dev, bool *use_internal_vref_attr)
{
struct mcp3564_state *adc = iio_priv(indio_dev);
struct device *dev = &adc->spi->dev;
@@ -1119,6 +1111,7 @@ static int mcp3564_config(struct iio_dev *indio_dev)
enum mcp3564_ids ids;
int ret = 0;
unsigned int tmp = 0x01;
+ bool internal_vref;
bool err = false;
/*
@@ -1218,36 +1211,22 @@ static int mcp3564_config(struct iio_dev *indio_dev)
dev_dbg(dev, "Found %s chip\n", adc->chip_info->name);
- adc->vref = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(adc->vref)) {
- if (PTR_ERR(adc->vref) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(adc->vref),
- "failed to get regulator\n");
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get vref voltage\n");
+
+ internal_vref = ret == -ENODEV;
+ adc->vref_mv = internal_vref ? MCP3564R_INT_VREF_MV : ret / MILLI;
+ *use_internal_vref_attr = internal_vref;
+ if (internal_vref) {
/* Check if chip has internal vref */
if (!adc->have_vref)
- return dev_err_probe(dev, PTR_ERR(adc->vref),
- "Unknown Vref\n");
- adc->vref = NULL;
+ return dev_err_probe(dev, -ENODEV, "Unknown Vref\n");
+
dev_dbg(dev, "%s: Using internal Vref\n", __func__);
} else {
- ret = regulator_enable(adc->vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, mcp3564_disable_reg,
- adc->vref);
- if (ret)
- return ret;
-
dev_dbg(dev, "%s: Using External Vref\n", __func__);
-
- ret = regulator_get_voltage(adc->vref);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "Failed to read vref regulator\n");
-
- adc->vref_mv = ret / MILLI;
}
ret = mcp3564_parse_fw_children(indio_dev);
@@ -1350,10 +1329,8 @@ static int mcp3564_config(struct iio_dev *indio_dev)
tmp_reg |= FIELD_PREP(MCP3564_CONFIG0_CLK_SEL_MASK, MCP3564_CONFIG0_USE_INT_CLK);
tmp_reg |= MCP3456_CONFIG0_BIT6_DEFAULT;
- if (!adc->vref) {
+ if (internal_vref)
tmp_reg |= FIELD_PREP(MCP3456_CONFIG0_VREF_MASK, 1);
- adc->vref_mv = MCP3564R_INT_VREF_MV;
- }
ret = mcp3564_write_8bits(adc, MCP3564_CONFIG0_REG, tmp_reg);
@@ -1412,6 +1389,7 @@ static int mcp3564_probe(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev;
struct mcp3564_state *adc;
+ bool use_internal_vref_attr;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
@@ -1428,7 +1406,7 @@ static int mcp3564_probe(struct spi_device *spi)
* enable/disable certain channels
* change the sampling rate to the requested value
*/
- ret = mcp3564_config(indio_dev);
+ ret = mcp3564_config(indio_dev, &use_internal_vref_attr);
if (ret)
return dev_err_probe(&spi->dev, ret,
"Can't configure MCP356X device\n");
@@ -1440,7 +1418,7 @@ static int mcp3564_probe(struct spi_device *spi)
indio_dev->name = adc->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (!adc->vref)
+ if (use_internal_vref_attr)
indio_dev->info = &mcp3564r_info;
else
indio_dev->info = &mcp3564_info;
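The devm_regulator_get_enable_read_voltage() conversion above folds the old get/enable/read sequence into one call whose return value is either the supply voltage in microvolts or -ENODEV when no vref supply is described. A standalone sketch of the resulting selection logic (the stand-in helper and the 2.4 V internal reference value are assumptions for the example):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MILLI		1000
#define INT_VREF_MV	2400	/* assumed internal reference */

/* stand-in for devm_regulator_get_enable_read_voltage() */
static int get_enable_read_voltage(bool have_supply)
{
	return have_supply ? 3300000 : -ENODEV;	/* 3.3 V or "not wired" */
}

int main(void)
{
	int ret = get_enable_read_voltage(false);
	bool internal_vref = (ret == -ENODEV);
	unsigned int vref_mv = internal_vref ? INT_VREF_MV
					     : (unsigned int)ret / MILLI;

	printf("internal=%d, vref=%u mV\n", internal_vref, vref_mv);
	return 0;
}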
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 7a32e7a1be9d..d0e77971c5d3 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -103,7 +103,7 @@ struct mcp3911_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
- int (*config)(struct mcp3911 *adc);
+ int (*config)(struct mcp3911 *adc, bool external_vref);
int (*get_osr)(struct mcp3911 *adc, u32 *val);
int (*set_osr)(struct mcp3911 *adc, u32 val);
int (*enable_offset)(struct mcp3911 *adc, bool enable);
@@ -115,7 +115,6 @@ struct mcp3911_chip_info {
struct mcp3911 {
struct spi_device *spi;
struct mutex lock;
- struct regulator *vref;
struct clk *clki;
u32 dev_addr;
struct iio_trigger *trig;
@@ -385,23 +384,11 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
}
}
-static int mcp3911_calc_scale_table(struct mcp3911 *adc)
+static int mcp3911_calc_scale_table(u32 vref_mv)
{
- struct device *dev = &adc->spi->dev;
- u32 ref = MCP3911_INT_VREF_MV;
u32 div;
- int ret;
u64 tmp;
- if (adc->vref) {
- ret = regulator_get_voltage(adc->vref);
- if (ret < 0) {
- return dev_err_probe(dev, ret, "failed to get vref voltage\n");
- }
-
- ref = ret / 1000;
- }
-
/*
* For 24-bit Conversion
* Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5
@@ -412,7 +399,7 @@ static int mcp3911_calc_scale_table(struct mcp3911 *adc)
*/
for (int i = 0; i < MCP3911_NUM_SCALES; i++) {
div = 12582912 * BIT(i);
- tmp = div_s64((s64)ref * 1000000000LL, div);
+ tmp = div_s64((s64)vref_mv * 1000000000LL, div);
mcp3911_scale_table[i][0] = 0;
mcp3911_scale_table[i][1] = tmp;
@@ -523,7 +510,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
goto out;
}
- for_each_set_bit(scan_index, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index];
adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]);
@@ -544,7 +531,7 @@ static const struct iio_info mcp3911_info = {
.write_raw_get_fmt = mcp3911_write_raw_get_fmt,
};
-static int mcp3911_config(struct mcp3911 *adc)
+static int mcp3911_config(struct mcp3911 *adc, bool external_vref)
{
struct device *dev = &adc->spi->dev;
u32 regval;
@@ -555,7 +542,7 @@ static int mcp3911_config(struct mcp3911 *adc)
return ret;
regval &= ~MCP3911_CONFIG_VREFEXT;
- if (adc->vref) {
+ if (external_vref) {
dev_dbg(dev, "use external voltage reference\n");
regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1);
} else {
@@ -610,7 +597,7 @@ static int mcp3911_config(struct mcp3911 *adc)
return mcp3911_write(adc, MCP3911_REG_GAIN, regval, 1);
}
-static int mcp3910_config(struct mcp3911 *adc)
+static int mcp3910_config(struct mcp3911 *adc, bool external_vref)
{
struct device *dev = &adc->spi->dev;
u32 regval;
@@ -621,7 +608,7 @@ static int mcp3910_config(struct mcp3911 *adc)
return ret;
regval &= ~MCP3910_CONFIG1_VREFEXT;
- if (adc->vref) {
+ if (external_vref) {
dev_dbg(dev, "use external voltage reference\n");
regval |= FIELD_PREP(MCP3910_CONFIG1_VREFEXT, 1);
} else {
@@ -677,11 +664,6 @@ static int mcp3910_config(struct mcp3911 *adc)
return adc->chip->enable_offset(adc, 0);
}
-static void mcp3911_cleanup_regulator(void *vref)
-{
- regulator_disable(vref);
-}
-
static int mcp3911_set_trigger_state(struct iio_trigger *trig, bool enable)
{
struct mcp3911 *adc = iio_trigger_get_drvdata(trig);
@@ -704,6 +686,8 @@ static int mcp3911_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct mcp3911 *adc;
+ bool external_vref;
+ u32 vref_mv;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
@@ -714,23 +698,12 @@ static int mcp3911_probe(struct spi_device *spi)
adc->spi = spi;
adc->chip = spi_get_device_match_data(spi);
- adc->vref = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(adc->vref)) {
- if (PTR_ERR(adc->vref) == -ENODEV) {
- adc->vref = NULL;
- } else {
- return dev_err_probe(dev, PTR_ERR(adc->vref), "failed to get regulator\n");
- }
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get vref voltage\n");
- } else {
- ret = regulator_enable(adc->vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, mcp3911_cleanup_regulator, adc->vref);
- if (ret)
- return ret;
- }
+ external_vref = ret != -ENODEV;
+ vref_mv = external_vref ? ret / 1000 : MCP3911_INT_VREF_MV;
adc->clki = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(adc->clki)) {
@@ -755,11 +728,11 @@ static int mcp3911_probe(struct spi_device *spi)
}
dev_dbg(dev, "use device address %i\n", adc->dev_addr);
- ret = adc->chip->config(adc);
+ ret = adc->chip->config(adc, external_vref);
if (ret)
return ret;
- ret = mcp3911_calc_scale_table(adc);
+ ret = mcp3911_calc_scale_table(vref_mv);
if (ret)
return ret;
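With the regulator handle gone, mcp3911_calc_scale_table() now works from a plain vref_mv value; the per-gain scale it derives is vref_mv * 10^9 / (12582912 * 2^i), stored as nano units. A standalone recomputation, assuming the driver's six gain steps and a 1200 mV internal reference:

#include <stdio.h>
#include <stdint.h>

#define NUM_SCALES	6		/* gains 1, 2, 4, 8, 16, 32 */

int main(void)
{
	uint32_t vref_mv = 1200;	/* assumed MCP3911_INT_VREF_MV */

	for (int i = 0; i < NUM_SCALES; i++) {
		int64_t div = 12582912LL * (1 << i);
		int64_t scale = (int64_t)vref_mv * 1000000000LL / div;

		printf("gain %2d -> 0.%09lld\n", 1 << i, (long long)scale);
	}
	return 0;
}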
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 5f672765d4a2..5fbf9b6abd9c 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -184,8 +184,8 @@ static void mp2629_adc_remove(struct platform_device *pdev)
}
static const struct of_device_id mp2629_adc_of_match[] = {
- { .compatible = "mps,mp2629_adc"},
- {}
+ { .compatible = "mps,mp2629_adc" },
+ { }
};
MODULE_DEVICE_TABLE(of, mp2629_adc_of_match);
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 3710473e526f..e2ec805e834f 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -268,7 +268,7 @@ static irqreturn_t mt6360_adc_trigger_handler(int irq, void *p)
int i = 0, bit, val, ret;
memset(&data, 0, sizeof(data));
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = mt6360_adc_read_channel(mad, bit, &val);
if (ret < 0) {
dev_warn(&indio_dev->dev, "Failed to get channel %d conversion val\n", bit);
@@ -355,7 +355,7 @@ static int mt6360_adc_probe(struct platform_device *pdev)
static const struct of_device_id mt6360_adc_of_id[] = {
{ .compatible = "mediatek,mt6360-adc", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mt6360_adc_of_id);
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 600151a62f1f..458544cb8ee4 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -539,7 +539,7 @@ MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id);
static const struct of_device_id nau7802_dt_ids[] = {
{ .compatible = "nuvoton,nau7802" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
new file mode 100644
index 000000000000..4c2a1c07bc39
--- /dev/null
+++ b/drivers/iio/adc/pac1921.c
@@ -0,0 +1,1261 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IIO driver for PAC1921 High-Side Power/Current Monitor
+ *
+ * Copyright (C) 2024 Matteo Martelli <matteomartelli3@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+/* pac1921 registers */
+#define PAC1921_REG_GAIN_CFG 0x00
+#define PAC1921_REG_INT_CFG 0x01
+#define PAC1921_REG_CONTROL 0x02
+#define PAC1921_REG_VBUS 0x10
+#define PAC1921_REG_VSENSE 0x12
+#define PAC1921_REG_OVERFLOW_STS 0x1C
+#define PAC1921_REG_VPOWER 0x1D
+
+/* pac1921 gain configuration bits */
+#define PAC1921_GAIN_DI_GAIN_MASK GENMASK(5, 3)
+#define PAC1921_GAIN_DV_GAIN_MASK GENMASK(2, 0)
+
+/* pac1921 integration configuration bits */
+#define PAC1921_INT_CFG_SMPL_MASK GENMASK(7, 4)
+#define PAC1921_INT_CFG_VSFEN BIT(3)
+#define PAC1921_INT_CFG_VBFEN BIT(2)
+#define PAC1921_INT_CFG_RIOV BIT(1)
+#define PAC1921_INT_CFG_INTEN BIT(0)
+
+/* pac1921 control bits */
+#define PAC1921_CONTROL_MXSL_MASK GENMASK(7, 6)
+enum pac1921_mxsl {
+ PAC1921_MXSL_VPOWER_PIN = 0,
+ PAC1921_MXSL_VSENSE_FREE_RUN = 1,
+ PAC1921_MXSL_VBUS_FREE_RUN = 2,
+ PAC1921_MXSL_VPOWER_FREE_RUN = 3,
+};
+#define PAC1921_CONTROL_SLEEP BIT(2)
+
+/* pac1921 result registers mask and resolution */
+#define PAC1921_RES_MASK GENMASK(15, 6)
+#define PAC1921_RES_RESOLUTION 1023
+
+/* pac1921 overflow status bits */
+#define PAC1921_OVERFLOW_VSOV BIT(2)
+#define PAC1921_OVERFLOW_VBOV BIT(1)
+#define PAC1921_OVERFLOW_VPOV BIT(0)
+
+/* pac1921 constants */
+#define PAC1921_MAX_VSENSE_MV 100
+#define PAC1921_MAX_VBUS_V 32
+/* Time to first communication after power up (tINT_T) */
+#define PAC1921_POWERUP_TIME_MS 20
+/* Time from Sleep State to Start of Integration Period (tSLEEP_TO_INT) */
+#define PAC1921_SLEEP_TO_INT_TIME_US 86
+
+/* pac1921 defaults */
+#define PAC1921_DEFAULT_DV_GAIN 0 /* 2^(value): 1x gain (HW default) */
+#define PAC1921_DEFAULT_DI_GAIN 0 /* 2^(value): 1x gain (HW default) */
+#define PAC1921_DEFAULT_NUM_SAMPLES 0 /* 2^(value): 1 sample (HW default) */
+
+/*
+ * Pre-computed scale factors for BUS voltage
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mV
+ *
+ * Vbus scale (mV) = max_vbus (mV) / dv_gain / resolution
+ */
+static const int pac1921_vbus_scales[][2] = {
+ { 31, 280547409 }, /* dv_gain x1 */
+ { 15, 640273704 }, /* dv_gain x2 */
+ { 7, 820136852 }, /* dv_gain x4 */
+ { 3, 910068426 }, /* dv_gain x8 */
+ { 1, 955034213 }, /* dv_gain x16 */
+ { 0, 977517106 }, /* dv_gain x32 */
+};
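+
+/*
+ * For illustration: with dv_gain x1 the scale is 32000 mV / 1 / 1023
+ * ~= 31.280547409 mV per LSB, matching the first entry above; each
+ * doubling of dv_gain halves the scale.
+ */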
+
+/*
+ * Pre-computed scales for SENSE voltage
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mV
+ *
+ * Vsense scale (mV) = max_vsense (mV) / di_gain / resolution
+ */
+static const int pac1921_vsense_scales[][2] = {
+ { 0, 97751710 }, /* di_gain x1 */
+ { 0, 48875855 }, /* di_gain x2 */
+ { 0, 24437927 }, /* di_gain x4 */
+ { 0, 12218963 }, /* di_gain x8 */
+ { 0, 6109481 }, /* di_gain x16 */
+ { 0, 3054740 }, /* di_gain x32 */
+ { 0, 1527370 }, /* di_gain x64 */
+ { 0, 763685 }, /* di_gain x128 */
+};
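+
+/*
+ * For illustration: with di_gain x1 the scale is 100 mV / 1 / 1023
+ * ~= 0.097751710 mV per LSB, matching the first entry above.
+ */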
+
+/*
+ * Numbers of samples used to integrate measurements at the end of an
+ * integration period.
+ *
+ * Changing the number of samples affects the integration period: the higher
+ * the number of samples, the longer the integration period.
+ *
+ * These correspond to the oversampling ratios exposed to userspace.
+ */
+static const int pac1921_int_num_samples[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048
+};
+
+/*
+ * The integration period depends on the configured number of integration
+ * samples, the measurement resolution and the post filters. The following
+ * array contains the integration periods, in microseconds, based on table 4-5
+ * of the datasheet, assuming power integration mode, 14-bit resolution and
+ * post filters enabled. Each index corresponds to a specific number of
+ * samples, from 1 to 2048.
+ */
+static const unsigned int pac1921_int_periods_usecs[] = {
+ 2720, /* 1 sample */
+ 4050, /* 2 samples */
+ 6790, /* 4 samples */
+ 12200, /* 8 samples */
+ 23000, /* 16 samples */
+ 46000, /* 32 samples */
+ 92000, /* 64 samples */
+ 184000, /* 128 samples */
+ 368000, /* 256 samples */
+ 736000, /* 512 samples */
+ 1471000, /* 1024 samples */
+ 2941000 /* 2048 samples */
+};
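+
+/*
+ * For illustration: with the default single-sample configuration the
+ * integration period is 2720 us, so the read-only sampling frequency
+ * reported to userspace is MICRO / 2720 ~= 367 Hz; with 2048 samples it
+ * drops to MICRO / 2941000 ~= 0.34 Hz.
+ */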
+
+/* pac1921 regmap configuration */
+static const struct regmap_range pac1921_regmap_wr_ranges[] = {
+ regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL),
+};
+
+static const struct regmap_access_table pac1921_regmap_wr_table = {
+ .yes_ranges = pac1921_regmap_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_wr_ranges),
+};
+
+static const struct regmap_range pac1921_regmap_rd_ranges[] = {
+ regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL),
+ regmap_reg_range(PAC1921_REG_VBUS, PAC1921_REG_VPOWER + 1),
+};
+
+static const struct regmap_access_table pac1921_regmap_rd_table = {
+ .yes_ranges = pac1921_regmap_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_rd_ranges),
+};
+
+static const struct regmap_config pac1921_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &pac1921_regmap_rd_table,
+ .wr_table = &pac1921_regmap_wr_table,
+};
+
+enum pac1921_channels {
+ PAC1921_CHAN_VBUS = 0,
+ PAC1921_CHAN_VSENSE = 1,
+ PAC1921_CHAN_CURRENT = 2,
+ PAC1921_CHAN_POWER = 3,
+};
+#define PAC1921_NUM_MEAS_CHANS 4
+
+struct pac1921_priv {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator *vdd;
+ struct iio_info iio_info;
+
+ /*
+ * Synchronize access to private members, and ensure atomicity of
+ * consecutive regmap operations.
+ */
+ struct mutex lock;
+
+ u32 rshunt_uohm; /* uOhm */
+ u8 dv_gain;
+ u8 di_gain;
+ u8 n_samples;
+ u8 prev_ovf_flags;
+ u8 ovf_enabled_events;
+
+ bool first_integr_started;
+ bool first_integr_done;
+ unsigned long integr_started_time_jiffies;
+ unsigned int integr_period_usecs;
+
+ int current_scales[ARRAY_SIZE(pac1921_vsense_scales)][2];
+
+ struct {
+ u16 chan[PAC1921_NUM_MEAS_CHANS];
+ s64 timestamp __aligned(8);
+ } scan;
+};
+
+/*
+ * Check if first integration after configuration update has completed.
+ *
+ * Must be called with lock held.
+ */
+static bool pac1921_data_ready(struct pac1921_priv *priv)
+{
+ if (!priv->first_integr_started)
+ return false;
+
+ if (!priv->first_integr_done) {
+ unsigned long t_ready;
+
+ /*
+ * Data are valid once the device has entered the integration
+ * state (worst case: coming from the sleep state) and has
+ * completed the first integration period.
+ */
+ t_ready = priv->integr_started_time_jiffies +
+ usecs_to_jiffies(PAC1921_SLEEP_TO_INT_TIME_US) +
+ usecs_to_jiffies(priv->integr_period_usecs);
+
+ if (time_before(jiffies, t_ready))
+ return false;
+
+ priv->first_integr_done = true;
+ }
+
+ return true;
+}
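+
+/*
+ * For illustration: with the default single-sample configuration, data
+ * become valid roughly PAC1921_SLEEP_TO_INT_TIME_US + 2720 us ~= 2.8 ms
+ * after integration is (re)started.
+ */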
+
+static inline void pac1921_calc_scale(int dividend, int divisor, int *val,
+ int *val2)
+{
+ s64 tmp;
+
+ tmp = div_s64(dividend * (s64)NANO, divisor);
+ *val = (int)div_s64_rem(tmp, NANO, val2);
+}
+
+/*
+ * Fill the table of scale factors for current
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mA
+ *
+ * Vsense LSB (nV) = max_vsense (nV) / di_gain / resolution
+ * Current scale (mA) = Vsense LSB (nV) / shunt (uOhm)
+ *
+ * Must be called with the lock held when updating after the first
+ * initialization.
+ */
+static void pac1921_calc_current_scales(struct pac1921_priv *priv)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(priv->current_scales); i++) {
+ int max = (PAC1921_MAX_VSENSE_MV * MICRO) >> i;
+ int vsense_lsb = DIV_ROUND_CLOSEST(max, PAC1921_RES_RESOLUTION);
+
+ pac1921_calc_scale(vsense_lsb, (int)priv->rshunt_uohm,
+ &priv->current_scales[i][0],
+ &priv->current_scales[i][1]);
+ }
+}
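+
+/*
+ * Worked example, assuming a hypothetical 10 mOhm shunt (rshunt_uohm =
+ * 10000) and di_gain x1: vsense_lsb = 100000000 / 1023 ~= 97752 nV, so the
+ * current scale is 97752 / 10000 ~= 9.775 mA per LSB, stored as
+ * { 9, 775200000 } in IIO_VAL_INT_PLUS_NANO format.
+ */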
+
+/*
+ * Check if overflow occurred and if so, push the corresponding events.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_check_push_overflow(struct iio_dev *indio_dev, s64 timestamp)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ unsigned int flags;
+ int ret;
+
+ ret = regmap_read(priv->regmap, PAC1921_REG_OVERFLOW_STS, &flags);
+ if (ret)
+ return ret;
+
+ if (flags & PAC1921_OVERFLOW_VBOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VBOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_VOLTAGE, PAC1921_CHAN_VBUS,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+ if (flags & PAC1921_OVERFLOW_VSOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VSOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_VOLTAGE, PAC1921_CHAN_VSENSE,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_CURRENT, PAC1921_CHAN_CURRENT,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+ if (flags & PAC1921_OVERFLOW_VPOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VPOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_POWER, PAC1921_CHAN_POWER,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+
+ priv->prev_ovf_flags = (u8)flags;
+
+ return 0;
+}
+
+/*
+ * Read the value from a result register
+ *
+ * Result registers contain the most recent averaged values of Vbus, Vsense and
+ * Vpower. Each value is 10 bits wide and spread across two consecutive 8-bit
+ * registers, with 6-bit LSB zero padding.
+ */
+static int pac1921_read_res(struct pac1921_priv *priv, unsigned long reg,
+ u16 *val)
+{
+ int ret = regmap_bulk_read(priv->regmap, (unsigned int)reg, val,
+ sizeof(*val));
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(PAC1921_RES_MASK, get_unaligned_be16(val));
+
+ return 0;
+}
+
+static int pac1921_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ s64 ts;
+ u16 res_val;
+ int ret;
+
+ if (!pac1921_data_ready(priv))
+ return -EBUSY;
+
+ ts = iio_get_time_ns(indio_dev);
+
+ ret = pac1921_check_push_overflow(indio_dev, ts);
+ if (ret)
+ return ret;
+
+ ret = pac1921_read_res(priv, chan->address, &res_val);
+ if (ret)
+ return ret;
+
+ *val = (int)res_val;
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ *val = pac1921_vbus_scales[priv->dv_gain][0];
+ *val2 = pac1921_vbus_scales[priv->dv_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_VSENSE:
+ *val = pac1921_vsense_scales[priv->di_gain][0];
+ *val2 = pac1921_vsense_scales[priv->di_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_CURRENT:
+ *val = priv->current_scales[priv->di_gain][0];
+ *val2 = priv->current_scales[priv->di_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_POWER: {
+ /*
+ * Power scale factor in mW:
+ * Current scale (mA) * max_vbus (V) / dv_gain
+ */
+
+ /* Get current scale based on di_gain */
+ int *curr_scale = priv->current_scales[priv->di_gain];
+
+ /* Convert current_scale from INT_PLUS_NANO to INT */
+ s64 tmp = curr_scale[0] * (s64)NANO + curr_scale[1];
+
+ /* Multiply by max_vbus (V) / dv_gain */
+ tmp *= PAC1921_MAX_VBUS_V >> (int)priv->dv_gain;
+
+ /* Convert back to INT_PLUS_NANO */
+ *val = (int)div_s64_rem(tmp, NANO, val2);
+
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = pac1921_int_num_samples[priv->n_samples];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The sampling frequency (Hz) is read-only and corresponds to
+ * how often the device provides integrated measurements into
+ * the result registers, thus it's 1/integration_period.
+ * The integration period depends on the number of integration
+ * samples, measurement resolution and post filters.
+ *
+ * 1/(integr_period_usecs/MICRO) = MICRO/integr_period_usecs
+ */
+ *val = MICRO;
+ *val2 = (int)priv->integr_period_usecs;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = pac1921_int_num_samples;
+ *length = ARRAY_SIZE(pac1921_int_num_samples);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Perform configuration update sequence: set the device into read state, then
+ * write the config register and set the device back into integration state.
+ * Also reset integration start time and mark first integration to be yet
+ * completed.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_cfg_reg(struct pac1921_priv *priv, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ /* Enter READ state before configuration */
+ int ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ /* Update configuration value */
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ return ret;
+
+ /* Re-enable integration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset the integration start time and mark this integration period as
+ * the first one, so that new measurements are considered valid only at
+ * the end of this integration period.
+ */
+ priv->integr_started_time_jiffies = jiffies;
+ priv->first_integr_done = false;
+
+ return 0;
+}
+
+/*
+ * Retrieve the index of the given scale (represented by scale_val and
+ * scale_val2) from scales_tbl. The returned index (if found) is the log2 of
+ * the gain corresponding to the given scale.
+ *
+ * Must be called with the lock held if scales_tbl can change at runtime (e.g.
+ * the current scales table).
+ */
+static int pac1921_lookup_scale(const int (*const scales_tbl)[2], size_t size,
+ int scale_val, int scale_val2)
+{
+ for (unsigned int i = 0; i < size; i++)
+ if (scales_tbl[i][0] == scale_val &&
+ scales_tbl[i][1] == scale_val2)
+ return (int)i;
+
+ return -EINVAL;
+}
+
+/*
+ * Configure device with the given gain (only if changed)
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_gain(struct pac1921_priv *priv, u8 *priv_val, u8 gain,
+ unsigned int mask)
+{
+ unsigned int reg_val;
+ int ret;
+
+ if (*priv_val == gain)
+ return 0;
+
+ reg_val = (gain << __ffs(mask)) & mask;
+ ret = pac1921_update_cfg_reg(priv, PAC1921_REG_GAIN_CFG, mask, reg_val);
+ if (ret)
+ return ret;
+
+ *priv_val = gain;
+
+ return 0;
+}
+
+/*
+ * Given a scale factor represented by scale_val and scale_val2 with format
+ * IIO_VAL_INT_PLUS_NANO, find the corresponding gain value and write it to the
+ * device.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_gain_from_scale(struct pac1921_priv *priv,
+ struct iio_chan_spec const *chan,
+ int scale_val, int scale_val2)
+{
+ int ret;
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ ret = pac1921_lookup_scale(pac1921_vbus_scales,
+ ARRAY_SIZE(pac1921_vbus_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->dv_gain, (u8)ret,
+ PAC1921_GAIN_DV_GAIN_MASK);
+ case PAC1921_CHAN_VSENSE:
+ ret = pac1921_lookup_scale(pac1921_vsense_scales,
+ ARRAY_SIZE(pac1921_vsense_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->di_gain, (u8)ret,
+ PAC1921_GAIN_DI_GAIN_MASK);
+ case PAC1921_CHAN_CURRENT:
+ ret = pac1921_lookup_scale(priv->current_scales,
+ ARRAY_SIZE(priv->current_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->di_gain, (u8)ret,
+ PAC1921_GAIN_DI_GAIN_MASK);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Retrieve the index of the given number of samples from the constant table.
+ * The returned index (if found) is the log2 of the given num_samples.
+ */
+static int pac1921_lookup_int_num_samples(int num_samples)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(pac1921_int_num_samples); i++)
+ if (pac1921_int_num_samples[i] == num_samples)
+ return (int)i;
+
+ return -EINVAL;
+}
+
+/*
+ * Update the device with the given number of integration samples.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_int_num_samples(struct pac1921_priv *priv,
+ int num_samples)
+{
+ unsigned int reg_val;
+ u8 n_samples;
+ int ret;
+
+ ret = pac1921_lookup_int_num_samples(num_samples);
+ if (ret < 0)
+ return ret;
+
+ n_samples = (u8)ret;
+
+ if (priv->n_samples == n_samples)
+ return 0;
+
+ reg_val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, n_samples);
+
+ ret = pac1921_update_cfg_reg(priv, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_SMPL_MASK, reg_val);
+ if (ret)
+ return ret;
+
+ priv->n_samples = n_samples;
+
+ priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples];
+
+ return 0;
+}
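+
+/*
+ * For illustration: writing an oversampling ratio of 64 selects index 6
+ * (log2(64)), i.e. a 92 ms integration period and a reported sampling
+ * frequency of roughly 10.9 Hz.
+ */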
+
+static int pac1921_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return pac1921_update_gain_from_scale(priv, chan, val, val2);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return pac1921_update_int_num_samples(priv, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ return sprintf(label, "vbus\n");
+ case PAC1921_CHAN_VSENSE:
+ return sprintf(label, "vsense\n");
+ case PAC1921_CHAN_CURRENT:
+ return sprintf(label, "current\n");
+ case PAC1921_CHAN_POWER:
+ return sprintf(label, "power\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV);
+ case PAC1921_CHAN_VSENSE:
+ case PAC1921_CHAN_CURRENT:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV);
+ case PAC1921_CHAN_POWER:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ u8 ovf_bit;
+
+ guard(mutex)(&priv->lock);
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ ovf_bit = PAC1921_OVERFLOW_VBOV;
+ break;
+ case PAC1921_CHAN_VSENSE:
+ case PAC1921_CHAN_CURRENT:
+ ovf_bit = PAC1921_OVERFLOW_VSOV;
+ break;
+ case PAC1921_CHAN_POWER:
+ ovf_bit = PAC1921_OVERFLOW_VPOV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (state)
+ priv->ovf_enabled_events |= ovf_bit;
+ else
+ priv->ovf_enabled_events &= ~ovf_bit;
+
+ return 0;
+}
+
+static int pac1921_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val,
+ int *val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = PAC1921_RES_RESOLUTION;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info pac1921_iio = {
+ .read_raw = pac1921_read_raw,
+ .read_avail = pac1921_read_avail,
+ .write_raw = pac1921_write_raw,
+ .write_raw_get_fmt = pac1921_write_raw_get_fmt,
+ .read_label = pac1921_read_label,
+ .read_event_config = pac1921_read_event_config,
+ .write_event_config = pac1921_write_event_config,
+ .read_event_value = pac1921_read_event_value,
+};
+
+static ssize_t pac1921_read_shunt_resistor(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int vals[2];
+
+ if (chan->channel != PAC1921_CHAN_CURRENT)
+ return -EINVAL;
+
+ guard(mutex)(&priv->lock);
+
+ vals[0] = (int)priv->rshunt_uohm;
+ vals[1] = MICRO;
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
+}
+
+static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ u64 rshunt_uohm;
+ int val, val_fract;
+ int ret;
+
+ if (chan->channel != PAC1921_CHAN_CURRENT)
+ return -EINVAL;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
+ if (ret)
+ return ret;
+
+ rshunt_uohm = (u32)val * MICRO + (u32)val_fract;
+ if (rshunt_uohm == 0 || rshunt_uohm > INT_MAX)
+ return -EINVAL;
+
+ guard(mutex)(&priv->lock);
+
+ priv->rshunt_uohm = (u32)rshunt_uohm;
+
+ pac1921_calc_current_scales(priv);
+
+ return len;
+}
+
+/*
+ * Emit to sysfs the list of available scales contained in scales_tbl.
+ *
+ * TODO: this function can be replaced with iio_format_avail_list() if the
+ * latter is ever exported.
+ *
+ * Must be called with the lock held if scales_tbl can change at runtime (e.g.
+ * the current scales table).
+ */
+static ssize_t pac1921_format_scale_avail(const int (*const scales_tbl)[2],
+ size_t size, char *buf)
+{
+ ssize_t len = 0;
+
+ for (unsigned int i = 0; i < size; i++) {
+ if (i != 0) {
+ len += sysfs_emit_at(buf, len, " ");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ len += sysfs_emit_at(buf, len, "%d.%09d", scales_tbl[i][0],
+ scales_tbl[i][1]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+}
+
+/*
+ * Read available scales for a specific channel
+ *
+ * NOTE: using extended info instead of iio.read_avail() because access to the
+ * current scales must be locked, as they depend on the shunt resistor which
+ * may change at runtime. A caller of iio.read_avail() would access the table
+ * unlocked instead.
+ */
+static ssize_t pac1921_read_scale_avail(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ const int (*scales_tbl)[2];
+ size_t size;
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ scales_tbl = pac1921_vbus_scales;
+ size = ARRAY_SIZE(pac1921_vbus_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+
+ case PAC1921_CHAN_VSENSE:
+ scales_tbl = pac1921_vsense_scales;
+ size = ARRAY_SIZE(pac1921_vsense_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+
+ case PAC1921_CHAN_CURRENT: {
+ guard(mutex)(&priv->lock);
+ scales_tbl = priv->current_scales;
+ size = ARRAY_SIZE(priv->current_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PAC1921_EXT_INFO_SCALE_AVAIL { \
+ .name = "scale_available", \
+ .read = pac1921_read_scale_avail, \
+ .shared = IIO_SEPARATE, \
+}
+
+static const struct iio_chan_spec_ext_info pac1921_ext_info_voltage[] = {
+ PAC1921_EXT_INFO_SCALE_AVAIL,
+ {}
+};
+
+static const struct iio_chan_spec_ext_info pac1921_ext_info_current[] = {
+ PAC1921_EXT_INFO_SCALE_AVAIL,
+ {
+ .name = "shunt_resistor",
+ .read = pac1921_read_shunt_resistor,
+ .write = pac1921_write_shunt_resistor,
+ .shared = IIO_SEPARATE,
+ },
+ {}
+};
+
+static const struct iio_event_spec pac1921_overflow_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec pac1921_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_VBUS,
+ .address = PAC1921_REG_VBUS,
+ .scan_index = PAC1921_CHAN_VBUS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .indexed = 1,
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_voltage,
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_VSENSE,
+ .address = PAC1921_REG_VSENSE,
+ .scan_index = PAC1921_CHAN_VSENSE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .indexed = 1,
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_voltage,
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_CURRENT,
+ .address = PAC1921_REG_VSENSE,
+ .scan_index = PAC1921_CHAN_CURRENT,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_current,
+ },
+ {
+ .type = IIO_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_POWER,
+ .address = PAC1921_REG_VPOWER,
+ .scan_index = PAC1921_CHAN_POWER,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(PAC1921_NUM_MEAS_CHANS),
+};
+
+static irqreturn_t pac1921_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct pac1921_priv *priv = iio_priv(idev);
+ int ret;
+ int bit;
+ int ch = 0;
+
+ guard(mutex)(&priv->lock);
+
+ if (!pac1921_data_ready(priv))
+ goto done;
+
+ ret = pac1921_check_push_overflow(idev, pf->timestamp);
+ if (ret)
+ goto done;
+
+ iio_for_each_active_channel(idev, bit) {
+ u16 val;
+
+ ret = pac1921_read_res(priv, idev->channels[ch].address, &val);
+ if (ret)
+ goto done;
+
+ priv->scan.chan[ch++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, &priv->scan, pf->timestamp);
+
+done:
+ iio_trigger_notify_done(idev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize device by writing initial configuration and putting it into
+ * integration state.
+ *
+ * Must be called with the lock held when called after the first
+ * initialization (e.g. from PM resume).
+ */
+static int pac1921_init(struct pac1921_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ /* Enter READ state before configuration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ /* Configure gains, use 14-bits measurement resolution (HW default) */
+ val = FIELD_PREP(PAC1921_GAIN_DI_GAIN_MASK, priv->di_gain) |
+ FIELD_PREP(PAC1921_GAIN_DV_GAIN_MASK, priv->dv_gain);
+ ret = regmap_write(priv->regmap, PAC1921_REG_GAIN_CFG, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure integration:
+ * - num of integration samples
+ * - filters enabled (HW default)
+ * - set READ/INT pin override (RIOV) to control operation mode via
+ * register instead of pin
+ */
+ val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, priv->n_samples) |
+ PAC1921_INT_CFG_VSFEN | PAC1921_INT_CFG_VBFEN |
+ PAC1921_INT_CFG_RIOV;
+ ret = regmap_write(priv->regmap, PAC1921_REG_INT_CFG, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Init control register:
+ * - VPower free run integration mode
+ * - OUT pin full scale range: 3V (HW default)
+ * - no timeout, no sleep, no sleep override, no recalc (HW defaults)
+ */
+ val = FIELD_PREP(PAC1921_CONTROL_MXSL_MASK,
+ PAC1921_MXSL_VPOWER_FREE_RUN);
+ ret = regmap_write(priv->regmap, PAC1921_REG_CONTROL, val);
+ if (ret)
+ return ret;
+
+ /* Enable integration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN);
+ if (ret)
+ return ret;
+
+ priv->first_integr_started = true;
+ priv->integr_started_time_jiffies = jiffies;
+ priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples];
+
+ return 0;
+}
+
+static int pac1921_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&priv->lock);
+
+ priv->first_integr_started = false;
+ priv->first_integr_done = false;
+
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_CONTROL,
+ PAC1921_CONTROL_SLEEP, PAC1921_CONTROL_SLEEP);
+ if (ret)
+ return ret;
+
+ return regulator_disable(priv->vdd);
+}
+
+static int pac1921_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&priv->lock);
+
+ ret = regulator_enable(priv->vdd);
+ if (ret)
+ return ret;
+
+ msleep(PAC1921_POWERUP_TIME_MS);
+
+ return pac1921_init(priv);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pac1921_pm_ops, pac1921_suspend,
+ pac1921_resume);
+
+static void pac1921_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
+static int pac1921_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pac1921_priv *priv;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ priv->regmap = devm_regmap_init_i2c(client, &pac1921_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, (int)PTR_ERR(priv->regmap),
+ "Cannot initialize register map\n");
+
+ devm_mutex_init(dev, &priv->lock);
+
+ priv->dv_gain = PAC1921_DEFAULT_DV_GAIN;
+ priv->di_gain = PAC1921_DEFAULT_DI_GAIN;
+ priv->n_samples = PAC1921_DEFAULT_NUM_SAMPLES;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &priv->rshunt_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot read shunt resistor property\n");
+ if (priv->rshunt_uohm == 0 || priv->rshunt_uohm > INT_MAX)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid shunt resistor: %u\n",
+ priv->rshunt_uohm);
+
+ pac1921_calc_current_scales(priv);
+
+ priv->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd))
+ return dev_err_probe(dev, (int)PTR_ERR(priv->vdd),
+ "Cannot get vdd regulator\n");
+
+ ret = regulator_enable(priv->vdd);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable vdd regulator\n");
+
+ ret = devm_add_action_or_reset(dev, pac1921_regulator_disable,
+ priv->vdd);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot add action for vdd regulator disposal\n");
+
+ msleep(PAC1921_POWERUP_TIME_MS);
+
+ ret = pac1921_init(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot initialize device\n");
+
+ priv->iio_info = pac1921_iio;
+
+ indio_dev->name = "pac1921";
+ indio_dev->info = &priv->iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = pac1921_channels;
+ indio_dev->num_channels = ARRAY_SIZE(pac1921_channels);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &pac1921_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot setup IIO triggered buffer\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id pac1921_id[] = {
+ { .name = "pac1921", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pac1921_id);
+
+static const struct of_device_id pac1921_of_match[] = {
+ { .compatible = "microchip,pac1921" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pac1921_of_match);
+
+static struct i2c_driver pac1921_driver = {
+ .driver = {
+ .name = "pac1921",
+ .pm = pm_sleep_ptr(&pac1921_pm_ops),
+ .of_match_table = pac1921_of_match,
+ },
+ .probe = pac1921_probe,
+ .id_table = pac1921_id,
+};
+
+module_i2c_driver(pac1921_driver);
+
+MODULE_AUTHOR("Matteo Martelli <matteomartelli3@gmail.com>");
+MODULE_DESCRIPTION("IIO driver for PAC1921 High-Side Power/Current Monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index ae24a27805ab..8210728034d0 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -1571,7 +1571,7 @@ static const struct i2c_device_id pac1934_id[] = {
{ .name = "pac1932", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1932] },
{ .name = "pac1933", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1933] },
{ .name = "pac1934", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, pac1934_id);
@@ -1592,7 +1592,7 @@ static const struct of_device_id pac1934_of_match[] = {
.compatible = "microchip,pac1934",
.data = &pac1934_chip_config[PAC1934]
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, pac1934_of_match);
@@ -1602,7 +1602,7 @@ MODULE_DEVICE_TABLE(of, pac1934_of_match);
*/
static const struct acpi_device_id pac1934_acpi_match[] = {
{ "MCHP1930", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, pac1934_acpi_match);
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index c9d2c66434e4..9e1112f5acc6 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -1006,7 +1006,7 @@ static const struct of_device_id pm8xxx_xoadc_id_table[] = {
.compatible = "qcom,pm8921-adc",
.data = &pm8921_variant,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, pm8xxx_xoadc_id_table);
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 1402df68dd52..6aa70b4629a7 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -1002,7 +1002,7 @@ static int rradc_probe(struct platform_device *pdev)
static const struct of_device_id rradc_match_table[] = {
{ .compatible = "qcom,pm660-rradc" },
{ .compatible = "qcom,pmi8998-rradc" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, rradc_match_table);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index bbe954a738c7..240cfa391674 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -331,7 +331,7 @@ static const struct of_device_id rockchip_saradc_match[] = {
.compatible = "rockchip,rk3588-saradc",
.data = &rk3588_saradc_data,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
@@ -370,7 +370,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
mutex_lock(&info->lock);
- for_each_set_bit(i, i_dev->active_scan_mask, i_dev->masklength) {
+ iio_for_each_active_channel(i_dev, i) {
const struct iio_chan_spec *chan = &i_dev->channels[i];
ret = rockchip_saradc_conversion(info, chan);
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index bcb129840908..56ed948a8ae1 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -643,7 +643,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
pm_runtime_get_sync(dev);
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
unsigned int addr = rtq6056_channels[bit].address;
ret = regmap_read(priv->regmap, addr, &raw);
@@ -865,7 +865,7 @@ static const struct richtek_dev_data rtq6059_devdata = {
static const struct of_device_id rtq6056_device_match[] = {
{ .compatible = "richtek,rtq6056", .data = &rtq6056_devdata },
{ .compatible = "richtek,rtq6059", .data = &rtq6059_devdata },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, rtq6056_device_match);
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
index 327cc2097f6c..654b6a38b650 100644
--- a/drivers/iio/adc/sd_adc_modulator.c
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -6,11 +6,14 @@
* Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
*/
+#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
static const struct iio_info iio_sd_mod_iio_info;
@@ -24,7 +27,59 @@ static const struct iio_chan_spec iio_sd_mod_ch = {
},
};
-static int iio_sd_mod_probe(struct platform_device *pdev)
+struct iio_sd_backend_priv {
+ struct regulator *vref;
+ int vref_mv;
+};
+
+static int iio_sd_mod_enable(struct iio_backend *backend)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ if (priv->vref)
+ return regulator_enable(priv->vref);
+
+ return 0;
+};
+
+static void iio_sd_mod_disable(struct iio_backend *backend)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ if (priv->vref)
+ regulator_disable(priv->vref);
+};
+
+static int iio_sd_mod_read(struct iio_backend *backend, struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = priv->vref_mv;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 0;
+ return IIO_VAL_INT;
+ }
+
+ return -EOPNOTSUPP;
+};
+
+static const struct iio_backend_ops sd_backend_ops = {
+ .enable = iio_sd_mod_enable,
+ .disable = iio_sd_mod_disable,
+ .read_raw = iio_sd_mod_read,
+};
+
+static const struct iio_backend_info sd_backend_info = {
+ .name = "sd-modulator",
+ .ops = &sd_backend_ops,
+};
+
+static int iio_sd_mod_register(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iio_dev *iio;
@@ -45,6 +100,45 @@ static int iio_sd_mod_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, iio);
}
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator *vref;
+ struct iio_sd_backend_priv *priv;
+ int ret;
+
+ /* If the SD modulator is not defined as an IIO backend device, fall back to legacy mode */
+ if (!device_property_present(dev, "#io-backend-cells"))
+ return iio_sd_mod_register(pdev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * Get the regulator reference, if any, but don't enable it right now.
+ * Rely on the enable and disable callbacks to manage regulator power.
+ */
+ vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(vref)) {
+ if (PTR_ERR(vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vref), "Failed to get vref\n");
+ } else {
+ /*
+ * Retrieve the voltage right now, as regulator_get_voltage() provides it
+ * regardless of the regulator state.
+ */
+ ret = regulator_get_voltage(vref);
+ if (ret < 0)
+ return ret;
+
+ priv->vref = vref;
+ priv->vref_mv = ret / 1000;
+ }
+
+ return devm_iio_backend_register(&pdev->dev, &sd_backend_info, priv);
+};
+
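+/*
+ * When registered as a backend, a front-end such as the STM32 DFSDM (see the
+ * stm32-dfsdm-adc changes below) obtains this device with
+ * devm_iio_backend_fwnode_get(); its iio_backend_read_scale() and
+ * iio_backend_read_offset() requests are served by the read_raw backend op,
+ * i.e. iio_sd_mod_read() above.
+ */
+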
static const struct of_device_id sd_adc_of_match[] = {
{ .compatible = "sd-modulator" },
{ .compatible = "ads1201" },
@@ -65,3 +159,4 @@ module_platform_driver(iio_sd_mod_adc);
MODULE_DESCRIPTION("Basic sigma delta modulator");
MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BACKEND);
diff --git a/drivers/iio/adc/sophgo-cv1800b-adc.c b/drivers/iio/adc/sophgo-cv1800b-adc.c
new file mode 100644
index 000000000000..0951deb7b111
--- /dev/null
+++ b/drivers/iio/adc/sophgo-cv1800b-adc.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Sophgo CV1800B SARADC Driver
+ *
+ * Copyright (C) Bootlin 2024
+ * Author: Thomas Bonnefille <thomas.bonnefille@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define CV1800B_ADC_CTRL_REG 0x04
+#define CV1800B_ADC_EN BIT(0)
+#define CV1800B_ADC_SEL(x) BIT((x) + 5)
+#define CV1800B_ADC_STATUS_REG 0x08
+#define CV1800B_ADC_BUSY BIT(0)
+#define CV1800B_ADC_CYC_SET_REG 0x0C
+#define CV1800B_MASK_STARTUP_CYCLE GENMASK(4, 0)
+#define CV1800B_MASK_SAMPLE_WINDOW GENMASK(11, 8)
+#define CV1800B_MASK_CLKDIV GENMASK(15, 12)
+#define CV1800B_MASK_COMPARE_CYCLE GENMASK(19, 16)
+#define CV1800B_ADC_CH_RESULT_REG(x) (0x14 + 4 * (x))
+#define CV1800B_ADC_CH_RESULT GENMASK(11, 0)
+#define CV1800B_ADC_CH_VALID BIT(15)
+#define CV1800B_ADC_INTR_EN_REG 0x20
+#define CV1800B_ADC_INTR_CLR_REG 0x24
+#define CV1800B_ADC_INTR_CLR_BIT BIT(0)
+#define CV1800B_ADC_INTR_STA_REG 0x28
+#define CV1800B_ADC_INTR_STA_BIT BIT(0)
+#define CV1800B_READ_TIMEOUT_MS 1000
+#define CV1800B_READ_TIMEOUT_US (CV1800B_READ_TIMEOUT_MS * 1000)
+
+#define CV1800B_ADC_CHANNEL(index) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .scan_index = index, \
+ }
+
+struct cv1800b_adc {
+ struct completion completion;
+ void __iomem *regs;
+ struct mutex lock; /* ADC Control and Result register */
+ struct clk *clk;
+ int irq;
+};
+
+static const struct iio_chan_spec sophgo_channels[] = {
+ CV1800B_ADC_CHANNEL(0),
+ CV1800B_ADC_CHANNEL(1),
+ CV1800B_ADC_CHANNEL(2),
+};
+
+static void cv1800b_adc_start_measurement(struct cv1800b_adc *saradc,
+ int channel)
+{
+ writel(0, saradc->regs + CV1800B_ADC_CTRL_REG);
+ writel(CV1800B_ADC_SEL(channel) | CV1800B_ADC_EN,
+ saradc->regs + CV1800B_ADC_CTRL_REG);
+}
+
+static int cv1800b_adc_wait(struct cv1800b_adc *saradc)
+{
+ if (saradc->irq < 0) {
+ u32 reg;
+
+ return readl_poll_timeout(saradc->regs + CV1800B_ADC_STATUS_REG,
+ reg, !(reg & CV1800B_ADC_BUSY),
+ 500, CV1800B_READ_TIMEOUT_US);
+ }
+
+ return wait_for_completion_timeout(&saradc->completion,
+ msecs_to_jiffies(CV1800B_READ_TIMEOUT_MS)) > 0 ?
+ 0 : -ETIMEDOUT;
+}
+
+static int cv1800b_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cv1800b_adc *saradc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ u32 sample;
+
+ scoped_guard(mutex, &saradc->lock) {
+ int ret;
+
+ cv1800b_adc_start_measurement(saradc, chan->scan_index);
+ ret = cv1800b_adc_wait(saradc);
+ if (ret < 0)
+ return ret;
+
+ sample = readl(saradc->regs + CV1800B_ADC_CH_RESULT_REG(chan->scan_index));
+ }
+ if (!(sample & CV1800B_ADC_CH_VALID))
+ return -ENODATA;
+
+ *val = sample & CV1800B_ADC_CH_RESULT;
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 3300;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ u32 status_reg = readl(saradc->regs + CV1800B_ADC_CYC_SET_REG);
+ unsigned int clk_div = (1 + FIELD_GET(CV1800B_MASK_CLKDIV, status_reg));
+ unsigned int freq = clk_get_rate(saradc->clk) / clk_div;
+ unsigned int nb_startup_cycle = 1 + FIELD_GET(CV1800B_MASK_STARTUP_CYCLE, status_reg);
+ unsigned int nb_sample_cycle = 1 + FIELD_GET(CV1800B_MASK_SAMPLE_WINDOW, status_reg);
+ unsigned int nb_compare_cycle = 1 + FIELD_GET(CV1800B_MASK_COMPARE_CYCLE, status_reg);
+
+ *val = freq / (nb_startup_cycle + nb_sample_cycle + nb_compare_cycle);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
+}
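+
+/*
+ * For illustration, assuming a 25 MHz input clock and the cycle
+ * configuration programmed in probe below (CLKDIV field 1 -> divide by 2,
+ * 16 startup + 16 sample + 16 compare cycles): the reported sample rate
+ * would be 25 MHz / 2 / 48 ~= 260 kHz. The 25 MHz clock rate is only an
+ * assumed example value.
+ */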
+
+static irqreturn_t cv1800b_adc_interrupt_handler(int irq, void *private)
+{
+ struct cv1800b_adc *saradc = private;
+ u32 reg = readl(saradc->regs + CV1800B_ADC_INTR_STA_REG);
+
+ if (!(FIELD_GET(CV1800B_ADC_INTR_STA_BIT, reg)))
+ return IRQ_NONE;
+
+ writel(CV1800B_ADC_INTR_CLR_BIT, saradc->regs + CV1800B_ADC_INTR_CLR_REG);
+ complete(&saradc->completion);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info cv1800b_adc_info = {
+ .read_raw = &cv1800b_adc_read_raw,
+};
+
+static int cv1800b_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800b_adc *saradc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*saradc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ saradc = iio_priv(indio_dev);
+ indio_dev->name = "sophgo-cv1800b-adc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &cv1800b_adc_info;
+ indio_dev->num_channels = ARRAY_SIZE(sophgo_channels);
+ indio_dev->channels = sophgo_channels;
+
+ saradc->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(saradc->clk))
+ return PTR_ERR(saradc->clk);
+
+ saradc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(saradc->regs))
+ return PTR_ERR(saradc->regs);
+
+ saradc->irq = platform_get_irq_optional(pdev, 0);
+ if (saradc->irq > 0) {
+ init_completion(&saradc->completion);
+ ret = devm_request_irq(dev, saradc->irq,
+ cv1800b_adc_interrupt_handler, 0,
+ dev_name(dev), saradc);
+ if (ret)
+ return ret;
+
+ writel(1, saradc->regs + CV1800B_ADC_INTR_EN_REG);
+ }
+
+ ret = devm_mutex_init(dev, &saradc->lock);
+ if (ret)
+ return ret;
+
+ writel(FIELD_PREP(CV1800B_MASK_STARTUP_CYCLE, 15) |
+ FIELD_PREP(CV1800B_MASK_SAMPLE_WINDOW, 15) |
+ FIELD_PREP(CV1800B_MASK_CLKDIV, 1) |
+ FIELD_PREP(CV1800B_MASK_COMPARE_CYCLE, 15),
+ saradc->regs + CV1800B_ADC_CYC_SET_REG);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id cv1800b_adc_match[] = {
+ { .compatible = "sophgo,cv1800b-saradc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800b_adc_match);
+
+static struct platform_driver cv1800b_adc_driver = {
+ .driver = {
+ .name = "sophgo-cv1800b-saradc",
+ .of_match_table = cv1800b_adc_match,
+ },
+ .probe = cv1800b_adc_probe,
+};
+module_platform_driver(cv1800b_adc_driver);
+
+MODULE_AUTHOR("Thomas Bonnefille <thomas.bonnefille@bootlin.com>");
+MODULE_DESCRIPTION("Sophgo CV1800B SARADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 375aa7720f80..32ca26ed59f7 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1261,7 +1261,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
stm32_adc_writel(adc, adc->cfg->regs->smpr[0], adc->smpr_val[0]);
stm32_adc_writel(adc, adc->cfg->regs->smpr[1], adc->smpr_val[1]);
- for_each_set_bit(bit, scan_mask, indio_dev->masklength) {
+ for_each_set_bit(bit, scan_mask, iio_get_masklength(indio_dev)) {
chan = indio_dev->channels + bit;
/*
* Assign one channel per SQ entry in regular
@@ -1619,7 +1619,7 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- adc->num_conv = bitmap_weight(scan_mask, indio_dev->masklength);
+ adc->num_conv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask);
pm_runtime_mark_last_busy(dev);
@@ -2638,7 +2638,7 @@ static const struct of_device_id stm32_adc_of_match[] = {
{ .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg },
{ .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg },
{ .compatible = "st,stm32mp13-adc", .data = (void *)&stm32mp13_adc_cfg },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fabd654245f5..2037f73426d4 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -9,6 +9,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iio/adc/stm32-dfsdm-adc.h>
+#include <linux/iio/backend.h>
#include <linux/iio/buffer.h>
#include <linux/iio/hw-consumer.h>
#include <linux/iio/sysfs.h>
@@ -78,6 +79,7 @@ struct stm32_dfsdm_adc {
/* ADC specific */
unsigned int oversamp;
struct iio_hw_consumer *hwc;
+ struct iio_backend **backend;
struct completion completion;
u32 *buffer;
@@ -666,6 +668,74 @@ static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
return 0;
}
+static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch,
+ struct fwnode_handle *node)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct iio_backend *backend;
+ const char *of_str;
+ int ret, val;
+
+ ret = fwnode_property_read_u32(node, "reg", &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Missing channel index %d\n", ret);
+ return ret;
+ }
+
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Error: bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Error parsing 'label' for idx %d\n", ch->channel);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = fwnode_property_read_string(node, "st,adc-channel-type", &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = fwnode_property_read_string(node, "st,adc-channel-clk-src", &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = fwnode_property_read_u32(node, "st,adc-alt-channel", &df_ch->alt_si);
+ if (ret != -EINVAL)
+ df_ch->alt_si = 0;
+
+ if (adc->dev_data->type == DFSDM_IIO) {
+ backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);
+ if (IS_ERR(backend))
+ return dev_err_probe(&indio_dev->dev, PTR_ERR(backend),
+ "Failed to get backend\n");
+ adc->backend[ch->scan_index] = backend;
+ }
+
+ return 0;
+}
+
static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
uintptr_t priv,
const struct iio_chan_spec *chan,
@@ -987,7 +1057,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev,
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->nconv = bitmap_weight(scan_mask, indio_dev->masklength);
+ adc->nconv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
adc->smask = *scan_mask;
dev_dbg(&indio_dev->dev, "nconv=%d mask=%lx\n", adc->nconv, *scan_mask);
@@ -998,6 +1068,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev,
static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int i = 0;
int ret;
/* Reset adc buffer index */
@@ -1009,6 +1080,15 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
return ret;
}
+ if (adc->backend) {
+ while (adc->backend[i]) {
+ ret = iio_backend_enable(adc->backend[i]);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+ }
+
ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
if (ret < 0)
goto err_stop_hwc;
@@ -1041,6 +1121,7 @@ err_stop_hwc:
static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int i = 0;
stm32_dfsdm_stop_conv(indio_dev);
@@ -1048,6 +1129,13 @@ static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+ if (adc->backend) {
+ while (adc->backend[i]) {
+ iio_backend_disable(adc->backend[i]);
+ i++;
+ }
+ }
+
if (adc->hwc)
iio_hw_consumer_disable(adc->hwc);
@@ -1220,14 +1308,25 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_filter_osr *flo = &fl->flo[fl->fast];
+ u32 max = flo->max << (flo->lshift - chan->scan_type.shift);
+ int idx = chan->scan_index;
int ret;
+ if (flo->lshift < chan->scan_type.shift)
+ max = flo->max >> (chan->scan_type.shift - flo->lshift);
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
- ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->hwc)
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->backend)
+ ret = iio_backend_enable(adc->backend[idx]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"%s: IIO enable failed (channel %d)\n",
@@ -1236,7 +1335,10 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
return ret;
}
ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
- iio_hw_consumer_disable(adc->hwc);
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
+ if (adc->backend)
+ iio_backend_disable(adc->backend[idx]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"%s: Conversion failed (channel %d)\n",
@@ -1256,6 +1358,50 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
*val = adc->sample_freq;
return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Scale is expressed in mV.
+ * When fast mode is disabled, the actual resolution may be lower
+ * than 2^n, where n = realbits - 1.
+ * This leads to underestimating the input voltage.
+ * To compensate for this deviation, the voltage reference can be
+ * corrected with a factor = realbits resolution / actual max.
+ */
+ if (adc->backend) {
+ ret = iio_backend_read_scale(adc->backend[idx], chan, val, NULL);
+ if (ret < 0)
+ return ret;
+
+ *val = div_u64((u64)*val * (u64)BIT(DFSDM_DATA_RES - 1), max);
+ *val2 = chan->scan_type.realbits;
+ if (chan->differential)
+ *val *= 2;
+ }
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * DFSDM output data are in the range [-2^n, 2^n],
+ * with n = realbits - 1.
+ * - Differential modulator:
+ * Offset corresponds to the SD modulator offset.
+ * - Single ended modulator:
+ * Input is in the [0V, Vref] range,
+ * where 0V corresponds to -2^n, and Vref to 2^n.
+ * Add 2^n to the offset (i.e. the middle of the input range).
+ * offset = offset(sd) * vref / res(sd) * max / vref.
+ */
+ if (adc->backend) {
+ ret = iio_backend_read_offset(adc->backend[idx], chan, val, NULL);
+ if (ret < 0)
+ return ret;
+
+ *val = div_u64((u64)max * *val, BIT(*val2 - 1));
+ if (!chan->differential)
+ *val += max;
+ }
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -1320,7 +1466,7 @@ static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
.read = dfsdm_adc_audio_get_spiclk,
.write = dfsdm_adc_audio_set_spiclk,
},
- {},
+ { }
};
static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
@@ -1362,15 +1508,18 @@ static int stm32_dfsdm_dma_request(struct device *dev,
return 0;
}
-static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
- struct iio_chan_spec *ch)
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, struct iio_chan_spec *ch,
+ struct fwnode_handle *child)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
int ret;
- ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (child)
+ ret = stm32_dfsdm_generic_channel_parse_of(adc->dfsdm, indio_dev, ch, child);
+ else /* Legacy binding */
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
if (ret < 0)
- return ret;
+ return dev_err_probe(&indio_dev->dev, ret, "Failed to parse channel\n");
ch->type = IIO_VOLTAGE;
ch->indexed = 1;
@@ -1379,12 +1528,21 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
* IIO_CHAN_INFO_RAW: used to compute regular conversion
* IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
*/
- ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ if (child) {
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET);
+ } else {
+ /* Legacy. Scaling not supported */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ }
+
ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
BIT(IIO_CHAN_INFO_SAMP_FREQ);
if (adc->dev_data->type == DFSDM_AUDIO) {
ch->ext_info = dfsdm_adc_audio_ext_info;
+ ch->scan_index = 0;
} else {
ch->scan_type.shift = 8;
}
@@ -1396,20 +1554,67 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
&adc->dfsdm->ch_list[ch->channel]);
}
+static int stm32_dfsdm_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels)
+{
+ int num_ch = indio_dev->num_channels;
+ int chan_idx = 0;
+ int ret;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ channels[chan_idx].scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], NULL);
+ if (ret < 0)
+ return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n");
+ }
+
+ return 0;
+}
+
+static int stm32_dfsdm_generic_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels)
+{
+ int chan_idx = 0, ret;
+
+ device_for_each_child_node_scoped(&indio_dev->dev, child) {
+ /* Skip DAI node in DFSDM audio nodes */
+ if (fwnode_property_present(child, "compatible"))
+ continue;
+
+ channels[chan_idx].scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], child);
+ if (ret < 0)
+ return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n");
+
+ chan_idx++;
+ }
+
+ return chan_idx;
+}
+
static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
struct stm32_dfsdm_channel *d_ch;
- int ret;
+ bool legacy = false;
+ int num_ch, ret;
+
+ /* If st,adc-channels is defined, the legacy binding is used. Else assume the generic binding. */
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels");
+ if (num_ch == 1)
+ legacy = true;
ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
if (!ch)
return -ENOMEM;
- ch->scan_index = 0;
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ if (legacy)
+ ret = stm32_dfsdm_chan_init(indio_dev, ch);
+ else
+ ret = stm32_dfsdm_generic_chan_init(indio_dev, ch);
- ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
if (ret < 0) {
dev_err(&indio_dev->dev, "Channels init failed\n");
return ret;
@@ -1420,9 +1625,6 @@ static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
adc->spi_freq = adc->dfsdm->spi_master_freq;
- indio_dev->num_channels = 1;
- indio_dev->channels = ch;
-
return stm32_dfsdm_dma_request(dev, indio_dev);
}
@@ -1430,43 +1632,61 @@ static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- int num_ch;
- int ret, chan_idx;
+ int num_ch, ret;
+ bool legacy = false;
adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
ret = stm32_dfsdm_compute_all_osrs(indio_dev, adc->oversamp);
if (ret < 0)
return ret;
- num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
- "st,adc-channels");
- if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
- dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
- return num_ch < 0 ? num_ch : -EINVAL;
+ num_ch = device_get_child_node_count(&indio_dev->dev);
+ if (!num_ch) {
+ /* No channel nodes found. Assume legacy binding */
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels");
+ if (num_ch < 0) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch;
+ }
+
+ legacy = true;
}
- /* Bind to SD modulator IIO device */
- adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
- if (IS_ERR(adc->hwc))
- return -EPROBE_DEFER;
+ if (num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Number of channel [%d] exceeds [%d]\n",
+ num_ch, adc->dfsdm->num_chs);
+ return -EINVAL;
+ }
+ indio_dev->num_channels = num_ch;
- ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
- GFP_KERNEL);
- if (!ch)
- return -ENOMEM;
+ if (legacy) {
+ /* Bind to SD modulator IIO device. */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return dev_err_probe(&indio_dev->dev, -EPROBE_DEFER,
+ "waiting for SD modulator\n");
+ } else {
+ /* Generic binding. SD modulator IIO device not used. Use SD modulator backend. */
+ adc->hwc = NULL;
- for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
- ch[chan_idx].scan_index = chan_idx;
- ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Channels init failed\n");
- return ret;
- }
+ adc->backend = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*adc->backend),
+ GFP_KERNEL);
+ if (!adc->backend)
+ return -ENOMEM;
}
- indio_dev->num_channels = num_ch;
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
indio_dev->channels = ch;
+ if (legacy)
+ ret = stm32_dfsdm_chan_init(indio_dev, ch);
+ else
+ ret = stm32_dfsdm_generic_chan_init(indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
init_completion(&adc->completion);
/* Optionally request DMA */
@@ -1677,3 +1897,4 @@ module_platform_driver(stm32_dfsdm_adc_driver);
MODULE_DESCRIPTION("STM32 sigma delta ADC");
MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BACKEND);
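The stm32-dfsdm-adc changes above hook the filter up to IIO backends: the postenable/predisable paths walk the adc->backend array until they hit a NULL entry, and read_raw() derives IIO_CHAN_INFO_SCALE/OFFSET from the backend-reported reference. The probe-time error paths also move to dev_err_probe(), which records the reason when -EPROBE_DEFER is returned while waiting for the SD modulator. As a minimal sketch of that array walk, here is a hypothetical helper pair (not part of the patch) built on the same iio_backend_enable()/iio_backend_disable() calls used above; unlike the patch, the enable helper unwinds on failure:

#include <linux/iio/backend.h>

/*
 * Enable every backend in a NULL-terminated array. On error, disable the
 * ones already enabled so the caller gets all-or-nothing behaviour.
 */
static int dfsdm_enable_backends(struct iio_backend **backends)
{
	int i = 0, ret;

	if (!backends)
		return 0;

	while (backends[i]) {
		ret = iio_backend_enable(backends[i]);
		if (ret < 0)
			goto err_disable;
		i++;
	}

	return 0;

err_disable:
	while (--i >= 0)
		iio_backend_disable(backends[i]);
	return ret;
}

/* Disable every backend in the array, mirroring the predisable path above. */
static void dfsdm_disable_backends(struct iio_backend **backends)
{
	int i = 0;

	if (!backends)
		return;

	while (backends[i])
		iio_backend_disable(backends[i++]);
}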
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index a05d978b8cb8..bef59fcc0d80 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -299,7 +299,7 @@ static const struct of_device_id stm32_dfsdm_of_match[] = {
.compatible = "st,stm32mp1-dfsdm",
.data = &stm32mp1_dfsdm_data,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index 8e56def1c9e5..b0add5a2eab5 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -347,7 +347,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
static const struct of_device_id stmpe_adc_ids[] = {
{ .compatible = "st,stmpe-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index b11ce555ba3b..e2dbd070c7c4 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -211,8 +211,7 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
int ret = adc0832_adc_conversion(adc, scan_chan->channel,
@@ -310,7 +309,7 @@ static const struct of_device_id adc0832_dt_ids[] = {
{ .compatible = "ti,adc0832", },
{ .compatible = "ti,adc0834", },
{ .compatible = "ti,adc0838", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adc0832_dt_ids);
@@ -319,7 +318,7 @@ static const struct spi_device_id adc0832_id[] = {
{ "adc0832", adc0832 },
{ "adc0834", adc0834 },
{ "adc0838", adc0838 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc0832_id);
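Several drivers in this series, starting with ti-adc0832 above, replace open-coded for_each_set_bit() loops over active_scan_mask/masklength with the iio_for_each_active_channel() helper. A hedged, self-contained sketch of a trigger handler using it; my_read_channel() and the 8-channel buffer sizing are invented for illustration:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

/* Placeholder for the device-specific single-channel conversion. */
static u16 my_read_channel(struct iio_dev *indio_dev, unsigned int chan)
{
	return 0;
}

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* Up to 8 u16 samples plus room for an aligned s64 timestamp. */
	u16 buf[8 + sizeof(s64) / sizeof(u16)] __aligned(8);
	unsigned int scan_index, j = 0;

	/* Visit only the channels enabled in the current scan mask. */
	iio_for_each_active_channel(indio_dev, scan_index)
		buf[j++] = my_read_channel(indio_dev, scan_index);

	iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}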
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 1f6e53832e06..bf98f9bf942a 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -166,8 +166,7 @@ static int adc084s021_buffer_preenable(struct iio_dev *indio_dev)
int scan_index;
int i = 0;
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *channel =
&indio_dev->channels[scan_index];
adc->tx_buf[i++] = channel->channel << 3;
@@ -243,13 +242,13 @@ static int adc084s021_probe(struct spi_device *spi)
static const struct of_device_id adc084s021_of_match[] = {
{ .compatible = "ti,adc084s021", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, adc084s021_of_match);
static const struct spi_device_id adc084s021_id[] = {
{ ADC084S021_DRIVER_NAME, 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc084s021_id);
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index c0a72d72f3a9..7f065f457b36 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -344,8 +344,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
@@ -520,7 +519,7 @@ static const struct of_device_id adc12138_dt_ids[] = {
{ .compatible = "ti,adc12130", },
{ .compatible = "ti,adc12132", },
{ .compatible = "ti,adc12138", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adc12138_dt_ids);
@@ -528,7 +527,7 @@ static const struct spi_device_id adc12138_id[] = {
{ "adc12130", adc12130 },
{ "adc12132", adc12132 },
{ "adc12138", adc12138 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc12138_id);
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f7c78d0dd449..474e733fb8e0 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -226,14 +226,14 @@ static int ti_adc_probe(struct spi_device *spi)
static const struct of_device_id ti_adc_dt_ids[] = {
{ .compatible = "ti,adc141s626", },
{ .compatible = "ti,adc161s626", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
static const struct spi_device_id ti_adc_id[] = {
- {"adc141s626", TI_ADC141S626},
- {"adc161s626", TI_ADC161S626},
- {},
+ { "adc141s626", TI_ADC141S626 },
+ { "adc161s626", TI_ADC161S626 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ti_adc_id);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index d3363d02f292..6d1bc9659946 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -456,7 +456,7 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
chan = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1015_get_adc_result(data, chan, &res);
if (ret < 0) {
mutex_unlock(&data->lock);
@@ -1173,7 +1173,7 @@ static const struct i2c_device_id ads1015_id[] = {
{ "ads1015", (kernel_ulong_t)&ads1015_data },
{ "ads1115", (kernel_ulong_t)&ads1115_data },
{ "tla2024", (kernel_ulong_t)&tla2024_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
@@ -1181,7 +1181,7 @@ static const struct of_device_id ads1015_of_match[] = {
{ .compatible = "ti,ads1015", .data = &ads1015_data },
{ .compatible = "ti,ads1115", .data = &ads1115_data },
{ .compatible = "ti,tla2024", .data = &tla2024_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads1015_of_match);
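Alongside the loop helper, drivers that only need the first enabled channel (ti-ads1015 above, ti-ads1119 and vf610 further down) now go through iio_get_masklength() instead of dereferencing indio_dev->masklength directly, ahead of that field being made private to the IIO core. A minimal hedged sketch of the same pattern:

#include <linux/bitmap.h>
#include <linux/iio/iio.h>

/* Return the scan index of the first enabled channel. */
static unsigned int my_first_active_channel(struct iio_dev *indio_dev)
{
	return find_first_bit(indio_dev->active_scan_mask,
			      iio_get_masklength(indio_dev));
}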
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index d649980479e4..1c7606375149 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -435,7 +435,7 @@ static int ads1119_triggered_buffer_preenable(struct iio_dev *indio_dev)
int ret;
index = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1119_set_conv_mode(st, true);
if (ret)
@@ -508,7 +508,7 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
if (!iio_trigger_using_own(indio_dev)) {
index = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1119_poll_data_ready(st, &indio_dev->channels[index]);
if (ret) {
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 4ca62121f0d1..14941f384dad 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -279,8 +279,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
int scan_index, j = 0;
int ret;
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
ret = ads124s_write_reg(indio_dev, ADS124S08_INPUT_MUX,
scan_index);
if (ret)
@@ -358,7 +357,7 @@ MODULE_DEVICE_TABLE(spi, ads124s_id);
static const struct of_device_id ads124s_of_table[] = {
{ .compatible = "ti,ads124s06" },
{ .compatible = "ti,ads124s08" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ads124s_of_table);
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 1d1eaba3d6d1..13cb32125eef 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -502,8 +502,7 @@ static void ads1298_rdata_complete(void *context)
}
/* Demux the channel data into our bounce buffer */
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
const u8 *data = priv->rx_buffer + scan_chan->address;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 96dd9366f8ff..91a427eb0882 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -637,7 +637,7 @@ static irqreturn_t ads131e08_trigger_handler(int irq, void *private)
if (ret)
goto out;
- for_each_set_bit(chn, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, chn) {
src = st->rx_buf + ADS131E08_NUM_STATUS_BYTES + chn * num_bytes;
dest = st->tmp_buf.data + i * ADS131E08_NUM_STORAGE_BYTES;
@@ -918,7 +918,7 @@ static const struct of_device_id ads131e08_of_match[] = {
.data = &ads131e08_info_tbl[ads131e06], },
{ .compatible = "ti,ads131e08",
.data = &ads131e08_info_tbl[ads131e08], },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads131e08_of_match);
@@ -926,7 +926,7 @@ static const struct spi_device_id ads131e08_ids[] = {
{ "ads131e04", (kernel_ulong_t)&ads131e08_info_tbl[ads131e04] },
{ "ads131e06", (kernel_ulong_t)&ads131e08_info_tbl[ads131e06] },
{ "ads131e08", (kernel_ulong_t)&ads131e08_info_tbl[ads131e08] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ads131e08_ids);
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index 4da78302359b..66b54c0d75aa 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -448,13 +448,13 @@ static int ads7924_probe(struct i2c_client *client)
static const struct i2c_device_id ads7924_id[] = {
{ "ads7924" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ads7924_id);
static const struct of_device_id ads7924_of_match[] = {
{ .compatible = "ti,ads7924", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads7924_of_match);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 263fc3a1b87e..af28672aa803 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -705,7 +705,7 @@ static const struct of_device_id ads7950_of_table[] = {
{ .compatible = "ti,ads7959", .data = &ti_ads7950_chip_info[TI_ADS7959] },
{ .compatible = "ti,ads7960", .data = &ti_ads7950_chip_info[TI_ADS7960] },
{ .compatible = "ti,ads7961", .data = &ti_ads7950_chip_info[TI_ADS7961] },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ads7950_of_table);
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index bbd85cb47f81..3bec8a2e61ab 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -175,7 +175,7 @@ static int ads8344_probe(struct spi_device *spi)
static const struct of_device_id ads8344_of_match[] = {
{ .compatible = "ti,ads8344", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads8344_of_match);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 7a79f0cebfbf..9b1814f1965a 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -384,9 +384,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
int i, j = 0;
- for (i = 0; i < indio_dev->masklength; i++) {
- if (!test_bit(i, indio_dev->active_scan_mask))
- continue;
+ iio_for_each_active_channel(indio_dev, i) {
buffer[j] = ads8688_read(indio_dev, i);
j++;
}
@@ -454,9 +452,9 @@ static int ads8688_probe(struct spi_device *spi)
}
static const struct spi_device_id ads8688_id[] = {
- {"ads8684", ID_ADS8684},
- {"ads8688", ID_ADS8688},
- {}
+ { "ads8684", ID_ADS8684 },
+ { "ads8688", ID_ADS8688 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ads8688_id);
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index 84ba5c4a0eea..169e3591320b 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -360,7 +360,7 @@ static int lmp92064_adc_probe(struct spi_device *spi)
static const struct spi_device_id lmp92064_id_table[] = {
{ "lmp92064" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, lmp92064_id_table);
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 30f629a553a1..08de997584fd 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -237,14 +237,14 @@ static void tlc4541_remove(struct spi_device *spi)
static const struct of_device_id tlc4541_dt_ids[] = {
{ .compatible = "ti,tlc3541", },
{ .compatible = "ti,tlc4541", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, tlc4541_dt_ids);
static const struct spi_device_id tlc4541_id[] = {
- {"tlc3541", TLC3541},
- {"tlc4541", TLC4541},
- {}
+ { "tlc3541", TLC3541 },
+ { "tlc4541", TLC4541 },
+ { }
};
MODULE_DEVICE_TABLE(spi, tlc4541_id);
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index edcef8f11522..311d97001249 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -141,7 +142,7 @@ enum tsc2046_state {
struct tsc2046_adc_priv {
struct spi_device *spi;
const struct tsc2046_adc_dcfg *dcfg;
- struct regulator *vref_reg;
+ bool internal_vref;
struct iio_trigger *trig;
struct hrtimer trig_timer;
@@ -257,7 +258,7 @@ static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx,
case TI_TSC2046_ADDR_VBAT:
case TI_TSC2046_ADDR_TEMP0:
pd |= TI_TSC2046_SER;
- if (!priv->vref_reg)
+ if (priv->internal_vref)
pd |= TI_TSC2046_PD1_VREF_ON;
}
@@ -273,7 +274,6 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
u32 *effective_speed_hz)
{
struct tsc2046_adc_ch_cfg *ch = &priv->ch_cfg[ch_idx];
- struct tsc2046_adc_atom *rx_buf, *tx_buf;
unsigned int val, val_normalized = 0;
int ret, i, count_skip = 0, max_count;
struct spi_transfer xfer;
@@ -287,18 +287,20 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
max_count = 1;
}
- if (sizeof(*tx_buf) * max_count > PAGE_SIZE)
+ if (sizeof(struct tsc2046_adc_atom) * max_count > PAGE_SIZE)
return -ENOSPC;
- tx_buf = kcalloc(max_count, sizeof(*tx_buf), GFP_KERNEL);
+ struct tsc2046_adc_atom *tx_buf __free(kfree) = kcalloc(max_count,
+ sizeof(*tx_buf),
+ GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
- rx_buf = kcalloc(max_count, sizeof(*rx_buf), GFP_KERNEL);
- if (!rx_buf) {
- ret = -ENOMEM;
- goto free_tx;
- }
+ struct tsc2046_adc_atom *rx_buf __free(kfree) = kcalloc(max_count,
+ sizeof(*rx_buf),
+ GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
/*
* Do not enable automatic power down on working samples. Otherwise the
@@ -326,7 +328,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
if (ret) {
dev_err_ratelimited(&priv->spi->dev, "SPI transfer failed %pe\n",
ERR_PTR(ret));
- goto free_bufs;
+ return ret;
}
if (effective_speed_hz)
@@ -337,14 +339,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
val_normalized += val;
}
- ret = DIV_ROUND_UP(val_normalized, max_count - count_skip);
-
-free_bufs:
- kfree(rx_buf);
-free_tx:
- kfree(tx_buf);
-
- return ret;
+ return DIV_ROUND_UP(val_normalized, max_count - count_skip);
}
static size_t tsc2046_adc_group_set_layout(struct tsc2046_adc_priv *priv,
@@ -746,49 +741,6 @@ static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv)
}
}
-static void tsc2046_adc_regulator_disable(void *data)
-{
- struct tsc2046_adc_priv *priv = data;
-
- regulator_disable(priv->vref_reg);
-}
-
-static int tsc2046_adc_configure_regulator(struct tsc2046_adc_priv *priv)
-{
- struct device *dev = &priv->spi->dev;
- int ret;
-
- priv->vref_reg = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(priv->vref_reg)) {
- /* If regulator exists but can't be get, return an error */
- if (PTR_ERR(priv->vref_reg) != -ENODEV)
- return PTR_ERR(priv->vref_reg);
- priv->vref_reg = NULL;
- }
- if (!priv->vref_reg) {
- /* Use internal reference */
- priv->vref_mv = TI_TSC2046_INT_VREF;
- return 0;
- }
-
- ret = regulator_enable(priv->vref_reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, tsc2046_adc_regulator_disable,
- priv);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(priv->vref_reg);
- if (ret < 0)
- return ret;
-
- priv->vref_mv = ret / MILLI;
-
- return 0;
-}
-
static int tsc2046_adc_probe(struct spi_device *spi)
{
const struct tsc2046_adc_dcfg *dcfg;
@@ -830,10 +782,13 @@ static int tsc2046_adc_probe(struct spi_device *spi)
indio_dev->num_channels = dcfg->num_channels;
indio_dev->info = &tsc2046_adc_info;
- ret = tsc2046_adc_configure_regulator(priv);
- if (ret)
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
return ret;
+ priv->internal_vref = ret == -ENODEV;
+ priv->vref_mv = priv->internal_vref ? TI_TSC2046_INT_VREF : ret / MILLI;
+
tsc2046_adc_parse_fwnode(priv);
ret = tsc2046_adc_setup_spi_msg(priv);
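The ti-tsc2046 rework above leans on the scope-based cleanup support from <linux/cleanup.h>: the tx/rx buffers are declared with __free(kfree), so they are released automatically on every return path and the goto-based unwinding disappears. The probe path likewise swaps the hand-rolled vref regulator handling for devm_regulator_get_enable_read_voltage(), which returns the supply voltage in microvolts or -ENODEV when no "vref" supply is described, hence the ret / MILLI conversion and the internal-reference fallback. A minimal hedged sketch of the __free() pattern outside the driver:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Allocate a scratch buffer that is kfree()d automatically when it goes
 * out of scope, so early returns need no explicit cleanup labels.
 */
static int fill_and_sum(size_t n, int *sum)
{
	int *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);
	size_t i;

	if (!buf)
		return -ENOMEM;

	*sum = 0;
	for (i = 0; i < n; i++) {
		buf[i] = i;
		*sum += buf[i];
	}

	return 0;	/* buf is released here by __free(kfree) */
}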
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index e4548df3f8fb..5afd2feb8c3d 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -752,7 +752,7 @@ static int vf610_adc_buffer_postenable(struct iio_dev *indio_dev)
writel(val, info->regs + VF610_REG_ADC_GC);
channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
val = VF610_ADC_ADCHC(channel);
val |= VF610_ADC_AIEN;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index f051358d6b50..ebc583b07e0c 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1275,7 +1275,6 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
struct ams *ams = iio_priv(indio_dev);
struct iio_chan_spec *ams_channels, *dev_channels;
struct device *dev = indio_dev->dev.parent;
- struct fwnode_handle *child = NULL;
struct fwnode_handle *fwnode = dev_fwnode(dev);
size_t ams_size;
int ret, ch_cnt = 0, i, rising_off, falling_off;
@@ -1297,16 +1296,12 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
num_channels += ret;
}
- fwnode_for_each_child_node(fwnode, child) {
- if (fwnode_device_is_available(child)) {
- ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
- if (ret < 0) {
- fwnode_handle_put(child);
- return ret;
- }
+ device_for_each_child_node_scoped(dev, child) {
+ ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
+ if (ret < 0)
+ return ret;
- num_channels += ret;
- }
+ num_channels += ret;
}
for (i = 0; i < num_channels; i++) {
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 564c0cad0fc7..cfbfcaefec0f 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -628,7 +628,7 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
size_t n;
void *data;
- n = bitmap_weight(mask, indio_dev->masklength);
+ n = bitmap_weight(mask, iio_get_masklength(indio_dev));
data = devm_krealloc_array(indio_dev->dev.parent, xadc->data,
n, sizeof(*xadc->data), GFP_KERNEL);
@@ -681,8 +681,7 @@ static irqreturn_t xadc_trigger_handler(int irq, void *p)
goto out;
j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
chan = xadc_scan_index_to_channel(i);
xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
j++;
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4c12b7a94af5..4befc9f55201 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -77,7 +77,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
}
cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
- cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength,
+ cb_buff->buffer.scan_mask = bitmap_zalloc(iio_get_masklength(cb_buff->indio_dev),
GFP_KERNEL);
if (cb_buff->buffer.scan_mask == NULL) {
ret = -ENOMEM;
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 647f417a045e..dbde1443d6ed 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -248,7 +248,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
iio_dma_buffer_queue_wake(queue);
dma_fence_end_signalling(cookie);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_block_list_abort() - Indicate that a list block has been
@@ -287,7 +287,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
iio_dma_buffer_queue_wake(queue);
dma_fence_end_signalling(cookie);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, IIO_DMA_BUFFER);
static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
@@ -420,7 +420,7 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, IIO_DMA_BUFFER);
static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
@@ -506,7 +506,7 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_disable() - Disable DMA buffer
@@ -530,7 +530,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, IIO_DMA_BUFFER);
static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
struct iio_dma_buffer_block *block)
@@ -636,7 +636,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
{
return iio_dma_buffer_io(buffer, n, user_buffer, false);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_write() - DMA buffer write callback
@@ -653,7 +653,7 @@ int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
return iio_dma_buffer_io(buffer, n,
(__force __user char *)user_buffer, true);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_usage() - DMA buffer data_available and
@@ -696,7 +696,7 @@ size_t iio_dma_buffer_usage(struct iio_buffer *buf)
return data_available;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_usage);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, IIO_DMA_BUFFER);
struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
@@ -723,7 +723,7 @@ iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
return block;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, IIO_DMA_BUFFER);
void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
struct iio_dma_buffer_block *block)
@@ -731,7 +731,7 @@ void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
block->state = IIO_BLOCK_STATE_DEAD;
iio_buffer_block_put_atomic(block);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, IIO_DMA_BUFFER);
static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
{
@@ -784,7 +784,7 @@ out_end_signalling:
return ret;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, IIO_DMA_BUFFER);
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
{
@@ -792,7 +792,7 @@ void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
mutex_lock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, IIO_DMA_BUFFER);
void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
{
@@ -800,7 +800,7 @@ void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
mutex_unlock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
@@ -816,7 +816,7 @@ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_set_length - DMA buffer set_length callback
@@ -836,7 +836,7 @@ int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_init() - Initialize DMA buffer queue
@@ -864,7 +864,7 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_exit() - Cleanup DMA buffer queue
@@ -882,7 +882,7 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
mutex_unlock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_release() - Release final buffer resources
@@ -896,7 +896,7 @@ void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
mutex_destroy(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, IIO_DMA_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 426cc614587a..19af1caf14cd 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -350,3 +350,4 @@ EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_DMA_BUFFER);
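The industrialio-buffer-dma exports above all move into the IIO_DMA_BUFFER symbol namespace, and industrialio-buffer-dmaengine imports it as shown. Any other module calling these helpers needs the same import, otherwise modpost flags the missing namespace at build time. A hedged consumer-side sketch (my_setup() is illustrative):

#include <linux/module.h>
#include <linux/iio/buffer-dma.h>

MODULE_IMPORT_NS(IIO_DMA_BUFFER);

/* Forward a bytes-per-datum update to the namespaced DMA buffer helper. */
static int my_setup(struct iio_dma_buffer_queue *queue, size_t bpd)
{
	return iio_dma_buffer_set_bytes_per_datum(&queue->buffer, bpd);
}

MODULE_DESCRIPTION("IIO_DMA_BUFFER namespace consumer sketch");
MODULE_LICENSE("GPL");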
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index fb58f599a80b..526b2a8d725d 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -52,6 +52,7 @@ static const struct iio_buffer_access_funcs iio_hw_buf_access = {
static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
{
+ unsigned int mask_longs = BITS_TO_LONGS(iio_get_masklength(indio_dev));
struct hw_consumer_buffer *buf;
list_for_each_entry(buf, &hwc->buffers, head) {
@@ -59,8 +60,7 @@ static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
return buf;
}
- buf = kzalloc(struct_size(buf, scan_mask, BITS_TO_LONGS(indio_dev->masklength)),
- GFP_KERNEL);
+ buf = kzalloc(struct_size(buf, scan_mask, mask_longs), GFP_KERNEL);
if (!buf)
return NULL;
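The hw-consumer hunk keeps the struct_size() allocation for the flexible scan_mask array but sizes it from iio_get_masklength() computed up front. A generic hedged sketch of sizing a flexible-array member this way (demo types only):

#include <linux/bitops.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_masked {
	unsigned int nlongs;
	unsigned long mask[];
};

/* struct_size() checks the header + flexible-array arithmetic for overflow. */
static struct demo_masked *demo_masked_alloc(unsigned int nbits)
{
	unsigned int nlongs = BITS_TO_LONGS(nbits);
	struct demo_masked *obj;

	obj = kzalloc(struct_size(obj, mask, nlongs), GFP_KERNEL);
	if (obj)
		obj->nlongs = nlongs;

	return obj;
}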
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index f959252a4fe6..b2c547ac8d34 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -12,7 +12,7 @@
#define BME680_REG_TEMP_MSB 0x22
#define BME680_REG_PRESS_MSB 0x1F
-#define BM6880_REG_HUMIDITY_MSB 0x25
+#define BME680_REG_HUMIDITY_MSB 0x25
#define BME680_REG_GAS_MSB 0x2A
#define BME680_REG_GAS_R_LSB 0x2B
#define BME680_GAS_STAB_BIT BIT(4)
@@ -39,14 +39,12 @@
#define BME680_HUM_REG_SHIFT_VAL 4
#define BME680_BIT_H1_DATA_MASK GENMASK(3, 0)
-#define BME680_REG_RES_HEAT_RANGE 0x02
#define BME680_RHRANGE_MASK GENMASK(5, 4)
#define BME680_REG_RES_HEAT_VAL 0x00
-#define BME680_REG_RANGE_SW_ERR 0x04
#define BME680_RSERROR_MASK GENMASK(7, 4)
#define BME680_REG_RES_HEAT_0 0x5A
#define BME680_REG_GAS_WAIT_0 0x64
-#define BME680_ADC_GAS_RES_SHIFT 6
+#define BME680_ADC_GAS_RES GENMASK(15, 6)
#define BME680_AMB_TEMP 25
#define BME680_REG_CTRL_GAS_1 0x71
@@ -58,33 +56,24 @@
#define BME680_GAS_MEAS_BIT BIT(6)
#define BME680_MEAS_BIT BIT(5)
+#define BME680_TEMP_NUM_BYTES 3
+#define BME680_PRESS_NUM_BYTES 3
+#define BME680_HUMID_NUM_BYTES 2
+#define BME680_GAS_NUM_BYTES 2
+
+#define BME680_MEAS_TRIM_MASK GENMASK(24, 4)
+
+#define BME680_STARTUP_TIME_US 5000
+
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A
-#define BME680_T3_REG 0x8C
-#define BME680_P1_LSB_REG 0x8E
-#define BME680_P2_LSB_REG 0x90
-#define BME680_P3_REG 0x92
-#define BME680_P4_LSB_REG 0x94
-#define BME680_P5_LSB_REG 0x96
-#define BME680_P7_REG 0x98
-#define BME680_P6_REG 0x99
-#define BME680_P8_LSB_REG 0x9C
-#define BME680_P9_LSB_REG 0x9E
-#define BME680_P10_REG 0xA0
-#define BME680_H2_LSB_REG 0xE2
#define BME680_H2_MSB_REG 0xE1
-#define BME680_H1_MSB_REG 0xE3
-#define BME680_H1_LSB_REG 0xE2
-#define BME680_H3_REG 0xE4
-#define BME680_H4_REG 0xE5
-#define BME680_H5_REG 0xE6
-#define BME680_H6_REG 0xE7
-#define BME680_H7_REG 0xE8
-#define BME680_T1_LSB_REG 0xE9
-#define BME680_GH2_LSB_REG 0xEB
-#define BME680_GH1_REG 0xED
#define BME680_GH3_REG 0xEE
+#define BME680_CALIB_RANGE_1_LEN 23
+#define BME680_CALIB_RANGE_2_LEN 14
+#define BME680_CALIB_RANGE_3_LEN 5
+
extern const struct regmap_config bme680_regmap_config;
int bme680_core_probe(struct device *dev, struct regmap *regmap,
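The bme680 rework that follows replaces dozens of single-register reads with three regmap_bulk_read() calls into the union buffers added to struct bme680_data, then decodes coefficients with get_unaligned_le16() and FIELD_GET() using the index enums added below in bme680_core.c. A hedged sketch of that decode step over a raw blob (mask and offsets are illustrative, not the real register map):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/unaligned.h>

#define DEMO_RHRANGE_MASK	GENMASK(5, 4)	/* stands in for BME680_RHRANGE_MASK */

struct demo_calib {
	s16 par_t2;
	u8 res_heat_range;
};

/*
 * Pull little-endian and bit-packed coefficients out of one bulk-read buffer
 * instead of issuing a bus transaction per register.
 */
static void demo_decode_calib(const u8 *buf, struct demo_calib *c)
{
	c->par_t2 = get_unaligned_le16(&buf[0]);
	c->res_heat_range = FIELD_GET(DEMO_RHRANGE_MASK, buf[2]);
}

Further down, bme680_read_raw()/bme680_write_raw() also take the new data->lock with guard(mutex)(&data->lock) from <linux/cleanup.h>, so the lock is dropped automatically on every return path.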
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 500f56834b01..5d2e750ca2b9 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -8,18 +8,64 @@
* Datasheet:
* https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME680-DS001-00.pdf
*/
-#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/module.h>
#include <linux/regmap.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#include "bme680.h"
+/* 1st set of calibration data */
+enum {
+ /* Temperature calib indexes */
+ T2_LSB = 0,
+ T3 = 2,
+ /* Pressure calib indexes */
+ P1_LSB = 4,
+ P2_LSB = 6,
+ P3 = 8,
+ P4_LSB = 10,
+ P5_LSB = 12,
+ P7 = 14,
+ P6 = 15,
+ P8_LSB = 18,
+ P9_LSB = 20,
+ P10 = 22,
+};
+
+/* 2nd set of calibration data */
+enum {
+ /* Humidity calib indexes */
+ H2_MSB = 0,
+ H1_LSB = 1,
+ H3 = 3,
+ H4 = 4,
+ H5 = 5,
+ H6 = 6,
+ H7 = 7,
+ /* Stray T1 calib index */
+ T1_LSB = 8,
+ /* Gas heater calib indexes */
+ GH2_LSB = 10,
+ GH1 = 12,
+ GH3 = 13,
+};
+
+/* 3rd set of calibration data */
+enum {
+ RES_HEAT_VAL = 0,
+ RES_HEAT_RANGE = 2,
+ RANGE_SW_ERR = 4,
+};
+
struct bme680_calib {
u16 par_t1;
s16 par_t2;
@@ -52,16 +98,21 @@ struct bme680_calib {
struct bme680_data {
struct regmap *regmap;
struct bme680_calib bme680;
+ struct mutex lock; /* Protect multiple serial R/W ops to device. */
u8 oversampling_temp;
u8 oversampling_press;
u8 oversampling_humid;
u16 heater_dur;
u16 heater_temp;
- /*
- * Carryover value from temperature conversion, used in pressure
- * and humidity compensation calculations.
- */
- s32 t_fine;
+
+ union {
+ u8 buf[3];
+ unsigned int check;
+ __be16 be16;
+ u8 bme680_cal_buf_1[BME680_CALIB_RANGE_1_LEN];
+ u8 bme680_cal_buf_2[BME680_CALIB_RANGE_2_LEN];
+ u8 bme680_cal_buf_3[BME680_CALIB_RANGE_3_LEN];
+ };
};
static const struct regmap_range bme680_volatile_ranges[] = {
@@ -110,217 +161,98 @@ static int bme680_read_calib(struct bme680_data *data,
struct bme680_calib *calib)
{
struct device *dev = regmap_get_device(data->regmap);
- unsigned int tmp, tmp_msb, tmp_lsb;
+ unsigned int tmp_msb, tmp_lsb;
int ret;
- __le16 buf;
-
- /* Temperature related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
- return ret;
- }
- calib->par_t1 = le16_to_cpu(buf);
ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
- &buf, sizeof(buf));
+ data->bme680_cal_buf_1,
+ sizeof(data->bme680_cal_buf_1));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
+ dev_err(dev, "failed to read 1st set of calib data;\n");
return ret;
}
- calib->par_t2 = le16_to_cpu(buf);
- ret = regmap_read(data->regmap, BME680_T3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_T3_REG\n");
- return ret;
- }
- calib->par_t3 = tmp;
-
- /* Pressure related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
- return ret;
- }
- calib->par_p1 = le16_to_cpu(buf);
-
- ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
- return ret;
- }
- calib->par_p2 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_P3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P3_REG\n");
- return ret;
- }
- calib->par_p3 = tmp;
-
- ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
- return ret;
- }
- calib->par_p4 = le16_to_cpu(buf);
+ calib->par_t2 = get_unaligned_le16(&data->bme680_cal_buf_1[T2_LSB]);
+ calib->par_t3 = data->bme680_cal_buf_1[T3];
+ calib->par_p1 = get_unaligned_le16(&data->bme680_cal_buf_1[P1_LSB]);
+ calib->par_p2 = get_unaligned_le16(&data->bme680_cal_buf_1[P2_LSB]);
+ calib->par_p3 = data->bme680_cal_buf_1[P3];
+ calib->par_p4 = get_unaligned_le16(&data->bme680_cal_buf_1[P4_LSB]);
+ calib->par_p5 = get_unaligned_le16(&data->bme680_cal_buf_1[P5_LSB]);
+ calib->par_p7 = data->bme680_cal_buf_1[P7];
+ calib->par_p6 = data->bme680_cal_buf_1[P6];
+ calib->par_p8 = get_unaligned_le16(&data->bme680_cal_buf_1[P8_LSB]);
+ calib->par_p9 = get_unaligned_le16(&data->bme680_cal_buf_1[P9_LSB]);
+ calib->par_p10 = data->bme680_cal_buf_1[P10];
- ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
- &buf, sizeof(buf));
+ ret = regmap_bulk_read(data->regmap, BME680_H2_MSB_REG,
+ data->bme680_cal_buf_2,
+ sizeof(data->bme680_cal_buf_2));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
+ dev_err(dev, "failed to read 2nd set of calib data;\n");
return ret;
}
- calib->par_p5 = le16_to_cpu(buf);
- ret = regmap_read(data->regmap, BME680_P6_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P6_REG\n");
- return ret;
- }
- calib->par_p6 = tmp;
-
- ret = regmap_read(data->regmap, BME680_P7_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P7_REG\n");
- return ret;
- }
- calib->par_p7 = tmp;
-
- ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
- return ret;
- }
- calib->par_p8 = le16_to_cpu(buf);
-
- ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
- return ret;
- }
- calib->par_p9 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_P10_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P10_REG\n");
- return ret;
- }
- calib->par_p10 = tmp;
-
- /* Humidity related coefficients */
- ret = regmap_read(data->regmap, BME680_H1_MSB_REG, &tmp_msb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H1_MSB_REG\n");
- return ret;
- }
- ret = regmap_read(data->regmap, BME680_H1_LSB_REG, &tmp_lsb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H1_LSB_REG\n");
- return ret;
- }
+ tmp_lsb = data->bme680_cal_buf_2[H1_LSB];
+ tmp_msb = data->bme680_cal_buf_2[H1_LSB + 1];
calib->par_h1 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
(tmp_lsb & BME680_BIT_H1_DATA_MASK);
- ret = regmap_read(data->regmap, BME680_H2_MSB_REG, &tmp_msb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H2_MSB_REG\n");
- return ret;
- }
- ret = regmap_read(data->regmap, BME680_H2_LSB_REG, &tmp_lsb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H2_LSB_REG\n");
- return ret;
- }
+ tmp_msb = data->bme680_cal_buf_2[H2_MSB];
+ tmp_lsb = data->bme680_cal_buf_2[H2_MSB + 1];
calib->par_h2 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
(tmp_lsb >> BME680_HUM_REG_SHIFT_VAL);
- ret = regmap_read(data->regmap, BME680_H3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H3_REG\n");
- return ret;
- }
- calib->par_h3 = tmp;
-
- ret = regmap_read(data->regmap, BME680_H4_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H4_REG\n");
- return ret;
- }
- calib->par_h4 = tmp;
+ calib->par_h3 = data->bme680_cal_buf_2[H3];
+ calib->par_h4 = data->bme680_cal_buf_2[H4];
+ calib->par_h5 = data->bme680_cal_buf_2[H5];
+ calib->par_h6 = data->bme680_cal_buf_2[H6];
+ calib->par_h7 = data->bme680_cal_buf_2[H7];
+ calib->par_t1 = get_unaligned_le16(&data->bme680_cal_buf_2[T1_LSB]);
+ calib->par_gh2 = get_unaligned_le16(&data->bme680_cal_buf_2[GH2_LSB]);
+ calib->par_gh1 = data->bme680_cal_buf_2[GH1];
+ calib->par_gh3 = data->bme680_cal_buf_2[GH3];
- ret = regmap_read(data->regmap, BME680_H5_REG, &tmp);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_RES_HEAT_VAL,
+ data->bme680_cal_buf_3,
+ sizeof(data->bme680_cal_buf_3));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_H5_REG\n");
+ dev_err(dev, "failed to read 3rd set of calib data;\n");
return ret;
}
- calib->par_h5 = tmp;
- ret = regmap_read(data->regmap, BME680_H6_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H6_REG\n");
- return ret;
- }
- calib->par_h6 = tmp;
+ calib->res_heat_val = data->bme680_cal_buf_3[RES_HEAT_VAL];
- ret = regmap_read(data->regmap, BME680_H7_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H7_REG\n");
- return ret;
- }
- calib->par_h7 = tmp;
+ calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK,
+ data->bme680_cal_buf_3[RES_HEAT_RANGE]);
- /* Gas heater related coefficients */
- ret = regmap_read(data->regmap, BME680_GH1_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH1_REG\n");
- return ret;
- }
- calib->par_gh1 = tmp;
+ calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK,
+ data->bme680_cal_buf_3[RANGE_SW_ERR]);
- ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
- return ret;
- }
- calib->par_gh2 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_GH3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH3_REG\n");
- return ret;
- }
- calib->par_gh3 = tmp;
+ return 0;
+}
- /* Other coefficients */
- ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_RANGE, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read resistance heat range\n");
- return ret;
- }
- calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK, tmp);
+static int bme680_read_temp_adc(struct bme680_data *data, u32 *adc_temp)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_temp;
+ int ret;
- ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_VAL, &tmp);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ data->buf, BME680_TEMP_NUM_BYTES);
if (ret < 0) {
- dev_err(dev, "failed to read resistance heat value\n");
+ dev_err(dev, "failed to read temperature\n");
return ret;
}
- calib->res_heat_val = tmp;
- ret = regmap_read(data->regmap, BME680_REG_RANGE_SW_ERR, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read range software error\n");
- return ret;
+ value_temp = FIELD_GET(BME680_MEAS_TRIM_MASK,
+ get_unaligned_be24(data->buf));
+ if (value_temp == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading temperature skipped\n");
+ return -EINVAL;
}
- calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK, tmp);
+ *adc_temp = value_temp;
return 0;
}
@@ -332,25 +264,65 @@ static int bme680_read_calib(struct bme680_data *data,
* Returns temperature measurement in DegC, resolutions is 0.01 DegC. Therefore,
* output value of "3233" represents 32.33 DegC.
*/
-static s16 bme680_compensate_temp(struct bme680_data *data,
- s32 adc_temp)
+static s32 bme680_calc_t_fine(struct bme680_data *data, u32 adc_temp)
{
struct bme680_calib *calib = &data->bme680;
s64 var1, var2, var3;
- s16 calc_temp;
/* If the calibration is invalid, attempt to reload it */
if (!calib->par_t2)
bme680_read_calib(data, calib);
- var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
+ var1 = ((s32)adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
- data->t_fine = var2 + var3;
- calc_temp = (data->t_fine * 5 + 128) >> 8;
+ return var2 + var3; /* t_fine = var2 + var3 */
+}
- return calc_temp;
+static int bme680_get_t_fine(struct bme680_data *data, s32 *t_fine)
+{
+ u32 adc_temp;
+ int ret;
+
+ ret = bme680_read_temp_adc(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ *t_fine = bme680_calc_t_fine(data, adc_temp);
+
+ return 0;
+}
+
+static s16 bme680_compensate_temp(struct bme680_data *data,
+ u32 adc_temp)
+{
+ return (bme680_calc_t_fine(data, adc_temp) * 5 + 128) / 256;
+}
+
+static int bme680_read_press_adc(struct bme680_data *data, u32 *adc_press)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_press;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
+ data->buf, BME680_PRESS_NUM_BYTES);
+ if (ret < 0) {
+ dev_err(dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ value_press = FIELD_GET(BME680_MEAS_TRIM_MASK,
+ get_unaligned_be24(data->buf));
+ if (value_press == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading pressure skipped\n");
+ return -EINVAL;
+ }
+ *adc_press = value_press;
+
+ return 0;
}
/*
@@ -361,12 +333,12 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
* 97356 Pa = 973.56 hPa.
*/
static u32 bme680_compensate_press(struct bme680_data *data,
- u32 adc_press)
+ u32 adc_press, s32 t_fine)
{
struct bme680_calib *calib = &data->bme680;
s32 var1, var2, var3, press_comp;
- var1 = (data->t_fine >> 1) - 64000;
+ var1 = (t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
@@ -394,6 +366,30 @@ static u32 bme680_compensate_press(struct bme680_data *data,
return press_comp;
}
+static int bme680_read_humid_adc(struct bme680_data *data, u32 *adc_humidity)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_humidity;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_HUMIDITY_MSB,
+ &data->be16, BME680_HUMID_NUM_BYTES);
+ if (ret < 0) {
+ dev_err(dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ value_humidity = be16_to_cpu(data->be16);
+ if (value_humidity == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading humidity skipped\n");
+ return -EINVAL;
+ }
+ *adc_humidity = value_humidity;
+
+ return 0;
+}
+
/*
* Taken from Bosch BME680 API:
* https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L937
@@ -402,15 +398,15 @@ static u32 bme680_compensate_press(struct bme680_data *data,
* value of "43215" represents 43.215 %rH.
*/
static u32 bme680_compensate_humid(struct bme680_data *data,
- u16 adc_humid)
+ u16 adc_humid, s32 t_fine)
{
struct bme680_calib *calib = &data->bme680;
s32 var1, var2, var3, var4, var5, var6, temp_scaled, calc_hum;
- temp_scaled = (data->t_fine * 5 + 128) >> 8;
- var1 = (adc_humid - ((s32) ((s32) calib->par_h1 * 16))) -
- (((temp_scaled * (s32) calib->par_h3) / 100) >> 1);
- var2 = ((s32) calib->par_h2 *
+ temp_scaled = (t_fine * 5 + 128) >> 8;
+ var1 = (adc_humid - (((s32)calib->par_h1 * 16))) -
+ (((temp_scaled * calib->par_h3) / 100) >> 1);
+ var2 = (calib->par_h2 *
(((temp_scaled * calib->par_h4) / 100) +
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
@@ -442,7 +438,7 @@ static u32 bme680_compensate_gas(struct bme680_data *data, u16 gas_res_adc,
u32 calc_gas_res;
/* Look up table for the possible gas range values */
- const u32 lookupTable[16] = {2147483647u, 2147483647u,
+ static const u32 lookupTable[16] = {2147483647u, 2147483647u,
2147483647u, 2147483647u, 2147483647u,
2126008810u, 2147483647u, 2130303777u,
2147483647u, 2147483647u, 2143188679u,
@@ -540,7 +536,6 @@ static u8 bme680_oversampling_to_reg(u8 val)
static int bme680_wait_for_eoc(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
- unsigned int check;
int ret;
/*
* (Sum of oversampling ratios * time per oversampling) +
@@ -553,16 +548,16 @@ static int bme680_wait_for_eoc(struct bme680_data *data)
usleep_range(wait_eoc_us, wait_eoc_us + 100);
- ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check);
if (ret) {
dev_err(dev, "failed to read measurement status register.\n");
return ret;
}
- if (check & BME680_MEAS_BIT) {
+ if (data->check & BME680_MEAS_BIT) {
dev_err(dev, "Device measurement cycle incomplete.\n");
return -EBUSY;
}
- if (!(check & BME680_NEW_DATA_BIT)) {
+ if (!(data->check & BME680_NEW_DATA_BIT)) {
dev_err(dev, "No new data available from the device.\n");
return -ENODATA;
}
@@ -606,10 +601,12 @@ static int bme680_chip_config(struct bme680_data *data)
ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
BME680_OSRS_TEMP_MASK | BME680_OSRS_PRESS_MASK,
osrs);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to write ctrl_meas register\n");
+ return ret;
+ }
- return ret;
+ return 0;
}
static int bme680_gas_config(struct bme680_data *data)
@@ -618,6 +615,11 @@ static int bme680_gas_config(struct bme680_data *data)
int ret;
u8 heatr_res, heatr_dur;
+ /* Go to sleep */
+ ret = bme680_set_mode(data, false);
+ if (ret < 0)
+ return ret;
+
heatr_res = bme680_calc_heater_res(data, data->heater_temp);
/* set target heater temperature */
@@ -649,77 +651,35 @@ static int bme680_gas_config(struct bme680_data *data)
static int bme680_read_temp(struct bme680_data *data, int *val)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be32 tmp = 0;
- s32 adc_temp;
+ u32 adc_temp;
s16 comp_temp;
- /* set forced mode to trigger measurement */
- ret = bme680_set_mode(data, true);
- if (ret < 0)
- return ret;
-
- ret = bme680_wait_for_eoc(data);
+ ret = bme680_read_temp_adc(data, &adc_temp);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
- &tmp, 3);
- if (ret < 0) {
- dev_err(dev, "failed to read temperature\n");
- return ret;
- }
-
- adc_temp = be32_to_cpu(tmp) >> 12;
- if (adc_temp == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading temperature skipped\n");
- return -EINVAL;
- }
comp_temp = bme680_compensate_temp(data, adc_temp);
- /*
- * val might be NULL if we're called by the read_press/read_humid
- * routine which is called to get t_fine value used in
- * compensate_press/compensate_humid to get compensated
- * pressure/humidity readings.
- */
- if (val) {
- *val = comp_temp * 10; /* Centidegrees to millidegrees */
- return IIO_VAL_INT;
- }
-
- return ret;
+ *val = comp_temp * 10; /* Centidegrees to millidegrees */
+ return IIO_VAL_INT;
}
static int bme680_read_press(struct bme680_data *data,
int *val, int *val2)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be32 tmp = 0;
- s32 adc_press;
+ u32 adc_press;
+ s32 t_fine;
- /* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL);
- if (ret < 0)
+ ret = bme680_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
- &tmp, 3);
- if (ret < 0) {
- dev_err(dev, "failed to read pressure\n");
+ ret = bme680_read_press_adc(data, &adc_press);
+ if (ret)
return ret;
- }
-
- adc_press = be32_to_cpu(tmp) >> 12;
- if (adc_press == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading pressure skipped\n");
- return -EINVAL;
- }
- *val = bme680_compensate_press(data, adc_press);
+ *val = bme680_compensate_press(data, adc_press, t_fine);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@@ -727,31 +687,19 @@ static int bme680_read_press(struct bme680_data *data,
static int bme680_read_humid(struct bme680_data *data,
int *val, int *val2)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be16 tmp = 0;
- s32 adc_humidity;
- u32 comp_humidity;
+ u32 adc_humidity, comp_humidity;
+ s32 t_fine;
- /* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL);
- if (ret < 0)
+ ret = bme680_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB,
- &tmp, sizeof(tmp));
- if (ret < 0) {
- dev_err(dev, "failed to read humidity\n");
+ ret = bme680_read_humid_adc(data, &adc_humidity);
+ if (ret)
return ret;
- }
- adc_humidity = be16_to_cpu(tmp);
- if (adc_humidity == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading humidity skipped\n");
- return -EINVAL;
- }
- comp_humidity = bme680_compensate_humid(data, adc_humidity);
+ comp_humidity = bme680_compensate_humid(data, adc_humidity, t_fine);
*val = comp_humidity;
*val2 = 1000;
@@ -763,59 +711,37 @@ static int bme680_read_gas(struct bme680_data *data,
{
struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be16 tmp = 0;
- unsigned int check;
- u16 adc_gas_res;
+ u16 adc_gas_res, gas_regs_val;
u8 gas_range;
- /* Set heater settings */
- ret = bme680_gas_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set gas config\n");
- return ret;
- }
-
- /* set forced mode to trigger measurement */
- ret = bme680_set_mode(data, true);
- if (ret < 0)
- return ret;
-
- ret = bme680_wait_for_eoc(data);
- if (ret)
- return ret;
-
- ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
- if (check & BME680_GAS_MEAS_BIT) {
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check);
+ if (data->check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");
return -EBUSY;
}
- ret = regmap_read(data->regmap, BME680_REG_GAS_R_LSB, &check);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
+ &data->be16, BME680_GAS_NUM_BYTES);
if (ret < 0) {
- dev_err(dev, "failed to read gas_r_lsb register\n");
+ dev_err(dev, "failed to read gas resistance\n");
return ret;
}
+ gas_regs_val = be16_to_cpu(data->be16);
+ adc_gas_res = FIELD_GET(BME680_ADC_GAS_RES, gas_regs_val);
+
/*
* occurs if either the gas heating duration was insufficient
* to reach the target heater temperature or the target
* heater temperature was too high for the heater sink to
* reach.
*/
- if ((check & BME680_GAS_STAB_BIT) == 0) {
+ if ((gas_regs_val & BME680_GAS_STAB_BIT) == 0) {
dev_err(dev, "heater failed to reach the target temperature\n");
return -EINVAL;
}
- ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
- &tmp, sizeof(tmp));
- if (ret < 0) {
- dev_err(dev, "failed to read gas resistance\n");
- return ret;
- }
-
- gas_range = check & BME680_GAS_RANGE_MASK;
- adc_gas_res = be16_to_cpu(tmp) >> BME680_ADC_GAS_RES_SHIFT;
+ gas_range = FIELD_GET(BME680_GAS_RANGE_MASK, gas_regs_val);
*val = bme680_compensate_gas(data, adc_gas_res, gas_range);
return IIO_VAL_INT;
@@ -826,6 +752,18 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -871,6 +809,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
{
struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock);
+
if (val2 != 0)
return -EINVAL;
@@ -921,52 +861,19 @@ static const struct iio_info bme680_info = {
.attrs = &bme680_attribute_group,
};
-static const char *bme680_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
-
- return dev_name(dev);
-}
-
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name)
{
struct iio_dev *indio_dev;
struct bme680_data *data;
- unsigned int val;
int ret;
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(dev, "Failed to reset chip\n");
- return ret;
- }
-
- ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
- if (ret < 0) {
- dev_err(dev, "Error reading chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- if (!name && ACPI_HANDLE(dev))
- name = bme680_match_acpi_device(dev);
-
data = iio_priv(indio_dev);
+ mutex_init(&data->lock);
dev_set_drvdata(dev, indio_dev);
data->regmap = regmap;
indio_dev->name = name;
@@ -982,25 +889,39 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
data->heater_temp = 320; /* degree Celsius */
data->heater_dur = 150; /* milliseconds */
- ret = bme680_chip_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set chip_config data\n");
- return ret;
- }
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to reset chip\n");
- ret = bme680_gas_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set gas config data\n");
- return ret;
+ usleep_range(BME680_STARTUP_TIME_US, BME680_STARTUP_TIME_US + 1000);
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &data->check);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error reading chip ID\n");
+
+ if (data->check != BME680_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+ data->check, BME680_CHIP_ID_VAL);
+ return -ENODEV;
}
ret = bme680_read_calib(data, &data->bme680);
if (ret < 0) {
- dev_err(dev,
+ return dev_err_probe(dev, ret,
"failed to read calibration coefficients at probe\n");
- return ret;
}
+ ret = bme680_chip_config(data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to set chip_config data\n");
+
+ ret = bme680_gas_config(data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to set gas config data\n");
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(bme680_core_probe, IIO_BME680);
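Note on the reworked bme680 read path: triggering the forced-mode measurement and waiting for end of conversion now happen once in bme680_read_raw() under the new mutex, and each channel read only fetches its ADC value and compensates it with the shared t_fine term. A minimal sketch of the combined one-shot pressure path, using only the helper names introduced by this patch (illustration only, not additional driver code):

/* Sketch: one-shot pressure read as structured by this patch. */
static int bme680_oneshot_press_sketch(struct bme680_data *data,
				       int *val, int *val2)
{
	u32 adc_press;
	s32 t_fine;
	int ret;

	guard(mutex)(&data->lock);		/* serializes concurrent readers */

	ret = bme680_set_mode(data, true);	/* forced mode triggers one measurement */
	if (ret < 0)
		return ret;

	ret = bme680_wait_for_eoc(data);	/* wait for end of conversion */
	if (ret)
		return ret;

	ret = bme680_get_t_fine(data, &t_fine);	/* temperature term shared by P/H */
	if (ret)
		return ret;

	ret = bme680_read_press_adc(data, &adc_press);
	if (ret)
		return ret;

	*val = bme680_compensate_press(data, adc_press, t_fine);
	*val2 = 1000;
	return IIO_VAL_FRACTIONAL;
}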
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index 4404d42ae5ec..7c54bd17d4b0 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -100,7 +100,7 @@ static int bme680_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, &addr, 1, val, val_size);
}
-static struct regmap_bus bme680_regmap_bus = {
+static const struct regmap_bus bme680_regmap_bus = {
.write = bme680_regmap_spi_write,
.read = bme680_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
diff --git a/drivers/iio/chemical/sgp40.c b/drivers/iio/chemical/sgp40.c
index 7f0de14a1956..07d8ab830211 100644
--- a/drivers/iio/chemical/sgp40.c
+++ b/drivers/iio/chemical/sgp40.c
@@ -14,11 +14,16 @@
* 1) read raw logarithmic resistance value from sensor
* --> useful to pass it to the algorithm of the sensor vendor for
* measuring deteriorations and improvements of air quality.
+ * It can be read from the attribute in_resistance_raw.
*
- * 2) calculate an estimated absolute voc index (0 - 500 index points) for
- * measuring the air quality.
+ * 2) calculate an estimated absolute voc index (in_concentration_input
+ * with 0 - 500 index points) for measuring the air quality.
* For this purpose the value of the resistance for which the voc index
- * will be 250 can be set up using calibbias.
+ * will be 250 can be set up using in_resistance_calibbias (default 30000).
+ *
+ * The voc index is calculated as:
+ * x = (in_resistance_raw - in_resistance_calibbias) * 0.65
+ * in_concentration_input = 500 / (1 + e^x)
*
* Compensation values of relative humidity and temperature can be set up
* by writing to the out values of temp and humidityrelative.
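For illustration only, the logistic mapping documented in the comment above can be written out as follows (userspace-style sketch with floating point; the driver itself works in fixed-point integer math, and sgp40_voc_index_sketch is a made-up name):

#include <math.h>

/*
 * x = (in_resistance_raw - in_resistance_calibbias) * 0.65
 * in_concentration_input = 500 / (1 + e^x)
 */
static double sgp40_voc_index_sketch(double resistance_raw, double calibbias)
{
	double x = (resistance_raw - calibbias) * 0.65;

	return 500.0 / (1.0 + exp(x));
}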
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 6bfe5d6847e7..9fc71a73caa1 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -198,9 +198,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
return 0;
out = (s16 *)st->samples;
- for_each_set_bit(i,
- indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
*out = data[i];
out++;
}
@@ -587,7 +585,7 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
int ret;
/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
ret = cros_ec_sensors_cmd_read_u16(ec,
cros_ec_sensors_idx_to_reg(st, i),
data);
@@ -683,7 +681,7 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
return ret;
}
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
*data = st->resp->data.data[i];
data++;
}
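The same for_each_set_bit() to iio_for_each_active_channel() conversion recurs in the health, humidity and IMU hunks later in this series. For reference, the helper is assumed here to be a thin wrapper over the old open-coded loop, so there is no behaviour change:

/* Sketch of the assumed expansion of the new helper. */
#define iio_for_each_active_channel(indio_dev, chan)			\
	for_each_set_bit((chan), (indio_dev)->active_scan_mask,	\
			 iio_get_masklength(indio_dev))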
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 7190eaede7fb..ed15dcbf4cf6 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -158,7 +158,7 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
* To calculate the multiplier, we convert the sf into a char string and
* count the number of characters
*/
- sf = (u64)uHz * 0xFFFF;
+ sf = uHz * 0xFFFF;
do_div(sf, MICROHZ_PER_HZ);
mult = scnprintf(buf, sizeof(buf), "%llu", sf) - 1;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index a2596c2d3de3..1cfd7e2a622f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -371,6 +371,17 @@ config LTC2632
To compile this driver as a module, choose M here: the
module will be called ltc2632.
+config LTC2664
+ tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver"
+ depends on SPI
+ select REGMAP
+ help
+ Say yes here to build support for Analog Devices
+ LTC2664 and LTC2672 converters (DAC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ltc2664.
+
config M62332
tristate "Mitsubishi M62332 DAC driver"
depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 8432a81a19dc..2cf148f16306 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DS4424) += ds4424.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_LTC1660) += ltc1660.o
obj-$(CONFIG_LTC2632) += ltc2632.o
+obj-$(CONFIG_LTC2664) += ltc2664.o
obj-$(CONFIG_LTC2688) += ltc2688.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 4572d6f49275..953fcfa2110b 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -20,8 +20,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/platform_data/ad5449.h>
-
#define AD5449_MAX_CHANNELS 2
#define AD5449_MAX_VREFS 2
@@ -268,7 +266,6 @@ static const char *ad5449_vref_name(struct ad5449 *st, int n)
static int ad5449_spi_probe(struct spi_device *spi)
{
- struct ad5449_platform_data *pdata = spi->dev.platform_data;
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct ad5449 *st;
@@ -306,16 +303,8 @@ static int ad5449_spi_probe(struct spi_device *spi)
mutex_init(&st->lock);
if (st->chip_info->has_ctrl) {
- unsigned int ctrl = 0x00;
- if (pdata) {
- if (pdata->hardware_clear_to_midscale)
- ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE;
- ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET;
- st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED;
- } else {
- st->has_sdo = true;
- }
- ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl);
+ st->has_sdo = true;
+ ad5449_write(indio_dev, AD5449_CMD_CTRL, 0x0);
}
ret = iio_device_register(indio_dev);
diff --git a/drivers/iio/dac/ad9739a.c b/drivers/iio/dac/ad9739a.c
index f56eabe53723..615d1a196db3 100644
--- a/drivers/iio/dac/ad9739a.c
+++ b/drivers/iio/dac/ad9739a.c
@@ -145,7 +145,7 @@ static int ad9739a_buffer_postdisable(struct iio_dev *indio_dev)
struct ad9739a_state *st = iio_priv(indio_dev);
return iio_backend_data_source_set(st->back, 0,
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE);
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE);
}
static bool ad9739a_reg_accessible(struct device *dev, unsigned int reg)
@@ -413,8 +413,7 @@ static int ad9739a_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_backend_extend_chan_spec(indio_dev, st->back,
- &ad9739a_channels[0]);
+ ret = iio_backend_extend_chan_spec(st->back, &ad9739a_channels[0]);
if (ret)
return ret;
@@ -432,7 +431,13 @@ static int ad9739a_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad9739a_channels);
indio_dev->setup_ops = &ad9739a_buffer_setup_ops;
- return devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ iio_backend_debugfs_add(st->back, indio_dev);
+
+ return 0;
}
static const struct of_device_id ad9739a_of_match[] = {
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index 6d56428e623d..0cb00f3bec04 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -452,7 +452,7 @@ static int axi_dac_data_source_set(struct iio_backend *back, unsigned int chan,
struct axi_dac_state *st = iio_backend_get_priv(back);
switch (data) {
- case IIO_BACKEND_INTERNAL_CONTINUOS_WAVE:
+ case IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE:
return regmap_update_bits(st->regmap,
AXI_DAC_REG_CHAN_CNTRL_7(chan),
AXI_DAC_DATA_SEL,
@@ -507,7 +507,18 @@ static int axi_dac_set_sample_rate(struct iio_backend *back, unsigned int chan,
return 0;
}
-static const struct iio_backend_ops axi_dac_generic = {
+static int axi_dac_reg_access(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static const struct iio_backend_ops axi_dac_generic_ops = {
.enable = axi_dac_enable,
.disable = axi_dac_disable,
.request_buffer = axi_dac_request_buffer,
@@ -517,6 +528,12 @@ static const struct iio_backend_ops axi_dac_generic = {
.ext_info_get = axi_dac_ext_info_get,
.data_source_set = axi_dac_data_source_set,
.set_sample_rate = axi_dac_set_sample_rate,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_dac_reg_access),
+};
+
+static const struct iio_backend_info axi_dac_generic = {
+ .name = "axi-dac",
+ .ops = &axi_dac_generic_ops,
};
static const struct regmap_config axi_dac_regmap_config = {
diff --git a/drivers/iio/dac/ltc2664.c b/drivers/iio/dac/ltc2664.c
new file mode 100644
index 000000000000..5be5345ac5c8
--- /dev/null
+++ b/drivers/iio/dac/ltc2664.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LTC2664 4 channel, 12-/16-Bit Voltage Output SoftSpan DAC driver
+ * LTC2672 5 channel, 12-/16-Bit Current Output SoftSpan DAC driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define LTC2664_CMD_WRITE_N(n) (0x00 + (n))
+#define LTC2664_CMD_UPDATE_N(n) (0x10 + (n))
+#define LTC2664_CMD_WRITE_N_UPDATE_ALL 0x20
+#define LTC2664_CMD_WRITE_N_UPDATE_N(n) (0x30 + (n))
+#define LTC2664_CMD_POWER_DOWN_N(n) (0x40 + (n))
+#define LTC2664_CMD_POWER_DOWN_ALL 0x50
+#define LTC2664_CMD_SPAN_N(n) (0x60 + (n))
+#define LTC2664_CMD_CONFIG 0x70
+#define LTC2664_CMD_MUX 0xB0
+#define LTC2664_CMD_TOGGLE_SEL 0xC0
+#define LTC2664_CMD_GLOBAL_TOGGLE 0xD0
+#define LTC2664_CMD_NO_OPERATION 0xF0
+#define LTC2664_REF_DISABLE 0x0001
+#define LTC2664_MSPAN_SOFTSPAN 7
+
+#define LTC2672_MAX_CHANNEL 5
+#define LTC2672_MAX_SPAN 7
+#define LTC2672_SCALE_MULTIPLIER(n) (50 * BIT(n))
+
+enum {
+ LTC2664_SPAN_RANGE_0V_5V,
+ LTC2664_SPAN_RANGE_0V_10V,
+ LTC2664_SPAN_RANGE_M5V_5V,
+ LTC2664_SPAN_RANGE_M10V_10V,
+ LTC2664_SPAN_RANGE_M2V5_2V5,
+};
+
+enum {
+ LTC2664_INPUT_A,
+ LTC2664_INPUT_B,
+ LTC2664_INPUT_B_AVAIL,
+ LTC2664_POWERDOWN,
+ LTC2664_POWERDOWN_MODE,
+ LTC2664_TOGGLE_EN,
+ LTC2664_GLOBAL_TOGGLE,
+};
+
+static const u16 ltc2664_mspan_lut[8][2] = {
+ { LTC2664_SPAN_RANGE_M10V_10V, 32768 }, /* MSP2=0, MSP1=0, MSP0=0 (0) */
+ { LTC2664_SPAN_RANGE_M5V_5V, 32768 }, /* MSP2=0, MSP1=0, MSP0=1 (1) */
+ { LTC2664_SPAN_RANGE_M2V5_2V5, 32768 }, /* MSP2=0, MSP1=1, MSP0=0 (2) */
+ { LTC2664_SPAN_RANGE_0V_10V, 0 }, /* MSP2=0, MSP1=1, MSP0=1 (3) */
+ { LTC2664_SPAN_RANGE_0V_10V, 32768 }, /* MSP2=1, MSP1=0, MSP0=0 (4) */
+ { LTC2664_SPAN_RANGE_0V_5V, 0 }, /* MSP2=1, MSP1=0, MSP0=1 (5) */
+ { LTC2664_SPAN_RANGE_0V_5V, 32768 }, /* MSP2=1, MSP1=1, MSP0=0 (6) */
+ { LTC2664_SPAN_RANGE_0V_5V, 0 } /* MSP2=1, MSP1=1, MSP0=1 (7) */
+};
+
+struct ltc2664_state;
+
+struct ltc2664_chip_info {
+ const char *name;
+ int (*scale_get)(const struct ltc2664_state *st, int c);
+ int (*offset_get)(const struct ltc2664_state *st, int c);
+ int measurement_type;
+ unsigned int num_channels;
+ const int (*span_helper)[2];
+ unsigned int num_span;
+ unsigned int internal_vref_mv;
+ bool manual_span_support;
+ bool rfsadj_support;
+};
+
+struct ltc2664_chan {
+ /* indicates if the channel should be toggled */
+ bool toggle_chan;
+ /* indicates if the channel is in powered down state */
+ bool powerdown;
+ /* span code of the channel */
+ u8 span;
+ /* raw data of the current state of the chip registers (A/B) */
+ u16 raw[2];
+};
+
+struct ltc2664_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct ltc2664_chan channels[LTC2672_MAX_CHANNEL];
+ /* lock to protect against multiple access to the device and shared data */
+ struct mutex lock;
+ const struct ltc2664_chip_info *chip_info;
+ struct iio_chan_spec *iio_channels;
+ int vref_mv;
+ u32 rfsadj_ohms;
+ u32 toggle_sel;
+ bool global_toggle;
+};
+
+static const int ltc2664_span_helper[][2] = {
+ { 0, 5000 },
+ { 0, 10000 },
+ { -5000, 5000 },
+ { -10000, 10000 },
+ { -2500, 2500 },
+};
+
+static const int ltc2672_span_helper[][2] = {
+ { 0, 0 },
+ { 0, 3125 },
+ { 0, 6250 },
+ { 0, 12500 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 300000 },
+};
+
+static int ltc2664_scale_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ const int (*span_helper)[2] = st->chip_info->span_helper;
+ int span, fs;
+
+ span = chan->span;
+ if (span < 0)
+ return span;
+
+ fs = span_helper[span][1] - span_helper[span][0];
+
+ return fs * st->vref_mv / 2500;
+}
+
+static int ltc2672_scale_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ int span, fs;
+
+ span = chan->span - 1;
+ if (span < 0)
+ return span;
+
+ fs = 1000 * st->vref_mv;
+
+ if (span == LTC2672_MAX_SPAN)
+ return mul_u64_u32_div(4800, fs, st->rfsadj_ohms);
+
+ return mul_u64_u32_div(LTC2672_SCALE_MULTIPLIER(span), fs, st->rfsadj_ohms);
+}
+
+static int ltc2664_offset_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ int span;
+
+ span = chan->span;
+ if (span < 0)
+ return span;
+
+ if (st->chip_info->span_helper[span][0] < 0)
+ return -32768;
+
+ return 0;
+}
+
+static int ltc2664_dac_code_write(struct ltc2664_state *st, u32 chan, u32 input,
+ u16 code)
+{
+ struct ltc2664_chan *c = &st->channels[chan];
+ int ret, reg;
+
+ guard(mutex)(&st->lock);
+ /* select the correct input register to write to */
+ if (c->toggle_chan) {
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ input << chan);
+ if (ret)
+ return ret;
+ }
+ /*
+ * If in toggle mode the dac should be updated by an
+ * external signal (or sw toggle) and not here.
+ */
+ if (st->toggle_sel & BIT(chan))
+ reg = LTC2664_CMD_WRITE_N(chan);
+ else
+ reg = LTC2664_CMD_WRITE_N_UPDATE_N(chan);
+
+ ret = regmap_write(st->regmap, reg, code);
+ if (ret)
+ return ret;
+
+ c->raw[input] = code;
+
+ if (c->toggle_chan) {
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ st->toggle_sel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltc2664_dac_code_read(struct ltc2664_state *st, u32 chan, u32 input,
+ u32 *code)
+{
+ guard(mutex)(&st->lock);
+ *code = st->channels[chan].raw[input];
+}
+
+static const int ltc2664_raw_range[] = { 0, 1, U16_MAX };
+
+static int ltc2664_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ *vals = ltc2664_raw_range;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc2664_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ltc2664_dac_code_read(st, chan->channel, LTC2664_INPUT_A, val);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = st->chip_info->offset_get(st, chan->channel);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->chip_info->scale_get(st, chan->channel);
+
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc2664_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > U16_MAX || val < 0)
+ return -EINVAL;
+
+ return ltc2664_dac_code_write(st, chan->channel,
+ LTC2664_INPUT_A, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_reg_bool_get(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ u32 val;
+
+ guard(mutex)(&st->lock);
+ switch (private) {
+ case LTC2664_POWERDOWN:
+ val = st->channels[chan->channel].powerdown;
+
+ return sysfs_emit(buf, "%u\n", val);
+ case LTC2664_POWERDOWN_MODE:
+ return sysfs_emit(buf, "42kohm_to_gnd\n");
+ case LTC2664_TOGGLE_EN:
+ val = !!(st->toggle_sel & BIT(chan->channel));
+
+ return sysfs_emit(buf, "%u\n", val);
+ case LTC2664_GLOBAL_TOGGLE:
+ val = st->global_toggle;
+
+ return sysfs_emit(buf, "%u\n", val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_reg_bool_set(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ int ret;
+ bool en;
+
+ ret = kstrtobool(buf, &en);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ switch (private) {
+ case LTC2664_POWERDOWN:
+ ret = regmap_write(st->regmap,
+ en ? LTC2664_CMD_POWER_DOWN_N(chan->channel) :
+ LTC2664_CMD_UPDATE_N(chan->channel), en);
+ if (ret)
+ return ret;
+
+ st->channels[chan->channel].powerdown = en;
+
+ return len;
+ case LTC2664_TOGGLE_EN:
+ if (en)
+ st->toggle_sel |= BIT(chan->channel);
+ else
+ st->toggle_sel &= ~BIT(chan->channel);
+
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ st->toggle_sel);
+ if (ret)
+ return ret;
+
+ return len;
+ case LTC2664_GLOBAL_TOGGLE:
+ ret = regmap_write(st->regmap, LTC2664_CMD_GLOBAL_TOGGLE, en);
+ if (ret)
+ return ret;
+
+ st->global_toggle = en;
+
+ return len;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_dac_input_read(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ u32 val;
+
+ if (private == LTC2664_INPUT_B_AVAIL)
+ return sysfs_emit(buf, "[%u %u %u]\n", ltc2664_raw_range[0],
+ ltc2664_raw_range[1],
+ ltc2664_raw_range[2] / 4);
+
+ ltc2664_dac_code_read(st, chan->channel, private, &val);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t ltc2664_dac_input_write(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ int ret;
+ u16 val;
+
+ if (private == LTC2664_INPUT_B_AVAIL)
+ return -EINVAL;
+
+ ret = kstrtou16(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = ltc2664_dac_code_write(st, chan->channel, private, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int ltc2664_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return -EOPNOTSUPP;
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+#define LTC2664_CHAN_EXT_INFO(_name, _what, _shared, _read, _write) { \
+ .name = _name, \
+ .read = (_read), \
+ .write = (_write), \
+ .private = (_what), \
+ .shared = (_shared), \
+}
+
+/*
+ * For toggle mode we only expose the symbol attr (sw_toggle) in case a TGPx is
+ * not provided in dts.
+ */
+static const struct iio_chan_spec_ext_info ltc2664_toggle_sym_ext_info[] = {
+ LTC2664_CHAN_EXT_INFO("raw0", LTC2664_INPUT_A, IIO_SEPARATE,
+ ltc2664_dac_input_read, ltc2664_dac_input_write),
+ LTC2664_CHAN_EXT_INFO("raw1", LTC2664_INPUT_B, IIO_SEPARATE,
+ ltc2664_dac_input_read, ltc2664_dac_input_write),
+ LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE,
+ IIO_SEPARATE, ltc2664_reg_bool_get, NULL),
+ LTC2664_CHAN_EXT_INFO("symbol", LTC2664_GLOBAL_TOGGLE, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("toggle_en", LTC2664_TOGGLE_EN,
+ IIO_SEPARATE, ltc2664_reg_bool_get,
+ ltc2664_reg_bool_set),
+ { }
+};
+
+static const struct iio_chan_spec_ext_info ltc2664_ext_info[] = {
+ LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE,
+ IIO_SEPARATE, ltc2664_reg_bool_get, NULL),
+ { }
+};
+
+static const struct iio_chan_spec ltc2664_channel_template = {
+ .indexed = 1,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = ltc2664_ext_info,
+};
+
+static const struct ltc2664_chip_info ltc2664_chip = {
+ .name = "ltc2664",
+ .scale_get = ltc2664_scale_get,
+ .offset_get = ltc2664_offset_get,
+ .measurement_type = IIO_VOLTAGE,
+ .num_channels = 4,
+ .span_helper = ltc2664_span_helper,
+ .num_span = ARRAY_SIZE(ltc2664_span_helper),
+ .internal_vref_mv = 2500,
+ .manual_span_support = true,
+ .rfsadj_support = false,
+};
+
+static const struct ltc2664_chip_info ltc2672_chip = {
+ .name = "ltc2672",
+ .scale_get = ltc2672_scale_get,
+ .offset_get = ltc2664_offset_get,
+ .measurement_type = IIO_CURRENT,
+ .num_channels = 5,
+ .span_helper = ltc2672_span_helper,
+ .num_span = ARRAY_SIZE(ltc2672_span_helper),
+ .internal_vref_mv = 1250,
+ .manual_span_support = false,
+ .rfsadj_support = true,
+};
+
+static int ltc2664_set_span(const struct ltc2664_state *st, int min, int max,
+ int chan)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ const int (*span_helper)[2] = chip_info->span_helper;
+ int span, ret;
+
+ for (span = 0; span < chip_info->num_span; span++) {
+ if (min == span_helper[span][0] && max == span_helper[span][1])
+ break;
+ }
+
+ if (span == chip_info->num_span)
+ return -EINVAL;
+
+ ret = regmap_write(st->regmap, LTC2664_CMD_SPAN_N(chan), span);
+ if (ret)
+ return ret;
+
+ return span;
+}
+
+static int ltc2664_channel_config(struct ltc2664_state *st)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ struct device *dev = &st->spi->dev;
+ u32 reg, tmp[2], mspan;
+ int ret, span = 0;
+
+ mspan = LTC2664_MSPAN_SOFTSPAN;
+ ret = device_property_read_u32(dev, "adi,manual-span-operation-config",
+ &mspan);
+ if (!ret) {
+ if (!chip_info->manual_span_support)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,manual-span-operation-config not supported\n");
+
+ if (mspan >= ARRAY_SIZE(ltc2664_mspan_lut))
+ return dev_err_probe(dev, -EINVAL,
+ "adi,manual-span-operation-config not in range\n");
+ }
+
+ st->rfsadj_ohms = 20000;
+ ret = device_property_read_u32(dev, "adi,rfsadj-ohms", &st->rfsadj_ohms);
+ if (!ret) {
+ if (!chip_info->rfsadj_support)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,rfsadj-ohms not supported\n");
+
+ if (st->rfsadj_ohms < 19000 || st->rfsadj_ohms > 41000)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,rfsadj-ohms not in range\n");
+ }
+
+ device_for_each_child_node_scoped(dev, child) {
+ struct ltc2664_chan *chan;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get reg property\n");
+
+ if (reg >= chip_info->num_channels)
+ return dev_err_probe(dev, -EINVAL,
+ "reg bigger than: %d\n",
+ chip_info->num_channels);
+
+ chan = &st->channels[reg];
+
+ if (fwnode_property_read_bool(child, "adi,toggle-mode")) {
+ chan->toggle_chan = true;
+ /* assume sw toggle ABI */
+ st->iio_channels[reg].ext_info = ltc2664_toggle_sym_ext_info;
+
+ /*
+ * Clear IIO_CHAN_INFO_RAW bit as toggle channels expose
+ * out_voltage/current_raw{0|1} files.
+ */
+ __clear_bit(IIO_CHAN_INFO_RAW,
+ &st->iio_channels[reg].info_mask_separate);
+ }
+
+ chan->raw[0] = ltc2664_mspan_lut[mspan][1];
+ chan->raw[1] = ltc2664_mspan_lut[mspan][1];
+
+ chan->span = ltc2664_mspan_lut[mspan][0];
+
+ ret = fwnode_property_read_u32_array(child, "output-range-microvolt",
+ tmp, ARRAY_SIZE(tmp));
+ if (!ret && mspan == LTC2664_MSPAN_SOFTSPAN) {
+ span = ltc2664_set_span(st, tmp[0] / 1000,
+ tmp[1] / 1000, reg);
+ if (span < 0)
+ return dev_err_probe(dev, span,
+ "Failed to set span\n");
+ chan->span = span;
+ }
+
+ ret = fwnode_property_read_u32_array(child, "output-range-microamp",
+ tmp, ARRAY_SIZE(tmp));
+ if (!ret) {
+ span = ltc2664_set_span(st, 0, tmp[1] / 1000, reg);
+ if (span < 0)
+ return dev_err_probe(dev, span,
+ "Failed to set span\n");
+ chan->span = span;
+ }
+ }
+
+ return 0;
+}
+
+static int ltc2664_setup(struct ltc2664_state *st)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ struct gpio_desc *gpio;
+ int ret, i;
+
+ /* If we have a clr/reset pin, use that to reset the chip. */
+ gpio = devm_gpiod_get_optional(&st->spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return dev_err_probe(&st->spi->dev, PTR_ERR(gpio),
+ "Failed to get reset gpio");
+ if (gpio) {
+ fsleep(1000);
+ gpiod_set_value_cansleep(gpio, 0);
+ }
+
+ /*
+ * Duplicate the default channel configuration as it can change during
+ * @ltc2664_channel_config()
+ */
+ st->iio_channels = devm_kcalloc(&st->spi->dev,
+ chip_info->num_channels,
+ sizeof(struct iio_chan_spec),
+ GFP_KERNEL);
+ if (!st->iio_channels)
+ return -ENOMEM;
+
+ for (i = 0; i < chip_info->num_channels; i++) {
+ st->iio_channels[i] = ltc2664_channel_template;
+ st->iio_channels[i].type = chip_info->measurement_type;
+ st->iio_channels[i].channel = i;
+ }
+
+ ret = ltc2664_channel_config(st);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, LTC2664_CMD_CONFIG, LTC2664_REF_DISABLE);
+}
+
+static const struct regmap_config ltc2664_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = LTC2664_CMD_NO_OPERATION,
+};
+
+static const struct iio_info ltc2664_info = {
+ .write_raw = ltc2664_write_raw,
+ .read_raw = ltc2664_read_raw,
+ .read_avail = ltc2664_read_avail,
+ .debugfs_reg_access = ltc2664_reg_access,
+};
+
+static int ltc2664_probe(struct spi_device *spi)
+{
+ static const char * const regulators[] = { "vcc", "iovcc", "v-neg" };
+ const struct ltc2664_chip_info *chip_info;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ltc2664_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ chip_info = spi_get_device_match_data(spi);
+ if (!chip_info)
+ return -ENODEV;
+
+ st->chip_info = chip_info;
+
+ mutex_init(&st->lock);
+
+ st->regmap = devm_regmap_init_spi(spi, &ltc2664_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to init regmap");
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
+
+ st->vref_mv = ret > 0 ? ret / 1000 : chip_info->internal_vref_mv;
+
+ ret = ltc2664_setup(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = chip_info->name;
+ indio_dev->info = &ltc2664_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->iio_channels;
+ indio_dev->num_channels = chip_info->num_channels;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ltc2664_id[] = {
+ { "ltc2664", (kernel_ulong_t)&ltc2664_chip },
+ { "ltc2672", (kernel_ulong_t)&ltc2672_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ltc2664_id);
+
+static const struct of_device_id ltc2664_of_id[] = {
+ { .compatible = "adi,ltc2664", .data = &ltc2664_chip },
+ { .compatible = "adi,ltc2672", .data = &ltc2672_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ltc2664_of_id);
+
+static struct spi_driver ltc2664_driver = {
+ .driver = {
+ .name = "ltc2664",
+ .of_match_table = ltc2664_of_id,
+ },
+ .probe = ltc2664_probe,
+ .id_table = ltc2664_id,
+};
+module_spi_driver(ltc2664_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("Analog Devices LTC2664 and LTC2672 DAC");
+MODULE_LICENSE("GPL");
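The scale/offset pair reported by this driver follows the usual IIO convention, processed = (raw + offset) * scale, with an IIO_VAL_FRACTIONAL_LOG2 scale over 2^16 and an offset of -32768 on bipolar spans. A small sketch of the resulting conversion (ltc2664_code_to_mv is a hypothetical helper; the example numbers assume the -10 V..10 V softspan with the internal 2.5 V reference, i.e. a scale numerator of 20000 mV):

/* Sketch: out_voltageY_raw code to millivolts. */
static long long ltc2664_code_to_mv(int raw, int offset, int scale_numerator_mv)
{
	/* processed = (raw + offset) * scale, scale = scale_numerator_mv / 2^16 */
	return (long long)(raw + offset) * scale_numerator_mv / 65536;
}

/* ltc2664_code_to_mv(0x8000, -32768, 20000) ==      0  -> mid-scale is 0 V  */
/* ltc2664_code_to_mv(0xffff, -32768, 20000) ==   9999  -> just under +10 V  */
/* ltc2664_code_to_mv(0x0000, -32768, 20000) == -10000  -> -10 V             */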
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index af50d2a95898..376dca163c91 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -918,7 +918,7 @@ static bool ltc2688_reg_writable(struct device *dev, unsigned int reg)
return false;
}
-static struct regmap_bus ltc2688_regmap_bus = {
+static const struct regmap_bus ltc2688_regmap_bus = {
.read = ltc2688_spi_read,
.write = ltc2688_spi_write,
.read_flag_mask = LTC2688_READ_OPERATION,
diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c
index c449ca949465..192175dc6419 100644
--- a/drivers/iio/dac/mcp4728.c
+++ b/drivers/iio/dac/mcp4728.c
@@ -84,7 +84,6 @@ enum mcp4728_scale {
struct mcp4728_data {
struct i2c_client *client;
- struct regulator *vdd_reg;
bool powerdown;
int scales_avail[MCP4728_N_SCALES * 2];
struct mcp4728_channel_data chdata[MCP4728_N_CHANNELS];
@@ -415,15 +414,9 @@ static void mcp4728_init_scale_avail(enum mcp4728_scale scale, int vref_mv,
data->scales_avail[scale * 2 + 1] = value_micro;
}
-static int mcp4728_init_scales_avail(struct mcp4728_data *data)
+static int mcp4728_init_scales_avail(struct mcp4728_data *data, int vdd_mv)
{
- int ret;
-
- ret = regulator_get_voltage(data->vdd_reg);
- if (ret < 0)
- return ret;
-
- mcp4728_init_scale_avail(MCP4728_SCALE_VDD, ret / 1000, data);
+ mcp4728_init_scale_avail(MCP4728_SCALE_VDD, vdd_mv, data);
mcp4728_init_scale_avail(MCP4728_SCALE_VINT_NO_GAIN, 2048, data);
mcp4728_init_scale_avail(MCP4728_SCALE_VINT_GAIN_X2, 4096, data);
@@ -530,17 +523,12 @@ static int mcp4728_init_channels_data(struct mcp4728_data *data)
return 0;
}
-static void mcp4728_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int mcp4728_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct mcp4728_data *data;
struct iio_dev *indio_dev;
- int err;
+ int ret, vdd_mv;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -550,18 +538,11 @@ static int mcp4728_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(data->vdd_reg))
- return PTR_ERR(data->vdd_reg);
-
- err = regulator_enable(data->vdd_reg);
- if (err)
- return err;
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vdd");
+ if (ret < 0)
+ return ret;
- err = devm_add_action_or_reset(&client->dev, mcp4728_reg_disable,
- data->vdd_reg);
- if (err)
- return err;
+ vdd_mv = ret / 1000;
/*
* MCP4728 has internal EEPROM that save each channel boot
@@ -569,15 +550,15 @@ static int mcp4728_probe(struct i2c_client *client)
* driver at kernel boot. mcp4728_init_channels_data() reads back DAC
* settings and stores them in data structure.
*/
- err = mcp4728_init_channels_data(data);
- if (err) {
- return dev_err_probe(&client->dev, err,
+ ret = mcp4728_init_channels_data(data);
+ if (ret) {
+ return dev_err_probe(&client->dev, ret,
"failed to read mcp4728 current configuration\n");
}
- err = mcp4728_init_scales_avail(data);
- if (err) {
- return dev_err_probe(&client->dev, err,
+ ret = mcp4728_init_scales_avail(data, vdd_mv);
+ if (ret) {
+ return dev_err_probe(&client->dev, ret,
"failed to init scales\n");
}
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index da4327624d45..26aa99059813 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -30,7 +30,6 @@ struct mcp4922_state {
struct spi_device *spi;
unsigned int value[MCP4922_NUM_CHANNELS];
unsigned int vref_mv;
- struct regulator *vref_reg;
u8 mosi[2] __aligned(IIO_DMA_MINALIGN);
};
@@ -132,27 +131,13 @@ static int mcp4922_probe(struct spi_device *spi)
state = iio_priv(indio_dev);
state->spi = spi;
- state->vref_reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(state->vref_reg))
- return dev_err_probe(&spi->dev, PTR_ERR(state->vref_reg),
- "Vref regulator not specified\n");
-
- ret = regulator_enable(state->vref_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vref regulator: %d\n",
- ret);
- return ret;
- }
- ret = regulator_get_voltage(state->vref_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vref regulator: %d\n",
- ret);
- goto error_disable_reg;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "Failed to get vref voltage\n");
+
state->vref_mv = ret / 1000;
- spi_set_drvdata(spi, indio_dev);
id = spi_get_device_id(spi);
indio_dev->info = &mcp4922_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -163,30 +148,13 @@ static int mcp4922_probe(struct spi_device *spi)
indio_dev->num_channels = MCP4922_NUM_CHANNELS;
indio_dev->name = id->name;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n",
- ret);
- goto error_disable_reg;
- }
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to register iio device\n");
return 0;
-
-error_disable_reg:
- regulator_disable(state->vref_reg);
-
- return ret;
}
-static void mcp4922_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp4922_state *state;
-
- iio_device_unregister(indio_dev);
- state = iio_priv(indio_dev);
- regulator_disable(state->vref_reg);
-}
static const struct spi_device_id mcp4922_id[] = {
{"mcp4902", ID_MCP4902},
@@ -202,7 +170,6 @@ static struct spi_driver mcp4922_driver = {
.name = "mcp4922",
},
.probe = mcp4922_probe,
- .remove = mcp4922_remove,
.id_table = mcp4922_id,
};
module_spi_driver(mcp4922_driver);
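Both the mcp4728 and mcp4922 conversions above collapse the open-coded get/enable/read-voltage/disable sequence into devm_regulator_get_enable_read_voltage(), which returns the supply voltage in microvolts (or a negative errno) and leaves the disable to devm on driver detach. The resulting probe-time pattern, as a sketch (example_get_vref_mv is a placeholder name):

static int example_get_vref_mv(struct device *dev, unsigned int *vref_mv)
{
	int ret;

	ret = devm_regulator_get_enable_read_voltage(dev, "vref");
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to get vref voltage\n");

	*vref_mv = ret / 1000;	/* microvolts -> millivolts */
	return 0;
}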
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 7f89d2a52f49..6f4aa4794a0c 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -249,7 +249,9 @@ static int ti_dac_probe(struct spi_device *spi)
spi->mode = SPI_MODE_1;
spi->bits_per_word = 16;
- spi_setup(spi);
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "spi_setup failed\n");
indio_dev->info = &ti_dac_info;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 9b2f99449a82..4ca3f1aaff99 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -68,7 +68,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* Here let's pretend we have random access. And the values are in the
* constant table fakedata.
*/
- for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, j)
data[i++] = fakedata[j];
iio_push_to_buffers_with_timestamp(indio_dev, data,
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index 9284c13f1abb..25fbc2cef1f7 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -400,7 +400,13 @@ enum muxout_select_mode {
ADF4377_MUXOUT_HIGH = 0x8,
};
+struct adf4377_chip_info {
+ const char *name;
+ bool has_gpio_enclk2;
+};
+
struct adf4377_state {
+ const struct adf4377_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
struct clk *clkin;
@@ -889,11 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk1),
"failed to get the CE GPIO\n");
- st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(st->gpio_enclk2))
- return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
- "failed to get the CE GPIO\n");
+ if (st->chip_info->has_gpio_enclk2) {
+ st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_enclk2))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
+ "failed to get the CE GPIO\n");
+ }
ret = device_property_match_property_string(&spi->dev, "adi,muxout-select",
adf4377_muxout_modes,
@@ -921,6 +929,16 @@ static int adf4377_freq_change(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+static const struct adf4377_chip_info adf4377_chip_info = {
+ .name = "adf4377",
+ .has_gpio_enclk2 = true,
+};
+
+static const struct adf4377_chip_info adf4378_chip_info = {
+ .name = "adf4378",
+ .has_gpio_enclk2 = false,
+};
+
static int adf4377_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -945,6 +963,7 @@ static int adf4377_probe(struct spi_device *spi)
st->regmap = regmap;
st->spi = spi;
+ st->chip_info = spi_get_device_match_data(spi);
mutex_init(&st->lock);
ret = adf4377_properties_parse(st);
@@ -964,13 +983,15 @@ static int adf4377_probe(struct spi_device *spi)
}
static const struct spi_device_id adf4377_id[] = {
- { "adf4377", 0 },
+ { "adf4377", (kernel_ulong_t)&adf4377_chip_info },
+ { "adf4378", (kernel_ulong_t)&adf4378_chip_info },
{}
};
MODULE_DEVICE_TABLE(spi, adf4377_id);
static const struct of_device_id adf4377_of_match[] = {
- { .compatible = "adi,adf4377" },
+ { .compatible = "adi,adf4377", .data = &adf4377_chip_info },
+ { .compatible = "adi,adf4378", .data = &adf4378_chip_info },
{}
};
MODULE_DEVICE_TABLE(of, adf4377_of_match);
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 52326dc521ac..85637e8ac45f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -321,8 +321,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = spi_write_then_read(afe->spi,
&afe4403_channel_values[bit], 1,
rx, sizeof(rx));
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 7f69baa1ed53..d49e1572a439 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -333,8 +333,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
struct afe4404_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
&afe->buffer[i++]);
if (ret)
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 07a343e35a81..1d074eb6a8c5 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -293,7 +293,7 @@ static irqreturn_t max30102_interrupt_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct max30102_data *data = iio_priv(indio_dev);
unsigned int measurements = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
int ret, cnt = 0;
mutex_lock(&data->lock);
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b15b7a3b66d5..54f11f000b6f 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -25,6 +25,17 @@ config DHT11
Other sensors should work as well as long as they speak the
same protocol.
+config ENS210
+ tristate "ENS210 temperature and humidity sensor"
+ depends on I2C
+ select CRC7
+ help
+ Say yes here to get support for the ScioSense ENS210 family of
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module will be
+ called ens210.
+
config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 5fbeef299f61..34b3dc749466 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
+obj-$(CONFIG_ENS210) += ens210.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
obj-$(CONFIG_HDC3020) += hdc3020.o
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index a56474be5dd2..6b0aa3a3f025 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -174,8 +174,7 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
data->scan.chans[1] = sensor_data.temp_data;
} else {
i = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
data->scan.chans[i] = (bit ? sensor_data.temp_data :
sensor_data.hum_data);
i++;
diff --git a/drivers/iio/humidity/ens210.c b/drivers/iio/humidity/ens210.c
new file mode 100644
index 000000000000..e9167574203a
--- /dev/null
+++ b/drivers/iio/humidity/ens210.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ens210.c - Support for ScioSense ens210 temperature & humidity sensor family
+ *
+ * (7-bit I2C slave address 0x43 ENS210)
+ * (7-bit I2C slave address 0x43 ENS210A)
+ * (7-bit I2C slave address 0x44 ENS211)
+ * (7-bit I2C slave address 0x45 ENS212)
+ * (7-bit I2C slave address 0x46 ENS213A)
+ * (7-bit I2C slave address 0x47 ENS215)
+ *
+ * Datasheet:
+ * https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf
+ * https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf
+ */
+
+#include <linux/crc7.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+/* register definitions */
+#define ENS210_REG_PART_ID 0x00
+#define ENS210_REG_DIE_REV 0x02
+#define ENS210_REG_UID 0x04
+#define ENS210_REG_SYS_CTRL 0x10
+#define ENS210_REG_SYS_STAT 0x11
+#define ENS210_REG_SENS_RUN 0x21
+#define ENS210_REG_SENS_START 0x22
+#define ENS210_REG_SENS_STOP 0x23
+#define ENS210_REG_SENS_STAT 0x24
+#define ENS210_REG_T_VAL 0x30
+#define ENS210_REG_H_VAL 0x33
+
+/* value definitions */
+#define ENS210_SENS_START_T_START BIT(0)
+#define ENS210_SENS_START_H_START BIT(1)
+
+#define ENS210_SENS_STAT_T_ACTIVE BIT(0)
+#define ENS210_SENS_STAT_H_ACTIVE BIT(1)
+
+#define ENS210_SYS_CTRL_LOW_POWER_ENABLE BIT(0)
+#define ENS210_SYS_CTRL_SYS_RESET BIT(7)
+
+#define ENS210_SYS_STAT_SYS_ACTIVE BIT(0)
+
+enum ens210_partnumber {
+ ENS210 = 0x0210,
+ ENS210A = 0xa210,
+ ENS211 = 0x0211,
+ ENS212 = 0x0212,
+ ENS213A = 0xa213,
+ ENS215 = 0x0215,
+};
+
+/**
+ * struct ens210_chip_info - Humidity/Temperature chip specific information
+ * @name: name of device
+ * @part_id: chip identifier
+ * @conv_time_msec: time needed for conversion, in milliseconds
+ */
+struct ens210_chip_info {
+ const char *name;
+ enum ens210_partnumber part_id;
+ unsigned int conv_time_msec;
+};
+
+/**
+ * struct ens210_data - Humidity/Temperature sensor device structure
+ * @client: i2c client
+ * @chip_info: chip specific information
+ * @lock: lock protecting against simultaneous callers of get_measurement
+ * since multiple uninterrupted transactions are required
+ */
+struct ens210_data {
+ struct i2c_client *client;
+ const struct ens210_chip_info *chip_info;
+ struct mutex lock;
+};
+
+/* calculate 17-bit crc7 */
+static u8 ens210_crc7(u32 val)
+{
+ unsigned int val_be = (val & 0x1ffff) >> 0x8;
+
+ return crc7_be(0xde, (u8 *)&val_be, 3) >> 1;
+}
+
+static int ens210_get_measurement(struct iio_dev *indio_dev, bool temp, int *val)
+{
+ struct ens210_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ u32 regval;
+ u8 regval_le[3];
+ int ret;
+
+ /* assert read */
+ ret = i2c_smbus_write_byte_data(data->client, ENS210_REG_SENS_START,
+ temp ? ENS210_SENS_START_T_START :
+ ENS210_SENS_START_H_START);
+ if (ret)
+ return ret;
+
+ /* wait for conversion to be ready */
+ msleep(data->chip_info->conv_time_msec);
+
+ ret = i2c_smbus_read_byte_data(data->client, ENS210_REG_SENS_STAT);
+ if (ret < 0)
+ return ret;
+
+ /* perform read */
+ ret = i2c_smbus_read_i2c_block_data(
+ data->client, temp ? ENS210_REG_T_VAL : ENS210_REG_H_VAL, 3,
+ regval_le);
+ if (ret < 0) {
+ dev_err(dev, "failed to read register");
+ return -EIO;
+ }
+ if (ret != 3) {
+ dev_err(dev, "expected 3 bytes, received %d\n", ret);
+ return -EIO;
+ }
+
+ regval = get_unaligned_le24(regval_le);
+ if (ens210_crc7(regval) != ((regval >> 17) & 0x7f)) {
+ dev_err(dev, "invalid crc\n");
+ return -EIO;
+ }
+
+ if (!((regval >> 16) & 0x1)) {
+ dev_err(dev, "data is not valid");
+ return -EIO;
+ }
+
+ *val = regval & GENMASK(15, 0);
+ return IIO_VAL_INT;
+}
+
+static int ens210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct ens210_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ scoped_guard(mutex, &data->lock) {
+ ret = ens210_get_measurement(
+ indio_dev, channel->type == IIO_TEMP, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL; /* compiler warning workaround */
+ case IIO_CHAN_INFO_SCALE:
+ if (channel->type == IIO_TEMP) {
+ *val = 15;
+ *val2 = 625000;
+ } else {
+ *val = 1;
+ *val2 = 953125;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -17481;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec ens210_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static const struct iio_info ens210_info = {
+ .read_raw = ens210_read_raw,
+};
+
+static int ens210_probe(struct i2c_client *client)
+{
+ struct ens210_data *data;
+ struct iio_dev *indio_dev;
+ uint16_t part_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ return dev_err_probe(&client->dev, -EOPNOTSUPP,
+ "adapter does not support some i2c transactions\n");
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+ data->chip_info = i2c_get_match_data(client);
+
+ ret = devm_regulator_get_enable(&client->dev, "vdd");
+ if (ret)
+ return ret;
+
+ /* reset device */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL,
+ ENS210_SYS_CTRL_SYS_RESET);
+ if (ret)
+ return ret;
+
+ /* wait for device to become active */
+ usleep_range(4000, 5000);
+
+ /* disable low power mode */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL, 0x00);
+ if (ret)
+ return ret;
+
+ /* wait for device to finish */
+ usleep_range(4000, 5000);
+
+ /* get part_id */
+ ret = i2c_smbus_read_word_data(client, ENS210_REG_PART_ID);
+ if (ret < 0)
+ return ret;
+ part_id = ret;
+
+ if (part_id != data->chip_info->part_id) {
+ dev_info(&client->dev,
+ "Part ID does not match (0x%04x != 0x%04x)\n", part_id,
+ data->chip_info->part_id);
+ }
+
+ /* reenable low power */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL,
+ ENS210_SYS_CTRL_LOW_POWER_ENABLE);
+ if (ret)
+ return ret;
+
+ indio_dev->name = data->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ens210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ens210_channels);
+ indio_dev->info = &ens210_info;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct ens210_chip_info ens210_chip_info_data = {
+ .name = "ens210",
+ .part_id = ENS210,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens210a_chip_info_data = {
+ .name = "ens210a",
+ .part_id = ENS210A,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens211_chip_info_data = {
+ .name = "ens211",
+ .part_id = ENS211,
+ .conv_time_msec = 32,
+};
+
+static const struct ens210_chip_info ens212_chip_info_data = {
+ .name = "ens212",
+ .part_id = ENS212,
+ .conv_time_msec = 32,
+};
+
+static const struct ens210_chip_info ens213a_chip_info_data = {
+ .name = "ens213a",
+ .part_id = ENS213A,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens215_chip_info_data = {
+ .name = "ens215",
+ .part_id = ENS215,
+ .conv_time_msec = 130,
+};
+
+static const struct of_device_id ens210_of_match[] = {
+ { .compatible = "sciosense,ens210", .data = &ens210_chip_info_data },
+ { .compatible = "sciosense,ens210a", .data = &ens210a_chip_info_data },
+ { .compatible = "sciosense,ens211", .data = &ens211_chip_info_data },
+ { .compatible = "sciosense,ens212", .data = &ens212_chip_info_data },
+ { .compatible = "sciosense,ens213a", .data = &ens213a_chip_info_data },
+ { .compatible = "sciosense,ens215", .data = &ens215_chip_info_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ens210_of_match);
+
+static const struct i2c_device_id ens210_id_table[] = {
+ { "ens210", (kernel_ulong_t)&ens210_chip_info_data },
+ { "ens210a", (kernel_ulong_t)&ens210a_chip_info_data },
+ { "ens211", (kernel_ulong_t)&ens211_chip_info_data },
+ { "ens212", (kernel_ulong_t)&ens212_chip_info_data },
+ { "ens213a", (kernel_ulong_t)&ens213a_chip_info_data },
+ { "ens215", (kernel_ulong_t)&ens215_chip_info_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ens210_id_table);
+
+static struct i2c_driver ens210_driver = {
+ .probe = ens210_probe,
+ .id_table = ens210_id_table,
+ .driver = {
+ .name = "ens210",
+ .of_match_table = ens210_of_match,
+ },
+};
+module_i2c_driver(ens210_driver);
+
+MODULE_DESCRIPTION("ScioSense ENS210 temperature and humidity sensor driver");
+MODULE_AUTHOR("Joshua Felmeden <jfelmeden@thegoodpenguin.co.uk>");
+MODULE_LICENSE("GPL");
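The scale/offset values above work out to 1/64 K per count for temperature (the -17481.6 offset shifts the Kelvin origin to -273.15 degC) and 1/512 %RH per count for humidity. A sketch of the equivalent integer conversions, matching what userspace gets from processed = (raw + offset) * scale (helper names are made up):

/* Sketch: raw T_VAL/H_VAL codes to millidegree Celsius / milli-%RH. */
static long ens210_raw_to_millicelsius(u16 raw)
{
	/* (raw - 17481.6) * 15.625 mdegC == raw / 64 K - 273.15 degC */
	return (raw * 15625LL - 273150000) / 1000;
}

static long ens210_raw_to_millipercent(u16 raw)
{
	/* raw * 1.953125 milli-%RH == raw / 512 %RH */
	return raw * 1953125LL / 1000000;
}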
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 0bfd6205f5f6..6484ab8aff55 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -202,8 +202,6 @@ enum {
ADIS16400_SCAN_TIMESTAMP,
};
-#ifdef CONFIG_DEBUG_FS
-
static ssize_t adis16400_show_serial_number(struct file *file,
char __user *userbuf, size_t count, loff_t *ppos)
{
@@ -273,11 +271,14 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops,
adis16400_show_flash_count, NULL, "%lld\n");
-static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+static void adis16400_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16400_state *st = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
debugfs_create_file_unsafe("serial_number", 0400,
d, st, &adis16400_serial_number_fops);
@@ -286,19 +287,8 @@ static int adis16400_debugfs_init(struct iio_dev *indio_dev)
d, st, &adis16400_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, st, &adis16400_flash_count_fops);
-
- return 0;
}
-#else
-
-static int adis16400_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-#endif
-
enum adis16400_chip_variant {
ADIS16300,
ADIS16334,
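The adis16400 change above, and the matching adis16460/adis16475/adis16480 hunks below, drop the #ifdef CONFIG_DEBUG_FS stub pair in favour of a single always-compiled function that returns early. The shape they converge on looks roughly like this (example_* names are placeholders for the per-driver state and fops):

static void example_debugfs_init(struct iio_dev *indio_dev)
{
	struct dentry *d = iio_get_debugfs_dentry(indio_dev);

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return;

	/* example_flash_count_fops stands in for a DEFINE_DEBUGFS_ATTRIBUTE() */
	debugfs_create_file_unsafe("flash_count", 0400, d,
				   iio_priv(indio_dev),
				   &example_flash_count_fops);
}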
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 69facd72bd7d..eaa38dd6201f 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -69,8 +69,6 @@ struct adis16460 {
struct adis adis;
};
-#ifdef CONFIG_DEBUG_FS
-
static int adis16460_show_serial_number(void *arg, u64 *val)
{
struct adis16460 *adis16460 = arg;
@@ -125,30 +123,22 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops,
adis16460_show_flash_count, NULL, "%lld\n");
-static int adis16460_debugfs_init(struct iio_dev *indio_dev)
+static void adis16460_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16460 *adis16460 = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("serial_number", 0400,
d, adis16460, &adis16460_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
d, adis16460, &adis16460_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, adis16460, &adis16460_flash_count_fops);
-
- return 0;
-}
-
-#else
-
-static int adis16460_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
}
-#endif
-
static int adis16460_set_freq(struct iio_dev *indio_dev, int val, int val2)
{
struct adis16460 *st = iio_priv(indio_dev);
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 482258ed5a3c..88efe728b61b 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -164,7 +164,6 @@ module_param(low_rate_allow, bool, 0444);
MODULE_PARM_DESC(low_rate_allow,
"Allow IMU rates below the minimum advisable when external clk is used in SCALED mode (default: N)");
-#ifdef CONFIG_DEBUG_FS
static ssize_t adis16475_show_firmware_revision(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
@@ -279,6 +278,9 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev)
struct adis16475 *st = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("serial_number", 0400,
d, st, &adis16475_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
@@ -290,11 +292,6 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev)
debugfs_create_file("firmware_date", 0400, d,
st, &adis16475_firmware_date_fops);
}
-#else
-static void adis16475_debugfs_init(struct iio_dev *indio_dev)
-{
-}
-#endif
static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
{
@@ -1593,8 +1590,7 @@ static int adis16475_push_single_sample(struct iio_poll_func *pf)
return -EINVAL;
}
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
/*
* When burst mode is used, system flags is the first data
* channel in the sequence, but the scan index is 7.
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index c59ef6f7cfd4..294181f2fcb3 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -193,8 +193,6 @@ module_param(low_rate_allow, bool, 0444);
MODULE_PARM_DESC(low_rate_allow,
"Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
-#ifdef CONFIG_DEBUG_FS
-
static ssize_t adis16480_show_firmware_revision(struct file *file,
char __user *userbuf, size_t count, loff_t *ppos)
{
@@ -304,11 +302,14 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
adis16480_show_flash_count, NULL, "%lld\n");
-static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+static void adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("firmware_revision", 0400,
d, adis16480, &adis16480_firmware_revision_fops);
debugfs_create_file_unsafe("firmware_date", 0400,
@@ -319,19 +320,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
d, adis16480, &adis16480_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, adis16480, &adis16480_flash_count_fops);
-
- return 0;
-}
-
-#else
-
-static int adis16480_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
}
-#endif
-
static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
{
struct adis16480 *st = iio_priv(indio_dev);
@@ -1395,7 +1385,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
goto irq_done;
}
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
/*
* When burst mode is used, temperature is the first data
* channel in the sequence, but the temperature scan index
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 90aa04d94da5..495e8a74ac67 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -435,8 +435,7 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(data->regmap, base + i * sizeof(sample),
&sample, sizeof(sample));
if (ret)
diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h
index dff126d41658..209bccb1f335 100644
--- a/drivers/iio/imu/bmi323/bmi323.h
+++ b/drivers/iio/imu/bmi323/bmi323.h
@@ -205,5 +205,6 @@
struct device;
int bmi323_core_probe(struct device *dev);
extern const struct regmap_config bmi323_regmap_config;
+extern const struct dev_pm_ops bmi323_core_pm_ops;
#endif
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index d708d1fe3e42..671401ce80dc 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -118,6 +118,38 @@ static const struct bmi323_hw bmi323_hw[2] = {
},
};
+static const unsigned int bmi323_reg_savestate[] = {
+ BMI323_INT_MAP1_REG,
+ BMI323_INT_MAP2_REG,
+ BMI323_IO_INT_CTR_REG,
+ BMI323_IO_INT_CONF_REG,
+ BMI323_ACC_CONF_REG,
+ BMI323_GYRO_CONF_REG,
+ BMI323_FEAT_IO0_REG,
+ BMI323_FIFO_WTRMRK_REG,
+ BMI323_FIFO_CONF_REG
+};
+
+static const unsigned int bmi323_ext_reg_savestate[] = {
+ BMI323_GEN_SET1_REG,
+ BMI323_TAP1_REG,
+ BMI323_TAP2_REG,
+ BMI323_TAP3_REG,
+ BMI323_FEAT_IO0_S_TAP_MSK,
+ BMI323_STEP_SC1_REG,
+ BMI323_ANYMO1_REG,
+ BMI323_NOMO1_REG,
+ BMI323_ANYMO1_REG + BMI323_MO2_OFFSET,
+ BMI323_NOMO1_REG + BMI323_MO2_OFFSET,
+ BMI323_ANYMO1_REG + BMI323_MO3_OFFSET,
+ BMI323_NOMO1_REG + BMI323_MO3_OFFSET
+};
+
+struct bmi323_regs_runtime_pm {
+ unsigned int reg_settings[ARRAY_SIZE(bmi323_reg_savestate)];
+ unsigned int ext_reg_settings[ARRAY_SIZE(bmi323_ext_reg_savestate)];
+};
+
struct bmi323_data {
struct device *dev;
struct regmap *regmap;
@@ -130,6 +162,7 @@ struct bmi323_data {
u32 odrns[BMI323_SENSORS_CNT];
u32 odrhz[BMI323_SENSORS_CNT];
unsigned int feature_events;
+ struct bmi323_regs_runtime_pm runtime_pm_status;
/*
* Lock to protect the members of device's private data from concurrent
@@ -1972,6 +2005,11 @@ static void bmi323_disable(void *data_ptr)
bmi323_set_mode(data, BMI323_ACCEL, ACC_GYRO_MODE_DISABLE);
bmi323_set_mode(data, BMI323_GYRO, ACC_GYRO_MODE_DISABLE);
+
+ /*
+ * Place the peripheral in its lowest power consuming state.
+ */
+ regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
}
static int bmi323_set_bw(struct bmi323_data *data,
@@ -2030,6 +2068,13 @@ static int bmi323_init(struct bmi323_data *data)
return dev_err_probe(data->dev, -EINVAL,
"Sensor power error = 0x%x\n", val);
+ return 0;
+}
+
+static int bmi323_init_reset(struct bmi323_data *data)
+{
+ int ret;
+
/*
* Set the Bandwidth coefficient which defines the 3 dB cutoff
* frequency in relation to the ODR.
@@ -2078,12 +2123,18 @@ int bmi323_core_probe(struct device *dev)
data = iio_priv(indio_dev);
data->dev = dev;
data->regmap = regmap;
+ data->irq_pin = BMI323_IRQ_DISABLED;
+ data->state = BMI323_IDLE;
mutex_init(&data->mutex);
ret = bmi323_init(data);
if (ret)
return -EINVAL;
+ ret = bmi323_init_reset(data);
+ if (ret)
+ return -EINVAL;
+
if (!iio_read_acpi_mount_matrix(dev, &data->orientation, "ROTM")) {
ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
@@ -2117,10 +2168,139 @@ int bmi323_core_probe(struct device *dev)
return dev_err_probe(data->dev, ret,
"Unable to register iio device\n");
- return 0;
+ return bmi323_fifo_disable(data);
}
EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323);
+#if defined(CONFIG_PM)
+static int bmi323_core_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+
+ ret = iio_device_suspend_triggering(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Save registers meant to be restored by resume pm callback. */
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) {
+ ret = regmap_read(data->regmap, bmi323_reg_savestate[i],
+ &savestate->reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error reading bmi323 reg 0x%x: %d\n",
+ bmi323_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) {
+		ret = bmi323_read_ext_reg(data, bmi323_ext_reg_savestate[i],
+					  &savestate->ext_reg_settings[i]);
+		if (ret) {
+			dev_err(data->dev,
+				"Error reading bmi323 external reg 0x%x: %d\n",
+				bmi323_ext_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ /* Perform soft reset to place the device in its lowest power state. */
+ ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int bmi323_core_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status;
+ unsigned int val;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+
+ /*
+	 * Perform the device power-on and initial setup once again after
+	 * the soft reset that runtime PM used to enter the lowest power state.
+ */
+ ret = bmi323_init(data);
+	if (ret)
+		return ret;
+
+ /* Register must be cleared before changing an active config */
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0);
+ if (ret) {
+ dev_err(data->dev, "Error stopping feature engine\n");
+ return ret;
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) {
+		ret = bmi323_write_ext_reg(data, bmi323_ext_reg_savestate[i],
+					   savestate->ext_reg_settings[i]);
+		if (ret) {
+			dev_err(data->dev,
+				"Error writing bmi323 external reg 0x%x: %d\n",
+				bmi323_ext_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) {
+ ret = regmap_write(data->regmap, bmi323_reg_savestate[i],
+ savestate->reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error writing bmi323 reg 0x%x: %d\n",
+ bmi323_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ /*
+	 * Clear any stale FIFO samples that were generated before suspend or
+	 * while the peripheral was in a state different from the saved one.
+ */
+ if (data->state == BMI323_BUFFER_FIFO) {
+ ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG,
+ BMI323_FIFO_FLUSH_MSK);
+ if (ret) {
+ dev_err(data->dev, "Error flushing FIFO buffer: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_read(data->regmap, BMI323_ERR_REG, &val);
+ if (ret) {
+ dev_err(data->dev,
+ "Error reading bmi323 error register: %d\n", ret);
+ return ret;
+ }
+
+ if (val) {
+ dev_err(data->dev,
+ "Sensor power error in PM = 0x%x\n", val);
+ return -EINVAL;
+ }
+
+ return iio_device_resume_triggering(indio_dev);
+}
+
+#endif
+
+const struct dev_pm_ops bmi323_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(bmi323_core_runtime_suspend,
+ bmi323_core_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_NS_GPL(bmi323_core_pm_ops, IIO_BMI323);
+
MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
MODULE_LICENSE("GPL");
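
The exported bmi323_core_pm_ops only define runtime PM callbacks; the bus drivers below wire them up through pm_ptr(). Not shown in this hunk is how the core driver takes runtime PM references around register accesses; a hedged sketch of that usual pattern follows, with foo_read_reg as a hypothetical helper:

#include <linux/pm_runtime.h>
#include <linux/regmap.h>

/* Sketch only: assumes the usual runtime-PM reference counting pattern. */
static int foo_read_reg(struct bmi323_data *data, unsigned int reg,
			unsigned int *val)
{
	int ret;

	/* Wakes the sensor, invoking bmi323_core_runtime_resume() if needed. */
	ret = pm_runtime_resume_and_get(data->dev);
	if (ret)
		return ret;

	ret = regmap_read(data->regmap, reg, val);

	/* Let the autosuspend timer put the sensor back to sleep later. */
	pm_runtime_mark_last_busy(data->dev);
	pm_runtime_put_autosuspend(data->dev);

	return ret;
}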
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
index 52140bf05765..0ba5d69d8329 100644
--- a/drivers/iio/imu/bmi323/bmi323_i2c.c
+++ b/drivers/iio/imu/bmi323/bmi323_i2c.c
@@ -61,7 +61,7 @@ static int bmi323_regmap_i2c_write(void *context, const void *data,
data + sizeof(u8));
}
-static struct regmap_bus bmi323_regmap_bus = {
+static const struct regmap_bus bmi323_regmap_bus = {
.read = bmi323_regmap_i2c_read,
.write = bmi323_regmap_i2c_write,
};
@@ -128,6 +128,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match);
static struct i2c_driver bmi323_i2c_driver = {
.driver = {
.name = "bmi323",
+ .pm = pm_ptr(&bmi323_core_pm_ops),
.of_match_table = bmi323_of_i2c_match,
.acpi_match_table = bmi323_acpi_match,
},
diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c
index 7b1e8127d0dd..9de3ade78d71 100644
--- a/drivers/iio/imu/bmi323/bmi323_spi.c
+++ b/drivers/iio/imu/bmi323/bmi323_spi.c
@@ -36,7 +36,7 @@ static int bmi323_regmap_spi_write(void *context, const void *data,
return spi_write(spi, data_buff + 1, count - 1);
}
-static struct regmap_bus bmi323_regmap_bus = {
+static const struct regmap_bus bmi323_regmap_bus = {
.read = bmi323_regmap_spi_read,
.write = bmi323_regmap_spi_write,
};
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_spi_match);
static struct spi_driver bmi323_spi_driver = {
.driver = {
.name = "bmi323",
+ .pm = pm_ptr(&bmi323_core_pm_ops),
.of_match_table = bmi323_of_spi_match,
},
.probe = bmi323_spi_probe,
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 52744dd98e65..ea6519b22b2f 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -1458,7 +1458,7 @@ static irqreturn_t bno055_trigger_handler(int irq, void *p)
* then we split the transfer, skipping the gap.
*/
for_each_set_bitrange(start, end, iio_dev->active_scan_mask,
- iio_dev->masklength) {
+ iio_get_masklength(iio_dev)) {
/*
* First transfer will start from the beginning of the first
* ones-field in the bitmap
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
index 694ff14a3aa2..da7873bfd348 100644
--- a/drivers/iio/imu/bno055/bno055_ser_core.c
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -492,7 +492,7 @@ static const struct serdev_device_ops bno055_ser_serdev_ops = {
.write_wakeup = serdev_device_write_wakeup,
};
-static struct regmap_bus bno055_ser_regmap_bus = {
+static const struct regmap_bus bno055_ser_regmap_bus = {
.write = bno055_ser_write_reg,
.read = bno055_ser_read_reg,
};
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index d37eca5ef761..c61c012e25bb 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1200,8 +1200,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
base = KMX61_MAG_XOUT_L;
mutex_lock(&data->lock);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = kmx61_read_measurement(data, base, bit);
if (ret < 0) {
mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 937ff9c5a74c..ed0267929725 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2127,25 +2127,15 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
.write_raw_get_fmt = st_lsm6dsx_write_raw_get_fmt,
};
-static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
-{
- struct device *dev = hw->dev;
-
- if (!dev_fwnode(dev))
- return -EINVAL;
-
- return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin);
-}
-
static int
st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
const struct st_lsm6dsx_reg **drdy_reg)
{
+ struct device *dev = hw->dev;
int err = 0, drdy_pin;
- if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) {
+ if (device_property_read_u32(dev, "st,drdy-int-pin", &drdy_pin) < 0) {
struct st_sensors_platform_data *pdata;
- struct device *dev = hw->dev;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
drdy_pin = pdata ? pdata->drdy_int_pin : 1;
@@ -2180,7 +2170,7 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
hub_settings = &hw->settings->shub_settings;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) ||
+ if (device_property_read_bool(dev, "st,pullups") ||
(pdata && pdata->pullups)) {
if (hub_settings->pullup_en.sec_page) {
err = st_lsm6dsx_set_page(hw, true);
@@ -2565,7 +2555,7 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
return err;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) ||
+ if (device_property_read_bool(dev, "drive-open-drain") ||
(pdata && pdata->open_drain)) {
reg = &hw->settings->irq_config.od;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2646,73 +2636,6 @@ static int st_lsm6dsx_init_regulators(struct device *dev)
return 0;
}
-#ifdef CONFIG_ACPI
-
-static int lsm6dsx_get_acpi_mount_matrix(struct device *dev,
- struct iio_mount_matrix *orientation)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_device *adev = ACPI_COMPANION(dev);
- union acpi_object *obj, *elements;
- acpi_status status;
- int i, j, val[3];
- char *str;
-
- if (!has_acpi_companion(dev))
- return -EINVAL;
-
- if (!acpi_has_method(adev->handle, "ROTM"))
- return -EINVAL;
-
- status = acpi_evaluate_object(adev->handle, "ROTM", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_warn(dev, "Failed to get ACPI mount matrix: %d\n", status);
- return -EINVAL;
- }
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3)
- goto unknown_format;
-
- elements = obj->package.elements;
- for (i = 0; i < 3; i++) {
- if (elements[i].type != ACPI_TYPE_STRING)
- goto unknown_format;
-
- str = elements[i].string.pointer;
- if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3)
- goto unknown_format;
-
- for (j = 0; j < 3; j++) {
- switch (val[j]) {
- case -1: str = "-1"; break;
- case 0: str = "0"; break;
- case 1: str = "1"; break;
- default: goto unknown_format;
- }
- orientation->rotation[i * 3 + j] = str;
- }
- }
-
- kfree(buffer.pointer);
- return 0;
-
-unknown_format:
- dev_warn(dev, "Unknown ACPI mount matrix format, ignoring\n");
- kfree(buffer.pointer);
- return -EINVAL;
-}
-
-#else
-
-static int lsm6dsx_get_acpi_mount_matrix(struct device *dev,
- struct iio_mount_matrix *orientation)
-{
- return -EOPNOTSUPP;
-}
-
-#endif
-
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
struct regmap *regmap)
{
@@ -2760,8 +2683,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
hub_settings = &hw->settings->shub_settings;
if (hub_settings->master_en.addr &&
- (!dev_fwnode(dev) ||
- !device_property_read_bool(dev, "st,disable-sensor-hub"))) {
+ !device_property_read_bool(dev, "st,disable-sensor-hub")) {
err = st_lsm6dsx_shub_probe(hw, name);
if (err < 0)
return err;
@@ -2787,8 +2709,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- err = lsm6dsx_get_acpi_mount_matrix(hw->dev, &hw->orientation);
- if (err) {
+ if (!iio_read_acpi_mount_matrix(hw->dev, &hw->orientation, "ROTM")) {
err = iio_read_mount_matrix(hw->dev, &hw->orientation);
if (err)
return err;
@@ -2803,7 +2724,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) ||
+ if (device_property_read_bool(dev, "wakeup-source") ||
(pdata && pdata->wakeup_source))
device_init_wakeup(dev, true);
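
The st_lsm6dsx changes rely on device_property_read_bool()/device_property_read_u32() working for both DT and ACPI and simply failing when the device has no firmware node, so the dev_fwnode() pre-checks and the open-coded ROTM parser can go. A small sketch of the simplified property-with-platform-data-fallback pattern; foo_wants_pullups is a hypothetical helper, the property and pdata field are the ones used above:

#include <linux/property.h>
#include <linux/platform_data/st_sensors_pdata.h>

static bool foo_wants_pullups(struct device *dev,
			      const struct st_sensors_platform_data *pdata)
{
	/* Returns false when there is no DT/ACPI node or no such property. */
	if (device_property_read_bool(dev, "st,pullups"))
		return true;

	/* Legacy board files can still request pull-ups via platform data. */
	return pdata && pdata->pullups;
}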
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index efe05be284b6..20b3b5212da7 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -32,6 +32,7 @@
#define dev_fmt(fmt) "iio-backend: " fmt
#include <linux/cleanup.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -40,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/iio/backend.h>
@@ -52,6 +54,14 @@ struct iio_backend {
struct device *dev;
struct module *owner;
void *priv;
+ const char *name;
+ unsigned int cached_reg_addr;
+ /*
+	 * This index is relative to the frontend: for frontends with multiple
+	 * backends, it is the index of this particular backend. Used for the
+	 * debugfs directory name.
+ */
+ u8 idx;
};
/*
@@ -111,7 +121,142 @@ static DEFINE_MUTEX(iio_back_lock);
__ret = iio_backend_check_op(__back, op); \
if (!__ret) \
__back->ops->op(__back, ##args); \
+ else \
+ dev_dbg(__back->dev, "Op(%s) not implemented\n",\
+ __stringify(op)); \
+}
+
+static ssize_t iio_backend_debugfs_read_reg(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ char read_buf[20];
+ unsigned int val;
+ int ret, len;
+
+ ret = iio_backend_op_call(back, debugfs_reg_access,
+ back->cached_reg_addr, 0, &val);
+ if (ret)
+ return ret;
+
+ len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);
+
+ return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
+}
+
+static ssize_t iio_backend_debugfs_write_reg(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ unsigned int val;
+ char buf[80];
+ ssize_t rc;
+ int ret;
+
+	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
+	if (rc < 0)
+		return rc;
+
+	/* Make sure sscanf() below operates on a NUL terminated string */
+	buf[rc] = '\0';
+
+ ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
+
+ switch (ret) {
+ case 1:
+ return count;
+ case 2:
+ ret = iio_backend_op_call(back, debugfs_reg_access,
+ back->cached_reg_addr, val, NULL);
+ if (ret)
+ return ret;
+ return count;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations iio_backend_debugfs_reg_fops = {
+ .open = simple_open,
+ .read = iio_backend_debugfs_read_reg,
+ .write = iio_backend_debugfs_write_reg,
+};
+
+static ssize_t iio_backend_debugfs_read_name(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ char name[128];
+ int len;
+
+ len = scnprintf(name, sizeof(name), "%s\n", back->name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, name, len);
+}
+
+static const struct file_operations iio_backend_debugfs_name_fops = {
+ .open = simple_open,
+ .read = iio_backend_debugfs_read_name,
+};
+
+/**
+ * iio_backend_debugfs_add - Add debugfs interfaces for Backends
+ * @back: Backend device
+ * @indio_dev: IIO device
+ */
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev)
+{
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ struct dentry *back_d;
+ char name[128];
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
+ return;
+ if (!back->ops->debugfs_reg_access && !back->name)
+ return;
+
+ snprintf(name, sizeof(name), "backend%d", back->idx);
+
+ back_d = debugfs_create_dir(name, d);
+ if (IS_ERR(back_d))
+ return;
+
+ if (back->ops->debugfs_reg_access)
+ debugfs_create_file("direct_reg_access", 0600, back_d, back,
+ &iio_backend_debugfs_reg_fops);
+
+ if (back->name)
+ debugfs_create_file("name", 0400, back_d, back,
+ &iio_backend_debugfs_name_fops);
}
+EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, IIO_BACKEND);
+
+/**
+ * iio_backend_debugfs_print_chan_status - Print channel status
+ * @back: Backend device
+ * @chan: Channel number
+ * @buf: Buffer where to print the status
+ * @len: Available space
+ *
+ * One use case where this is useful is testing test tones in a digital
+ * interface and asking the backend to dump more details on why a test tone
+ * might have errors.
+ *
+ * RETURNS:
+ * Number of copied bytes on success, negative error code on failure.
+ */
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return -ENODEV;
+
+ return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
+ len);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, IIO_BACKEND);
/**
* iio_backend_chan_enable - Enable a backend channel
@@ -147,6 +292,29 @@ static void __iio_backend_disable(void *back)
}
/**
+ * iio_backend_disable - Backend disable
+ * @back: Backend device
+ */
+void iio_backend_disable(struct iio_backend *back)
+{
+ __iio_backend_disable(back);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_disable, IIO_BACKEND);
+
+/**
+ * iio_backend_enable - Backend enable
+ * @back: Backend device
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_enable(struct iio_backend *back)
+{
+ return iio_backend_op_call(back, enable);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_enable, IIO_BACKEND);
+
+/**
* devm_iio_backend_enable - Device managed backend enable
* @dev: Consumer device for the backend
* @back: Backend device
@@ -158,7 +326,7 @@ int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
int ret;
- ret = iio_backend_op_call(back, enable);
+ ret = iio_backend_enable(back);
if (ret)
return ret;
@@ -357,6 +525,25 @@ int devm_iio_backend_request_buffer(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
+/**
+ * iio_backend_read_raw - Read a channel attribute from a backend device.
+ * @back: Backend device
+ * @chan: IIO channel reference
+ * @val: First returned value
+ * @val2: Second returned value
+ * @mask: Specify the attribute to return
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, IIO_BACKEND);
+
static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
struct iio_backend *back = ERR_PTR(-ENODEV), *iter;
@@ -451,7 +638,6 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);
/**
* iio_backend_extend_chan_spec - Extend an IIO channel
- * @indio_dev: IIO device
* @back: Backend device
* @chan: IIO channel
*
@@ -461,8 +647,7 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);
* RETURNS:
* 0 on success, negative error number on failure.
*/
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan)
{
const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
@@ -533,19 +718,10 @@ static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
return 0;
}
-/**
- * devm_iio_backend_get - Device managed backend device get
- * @dev: Consumer device for the backend
- * @name: Backend name
- *
- * Get's the backend associated with @dev.
- *
- * RETURNS:
- * A backend pointer, negative error pointer otherwise.
- */
-struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
+ struct fwnode_handle *fwnode)
{
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode_back;
struct iio_backend *back;
unsigned int index;
int ret;
@@ -560,30 +736,67 @@ struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
index = 0;
}
- fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
+ fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
 	if (IS_ERR(fwnode_back))
 		return dev_err_cast_probe(dev, fwnode_back,
"Cannot get Firmware reference\n");
guard(mutex)(&iio_back_lock);
list_for_each_entry(back, &iio_back_list, entry) {
- if (!device_match_fwnode(back->dev, fwnode))
+ if (!device_match_fwnode(back->dev, fwnode_back))
continue;
- fwnode_handle_put(fwnode);
+ fwnode_handle_put(fwnode_back);
ret = __devm_iio_backend_get(dev, back);
if (ret)
return ERR_PTR(ret);
+ if (name)
+ back->idx = index;
+
return back;
}
- fwnode_handle_put(fwnode);
+ fwnode_handle_put(fwnode_back);
return ERR_PTR(-EPROBE_DEFER);
}
+
+/**
+ * devm_iio_backend_get - Device managed backend device get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ *
+ * Gets the backend associated with @dev.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+{
+ return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
+}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
/**
+ * devm_iio_backend_fwnode_get - Device managed backend firmware node get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ * @fwnode: Firmware node of the backend consumer
+ *
+ * Gets the backend associated with a firmware node.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode)
+{
+ return __devm_iio_backend_fwnode_get(dev, name, fwnode);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, IIO_BACKEND);
+
+/**
* __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
* @dev: Consumer device for the backend
* @fwnode: Firmware node of the backend device
@@ -639,20 +852,20 @@ static void iio_backend_unregister(void *arg)
/**
* devm_iio_backend_register - Device managed backend device register
* @dev: Backend device being registered
- * @ops: Backend ops
+ * @info: Backend info
* @priv: Device private data
*
- * @ops is mandatory. Not providing it results in -EINVAL.
+ * @info is mandatory. Not providing it results in -EINVAL.
*
* RETURNS:
* 0 on success, negative error number on failure.
*/
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv)
+ const struct iio_backend_info *info, void *priv)
{
struct iio_backend *back;
- if (!ops)
+ if (!info || !info->ops)
return dev_err_probe(dev, -EINVAL, "No backend ops given\n");
/*
@@ -665,7 +878,8 @@ int devm_iio_backend_register(struct device *dev,
if (!back)
return -ENOMEM;
- back->ops = ops;
+ back->ops = info->ops;
+ back->name = info->name;
back->owner = dev->driver->owner;
back->dev = dev;
back->priv = priv;
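
Taken together, the backend additions above (devm_iio_backend_fwnode_get(), iio_backend_enable()/iio_backend_disable(), iio_backend_read_raw() and the debugfs hooks) are meant to be consumed by frontend drivers roughly as sketched below; foo_probe and chan_node are hypothetical, the IIO calls are the ones introduced or already exported in this file:

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

static int foo_probe(struct device *dev, struct iio_dev *indio_dev,
		     struct fwnode_handle *chan_node)
{
	struct iio_backend *back;
	int ret;

	/* Look the backend up through a channel node, not dev_fwnode(dev). */
	back = devm_iio_backend_fwnode_get(dev, NULL, chan_node);
	if (IS_ERR(back))
		return PTR_ERR(back);

	ret = devm_iio_backend_enable(dev, back);
	if (ret)
		return ret;

	ret = devm_iio_device_register(dev, indio_dev);
	if (ret)
		return ret;

	/* Creates <iio debugfs dir>/backendN with name and direct_reg_access. */
	iio_backend_debugfs_add(back, indio_dev);

	return 0;
}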
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d6fe105d2f40..8104696cd475 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -508,18 +508,19 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
static int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
+ unsigned int masklength = iio_get_masklength(indio_dev);
const unsigned long *mask;
unsigned long *trialmask;
- if (!indio_dev->masklength) {
+ if (!masklength) {
WARN(1, "Trying to set scanmask prior to registering buffer\n");
return -EINVAL;
}
- trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
+ trialmask = bitmap_alloc(masklength, GFP_KERNEL);
if (!trialmask)
return -ENOMEM;
- bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+ bitmap_copy(trialmask, buffer->scan_mask, masklength);
set_bit(bit, trialmask);
if (!iio_validate_scan_mask(indio_dev, trialmask))
@@ -527,12 +528,11 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
if (indio_dev->available_scan_masks) {
mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- trialmask, false);
+ masklength, trialmask, false);
if (!mask)
goto err_invalid_mask;
}
- bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+ bitmap_copy(buffer->scan_mask, trialmask, masklength);
bitmap_free(trialmask);
@@ -552,7 +552,7 @@ static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
static int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
- if (bit > indio_dev->masklength)
+ if (bit > iio_get_masklength(indio_dev))
return -EINVAL;
if (!buffer->scan_mask)
@@ -768,8 +768,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
int length, i, largest = 0;
/* How much space will the demuxed element take? */
- for_each_set_bit(i, mask,
- indio_dev->masklength) {
+ for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) {
length = iio_storage_bytes_for_si(indio_dev, i);
if (length < 0)
return length;
@@ -890,6 +889,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ unsigned int masklength = iio_get_masklength(indio_dev);
unsigned long *compound_mask;
const unsigned long *scan_mask;
bool strict_scanmask = false;
@@ -898,7 +898,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
unsigned int modes;
if (insert_buffer &&
- bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+ bitmap_empty(insert_buffer->scan_mask, masklength)) {
dev_dbg(&indio_dev->dev,
"At least one scan element must be enabled first\n");
return -EINVAL;
@@ -952,7 +952,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
}
/* What scan mask do we actually have? */
- compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
+ compound_mask = bitmap_zalloc(masklength, GFP_KERNEL);
if (!compound_mask)
return -ENOMEM;
@@ -962,20 +962,19 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (buffer == remove_buffer)
continue;
bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
- indio_dev->masklength);
+ masklength);
scan_timestamp |= buffer->scan_timestamp;
}
if (insert_buffer) {
bitmap_or(compound_mask, compound_mask,
- insert_buffer->scan_mask, indio_dev->masklength);
+ insert_buffer->scan_mask, masklength);
scan_timestamp |= insert_buffer->scan_timestamp;
}
if (indio_dev->available_scan_masks) {
scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- compound_mask,
+ masklength, compound_mask,
strict_scanmask);
bitmap_free(compound_mask);
if (!scan_mask)
@@ -1040,6 +1039,7 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer,
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
+ unsigned int masklength = iio_get_masklength(indio_dev);
int ret, in_ind = -1, out_ind, length;
unsigned int in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
@@ -1051,17 +1051,13 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* First work out which scan mode we will actually have */
if (bitmap_equal(indio_dev->active_scan_mask,
- buffer->scan_mask,
- indio_dev->masklength))
+ buffer->scan_mask, masklength))
return 0;
/* Now we have the two masks, work from least sig and build up sizes */
- for_each_set_bit(out_ind,
- buffer->scan_mask,
- indio_dev->masklength) {
+ for_each_set_bit(out_ind, buffer->scan_mask, masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength,
- in_ind + 1);
+ masklength, in_ind + 1);
while (in_ind != out_ind) {
ret = iio_storage_bytes_for_si(indio_dev, in_ind);
if (ret < 0)
@@ -1071,8 +1067,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
in_ind = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength,
- in_ind + 1);
+ masklength, in_ind + 1);
}
ret = iio_storage_bytes_for_si(indio_dev, in_ind);
if (ret < 0)
@@ -2104,6 +2099,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
int index)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ unsigned int masklength = iio_get_masklength(indio_dev);
struct iio_dev_attr *p;
const struct iio_dev_attr *id_attr;
struct attribute **attr;
@@ -2166,8 +2162,8 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
- if (indio_dev->masklength && !buffer->scan_mask) {
- buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
+ if (masklength && !buffer->scan_mask) {
+ buffer->scan_mask = bitmap_zalloc(masklength,
GFP_KERNEL);
if (!buffer->scan_mask) {
ret = -ENOMEM;
@@ -2273,7 +2269,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
for (i = 0; i < indio_dev->num_channels; i++)
ml = max(ml, channels[i].scan_index + 1);
- indio_dev->masklength = ml;
+ ACCESS_PRIVATE(indio_dev, masklength) = ml;
}
if (!iio_dev_opaque->attached_buffers_cnt)
@@ -2337,7 +2333,7 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask)
{
- return bitmap_weight(mask, indio_dev->masklength) == 1;
+ return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
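
The buffer-core changes above go hand in hand with the driver conversions earlier in this series: masklength becomes private to the IIO core, and drivers are expected to use iio_get_masklength() or the iio_for_each_active_channel() helper instead of dereferencing the field. A sketch of the converted trigger-handler shape, with foo_read_channel as a hypothetical per-driver read:

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>

static void foo_read_channel(struct iio_dev *indio_dev, int bit);	/* placeholder */

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	int bit;

	/* Replaces for_each_set_bit(bit, active_scan_mask, masklength). */
	iio_for_each_active_channel(indio_dev, bit)
		foo_read_channel(indio_dev, bit);

	/* A real handler would push the assembled scan to the buffers here. */
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}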
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 0f6cda7ffe45..6a6568d4a2cb 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -667,7 +667,6 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
vals[1]);
case IIO_VAL_FRACTIONAL:
tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
if ((tmp2 < 0) && (tmp0 == 0))
return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
@@ -1912,7 +1911,7 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
int i;
av_masks = indio_dev->available_scan_masks;
- masklength = indio_dev->masklength;
+ masklength = iio_get_masklength(indio_dev);
longs_per_mask = BITS_TO_LONGS(masklength);
/*
@@ -1965,6 +1964,49 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
}
}
+/**
+ * iio_active_scan_mask_index - Get index of the active scan mask inside the
+ * available scan masks array
+ * @indio_dev: the IIO device containing the active and available scan masks
+ *
+ * Returns: the index, or -EINVAL if active_scan_mask is not set or is not
+ * found in available_scan_masks
+ */
+int iio_active_scan_mask_index(struct iio_dev *indio_dev)
+{
+ const unsigned long *av_masks;
+ unsigned int masklength = iio_get_masklength(indio_dev);
+ int i = 0;
+
+ if (!indio_dev->active_scan_mask)
+ return -EINVAL;
+
+ /*
+ * As in iio_scan_mask_match and iio_sanity_check_avail_scan_masks,
+	 * the loop condition here does not handle multi-long masks correctly.
+	 * It only checks the first long to be zero, and will use such a mask
+	 * as a terminator even if there were bits set after the first long.
+	 *
+	 * This should be fine since the available_scan_masks array has already
+	 * been sanity tested using iio_sanity_check_avail_scan_masks.
+	 *
+	 * See iio_scan_mask_match and iio_sanity_check_avail_scan_masks for
+	 * more details.
+ */
+ av_masks = indio_dev->available_scan_masks;
+ while (*av_masks) {
+ if (indio_dev->active_scan_mask == av_masks)
+ return i;
+ av_masks += BITS_TO_LONGS(masklength);
+ i++;
+ }
+
+ dev_warn(indio_dev->dev.parent,
+ "active scan mask is not part of the available scan masks\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iio_active_scan_mask_index);
+
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
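
iio_active_scan_mask_index() is aimed at drivers that declare available_scan_masks and need to translate the mask the core selected back into a hardware configuration. A hedged sketch of such an update_scan_mode() callback; FOO_MODE_REG, foo_hw_modes and struct foo_data are placeholders, not part of the patch:

#include <linux/iio/iio.h>
#include <linux/regmap.h>

#define FOO_MODE_REG	0x10					/* placeholder register */

static const unsigned int foo_hw_modes[] = { 0x0, 0x1, 0x3 };	/* placeholder table */

struct foo_data {
	struct regmap *regmap;
};

static int foo_update_scan_mode(struct iio_dev *indio_dev,
				const unsigned long *scan_mask)
{
	struct foo_data *data = iio_priv(indio_dev);
	int idx;

	/* active_scan_mask is already set when update_scan_mode() runs. */
	idx = iio_active_scan_mask_index(indio_dev);
	if (idx < 0)
		return idx;

	return regmap_write(data->regmap, FOO_MODE_REG, foo_hw_modes[idx]);
}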
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 2e84776f4fbd..54416a384232 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -347,6 +347,7 @@ int iio_trigger_detach_poll_func(struct iio_trigger *trig,
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(iio_dev_opaque->driver_module);
+ pf->irq = 0;
return ret;
}
@@ -770,3 +771,29 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
if (indio_dev->trig)
iio_trigger_put(indio_dev->trig);
}
+
+int iio_device_suspend_triggering(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ guard(mutex)(&iio_dev_opaque->mlock);
+
+ if ((indio_dev->pollfunc) && (indio_dev->pollfunc->irq > 0))
+ disable_irq(indio_dev->pollfunc->irq);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_device_suspend_triggering);
+
+int iio_device_resume_triggering(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ guard(mutex)(&iio_dev_opaque->mlock);
+
+ if ((indio_dev->pollfunc) && (indio_dev->pollfunc->irq > 0))
+ enable_irq(indio_dev->pollfunc->irq);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_device_resume_triggering);
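
iio_device_suspend_triggering()/iio_device_resume_triggering() let a driver park its poll function IRQ without knowing whether a trigger is currently attached (pf->irq is now reset to 0 on detach for the same reason). A minimal sketch of PM callbacks built on top of them; foo_suspend/foo_resume are hypothetical and assume the iio_dev is the drvdata, as in the bmi323 usage above:

#include <linux/iio/iio.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	/* Disables the poll function IRQ, if one is attached. */
	return iio_device_suspend_triggering(indio_dev);
}

static int foo_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return iio_device_resume_triggering(indio_dev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);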
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index b68dcc1fbaca..515ff46b5b82 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -114,6 +114,19 @@ config AS73211
This driver can also be built as a module. If so, the module
will be called as73211.
+config BH1745
+ tristate "ROHM BH1745 colour sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IIO_GTS_HELPER
+ help
+ Say Y here to build support for the ROHM bh1745 colour sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called bh1745.
+
config BH1750
tristate "ROHM BH1750 ambient light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 1a071a8e9f8e..321010fc0b93 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9306) += apds9306.o
obj-$(CONFIG_APDS9960) += apds9960.o
obj-$(CONFIG_AS73211) += as73211.o
+obj-$(CONFIG_BH1745) += bh1745.o
obj-$(CONFIG_BH1750) += bh1750.o
obj-$(CONFIG_BH1780) += bh1780.o
obj-$(CONFIG_CM32181) += cm32181.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5169f12c3eba..c1b43053fbc7 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -125,8 +125,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = i2c_smbus_read_word_data(data->client,
ADJD_S311_DATA_REG(i));
if (ret < 0)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index e9e65130b6f9..3c14e4c30805 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -146,6 +146,25 @@ struct apds9960_data {
/* gesture buffer */
u8 buffer[4]; /* 4 8-bit channels */
+
+ /* calibration value buffer */
+ int calibbias[5];
+};
+
+enum {
+ APDS9960_CHAN_PROXIMITY,
+ APDS9960_CHAN_GESTURE_UP,
+ APDS9960_CHAN_GESTURE_DOWN,
+ APDS9960_CHAN_GESTURE_LEFT,
+ APDS9960_CHAN_GESTURE_RIGHT,
+};
+
+static const unsigned int apds9960_offset_regs[][2] = {
+ [APDS9960_CHAN_PROXIMITY] = {APDS9960_REG_POFFSET_UR, APDS9960_REG_POFFSET_DL},
+ [APDS9960_CHAN_GESTURE_UP] = {APDS9960_REG_GOFFSET_U, 0},
+ [APDS9960_CHAN_GESTURE_DOWN] = {APDS9960_REG_GOFFSET_D, 0},
+ [APDS9960_CHAN_GESTURE_LEFT] = {APDS9960_REG_GOFFSET_L, 0},
+ [APDS9960_CHAN_GESTURE_RIGHT] = {APDS9960_REG_GOFFSET_R, 0},
};
static const struct reg_default apds9960_reg_defaults[] = {
@@ -255,6 +274,7 @@ static const struct iio_event_spec apds9960_als_event_spec[] = {
#define APDS9960_GESTURE_CHANNEL(_dir, _si) { \
.type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \
.channel = _si + 1, \
.scan_index = _si, \
.indexed = 1, \
@@ -282,7 +302,8 @@ static const struct iio_chan_spec apds9960_channels[] = {
{
.type = IIO_PROXIMITY,
.address = APDS9960_REG_PDATA,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.channel = 0,
.indexed = 0,
@@ -316,6 +337,28 @@ static const struct iio_chan_spec apds9960_channels[] = {
APDS9960_INTENSITY_CHANNEL(BLUE),
};
+static int apds9960_set_calibbias(struct apds9960_data *data,
+ struct iio_chan_spec const *chan, int calibbias)
+{
+ int ret, i;
+
+ if (calibbias < S8_MIN || calibbias > S8_MAX)
+ return -EINVAL;
+
+ guard(mutex)(&data->lock);
+ for (i = 0; i < 2; i++) {
+ if (apds9960_offset_regs[chan->channel][i] == 0)
+ break;
+
+ ret = regmap_write(data->regmap, apds9960_offset_regs[chan->channel][i], calibbias);
+ if (ret < 0)
+ return ret;
+ }
+ data->calibbias[chan->channel] = calibbias;
+
+ return 0;
+}
+
/* integration time in us */
static const int apds9960_int_time[][2] = {
{ 28000, 246},
@@ -531,6 +574,12 @@ static int apds9960_read_raw(struct iio_dev *indio_dev,
}
mutex_unlock(&data->lock);
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&data->lock);
+ *val = data->calibbias[chan->channel];
+ ret = IIO_VAL_INT;
+ mutex_unlock(&data->lock);
+ break;
}
return ret;
@@ -564,6 +613,10 @@ static int apds9960_write_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ return apds9960_set_calibbias(data, chan, val);
default:
return -EINVAL;
}
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
new file mode 100644
index 000000000000..2e458e9d5d85
--- /dev/null
+++ b/drivers/iio/light/bh1745.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ROHM BH1745 digital colour sensor driver
+ *
+ * Copyright (C) Mudit Sharma <muditsharma.info@gmail.com>
+ *
+ * 7-bit I2C slave addresses:
+ * 0x38 (ADDR pin low)
+ * 0x39 (ADDR pin high)
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/util_macros.h>
+#include <linux/iio/events.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/iio-gts-helper.h>
+
+/* BH1745 configuration registers */
+
+/* System control */
+#define BH1745_SYS_CTRL 0x40
+#define BH1745_SYS_CTRL_SW_RESET BIT(7)
+#define BH1745_SYS_CTRL_INTR_RESET BIT(6)
+#define BH1745_SYS_CTRL_PART_ID_MASK GENMASK(5, 0)
+#define BH1745_PART_ID 0x0B
+
+/* Mode control 1 */
+#define BH1745_MODE_CTRL1 0x41
+#define BH1745_CTRL1_MEASUREMENT_TIME_MASK GENMASK(2, 0)
+
+/* Mode control 2 */
+#define BH1745_MODE_CTRL2 0x42
+#define BH1745_CTRL2_RGBC_EN BIT(4)
+#define BH1745_CTRL2_ADC_GAIN_MASK GENMASK(1, 0)
+
+/* Interrupt */
+#define BH1745_INTR 0x60
+#define BH1745_INTR_STATUS BIT(7)
+#define BH1745_INTR_SOURCE_MASK GENMASK(3, 2)
+#define BH1745_INTR_ENABLE BIT(0)
+
+#define BH1745_PERSISTENCE 0x61
+
+/* Threshold high */
+#define BH1745_TH_LSB 0x62
+#define BH1745_TH_MSB 0x63
+
+/* Threshold low */
+#define BH1745_TL_LSB 0x64
+#define BH1745_TL_MSB 0x65
+
+/* BH1745 data output regs */
+#define BH1745_RED_LSB 0x50
+#define BH1745_RED_MSB 0x51
+#define BH1745_GREEN_LSB 0x52
+#define BH1745_GREEN_MSB 0x53
+#define BH1745_BLUE_LSB 0x54
+#define BH1745_BLUE_MSB 0x55
+#define BH1745_CLEAR_LSB 0x56
+#define BH1745_CLEAR_MSB 0x57
+
+#define BH1745_MANU_ID_REG 0x92
+
+/* From 16x max HW gain and 32x max integration time */
+#define BH1745_MAX_GAIN 512
+
+enum bh1745_int_source {
+ BH1745_INTR_SOURCE_RED,
+ BH1745_INTR_SOURCE_GREEN,
+ BH1745_INTR_SOURCE_BLUE,
+ BH1745_INTR_SOURCE_CLEAR,
+};
+
+enum bh1745_gain {
+ BH1745_ADC_GAIN_1X,
+ BH1745_ADC_GAIN_2X,
+ BH1745_ADC_GAIN_16X,
+};
+
+enum bh1745_measurement_time {
+ BH1745_MEASUREMENT_TIME_160MS,
+ BH1745_MEASUREMENT_TIME_320MS,
+ BH1745_MEASUREMENT_TIME_640MS,
+ BH1745_MEASUREMENT_TIME_1280MS,
+ BH1745_MEASUREMENT_TIME_2560MS,
+ BH1745_MEASUREMENT_TIME_5120MS,
+};
+
+enum bh1745_presistence_value {
+ BH1745_PRESISTENCE_UPDATE_TOGGLE,
+ BH1745_PRESISTENCE_UPDATE_EACH_MEASUREMENT,
+ BH1745_PRESISTENCE_UPDATE_FOUR_MEASUREMENT,
+ BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT,
+};
+
+static const struct iio_gain_sel_pair bh1745_gain[] = {
+ GAIN_SCALE_GAIN(1, BH1745_ADC_GAIN_1X),
+ GAIN_SCALE_GAIN(2, BH1745_ADC_GAIN_2X),
+ GAIN_SCALE_GAIN(16, BH1745_ADC_GAIN_16X),
+};
+
+static const struct iio_itime_sel_mul bh1745_itimes[] = {
+ GAIN_SCALE_ITIME_US(5120000, BH1745_MEASUREMENT_TIME_5120MS, 32),
+ GAIN_SCALE_ITIME_US(2560000, BH1745_MEASUREMENT_TIME_2560MS, 16),
+ GAIN_SCALE_ITIME_US(1280000, BH1745_MEASUREMENT_TIME_1280MS, 8),
+ GAIN_SCALE_ITIME_US(640000, BH1745_MEASUREMENT_TIME_640MS, 4),
+ GAIN_SCALE_ITIME_US(320000, BH1745_MEASUREMENT_TIME_320MS, 2),
+ GAIN_SCALE_ITIME_US(160000, BH1745_MEASUREMENT_TIME_160MS, 1),
+};
+
+struct bh1745_data {
+ /*
+ * Lock to prevent device setting update or read before
+ * related calculations are completed
+ */
+ struct mutex lock;
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+ struct iio_gts gts;
+};
+
+static const struct regmap_range bh1745_volatile_ranges[] = {
+ regmap_reg_range(BH1745_MODE_CTRL2, BH1745_MODE_CTRL2), /* VALID */
+ regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB), /* Data */
+ regmap_reg_range(BH1745_INTR, BH1745_INTR), /* Interrupt */
+};
+
+static const struct regmap_access_table bh1745_volatile_regs = {
+ .yes_ranges = bh1745_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_volatile_ranges),
+};
+
+static const struct regmap_range bh1745_readable_ranges[] = {
+ regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2),
+ regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB),
+ regmap_reg_range(BH1745_INTR, BH1745_INTR),
+ regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB),
+ regmap_reg_range(BH1745_MANU_ID_REG, BH1745_MANU_ID_REG),
+};
+
+static const struct regmap_access_table bh1745_readable_regs = {
+ .yes_ranges = bh1745_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_readable_ranges),
+};
+
+static const struct regmap_range bh1745_writable_ranges[] = {
+ regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2),
+ regmap_reg_range(BH1745_INTR, BH1745_INTR),
+ regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB),
+};
+
+static const struct regmap_access_table bh1745_writable_regs = {
+ .yes_ranges = bh1745_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_writable_ranges),
+};
+
+static const struct regmap_config bh1745_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BH1745_MANU_ID_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &bh1745_volatile_regs,
+ .wr_table = &bh1745_writable_regs,
+ .rd_table = &bh1745_readable_regs,
+};
+
+static const struct iio_event_spec bh1745_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define BH1745_CHANNEL(_colour, _si, _addr) \
+ { \
+ .type = IIO_INTENSITY, .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .event_spec = bh1745_event_spec, \
+ .num_event_specs = ARRAY_SIZE(bh1745_event_spec), \
+ .channel2 = IIO_MOD_LIGHT_##_colour, .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec bh1745_channels[] = {
+ BH1745_CHANNEL(RED, 0, BH1745_RED_LSB),
+ BH1745_CHANNEL(GREEN, 1, BH1745_GREEN_LSB),
+ BH1745_CHANNEL(BLUE, 2, BH1745_BLUE_LSB),
+ BH1745_CHANNEL(CLEAR, 3, BH1745_CLEAR_LSB),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static int bh1745_reset(struct bh1745_data *data)
+{
+ return regmap_set_bits(data->regmap, BH1745_SYS_CTRL,
+ BH1745_SYS_CTRL_SW_RESET |
+ BH1745_SYS_CTRL_INTR_RESET);
+}
+
+static int bh1745_power_on(struct bh1745_data *data)
+{
+ return regmap_set_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_RGBC_EN);
+}
+
+static void bh1745_power_off(void *data_ptr)
+{
+ struct bh1745_data *data = data_ptr;
+ struct device *dev = data->dev;
+ int ret;
+
+ ret = regmap_clear_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_RGBC_EN);
+ if (ret)
+ dev_err(dev, "Failed to turn off device\n");
+}
+
+static int bh1745_get_scale(struct bh1745_data *data, int *val, int *val2)
+{
+ int ret;
+ int value;
+ int gain_sel, int_time_sel;
+ int gain;
+ const struct iio_itime_sel_mul *int_time;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value);
+ if (ret)
+ return ret;
+
+ gain_sel = FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value);
+ gain = iio_gts_find_gain_by_sel(&data->gts, gain_sel);
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value);
+ int_time = iio_gts_find_itime_by_sel(&data->gts, int_time_sel);
+
+ return iio_gts_get_scale(&data->gts, gain, int_time->time_us, val,
+ val2);
+}
+
+static int bh1745_set_scale(struct bh1745_data *data, int val)
+{
+ struct device *dev = data->dev;
+ int ret;
+ int value;
+ int hw_gain_sel, current_int_time_sel, new_int_time_sel;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ current_int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ value);
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
+ current_int_time_sel,
+ val, 0, &hw_gain_sel);
+ if (ret) {
+ for (int i = 0; i < data->gts.num_itime; i++) {
+ new_int_time_sel = data->gts.itime_table[i].sel;
+
+ if (new_int_time_sel == current_int_time_sel)
+ continue;
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
+ new_int_time_sel,
+ val, 0,
+ &hw_gain_sel);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(dev, "Unsupported scale value requested: %d\n",
+ val);
+ return -EINVAL;
+ }
+
+ ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL1,
+ BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ new_int_time_sel);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_write_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_ADC_GAIN_MASK, hw_gain_sel);
+}
+
+static int bh1745_get_int_time(struct bh1745_data *data, int *val)
+{
+ int ret;
+ int value;
+ int int_time, int_time_sel;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value);
+ int_time = iio_gts_find_int_time_by_sel(&data->gts, int_time_sel);
+ if (int_time < 0)
+ return int_time;
+
+ *val = int_time;
+
+ return 0;
+}
+
+static int bh1745_set_int_time(struct bh1745_data *data, int val, int val2)
+{
+ struct device *dev = data->dev;
+ int ret;
+ int value;
+ int current_int_time, current_hwgain_sel, current_hwgain;
+ int new_hwgain, new_hwgain_sel, new_int_time_sel;
+ int req_int_time = (1000000 * val) + val2;
+
+ if (!iio_gts_valid_time(&data->gts, req_int_time)) {
+ dev_dbg(dev, "Unsupported integration time requested: %d\n",
+ req_int_time);
+ return -EINVAL;
+ }
+
+ ret = bh1745_get_int_time(data, &current_int_time);
+ if (ret)
+ return ret;
+
+ if (current_int_time == req_int_time)
+ return 0;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value);
+ if (ret)
+ return ret;
+
+ current_hwgain_sel = FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value);
+ current_hwgain = iio_gts_find_gain_by_sel(&data->gts,
+ current_hwgain_sel);
+ ret = iio_gts_find_new_gain_by_old_gain_time(&data->gts, current_hwgain,
+ current_int_time,
+ req_int_time,
+ &new_hwgain);
+ if (new_hwgain < 0) {
+ dev_dbg(dev, "No corresponding gain for requested integration time\n");
+ return ret;
+ }
+
+ if (ret) {
+ bool in_range;
+
+ new_hwgain = iio_find_closest_gain_low(&data->gts, new_hwgain,
+ &in_range);
+ if (new_hwgain < 0) {
+ new_hwgain = iio_gts_get_min_gain(&data->gts);
+ if (new_hwgain < 0)
+ return ret;
+ }
+
+ if (!in_range)
+ dev_dbg(dev, "Optimal gain out of range\n");
+
+ dev_dbg(dev, "Scale changed, new hw_gain %d\n", new_hwgain);
+ }
+
+ new_hwgain_sel = iio_gts_find_sel_by_gain(&data->gts, new_hwgain);
+ if (new_hwgain_sel < 0)
+ return new_hwgain_sel;
+
+ ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_ADC_GAIN_MASK,
+ new_hwgain_sel);
+ if (ret)
+ return ret;
+
+ new_int_time_sel = iio_gts_find_sel_by_int_time(&data->gts,
+ req_int_time);
+ if (new_int_time_sel < 0)
+ return new_int_time_sel;
+
+ return regmap_write_bits(data->regmap, BH1745_MODE_CTRL1,
+ BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ new_int_time_sel);
+}
+
+static int bh1745_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ int value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &value, 2);
+ if (ret)
+ return ret;
+ *val = value;
+
+ return IIO_VAL_INT;
+ }
+ unreachable();
+
+ case IIO_CHAN_INFO_SCALE: {
+ guard(mutex)(&data->lock);
+ ret = bh1745_get_scale(data, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME: {
+ guard(mutex)(&data->lock);
+ *val = 0;
+ ret = bh1745_get_int_time(data, val2);
+		if (ret)
+			return ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return bh1745_set_scale(data, val);
+
+ case IIO_CHAN_INFO_INT_TIME:
+ return bh1745_set_int_time(data, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_bulk_read(data->regmap, BH1745_TH_LSB,
+ val, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_bulk_read(data->regmap, BH1745_TL_LSB,
+ val, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_EV_INFO_PERIOD:
+ ret = regmap_read(data->regmap, BH1745_PERSISTENCE, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
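+ /* Threshold registers are 16 bits wide */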
+ if (val < 0x0 || val > 0xFFFF)
+ return -EINVAL;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_bulk_write(data->regmap, BH1745_TH_LSB,
+ &val, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_bulk_write(data->regmap, BH1745_TL_LSB,
+ &val, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_EV_INFO_PERIOD:
+ if (val < BH1745_PRESISTENCE_UPDATE_TOGGLE ||
+ val > BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT)
+ return -EINVAL;
+ ret = regmap_write(data->regmap, BH1745_PERSISTENCE, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ int value;
+ int int_src;
+
+ ret = regmap_read(data->regmap, BH1745_INTR, &value);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(BH1745_INTR_ENABLE, value))
+ return 0;
+
+ int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value);
+
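+ /* Only one channel at a time can be routed to the interrupt source */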
+ switch (chan->channel2) {
+ case IIO_MOD_LIGHT_RED:
+ if (int_src == BH1745_INTR_SOURCE_RED)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_GREEN:
+ if (int_src == BH1745_INTR_SOURCE_GREEN)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_BLUE:
+ if (int_src == BH1745_INTR_SOURCE_BLUE)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_CLEAR:
+ if (int_src == BH1745_INTR_SOURCE_CLEAR)
+ return 1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int value;
+
+ if (state == 0)
+ return regmap_clear_bits(data->regmap,
+ BH1745_INTR, BH1745_INTR_ENABLE);
+
+ if (state == 1) {
+ /* Latch is always enabled when enabling interrupt */
+ value = BH1745_INTR_ENABLE;
+
+ switch (chan->channel2) {
+ case IIO_MOD_LIGHT_RED:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_RED));
+
+ case IIO_MOD_LIGHT_GREEN:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_GREEN));
+
+ case IIO_MOD_LIGHT_BLUE:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_BLUE));
+
+ case IIO_MOD_LIGHT_CLEAR:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_CLEAR));
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bh1745_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return iio_gts_avail_times(&data->gts, vals, type, length);
+
+ case IIO_CHAN_INFO_SCALE:
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bh1745_info = {
+ .read_raw = bh1745_read_raw,
+ .write_raw = bh1745_write_raw,
+ .write_raw_get_fmt = bh1745_write_raw_get_fmt,
+ .read_event_value = bh1745_read_thresh,
+ .write_event_value = bh1745_write_thresh,
+ .read_event_config = bh1745_read_event_config,
+ .write_event_config = bh1745_write_event_config,
+ .read_avail = bh1745_read_avail,
+};
+
+static irqreturn_t bh1745_interrupt_handler(int interrupt, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ int value;
+ int int_src;
+
+ ret = regmap_read(data->regmap, BH1745_INTR, &value);
+ if (ret)
+ return IRQ_NONE;
+
+ int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value);
+
+ if (value & BH1745_INTR_STATUS) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, int_src,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bh1745_data *data = iio_priv(indio_dev);
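+ /* Timestamp must be 8-byte aligned for iio_push_to_buffers_with_timestamp() */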
+ struct {
+ u16 chans[4];
+ s64 timestamp __aligned(8);
+ } scan;
+ u16 value;
+ int ret;
+ int i;
+ int j = 0;
+
+ iio_for_each_active_channel(indio_dev, i) {
+ ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
+ &value, 2);
+ if (ret)
+ goto err;
+
+ scan.chans[j++] = value;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
+ iio_get_time_ns(indio_dev));
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bh1745_setup_triggered_buffer(struct iio_dev *indio_dev,
+ struct device *parent,
+ int irq)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ struct device *dev = data->dev;
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(parent, indio_dev, NULL,
+ bh1745_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Triggered buffer setup failed\n");
+
+ if (irq) {
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ bh1745_interrupt_handler,
+ IRQF_ONESHOT,
+ "bh1745_interrupt", indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Request for IRQ failed\n");
+ }
+
+ return 0;
+}
+
+static int bh1745_init(struct bh1745_data *data)
+{
+ int ret;
+ struct device *dev = data->dev;
+
+ mutex_init(&data->lock);
+
+ ret = devm_iio_init_iio_gts(dev, BH1745_MAX_GAIN, 0, bh1745_gain,
+ ARRAY_SIZE(bh1745_gain), bh1745_itimes,
+ ARRAY_SIZE(bh1745_itimes), &data->gts);
+ if (ret)
+ return ret;
+
+ ret = bh1745_reset(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to reset sensor\n");
+
+ ret = bh1745_power_on(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to turn on sensor\n");
+
+ ret = devm_add_action_or_reset(dev, bh1745_power_off, data);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add action or reset\n");
+
+ return 0;
+}
+
+static int bh1745_probe(struct i2c_client *client)
+{
+ int ret;
+ int value;
+ int part_id;
+ struct bh1745_data *data;
+ struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &bh1745_info;
+ indio_dev->name = "bh1745";
+ indio_dev->channels = bh1745_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(bh1745_channels);
+ data = iio_priv(indio_dev);
+ data->dev = &client->dev;
+ data->regmap = devm_regmap_init_i2c(client, &bh1745_regmap);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "Failed to initialize Regmap\n");
+
+ ret = regmap_read(data->regmap, BH1745_SYS_CTRL, &value);
+ if (ret)
+ return ret;
+
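+ /* An unexpected part ID is only warned about; probing continues regardless */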
+ part_id = FIELD_GET(BH1745_SYS_CTRL_PART_ID_MASK, value);
+ if (part_id != BH1745_PART_ID)
+ dev_warn(dev, "Unknown part ID 0x%x\n", part_id);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable regulator\n");
+
+ ret = bh1745_init(data);
+ if (ret)
+ return ret;
+
+ ret = bh1745_setup_triggered_buffer(indio_dev, indio_dev->dev.parent,
+ client->irq);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id bh1745_idtable[] = {
+ { "bh1745" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bh1745_idtable);
+
+static const struct of_device_id bh1745_of_match[] = {
+ { .compatible = "rohm,bh1745" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bh1745_of_match);
+
+static struct i2c_driver bh1745_driver = {
+ .driver = {
+ .name = "bh1745",
+ .of_match_table = bh1745_of_match,
+ },
+ .probe = bh1745_probe,
+ .id_table = bh1745_idtable,
+};
+module_i2c_driver(bh1745_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mudit Sharma <muditsharma.info@gmail.com>");
+MODULE_DESCRIPTION("BH1745 colour sensor driver");
+MODULE_IMPORT_NS(IIO_GTS_HELPER);
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index 7125e011a38a..f8b1d7dd6f5f 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -420,7 +420,7 @@ static int gp2ap002_regmap_i2c_write(void *context, unsigned int reg,
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus gp2ap002_regmap_bus = {
+static const struct regmap_bus gp2ap002_regmap_bus = {
.reg_read = gp2ap002_regmap_i2c_read,
.reg_write = gp2ap002_regmap_i2c_write,
};
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 757383456da6..b3f87dded040 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -965,8 +965,7 @@ static irqreturn_t gp2ap020a00f_trigger_handler(int irq, void *data)
size_t d_size = 0;
int i, out_val, ret;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(priv->regmap,
GP2AP020A00F_DATA_REG(i),
&priv->buffer[d_size], 2);
@@ -1397,8 +1396,7 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
* two separate IIO channels they are treated in the driver logic
* as if they were controlled independently.
*/
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
switch (i) {
case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
err = gp2ap020a00f_exec_cmd(data,
@@ -1435,8 +1433,7 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
mutex_lock(&data->lock);
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
switch (i) {
case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
err = gp2ap020a00f_exec_cmd(data,
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 59329546df58..b176bf4c884b 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -181,8 +181,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
struct isl29125_data *data = iio_priv(indio_dev);
int i, j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
isl29125_regs[i].data);
if (ret < 0)
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
index fff1e899097d..7e58b50f3660 100644
--- a/drivers/iio/light/ltr390.c
+++ b/drivers/iio/light/ltr390.c
@@ -23,20 +23,30 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <asm/unaligned.h>
-#define LTR390_MAIN_CTRL 0x00
-#define LTR390_PART_ID 0x06
-#define LTR390_UVS_DATA 0x10
+#define LTR390_MAIN_CTRL 0x00
+#define LTR390_ALS_UVS_MEAS_RATE 0x04
+#define LTR390_ALS_UVS_GAIN 0x05
+#define LTR390_PART_ID 0x06
+#define LTR390_ALS_DATA 0x0D
+#define LTR390_UVS_DATA 0x10
+#define LTR390_INT_CFG 0x19
+
+#define LTR390_PART_NUMBER_ID 0xb
+#define LTR390_ALS_UVS_GAIN_MASK 0x07
+#define LTR390_ALS_UVS_INT_TIME_MASK 0x70
+#define LTR390_ALS_UVS_INT_TIME(x) FIELD_PREP(LTR390_ALS_UVS_INT_TIME_MASK, (x))
#define LTR390_SW_RESET BIT(4)
#define LTR390_UVS_MODE BIT(3)
#define LTR390_SENSOR_ENABLE BIT(1)
-#define LTR390_PART_NUMBER_ID 0xb
+#define LTR390_FRACTIONAL_PRECISION 100
/*
* At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of
@@ -55,11 +65,19 @@
*/
#define LTR390_WINDOW_FACTOR 1
+enum ltr390_mode {
+ LTR390_SET_ALS_MODE,
+ LTR390_SET_UVS_MODE,
+};
+
struct ltr390_data {
struct regmap *regmap;
struct i2c_client *client;
/* Protects device from simulataneous reads */
struct mutex lock;
+ enum ltr390_mode mode;
+ int gain;
+ int int_time_us;
};
static const struct regmap_config ltr390_regmap_config = {
@@ -75,8 +93,6 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
int ret;
u8 recieve_buffer[3];
- guard(mutex)(&data->lock);
-
ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer,
sizeof(recieve_buffer));
if (ret) {
@@ -87,6 +103,38 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
return get_unaligned_le24(recieve_buffer);
}
+static int ltr390_set_mode(struct ltr390_data *data, enum ltr390_mode mode)
+{
+ int ret;
+
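+ /* Skip the register write if the requested mode is already active */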
+ if (data->mode == mode)
+ return 0;
+
+ switch (mode) {
+ case LTR390_SET_ALS_MODE:
+ ret = regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE);
+ break;
+
+ case LTR390_SET_UVS_MODE:
+ ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ data->mode = mode;
+ return 0;
+}
+
+static int ltr390_counts_per_uvi(struct ltr390_data *data)
+{
+ const int orig_gain = 18;
+ const int orig_int_time = 400;
+
+ return DIV_ROUND_CLOSEST(23 * data->gain * data->int_time_us, 10 * orig_gain * orig_int_time);
+}
+
static int ltr390_read_raw(struct iio_dev *iio_device,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -94,29 +142,174 @@ static int ltr390_read_raw(struct iio_dev *iio_device,
int ret;
struct ltr390_data *data = iio_priv(iio_device);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = ltr390_register_read(data, LTR390_UVS_DATA);
- if (ret < 0)
- return ret;
+ switch (chan->type) {
+ case IIO_UVINDEX:
+ ret = ltr390_set_mode(data, LTR390_SET_UVS_MODE);
+ if (ret < 0)
+ return ret;
+
+ ret = ltr390_register_read(data, LTR390_UVS_DATA);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case IIO_LIGHT:
+ ret = ltr390_set_mode(data, LTR390_SET_ALS_MODE);
+ if (ret < 0)
+ return ret;
+
+ ret = ltr390_register_read(data, LTR390_ALS_DATA);
+ if (ret < 0)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = LTR390_WINDOW_FACTOR;
- *val2 = LTR390_COUNTS_PER_UVI;
- return IIO_VAL_FRACTIONAL;
+ switch (chan->type) {
+ case IIO_UVINDEX:
+ *val = LTR390_WINDOW_FACTOR * LTR390_FRACTIONAL_PRECISION;
+ *val2 = ltr390_counts_per_uvi(data);
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_LIGHT:
+ *val = LTR390_WINDOW_FACTOR * 6 * 100;
+ *val2 = data->gain * data->int_time_us;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = data->int_time_us;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
}
-static const struct iio_info ltr390_info = {
- .read_raw = ltr390_read_raw,
+/* integration time in us */
+static const int ltr390_int_time_map_us[] = { 400000, 200000, 100000, 50000, 25000, 12500 };
+static const int ltr390_gain_map[] = { 1, 3, 6, 9, 18 };
+
+static const struct iio_chan_spec ltr390_channels[] = {
+ /* UV sensor */
+ {
+ .type = IIO_UVINDEX,
+ .scan_index = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE)
+ },
+ /* ALS sensor */
+ {
+ .type = IIO_LIGHT,
+ .scan_index = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE)
+ },
};
-static const struct iio_chan_spec ltr390_channel = {
- .type = IIO_UVINDEX,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)
+static int ltr390_set_gain(struct ltr390_data *data, int val)
+{
+ int ret, idx;
+
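+ /* The gain register field holds the index into ltr390_gain_map */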
+ for (idx = 0; idx < ARRAY_SIZE(ltr390_gain_map); idx++) {
+ if (ltr390_gain_map[idx] != val)
+ continue;
+
+ guard(mutex)(&data->lock);
+ ret = regmap_update_bits(data->regmap,
+ LTR390_ALS_UVS_GAIN,
+ LTR390_ALS_UVS_GAIN_MASK, idx);
+ if (ret)
+ return ret;
+
+ data->gain = ltr390_gain_map[idx];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ltr390_set_int_time(struct ltr390_data *data, int val)
+{
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ltr390_int_time_map_us); idx++) {
+ if (ltr390_int_time_map_us[idx] != val)
+ continue;
+
+ guard(mutex)(&data->lock);
+ ret = regmap_update_bits(data->regmap,
+ LTR390_ALS_UVS_MEAS_RATE,
+ LTR390_ALS_UVS_INT_TIME_MASK,
+ LTR390_ALS_UVS_INT_TIME(idx));
+ if (ret)
+ return ret;
+
+ data->int_time_us = ltr390_int_time_map_us[idx];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ltr390_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *length = ARRAY_SIZE(ltr390_gain_map);
+ *type = IIO_VAL_INT;
+ *vals = ltr390_gain_map;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(ltr390_int_time_map_us);
+ *type = IIO_VAL_INT;
+ *vals = ltr390_int_time_map_us;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltr390_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val2 != 0)
+ return -EINVAL;
+
+ return ltr390_set_gain(data, val);
+
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val2 != 0)
+ return -EINVAL;
+
+ return ltr390_set_int_time(data, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ltr390_info = {
+ .read_raw = ltr390_read_raw,
+ .write_raw = ltr390_write_raw,
+ .read_avail = ltr390_read_avail,
};
static int ltr390_probe(struct i2c_client *client)
@@ -139,11 +332,18 @@ static int ltr390_probe(struct i2c_client *client)
"regmap initialization failed\n");
data->client = client;
+ /* Default integration time, from page 15 of the datasheet */
+ data->int_time_us = 100000;
+ /* Default gain, from page 16 of the datasheet */
+ data->gain = 3;
+ /* default mode for ltr390 is ALS mode */
+ data->mode = LTR390_SET_ALS_MODE;
+
mutex_init(&data->lock);
indio_dev->info = &ltr390_info;
- indio_dev->channels = &ltr390_channel;
- indio_dev->num_channels = 1;
+ indio_dev->channels = ltr390_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltr390_channels);
indio_dev->name = "ltr390";
ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number);
@@ -161,8 +361,7 @@ static int ltr390_probe(struct i2c_client *client)
/* Wait for the registers to reset before proceeding */
usleep_range(1000, 2000);
- ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL,
- LTR390_SENSOR_ENABLE | LTR390_UVS_MODE);
+ ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
if (ret)
return dev_err_probe(dev, ret, "failed to enable the sensor\n");
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 68dc48420a88..bc8444516689 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -68,6 +68,13 @@ static const int ltrf216a_int_time_reg[][2] = {
{ 25, 0x40 },
};
+struct ltr_chip_info {
+ /* Chip contains CLEAR_DATA_0/1/2 registers at offset 0xa..0xc */
+ bool has_clear_data;
+ /* Lux calculation multiplier for ALS data */
+ int lux_multiplier;
+};
+
/*
* Window Factor is needed when the device is under Window glass
* with coated tinted ink. This is to compensate for the light loss
@@ -79,6 +86,7 @@ static const int ltrf216a_int_time_reg[][2] = {
struct ltrf216a_data {
struct regmap *regmap;
struct i2c_client *client;
+ const struct ltr_chip_info *info;
u32 int_time;
u16 int_time_fac;
u8 als_gain_fac;
@@ -246,7 +254,7 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data)
ltrf216a_set_power_state(data, false);
- lux = greendata * 45 * LTRF216A_WIN_FAC;
+ lux = greendata * data->info->lux_multiplier * LTRF216A_WIN_FAC;
return lux;
}
@@ -334,15 +342,15 @@ static const struct iio_info ltrf216a_info = {
static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
switch (reg) {
case LTRF216A_MAIN_CTRL:
case LTRF216A_ALS_MEAS_RES:
case LTRF216A_ALS_GAIN:
case LTRF216A_PART_ID:
case LTRF216A_MAIN_STATUS:
- case LTRF216A_ALS_CLEAR_DATA_0:
- case LTRF216A_ALS_CLEAR_DATA_1:
- case LTRF216A_ALS_CLEAR_DATA_2:
case LTRF216A_ALS_DATA_0:
case LTRF216A_ALS_DATA_1:
case LTRF216A_ALS_DATA_2:
@@ -355,6 +363,10 @@ static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
case LTRF216A_ALS_THRES_LOW_1:
case LTRF216A_ALS_THRES_LOW_2:
return true;
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ return data->info->has_clear_data;
default:
return false;
}
@@ -382,15 +394,23 @@ static bool ltrf216a_writable_reg(struct device *dev, unsigned int reg)
static bool ltrf216a_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
switch (reg) {
case LTRF216A_MAIN_STATUS:
- case LTRF216A_ALS_CLEAR_DATA_0:
- case LTRF216A_ALS_CLEAR_DATA_1:
- case LTRF216A_ALS_CLEAR_DATA_2:
case LTRF216A_ALS_DATA_0:
case LTRF216A_ALS_DATA_1:
case LTRF216A_ALS_DATA_2:
return true;
+ /*
+ * If these registers are not present on a chip (like LTR-308),
+ * the missing registers are not considered volatile.
+ */
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ return data->info->has_clear_data;
default:
return false;
}
@@ -433,6 +453,7 @@ static int ltrf216a_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->info = i2c_get_match_data(client);
mutex_init(&data->lock);
@@ -520,15 +541,27 @@ cache_only:
static DEFINE_RUNTIME_DEV_PM_OPS(ltrf216a_pm_ops, ltrf216a_runtime_suspend,
ltrf216a_runtime_resume, NULL);
+static const struct ltr_chip_info ltr308_chip_info = {
+ .has_clear_data = false,
+ .lux_multiplier = 60,
+};
+
+static const struct ltr_chip_info ltrf216a_chip_info = {
+ .has_clear_data = true,
+ .lux_multiplier = 45,
+};
+
static const struct i2c_device_id ltrf216a_id[] = {
- { "ltrf216a" },
+ { "ltr308", .driver_data = (kernel_ulong_t)&ltr308_chip_info },
+ { "ltrf216a", .driver_data = (kernel_ulong_t)&ltrf216a_chip_info },
{}
};
MODULE_DEVICE_TABLE(i2c, ltrf216a_id);
static const struct of_device_id ltrf216a_of_match[] = {
- { .compatible = "liteon,ltrf216a" },
- { .compatible = "ltr,ltrf216a" },
+ { .compatible = "liteon,ltr308", .data = &ltr308_chip_info },
+ { .compatible = "liteon,ltrf216a", .data = &ltrf216a_chip_info },
+ { .compatible = "ltr,ltrf216a", .data = &ltrf216a_chip_info },
{}
};
MODULE_DEVICE_TABLE(of, ltrf216a_of_match);
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
index 596cc48c4c34..25f63da70297 100644
--- a/drivers/iio/light/noa1305.c
+++ b/drivers/iio/light/noa1305.c
@@ -29,6 +29,7 @@
#define NOA1305_INTEGR_TIME_25MS 0x05
#define NOA1305_INTEGR_TIME_12_5MS 0x06
#define NOA1305_INTEGR_TIME_6_25MS 0x07
+#define NOA1305_INTEGR_TIME_MASK 0x07
#define NOA1305_REG_INT_SELECT 0x3
#define NOA1305_INT_SEL_ACTIVE_HIGH 0x01
#define NOA1305_INT_SEL_ACTIVE_LOW 0x02
@@ -43,12 +44,34 @@
#define NOA1305_DEVICE_ID 0x0519
#define NOA1305_DRIVER_NAME "noa1305"
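+/*
+ * Scale = 1 / (integration constant 7.7 * integration time in seconds),
+ * stored as {numerator, denominator} pairs indexed by the integration
+ * time register value.
+ */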
+static int noa1305_scale_available[] = {
+ 100, 8 * 77, /* 800 ms */
+ 100, 4 * 77, /* 400 ms */
+ 100, 2 * 77, /* 200 ms */
+ 100, 1 * 77, /* 100 ms */
+ 1000, 5 * 77, /* 50 ms */
+ 10000, 25 * 77, /* 25 ms */
+ 100000, 125 * 77, /* 12.5 ms */
+ 1000000, 625 * 77, /* 6.25 ms */
+};
+
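+/*
+ * Integration times as {seconds, microseconds} pairs, in the same register
+ * index order as the scale table above.
+ */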
+static int noa1305_int_time_available[] = {
+ 0, 800000, /* 800 ms */
+ 0, 400000, /* 400 ms */
+ 0, 200000, /* 200 ms */
+ 0, 100000, /* 100 ms */
+ 0, 50000, /* 50 ms */
+ 0, 25000, /* 25 ms */
+ 0, 12500, /* 12.5 ms */
+ 0, 6250, /* 6.25 ms */
+};
+
struct noa1305_priv {
struct i2c_client *client;
struct regmap *regmap;
};
-static int noa1305_measure(struct noa1305_priv *priv)
+static int noa1305_measure(struct noa1305_priv *priv, int *val)
{
__le16 data;
int ret;
@@ -58,7 +81,9 @@ static int noa1305_measure(struct noa1305_priv *priv)
if (ret < 0)
return ret;
- return le16_to_cpu(data);
+ *val = le16_to_cpu(data);
+
+ return IIO_VAL_INT;
}
static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2)
@@ -76,91 +101,113 @@ static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2)
* Integration Constant = 7.7
* Integration Time in Seconds
*/
- switch (data) {
- case NOA1305_INTEGR_TIME_800MS:
- *val = 100;
- *val2 = 77 * 8;
- break;
- case NOA1305_INTEGR_TIME_400MS:
- *val = 100;
- *val2 = 77 * 4;
- break;
- case NOA1305_INTEGR_TIME_200MS:
- *val = 100;
- *val2 = 77 * 2;
- break;
- case NOA1305_INTEGR_TIME_100MS:
- *val = 100;
- *val2 = 77;
- break;
- case NOA1305_INTEGR_TIME_50MS:
- *val = 1000;
- *val2 = 77 * 5;
- break;
- case NOA1305_INTEGR_TIME_25MS:
- *val = 10000;
- *val2 = 77 * 25;
- break;
- case NOA1305_INTEGR_TIME_12_5MS:
- *val = 100000;
- *val2 = 77 * 125;
- break;
- case NOA1305_INTEGR_TIME_6_25MS:
- *val = 1000000;
- *val2 = 77 * 625;
- break;
- default:
- return -EINVAL;
- }
+ data &= NOA1305_INTEGR_TIME_MASK;
+ *val = noa1305_scale_available[2 * data + 0];
+ *val2 = noa1305_scale_available[2 * data + 1];
return IIO_VAL_FRACTIONAL;
}
+static int noa1305_int_time(struct noa1305_priv *priv, int *val, int *val2)
+{
+ int data;
+ int ret;
+
+ ret = regmap_read(priv->regmap, NOA1305_REG_INTEGRATION_TIME, &data);
+ if (ret < 0)
+ return ret;
+
+ data &= NOA1305_INTEGR_TIME_MASK;
+ *val = noa1305_int_time_available[2 * data + 0];
+ *val2 = noa1305_int_time_available[2 * data + 1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
static const struct iio_chan_spec noa1305_channels[] = {
{
.type = IIO_LIGHT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
}
};
+static int noa1305_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = noa1305_scale_available;
+ *length = ARRAY_SIZE(noa1305_scale_available);
+ *type = IIO_VAL_FRACTIONAL;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_INT_TIME:
+ *vals = noa1305_int_time_available;
+ *length = ARRAY_SIZE(noa1305_int_time_available);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int noa1305_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
- int ret = -EINVAL;
struct noa1305_priv *priv = iio_priv(indio_dev);
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
- switch (chan->type) {
- case IIO_LIGHT:
- ret = noa1305_measure(priv);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
- default:
- break;
- }
- break;
+ return noa1305_measure(priv, val);
case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_LIGHT:
- return noa1305_scale(priv, val, val2);
- default:
- break;
- }
- break;
+ return noa1305_scale(priv, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ return noa1305_int_time(priv, val, val2);
default:
- break;
+ return -EINVAL;
}
+}
- return ret;
+static int noa1305_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct noa1305_priv *priv = iio_priv(indio_dev);
+ int i;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_INT_TIME)
+ return -EINVAL;
+
+ if (val) /* >= 1s integration time not supported */
+ return -EINVAL;
+
+ /* Look up the integration time register setting and write it if found. */
+ for (i = 0; i < ARRAY_SIZE(noa1305_int_time_available) / 2; i++)
+ if (noa1305_int_time_available[2 * i + 1] == val2)
+ return regmap_write(priv->regmap, NOA1305_REG_INTEGRATION_TIME, i);
+
+ return -EINVAL;
}
static const struct iio_info noa1305_info = {
+ .read_avail = noa1305_read_avail,
.read_raw = noa1305_read_raw,
+ .write_raw = noa1305_write_raw,
};
static bool noa1305_writable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 4937bf6fa046..76711c3cdf7c 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * BU27034 ROHM Ambient Light Sensor
+ * BU27034ANUC ROHM Ambient Light Sensor
*
* Copyright (c) 2023, ROHM Semiconductor.
- * https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf
*/
#include <linux/bitfield.h>
@@ -30,17 +29,15 @@
#define BU27034_REG_MODE_CONTROL2 0x42
#define BU27034_MASK_D01_GAIN GENMASK(7, 3)
-#define BU27034_MASK_D2_GAIN_HI GENMASK(7, 6)
-#define BU27034_MASK_D2_GAIN_LO GENMASK(2, 0)
#define BU27034_REG_MODE_CONTROL3 0x43
#define BU27034_REG_MODE_CONTROL4 0x44
#define BU27034_MASK_MEAS_EN BIT(0)
#define BU27034_MASK_VALID BIT(7)
+#define BU27034_NUM_HW_DATA_CHANS 2
#define BU27034_REG_DATA0_LO 0x50
#define BU27034_REG_DATA1_LO 0x52
-#define BU27034_REG_DATA2_LO 0x54
-#define BU27034_REG_DATA2_HI 0x55
+#define BU27034_REG_DATA1_HI 0x53
#define BU27034_REG_MANUFACTURER_ID 0x92
#define BU27034_REG_MAX BU27034_REG_MANUFACTURER_ID
@@ -88,58 +85,48 @@ enum {
BU27034_CHAN_ALS,
BU27034_CHAN_DATA0,
BU27034_CHAN_DATA1,
- BU27034_CHAN_DATA2,
BU27034_NUM_CHANS
};
static const unsigned long bu27034_scan_masks[] = {
- GENMASK(BU27034_CHAN_DATA2, BU27034_CHAN_ALS), 0
+ GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_DATA0),
+ GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_ALS), 0
};
/*
- * Available scales with gain 1x - 4096x, timings 55, 100, 200, 400 mS
+ * Available scales with gain 1x - 1024x, timings 55, 100, 200, 400 mS
* Time impacts to gain: 1x, 2x, 4x, 8x.
*
- * => Max total gain is HWGAIN * gain by integration time (8 * 4096) = 32768
+ * => Max total gain is HWGAIN * gain by integration time (8 * 1024) = 8192
+ * if 1x gain is scale 1, scale for 2x gain is 0.5, 4x => 0.25,
+ * ... 8192x => 0.0001220703125 => 122070.3125 nanos
*
- * Using NANO precision for scale we must use scale 64x corresponding gain 1x
- * to avoid precision loss. (32x would result scale 976 562.5(nanos).
+ * Using NANO precision for scale, we must use scale 16x corresponding gain 1x
+ * to avoid precision loss. (8x would result scale 976 562.5(nanos).
*/
-#define BU27034_SCALE_1X 64
+#define BU27034_SCALE_1X 16
/* See the data sheet for the "Gain Setting" table */
#define BU27034_GSEL_1X 0x00 /* 00000 */
#define BU27034_GSEL_4X 0x08 /* 01000 */
-#define BU27034_GSEL_16X 0x0a /* 01010 */
#define BU27034_GSEL_32X 0x0b /* 01011 */
-#define BU27034_GSEL_64X 0x0c /* 01100 */
#define BU27034_GSEL_256X 0x18 /* 11000 */
#define BU27034_GSEL_512X 0x19 /* 11001 */
#define BU27034_GSEL_1024X 0x1a /* 11010 */
-#define BU27034_GSEL_2048X 0x1b /* 11011 */
-#define BU27034_GSEL_4096X 0x1c /* 11100 */
/* Available gain settings */
static const struct iio_gain_sel_pair bu27034_gains[] = {
GAIN_SCALE_GAIN(1, BU27034_GSEL_1X),
GAIN_SCALE_GAIN(4, BU27034_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27034_GSEL_16X),
GAIN_SCALE_GAIN(32, BU27034_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27034_GSEL_64X),
GAIN_SCALE_GAIN(256, BU27034_GSEL_256X),
GAIN_SCALE_GAIN(512, BU27034_GSEL_512X),
GAIN_SCALE_GAIN(1024, BU27034_GSEL_1024X),
- GAIN_SCALE_GAIN(2048, BU27034_GSEL_2048X),
- GAIN_SCALE_GAIN(4096, BU27034_GSEL_4096X),
};
/*
- * The IC has 5 modes for sampling time. 5 mS mode is exceptional as it limits
- * the data collection to data0-channel only and cuts the supported range to
- * 10 bit. It is not supported by the driver.
- *
- * "normal" modes are 55, 100, 200 and 400 mS modes - which do have direct
- * multiplying impact to the register values (similar to gain).
+ * Measurement modes are 55, 100, 200 and 400 mS modes - which do have direct
+ * multiplying impact to the data register values (similar to gain).
*
* This means that if meas-mode is changed for example from 400 => 200,
* the scale is doubled. Eg, time impact to total gain is x1, x2, x4, x8.
@@ -156,13 +143,13 @@ static const struct iio_itime_sel_mul bu27034_itimes[] = {
GAIN_SCALE_ITIME_US(55000, BU27034_MEAS_MODE_55MS, 1),
};
-#define BU27034_CHAN_DATA(_name, _ch2) \
+#define BU27034_CHAN_DATA(_name) \
{ \
.type = IIO_INTENSITY, \
.channel = BU27034_CHAN_##_name, \
- .channel2 = (_ch2), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
.info_mask_shared_by_all_available = \
@@ -195,13 +182,12 @@ static const struct iio_chan_spec bu27034_channels[] = {
/*
* The BU27034 DATA0 and DATA1 channels are both on the visible light
* area (mostly). The data0 sensitivity peaks at 500nm, DATA1 at 600nm.
- * These wave lengths are pretty much on the border of colours making
- * these a poor candidates for R/G/B standardization. Hence they're both
- * marked as clear channels
+ * These wavelengths are cyan(ish) and orange(ish), making them
+ * sub-optimal candidates for R/G/B standardization. Hence the
+ * colour modifier is omitted.
*/
- BU27034_CHAN_DATA(DATA0, IIO_MOD_LIGHT_CLEAR),
- BU27034_CHAN_DATA(DATA1, IIO_MOD_LIGHT_CLEAR),
- BU27034_CHAN_DATA(DATA2, IIO_MOD_LIGHT_IR),
+ BU27034_CHAN_DATA(DATA0),
+ BU27034_CHAN_DATA(DATA1),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
@@ -215,10 +201,10 @@ struct bu27034_data {
struct mutex mutex;
struct iio_gts gts;
struct task_struct *task;
- __le16 raw[3];
+ __le16 raw[BU27034_NUM_HW_DATA_CHANS];
struct {
u32 mlux;
- __le16 channels[3];
+ __le16 channels[BU27034_NUM_HW_DATA_CHANS];
s64 ts __aligned(8);
} scan;
};
@@ -232,7 +218,7 @@ static const struct regmap_range bu27034_volatile_ranges[] = {
.range_max = BU27034_REG_MODE_CONTROL4,
}, {
.range_min = BU27034_REG_DATA0_LO,
- .range_max = BU27034_REG_DATA2_HI,
+ .range_max = BU27034_REG_DATA1_HI,
},
};
@@ -244,7 +230,7 @@ static const struct regmap_access_table bu27034_volatile_regs = {
static const struct regmap_range bu27034_read_only_ranges[] = {
{
.range_min = BU27034_REG_DATA0_LO,
- .range_max = BU27034_REG_DATA2_HI,
+ .range_max = BU27034_REG_DATA1_HI,
}, {
.range_min = BU27034_REG_MANUFACTURER_ID,
.range_max = BU27034_REG_MANUFACTURER_ID,
@@ -273,41 +259,17 @@ struct bu27034_gain_check {
static int bu27034_get_gain_sel(struct bu27034_data *data, int chan)
{
+ int reg[] = {
+ [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2,
+ [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3,
+ };
int ret, val;
- switch (chan) {
- case BU27034_CHAN_DATA0:
- case BU27034_CHAN_DATA1:
- {
- int reg[] = {
- [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2,
- [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3,
- };
- ret = regmap_read(data->regmap, reg[chan], &val);
- if (ret)
- return ret;
-
- return FIELD_GET(BU27034_MASK_D01_GAIN, val);
- }
- case BU27034_CHAN_DATA2:
- {
- int d2_lo_bits = fls(BU27034_MASK_D2_GAIN_LO);
-
- ret = regmap_read(data->regmap, BU27034_REG_MODE_CONTROL2, &val);
- if (ret)
- return ret;
+ ret = regmap_read(data->regmap, reg[chan], &val);
+ if (ret)
+ return ret;
- /*
- * The data2 channel gain is composed by 5 non continuous bits
- * [7:6], [2:0]. Thus when we combine the 5-bit 'selector'
- * from register value we must right shift the high bits by 3.
- */
- return FIELD_GET(BU27034_MASK_D2_GAIN_HI, val) << d2_lo_bits |
- FIELD_GET(BU27034_MASK_D2_GAIN_LO, val);
- }
- default:
- return -EINVAL;
- }
+ return FIELD_GET(BU27034_MASK_D01_GAIN, val);
}
static int bu27034_get_gain(struct bu27034_data *data, int chan, int *gain)
@@ -390,44 +352,9 @@ static int bu27034_write_gain_sel(struct bu27034_data *data, int chan, int sel)
};
int mask, val;
- if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1)
- return -EINVAL;
-
val = FIELD_PREP(BU27034_MASK_D01_GAIN, sel);
-
mask = BU27034_MASK_D01_GAIN;
- if (chan == BU27034_CHAN_DATA0) {
- /*
- * We keep the same gain for channel 2 as we set for channel 0
- * We can't allow them to be individually controlled because
- * setting one will impact also the other. Also, if we don't
- * always update both gains we may result unsupported bit
- * combinations.
- *
- * This is not nice but this is yet another place where the
- * user space must be prepared to surprizes. Namely, see chan 2
- * gain changed when chan 0 gain is changed.
- *
- * This is not fatal for most users though. I don't expect the
- * channel 2 to be used in any generic cases - the intensity
- * values provided by the sensor for IR area are not openly
- * documented. Also, channel 2 is not used for visible light.
- *
- * So, if there is application which is written to utilize the
- * channel 2 - then it is probably specifically targeted to this
- * sensor and knows how to utilize those values. It is safe to
- * hope such user can also cope with the gain changes.
- */
- mask |= BU27034_MASK_D2_GAIN_LO;
-
- /*
- * The D2 gain bits are directly the lowest bits of selector.
- * Just do add those bits to the value
- */
- val |= sel & BU27034_MASK_D2_GAIN_LO;
- }
-
return regmap_update_bits(data->regmap, reg[chan], mask, val);
}
@@ -435,13 +362,6 @@ static int bu27034_set_gain(struct bu27034_data *data, int chan, int gain)
{
int ret;
- /*
- * We don't allow setting channel 2 gain as it messes up the
- * gain for channel 0 - which shares the high bits
- */
- if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1)
- return -EINVAL;
-
ret = iio_gts_find_sel_by_gain(&data->gts, gain);
if (ret < 0)
return ret;
@@ -565,9 +485,6 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
int ret, time_sel, gain_sel, i;
bool found = false;
- if (chan == BU27034_CHAN_DATA2)
- return -EINVAL;
-
if (chan == BU27034_CHAN_ALS) {
if (val == 0 && val2 == 1000000)
return 0;
@@ -592,9 +509,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
/*
* Populate information for the other channel which should also
- * maintain the scale. (Due to the HW limitations the chan2
- * gets the same gain as chan0, so we only need to explicitly
- * set the chan 0 and 1).
+ * maintain the scale.
*/
if (chan == BU27034_CHAN_DATA0)
gain.chan = BU27034_CHAN_DATA1;
@@ -608,7 +523,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
/*
* Iterate through all the times to see if we find one which
* can support requested scale for requested channel, while
- * maintaining the scale for other channels
+ * maintaining the scale for the other channel
*/
for (i = 0; i < data->gts.num_itime; i++) {
new_time_sel = data->gts.itime_table[i].sel;
@@ -623,7 +538,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
if (ret)
continue;
- /* Can the other channel(s) maintain scale? */
+ /* Can the other channel maintain scale? */
ret = iio_gts_find_new_gain_sel_by_old_gain_time(
&data->gts, gain.old_gain, time_sel,
new_time_sel, &gain.new_gain);
@@ -635,7 +550,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
}
if (!found) {
dev_dbg(data->dev,
- "Can't set scale maintaining other channels\n");
+ "Can't set scale maintaining other channel\n");
ret = -EINVAL;
goto unlock_out;
@@ -659,102 +574,21 @@ unlock_out:
}
/*
- * for (D1/D0 < 0.87):
- * lx = 0.004521097 * D1 - 0.002663996 * D0 +
- * 0.00012213 * D1 * D1 / D0
- *
- * => 115.7400832 * ch1 / gain1 / mt -
- * 68.1982976 * ch0 / gain0 / mt +
- * 0.00012213 * 25600 * (ch1 / gain1 / mt) * 25600 *
- * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt)
- *
- * A = 0.00012213 * 25600 * (ch1 /gain1 / mt) * 25600 *
- * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt)
- * => 0.00012213 * 25600 * (ch1 /gain1 / mt) *
- * (ch1 /gain1 / mt) / (ch0 / gain0 / mt)
- * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) /
- * (ch0 / gain0)
- * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) *
- * gain0 / ch0
- * => 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt /ch0
- *
- * lx = (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) /
- * mt + A
- * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) /
- * mt + 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt /
- * ch0
- *
- * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0 +
- * 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) /
- * mt
- *
- * For (0.87 <= D1/D0 < 1.00)
- * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 0.87) * (0.385) + 1)
- * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 *
- * 100 * ch1 / gain1 / mt) * ((D1/D0 - 0.87) * (0.385) + 1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * ((D1/D0 - 0.87) * (0.385) + 1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (0.385 * D1/D0 - 0.66505)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (0.385 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) - 0.66505)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (9856 * ch1 / gain1 / mt / (25600 * ch0 / gain0 / mt) + 0.66505)
- * => 13.118336 * ch1 / (gain1 * mt)
- * + 22.66064768 * ch0 / (gain0 * mt)
- * + 8931.90144 * ch1 * ch1 * gain0 /
- * (25600 * ch0 * gain1 * gain1 * mt)
- * + 0.602694912 * ch1 / (gain1 * mt)
- *
- * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1)
- * + 22.66064768 * ch0 / gain0
- * + 13.721030912 * ch1 / gain1
- * ] / mt
- *
- * For (D1/D0 >= 1.00)
+ * for (D1/D0 < 1.5):
+ * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1/D0 - 1.5) * (0.25) + 1)
*
- * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 2.0) * (-0.05) + 1)
- * => (0.001331* D0 + 0.0000354 * D1) * (-0.05D1/D0 + 1.1)
- * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 *
- * 100 * ch1 / gain1 / mt) * (-0.05D1/D0 + 1.1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (-0.05 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) + 1.1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (-1280 * ch1 / (gain1 * mt * 25600 * ch0 / gain0 / mt) + 1.1)
- * => (34.0736 * ch0 * -1280 * ch1 * gain0 * mt /( gain0 * mt * gain1 * mt * 25600 * ch0)
- * + 34.0736 * 1.1 * ch0 / (gain0 * mt)
- * + 0.90624 * ch1 * -1280 * ch1 *gain0 * mt / (gain1 * mt *gain1 * mt * 25600 * ch0)
- * + 1.1 * 0.90624 * ch1 / (gain1 * mt)
- * => -43614.208 * ch1 / (gain1 * mt * 25600)
- * + 37.48096 ch0 / (gain0 * mt)
- * - 1159.9872 * ch1 * ch1 * gain0 / (gain1 * gain1 * mt * 25600 * ch0)
- * + 0.996864 ch1 / (gain1 * mt)
- * => [
- * - 0.045312 * ch1 * ch1 * gain0 / (gain1 * gain1 * ch0)
- * - 0.706816 * ch1 / gain1
- * + 37.48096 ch0 /gain0
- * ] * mt
+ * => 0.000745625 * D0 + 0.0002515625 * D1 - 0.000018675 * D1 * D1 / D0
*
+ * => (6.44 * ch1 / gain1 + 19.088 * ch0 / gain0 -
+ * 0.47808 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) /
+ * mt
*
- * So, the first case (D1/D0 < 0.87) can be computed to a form:
+ * Else
+ * lx = 0.001193 * D0 - 0.0000747 * D1
*
- * lx = (3.126528 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * 115.7400832 * ch1 / gain1 +
- * -68.1982976 * ch0 / gain0
- * / mt
- *
- * Second case (0.87 <= D1/D0 < 1.00) goes to form:
- *
- * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * 13.721030912 * ch1 / gain1 +
- * 22.66064768 * ch0 / gain0
- * ] / mt
- *
- * Third case (D1/D0 >= 1.00) goes to form:
- * => [-0.045312 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * -0.706816 * ch1 / gain1 +
- * 37.48096 ch0 /(gain0
- * ] / mt
+ * => (1.91232 * ch1 / gain1 + 30.5408 * ch0 / gain0 +
+ * [0 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0] ) /
+ * mt
*
* This can be unified to format:
* lx = [
@@ -764,19 +598,14 @@ unlock_out:
* ] / mt
*
* For case 1:
- * A = 3.126528,
- * B = 115.7400832
- * C = -68.1982976
+ * A = -0.47808,
+ * B = 6.44,
+ * C = 19.088
*
* For case 2:
- * A = 0.3489024
- * B = 13.721030912
- * C = 22.66064768
- *
- * For case 3:
- * A = -0.045312
- * B = -0.706816
- * C = 37.48096
+ * A = 0
+ * B = 1.91232
+ * C = 30.5408
*/
struct bu27034_lx_coeff {
@@ -881,21 +710,16 @@ static int bu27034_fixp_calc_lx(unsigned int ch0, unsigned int ch1,
{
static const struct bu27034_lx_coeff coeff[] = {
{
- .A = 31265280, /* 3.126528 */
- .B = 1157400832, /*115.7400832 */
- .C = 681982976, /* -68.1982976 */
- .is_neg = {false, false, true},
+ .A = 4780800, /* -0.47808 */
+ .B = 64400000, /* 6.44 */
+ .C = 190880000, /* 19.088 */
+ .is_neg = { true, false, false },
}, {
- .A = 3489024, /* 0.3489024 */
- .B = 137210309, /* 13.721030912 */
- .C = 226606476, /* 22.66064768 */
+ .A = 0, /* 0 */
+ .B = 19123200, /* 1.91232 */
+ .C = 305408000, /* 30.5408 */
/* All terms positive */
- }, {
- .A = 453120, /* -0.045312 */
- .B = 7068160, /* -0.706816 */
- .C = 374809600, /* 37.48096 */
- .is_neg = {true, true, false},
- }
+ },
};
const struct bu27034_lx_coeff *c = &coeff[coeff_idx];
u64 res = 0, terms[3];
@@ -967,7 +791,6 @@ static int bu27034_read_result(struct bu27034_data *data, int chan, int *res)
int reg[] = {
[BU27034_CHAN_DATA0] = BU27034_REG_DATA0_LO,
[BU27034_CHAN_DATA1] = BU27034_REG_DATA1_LO,
- [BU27034_CHAN_DATA2] = BU27034_REG_DATA2_LO,
};
int valid, ret;
__le16 val;
@@ -1034,7 +857,7 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan,
{
int ret;
- if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA2)
+ if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA1)
return -EINVAL;
ret = bu27034_meas_set(data, true);
@@ -1059,12 +882,10 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan,
* D1 = data1/ch1_gain/meas_time_ms * 25600
*
* Then:
- * if (D1/D0 < 0.87)
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 3.45 + 1)
- * else if (D1/D0 < 1)
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 0.385 + 1)
- * else
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 2) * -0.05 + 1)
+ * If (D1/D0 < 1.5)
+ * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1 / D0 - 1.5) * 0.25 + 1)
+ * Else
+ * lx = (0.001193 * D0 + (-0.0000747) * D1)
*
* We use it here. Users who have for example some colored lens
* need to modify the calculation but I hope this gives a starting point for
@@ -1115,12 +936,10 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val)
d1_d0_ratio_scaled /= ch0 * gain1;
}
- if (d1_d0_ratio_scaled < 87)
+ if (d1_d0_ratio_scaled < 150)
ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 0);
- else if (d1_d0_ratio_scaled < 100)
- ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1);
else
- ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 2);
+ ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1);
if (ret < 0)
return ret;
@@ -1133,7 +952,7 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val)
static int bu27034_get_mlux(struct bu27034_data *data, int chan, int *val)
{
- __le16 res[3];
+ __le16 res[BU27034_NUM_HW_DATA_CHANS];
int ret;
ret = bu27034_meas_set(data, true);
@@ -1171,6 +990,13 @@ static int bu27034_read_raw(struct iio_dev *idev,
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = bu27034_get_gain(data, chan->channel, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
case IIO_CHAN_INFO_SCALE:
return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1215,12 +1041,17 @@ static int bu27034_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
{
+ struct bu27034_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_INT_TIME:
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ dev_dbg(data->dev,
+ "HARDWAREGAIN is read-only, use scale to set\n");
+ return -EINVAL;
default:
return -EINVAL;
}
@@ -1501,7 +1332,7 @@ static int bu27034_probe(struct i2c_client *i2c)
}
static const struct of_device_id bu27034_of_match[] = {
- { .compatible = "rohm,bu27034" },
+ { .compatible = "rohm,bu27034anuc" },
{ }
};
MODULE_DEVICE_TABLE(of, bu27034_of_match);
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 77666b780a5c..66abda021696 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -465,11 +465,10 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int run = 1;
- while (i + run < indio_dev->masklength) {
+ while (i + run < iio_get_masklength(indio_dev)) {
if (!test_bit(i + run, indio_dev->active_scan_mask))
break;
if (indio_dev->channels[i + run].address !=
@@ -514,7 +513,7 @@ static int si1145_set_chlist(struct iio_dev *indio_dev, unsigned long scan_mask)
if (data->scan_mask == scan_mask)
return 0;
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
switch (indio_dev->channels[i].address) {
case SI1145_REG_ALSVIS_DATA:
reg |= SI1145_CHLIST_EN_ALSVIS;
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index e3470d6743ef..ed20b6714546 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -35,6 +35,7 @@
#define STK3310_STATE_EN_ALS BIT(1)
#define STK3310_STATE_STANDBY 0x00
+#define STK3013_CHIP_ID_VAL 0x31
#define STK3310_CHIP_ID_VAL 0x13
#define STK3311_CHIP_ID_VAL 0x1D
#define STK3311A_CHIP_ID_VAL 0x15
@@ -84,6 +85,7 @@ static const struct reg_field stk3310_reg_field_flag_nf =
REG_FIELD(STK3310_REG_FLAG, 0, 0);
static const u8 stk3310_chip_ids[] = {
+ STK3013_CHIP_ID_VAL,
STK3310_CHIP_ID_VAL,
STK3311A_CHIP_ID_VAL,
STK3311S34_CHIP_ID_VAL,
@@ -496,7 +498,7 @@ static int stk3310_init(struct iio_dev *indio_dev)
ret = stk3310_check_chip_id(chipid);
if (ret < 0)
- dev_warn(&client->dev, "unknown chip id: 0x%x\n", chipid);
+ dev_info(&client->dev, "new unknown chip id: 0x%x\n", chipid);
state = STK3310_STATE_EN_ALS | STK3310_STATE_EN_PS;
ret = stk3310_set_state(data, state);
@@ -700,6 +702,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk3310_pm_ops, stk3310_suspend,
stk3310_resume);
static const struct i2c_device_id stk3310_i2c_id[] = {
+ { "STK3013" },
{ "STK3310" },
{ "STK3311" },
{ "STK3335" },
@@ -708,6 +711,7 @@ static const struct i2c_device_id stk3310_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
static const struct acpi_device_id stk3310_acpi_id[] = {
+ {"STK3013", 0},
{"STK3310", 0},
{"STK3311", 0},
{}
@@ -716,6 +720,7 @@ static const struct acpi_device_id stk3310_acpi_id[] = {
MODULE_DEVICE_TABLE(acpi, stk3310_acpi_id);
static const struct of_device_id stk3310_of_match[] = {
+ { .compatible = "sensortek,stk3013", },
{ .compatible = "sensortek,stk3310", },
{ .compatible = "sensortek,stk3311", },
{ .compatible = "sensortek,stk3335", },
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index c9566615b964..4fecdf10aeb1 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -206,8 +206,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
struct tcs3414_data *data = iio_priv(indio_dev);
int i, j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
TCS3414_DATA_GREEN + 2*i);
if (ret < 0)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 89384dba83dd..04452b4664f3 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -383,8 +383,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = i2c_smbus_read_word_data(data->client,
TCS3472_CDATA + 2*i);
if (ret < 0)
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index cd2917d71904..8eb718f5e50f 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -39,7 +39,7 @@ config AK8975
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Asahi Kasei AK8975, AK8963,
- AK09911, AK09912 or AK09916 3-Axis Magnetometer.
+ AK09911, AK09912, AK09916 or AK09918 3-Axis Magnetometer.
To compile this driver as a module, choose M here: the module
will be called ak8975.
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index dd466c5fa621..18077fb463a9 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -78,6 +78,7 @@
*/
#define AK09912_REG_WIA1 0x00
#define AK09912_REG_WIA2 0x01
+#define AK09918_DEVICE_ID 0x0C
#define AK09916_DEVICE_ID 0x09
#define AK09912_DEVICE_ID 0x04
#define AK09911_DEVICE_ID 0x05
@@ -209,6 +210,7 @@ enum asahi_compass_chipset {
AK09911,
AK09912,
AK09916,
+ AK09918,
};
enum ak_ctrl_reg_addr {
@@ -371,6 +373,34 @@ static const struct ak_def ak_def_array[] = {
AK09912_REG_HXL,
AK09912_REG_HYL,
AK09912_REG_HZL},
+ },
+ [AK09918] = {
+ /* The ak09918 is register compatible with the ak09912; this entry
+ * avoids unknown id messages.
+ */
+ .type = AK09918,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+ .ctrl_regs = {
+ AK09912_REG_ST1,
+ AK09912_REG_ST2,
+ AK09912_REG_CNTL2,
+ AK09912_REG_ASAX,
+ AK09912_MAX_REGS},
+ .ctrl_masks = {
+ AK09912_REG_ST1_DRDY_MASK,
+ AK09912_REG_ST2_HOFL_MASK,
+ 0,
+ AK09912_REG_CNTL2_MODE_MASK},
+ .ctrl_modes = {
+ AK09912_REG_CNTL_MODE_POWER_DOWN,
+ AK09912_REG_CNTL_MODE_ONCE,
+ AK09912_REG_CNTL_MODE_SELF_TEST,
+ AK09912_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK09912_REG_HXL,
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
}
};
@@ -452,6 +482,7 @@ static int ak8975_who_i_am(struct i2c_client *client,
/*
* Signature for each device:
* Device | WIA1 | WIA2
+ * AK09918 | DEVICE_ID_| AK09918_DEVICE_ID
* AK09916 | DEVICE_ID_| AK09916_DEVICE_ID
* AK09912 | DEVICE_ID | AK09912_DEVICE_ID
* AK09911 | DEVICE_ID | AK09911_DEVICE_ID
@@ -484,10 +515,18 @@ static int ak8975_who_i_am(struct i2c_client *client,
if (wia_val[1] == AK09916_DEVICE_ID)
return 0;
break;
- default:
- dev_err(&client->dev, "Type %d unknown\n", type);
+ case AK09918:
+ if (wia_val[1] == AK09918_DEVICE_ID)
+ return 0;
+ break;
}
- return -ENODEV;
+
+ dev_info(&client->dev, "Device ID %x is unknown.\n", wia_val[1]);
+ /*
+ * Let the driver probe on an unknown id to support more
+ * register-compatible variants.
+ */
+ return 0;
}
/*
@@ -692,22 +731,8 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
if (ret < 0)
return ret;
- /* This will be executed only for non-interrupt based waiting case */
- if (ret & data->def->ctrl_masks[ST1_DRDY]) {
- ret = i2c_smbus_read_byte_data(client,
- data->def->ctrl_regs[ST2]);
- if (ret < 0) {
- dev_err(&client->dev, "Error in reading ST2\n");
- return ret;
- }
- if (ret & (data->def->ctrl_masks[ST2_DERR] |
- data->def->ctrl_masks[ST2_HOFL])) {
- dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
- return -EINVAL;
- }
- }
-
- return 0;
+ /* Return with zero if the data is ready. */
+ return !data->def->ctrl_regs[ST1_DRDY];
}
/* Retrieve raw flux value for one of the x, y, or z axis. */
@@ -734,6 +759,20 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
if (ret < 0)
goto exit;
+ /* Read out ST2 to release the lock on the measurement data. */
+ ret = i2c_smbus_read_byte_data(client, data->def->ctrl_regs[ST2]);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST2\n");
+ goto exit;
+ }
+
+ if (ret & (data->def->ctrl_masks[ST2_DERR] |
+ data->def->ctrl_masks[ST2_HOFL])) {
+ dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
mutex_unlock(&data->lock);
pm_runtime_mark_last_busy(&data->client->dev);
@@ -1067,6 +1106,7 @@ static const struct i2c_device_id ak8975_id[] = {
{"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] },
{"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] },
{"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] },
+ {"ak09918", (kernel_ulong_t)&ak_def_array[AK09918] },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8975_id);
@@ -1081,7 +1121,7 @@ static const struct of_device_id ak8975_of_match[] = {
{ .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] },
{ .compatible = "ak09912", .data = &ak_def_array[AK09912] },
{ .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] },
- { .compatible = "ak09916", .data = &ak_def_array[AK09916] },
+ { .compatible = "asahi-kasei,ak09918", .data = &ak_def_array[AK09918] },
{}
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 42b70cd42b39..0e03a772fa43 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -464,7 +464,7 @@ static irqreturn_t rm3100_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
unsigned long scan_mask = *indio_dev->active_scan_mask;
- unsigned int mask_len = indio_dev->masklength;
+ unsigned int mask_len = iio_get_masklength(indio_dev);
struct rm3100_data *data = iio_priv(indio_dev);
struct regmap *regmap = data->regmap;
int ret, i, bit;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 3ad38506028e..ce369dbb17fc 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -31,6 +31,8 @@ config BMP280
select REGMAP
select BMP280_I2C if (I2C)
select BMP280_SPI if (SPI_MASTER)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Bosch Sensortec BMP180, BMP280, BMP380
and BMP580 pressure and temperature sensors. Also supports the BME280 with
@@ -248,6 +250,15 @@ config MS5637
This driver can also be built as a module. If so, the module will
be called ms5637.
+config SDP500
+ tristate "Sensirion SDP500 differential pressure sensor I2C driver"
+ depends on I2C
+ help
+ Say Y here to build support for the Sensirion SDP500 differential
+ pressure sensor.
+ To compile this driver as a module, choose M here: the module will
+ be called sdp500.
+
config IIO_ST_PRESS
tristate "STMicroelectronics pressure sensor Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index a93709e35760..6482288e07ee 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o
obj-$(CONFIG_MS5637) += ms5637.o
+obj-$(CONFIG_SDP500) += sdp500.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 49081b729618..da379230c837 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -41,7 +41,10 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <asm/unaligned.h>
@@ -134,46 +137,169 @@ enum {
BMP380_P11 = 20,
};
+enum bmp280_scan {
+ BMP280_PRESS,
+ BMP280_TEMP,
+ BME280_HUMID,
+};
+
static const struct iio_chan_spec bmp280_channels[] = {
{
.type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_chan_spec bme280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_HUMIDITYRELATIVE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static const struct iio_chan_spec bmp380_channels[] = {
{
.type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_chan_spec bmp580_channels[] = {
{
- .type = IIO_HUMIDITYRELATIVE,
+ .type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static int bmp280_read_calib(struct bmp280_data *data)
@@ -289,7 +415,7 @@ static int bme280_read_humid_adc(struct bmp280_data *data, u16 *adc_humidity)
int ret;
ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB,
- &data->be16, sizeof(data->be16));
+ &data->be16, BME280_NUM_HUMIDITY_BYTES);
if (ret) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
@@ -335,7 +461,7 @@ static int bmp280_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
int ret;
ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -396,7 +522,7 @@ static int bmp280_read_press_adc(struct bmp280_data *data, u32 *adc_press)
int ret;
ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -445,10 +571,8 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
return (u32)p;
}
-static int bmp280_read_temp(struct bmp280_data *data,
- int *val, int *val2)
+static int bmp280_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -456,16 +580,15 @@ static int bmp280_read_temp(struct bmp280_data *data,
if (ret)
return ret;
- comp_temp = bmp280_compensate_temp(data, adc_temp);
+ *comp_temp = bmp280_compensate_temp(data, adc_temp);
- *val = comp_temp * 10;
- return IIO_VAL_INT;
+ return 0;
}
-static int bmp280_read_press(struct bmp280_data *data,
- int *val, int *val2)
+static int bmp280_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 comp_press, adc_press, t_fine;
+ u32 adc_press;
+ s32 t_fine;
int ret;
ret = bmp280_get_t_fine(data, &t_fine);
@@ -476,17 +599,13 @@ static int bmp280_read_press(struct bmp280_data *data,
if (ret)
return ret;
- comp_press = bmp280_compensate_press(data, adc_press, t_fine);
+ *comp_press = bmp280_compensate_press(data, adc_press, t_fine);
- *val = comp_press;
- *val2 = 256000;
-
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
-static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
+static int bme280_read_humid(struct bmp280_data *data, u32 *comp_humidity)
{
- u32 comp_humidity;
u16 adc_humidity;
s32 t_fine;
int ret;
@@ -499,11 +618,9 @@ static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
+ *comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
- *val = comp_humidity * 1000 / 1024;
-
- return IIO_VAL_INT;
+ return 0;
}
static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
@@ -511,6 +628,8 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct bmp280_data *data = iio_priv(indio_dev);
+ int chan_value;
+ int ret;
guard(mutex)(&data->lock);
@@ -518,11 +637,72 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
- return data->chip_info->read_humid(data, val, val2);
+ ret = data->chip_info->read_humid(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->humid_coeffs[0] * chan_value;
+ *val2 = data->chip_info->humid_coeffs[1];
+ return data->chip_info->humid_coeffs_type;
+ case IIO_PRESSURE:
+ ret = data->chip_info->read_press(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->press_coeffs[0] * chan_value;
+ *val2 = data->chip_info->press_coeffs[1];
+ return data->chip_info->press_coeffs_type;
+ case IIO_TEMP:
+ ret = data->chip_info->read_temp(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->temp_coeffs[0] * chan_value;
+ *val2 = data->chip_info->temp_coeffs[1];
+ return data->chip_info->temp_coeffs_type;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = data->chip_info->read_humid(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ ret = data->chip_info->read_press(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ ret = data->chip_info->read_temp(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ *val = data->chip_info->humid_coeffs[0];
+ *val2 = data->chip_info->humid_coeffs[1];
+ return data->chip_info->humid_coeffs_type;
case IIO_PRESSURE:
- return data->chip_info->read_press(data, val, val2);
+ *val = data->chip_info->press_coeffs[0];
+ *val2 = data->chip_info->press_coeffs[1];
+ return data->chip_info->press_coeffs_type;
case IIO_TEMP:
- return data->chip_info->read_temp(data, val, val2);
+ *val = data->chip_info->temp_coeffs[0];
+ *val2 = data->chip_info->temp_coeffs[1];
+ return data->chip_info->temp_coeffs_type;
default:
return -EINVAL;
}
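(For reference, not part of the patch: with the new coefficient tables, userspace derives values the same way the PROCESSED path above does. A minimal sketch, assuming the standard IIO_VAL_FRACTIONAL semantics of value = val / val2; the helper name is illustrative only.)

    /*
     * processed = coeffs[0] * raw / coeffs[1]
     * BMP280 temperature: coeffs = { 10, 1 }, raw in centidegrees C,
     *   so processed = raw * 10 = millidegrees C (IIO ABI unit).
     * BMP280 pressure: coeffs = { 1, 256000 }, raw in 1/256 Pa,
     *   so processed = raw / 256000 = kPa (IIO ABI unit).
     */
    static int bmp280_report_fractional(int raw, const int *coeffs,
                                        int *val, int *val2)
    {
            *val = coeffs[0] * raw;
            *val2 = coeffs[1];
            return IIO_VAL_FRACTIONAL;
    }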
@@ -793,6 +973,16 @@ static const struct iio_info bmp280_info = {
.write_raw = &bmp280_write_raw,
};
+static const unsigned long bmp280_avail_scan_masks[] = {
+ BIT(BMP280_TEMP) | BIT(BMP280_PRESS),
+ 0
+};
+
+static const unsigned long bme280_avail_scan_masks[] = {
+ BIT(BME280_HUMID) | BIT(BMP280_TEMP) | BIT(BMP280_PRESS),
+ 0
+};
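(For reference, not part of the patch: a single non-zero entry in available_scan_masks means buffered captures always sample the listed channels together; the IIO core demultiplexes down to whatever subset userspace actually enabled.)

    /*
     * BMP280: BIT(BMP280_PRESS) | BIT(BMP280_TEMP)                     == 0x3
     * BME280: BIT(BMP280_PRESS) | BIT(BMP280_TEMP) | BIT(BME280_HUMID) == 0x7
     */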
+
static int bmp280_chip_config(struct bmp280_data *data)
{
u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
@@ -820,8 +1010,57 @@ static int bmp280_chip_config(struct bmp280_data *data)
return ret;
}
+static irqreturn_t bmp280_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3]));
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp280_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID };
+static const int bmp280_temp_coeffs[] = { 10, 1 };
+static const int bmp280_press_coeffs[] = { 1, 256000 };
const struct bmp280_chip_info bmp280_chip_info = {
.id_reg = BMP280_REG_ID,
@@ -830,7 +1069,8 @@ const struct bmp280_chip_info bmp280_chip_info = {
.regmap_config = &bmp280_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp280_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
@@ -850,10 +1090,17 @@ const struct bmp280_chip_info bmp280_chip_info = {
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
.oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
+ .temp_coeffs = bmp280_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp280_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_calib = bmp280_read_calib,
+
+ .trigger_handler = bmp280_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280);
@@ -876,16 +1123,74 @@ static int bme280_chip_config(struct bmp280_data *data)
return bmp280_chip_config(data);
}
+static irqreturn_t bme280_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, adc_humidity, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, BME280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3]));
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp280_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine);
+
+ /* Humidity calculations */
+ adc_humidity = get_unaligned_be16(&data->buf[6]);
+
+ if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
+ dev_err(data->dev, "reading humidity skipped\n");
+ goto out;
+ }
+ data->sensor_data[2] = bme280_compensate_humidity(data, adc_humidity, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const u8 bme280_chip_ids[] = { BME280_CHIP_ID };
+static const int bme280_humid_coeffs[] = { 1000, 1024 };
const struct bmp280_chip_info bme280_chip_info = {
.id_reg = BMP280_REG_ID,
.chip_id = bme280_chip_ids,
.num_chip_id = ARRAY_SIZE(bme280_chip_ids),
- .regmap_config = &bmp280_regmap_config,
+ .regmap_config = &bme280_regmap_config,
.start_up_time = 2000,
- .channels = bmp280_channels,
- .num_channels = 3,
+ .channels = bme280_channels,
+ .num_channels = ARRAY_SIZE(bme280_channels),
+ .avail_scan_masks = bme280_avail_scan_masks,
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
@@ -899,11 +1204,20 @@ const struct bmp280_chip_info bme280_chip_info = {
.num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
.oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1,
+ .temp_coeffs = bmp280_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp280_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+ .humid_coeffs = bme280_humid_coeffs,
+ .humid_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bme280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_humid = bme280_read_humid,
.read_calib = bme280_read_calib,
+
+ .trigger_handler = bme280_trigger_handler,
};
EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280);
@@ -958,7 +1272,7 @@ static int bmp380_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
int ret;
ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -1027,7 +1341,7 @@ static int bmp380_read_press_adc(struct bmp280_data *data, u32 *adc_press)
int ret;
ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -1091,9 +1405,8 @@ static u32 bmp380_compensate_press(struct bmp280_data *data,
return comp_press;
}
-static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp380_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -1101,15 +1414,14 @@ static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_temp = bmp380_compensate_temp(data, adc_temp);
+ *comp_temp = bmp380_compensate_temp(data, adc_temp);
- *val = comp_temp * 10;
- return IIO_VAL_INT;
+ return 0;
}
-static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp380_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 adc_press, comp_press, t_fine;
+ u32 adc_press, t_fine;
int ret;
ret = bmp380_get_t_fine(data, &t_fine);
@@ -1120,12 +1432,9 @@ static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_press = bmp380_compensate_press(data, adc_press, t_fine);
-
- *val = comp_press;
- *val2 = 100000;
+ *comp_press = bmp380_compensate_press(data, adc_press, t_fine);
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
static int bmp380_read_calib(struct bmp280_data *data)
@@ -1272,10 +1581,11 @@ static int bmp380_chip_config(struct bmp280_data *data)
}
/*
* Waits for measurement before checking configuration error
- * flag. Selected longest measure time indicated in
- * section 3.9.1 in the datasheet.
+ * flag. Use the longest measurement time, calculated from the
+ * formula in datasheet section 3.9.2 with an offset of ~+15%,
+ * as also seen in table 3.9.1.
*/
- msleep(80);
+ msleep(150);
/* Check config error flag */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
@@ -1293,9 +1603,58 @@ static int bmp380_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp380_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ adc_temp = get_unaligned_le24(&data->buf[3]);
+ if (adc_temp == BMP380_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp380_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = get_unaligned_le24(&data->buf[0]);
+ if (adc_press == BMP380_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp380_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp380_compensate_press(data, adc_press, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID };
+static const int bmp380_temp_coeffs[] = { 10, 1 };
+static const int bmp380_press_coeffs[] = { 1, 100000 };
const struct bmp280_chip_info bmp380_chip_info = {
.id_reg = BMP380_REG_ID,
@@ -1305,7 +1664,8 @@ const struct bmp280_chip_info bmp380_chip_info = {
.spi_read_extra_byte = true,
.start_up_time = 2000,
.channels = bmp380_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp380_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp380_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp380_oversampling_avail),
@@ -1323,11 +1683,18 @@ const struct bmp280_chip_info bmp380_chip_info = {
.num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
.iir_filter_coeff_default = 2,
+ .temp_coeffs = bmp380_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp380_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp380_chip_config,
.read_temp = bmp380_read_temp,
.read_press = bmp380_read_press,
.read_calib = bmp380_read_calib,
.preinit = bmp380_preinit,
+
+ .trigger_handler = bmp380_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp380_chip_info, IIO_BMP280);
@@ -1443,58 +1810,48 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
* for what is expected on IIO ABI.
*/
-static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp580_read_temp(struct bmp280_data *data, s32 *raw_temp)
{
- s32 raw_temp;
+ s32 value_temp;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB, data->buf,
- sizeof(data->buf));
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB,
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
}
- raw_temp = get_unaligned_le24(data->buf);
- if (raw_temp == BMP580_TEMP_SKIPPED) {
+ value_temp = get_unaligned_le24(data->buf);
+ if (value_temp == BMP580_TEMP_SKIPPED) {
dev_err(data->dev, "reading temperature skipped\n");
return -EIO;
}
+ *raw_temp = sign_extend32(value_temp, 23);
- /*
- * Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return millidegrees
- * Celsius to respect IIO ABI.
- */
- raw_temp = sign_extend32(raw_temp, 23);
- *val = ((s64)raw_temp * 1000) / (1 << 16);
- return IIO_VAL_INT;
+ return 0;
}
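(For reference, not part of the patch: sign_extend32(value, 23) treats bit 23 as the sign bit of the 24-bit raw sample. A few worked values:)

    /*
     * sign_extend32(0x000001, 23) ==  1
     * sign_extend32(0x7FFFFF, 23) ==  8388607
     * sign_extend32(0xFFFFFF, 23) == -1
     * sign_extend32(0x800000, 23) == -8388608
     */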
-static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp580_read_press(struct bmp280_data *data, u32 *raw_press)
{
- u32 raw_press;
+ u32 value_press;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB, data->buf,
- sizeof(data->buf));
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB,
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
}
- raw_press = get_unaligned_le24(data->buf);
- if (raw_press == BMP580_PRESS_SKIPPED) {
+ value_press = get_unaligned_le24(data->buf);
+ if (value_press == BMP580_PRESS_SKIPPED) {
dev_err(data->dev, "reading pressure skipped\n");
return -EIO;
}
- /*
- * Pressure is returned in Pascals in fractional form down 2^16.
- * We rescale /1000 to convert to kilopascal to respect IIO ABI.
- */
- *val = raw_press;
- *val2 = 64000; /* 2^6 * 1000 */
- return IIO_VAL_FRACTIONAL;
+ *raw_press = value_press;
+
+ return 0;
}
static const int bmp580_odr_table[][2] = {
@@ -1828,8 +2185,43 @@ static int bmp580_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp580_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ memcpy(&data->sensor_data[1], &data->buf[0], 3);
+
+ /* Pressure calculations */
+ memcpy(&data->sensor_data[0], &data->buf[3], 3);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT };
+/* Use { 125, 13 } instead of { 1000, 16 } to avoid overflow in the scaling */
+static const int bmp580_temp_coeffs[] = { 125, 13 };
+static const int bmp580_press_coeffs[] = { 1, 64000 };
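(For reference, not part of the patch: the worked fraction behind the comment above. The BMP580 raw temperature is degrees C scaled by 2^16 and the IIO ABI wants millidegrees, i.e. a scale of 1000 / 2^16; reducing the fraction by 8 keeps coeffs[0] * raw within a 32-bit int.)

    /*
     * 1000 / 2^16 == (1000 / 8) / (2^16 / 8) == 125 / 2^13
     * IIO_VAL_FRACTIONAL_LOG2: value = val / 2^val2 = (125 * raw) / 2^13
     * Worst case: 125 * 2^23 ~= 1.05e9 fits in an int, while 1000 * 2^23
     * (~8.4e9) would overflow.
     */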
const struct bmp280_chip_info bmp580_chip_info = {
.id_reg = BMP580_REG_CHIP_ID,
@@ -1837,8 +2229,9 @@ const struct bmp280_chip_info bmp580_chip_info = {
.num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
.regmap_config = &bmp580_regmap_config,
.start_up_time = 2000,
- .channels = bmp380_channels,
- .num_channels = 2,
+ .channels = bmp580_channels,
+ .num_channels = ARRAY_SIZE(bmp580_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp580_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp580_oversampling_avail),
@@ -1856,16 +2249,23 @@ const struct bmp280_chip_info bmp580_chip_info = {
.num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
.iir_filter_coeff_default = 2,
+ .temp_coeffs = bmp580_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL_LOG2,
+ .press_coeffs = bmp580_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp580_chip_config,
.read_temp = bmp580_read_temp,
.read_press = bmp580_read_press,
.preinit = bmp580_preinit,
+
+ .trigger_handler = bmp580_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp580_chip_info, IIO_BMP280);
static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas)
{
- const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
+ static const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
unsigned int delay_us;
unsigned int ctrl;
int ret;
@@ -2011,9 +2411,8 @@ static s32 bmp180_compensate_temp(struct bmp280_data *data, u32 adc_temp)
return (bmp180_calc_t_fine(data, adc_temp) + 8) / 16;
}
-static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp180_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -2021,10 +2420,9 @@ static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_temp = bmp180_compensate_temp(data, adc_temp);
+ *comp_temp = bmp180_compensate_temp(data, adc_temp);
- *val = comp_temp * 100;
- return IIO_VAL_INT;
+ return 0;
}
static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press)
@@ -2040,7 +2438,7 @@ static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press)
return ret;
ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -2087,9 +2485,9 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, u32 adc_press,
return p + ((x1 + x2 + 3791) >> 4);
}
-static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp180_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 comp_press, adc_press;
+ u32 adc_press;
s32 t_fine;
int ret;
@@ -2101,12 +2499,9 @@ static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_press = bmp180_compensate_press(data, adc_press, t_fine);
-
- *val = comp_press;
- *val2 = 1000;
+ *comp_press = bmp180_compensate_press(data, adc_press, t_fine);
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
static int bmp180_chip_config(struct bmp280_data *data)
@@ -2114,9 +2509,41 @@ static int bmp180_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp180_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret, chan_value;
+
+ guard(mutex)(&data->lock);
+
+ ret = bmp180_read_temp(data, &chan_value);
+ if (ret)
+ goto out;
+
+ data->sensor_data[1] = chan_value;
+
+ ret = bmp180_read_press(data, &chan_value);
+ if (ret)
+ goto out;
+
+ data->sensor_data[0] = chan_value;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID };
+static const int bmp180_temp_coeffs[] = { 100, 1 };
+static const int bmp180_press_coeffs[] = { 1, 1000 };
const struct bmp280_chip_info bmp180_chip_info = {
.id_reg = BMP280_REG_ID,
@@ -2125,7 +2552,8 @@ const struct bmp280_chip_info bmp180_chip_info = {
.regmap_config = &bmp180_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp280_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp180_oversampling_temp_avail,
.num_oversampling_temp_avail =
@@ -2137,10 +2565,17 @@ const struct bmp280_chip_info bmp180_chip_info = {
ARRAY_SIZE(bmp180_oversampling_press_avail),
.oversampling_press_default = BMP180_MEAS_PRESS_8X,
+ .temp_coeffs = bmp180_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp180_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp180_chip_config,
.read_temp = bmp180_read_temp,
.read_press = bmp180_read_press,
.read_calib = bmp180_read_calib,
+
+ .trigger_handler = bmp180_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp180_chip_info, IIO_BMP280);
@@ -2186,6 +2621,30 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
return 0;
}
+static int bmp280_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(data->dev);
+
+ return 0;
+}
+
+static int bmp280_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bmp280_buffer_setup_ops = {
+ .preenable = bmp280_buffer_preenable,
+ .postdisable = bmp280_buffer_postdisable,
+};
+
static void bmp280_pm_disable(void *data)
{
struct device *dev = data;
@@ -2232,6 +2691,7 @@ int bmp280_common_probe(struct device *dev,
/* Apply initial values from chip info structure */
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
+ indio_dev->available_scan_masks = chip_info->avail_scan_masks;
data->oversampling_press = chip_info->oversampling_press_default;
data->oversampling_humid = chip_info->oversampling_humid_default;
data->oversampling_temp = chip_info->oversampling_temp_default;
@@ -2317,6 +2777,14 @@ int bmp280_common_probe(struct device *dev,
"failed to read calibration coefficients\n");
}
+ ret = devm_iio_triggered_buffer_setup(data->dev, indio_dev,
+ iio_pollfunc_store_time,
+ data->chip_info->trigger_handler,
+ &bmp280_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "iio triggered buffer setup failed\n");
+
/*
* Attempt to grab an optional EOC IRQ - only the BMP085 has this
* however as it happens, the BMP085 shares the chip ID of BMP180
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 34e3bc758493..5c3a63b4327c 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include "bmp280.h"
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index fa52839474b1..d27d68edd906 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -41,7 +41,7 @@ const struct regmap_config bmp180_regmap_config = {
};
EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280);
-static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+static bool bme280_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case BMP280_REG_CONFIG:
@@ -54,9 +54,37 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
}
}
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bme280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
case BME280_REG_HUMIDITY_LSB:
case BME280_REG_HUMIDITY_MSB:
case BMP280_REG_TEMP_XLSB:
@@ -71,7 +99,6 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
}
-
static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -167,7 +194,7 @@ const struct regmap_config bmp280_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = BME280_REG_HUMIDITY_LSB,
+ .max_register = BMP280_REG_TEMP_XLSB,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = bmp280_is_writeable_reg,
@@ -175,6 +202,18 @@ const struct regmap_config bmp280_regmap_config = {
};
EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+const struct regmap_config bme280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BME280_REG_HUMIDITY_LSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bme280_is_writeable_reg,
+ .volatile_reg = bme280_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS(bme280_regmap_config, IIO_BMP280);
+
const struct regmap_config bmp380_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 62b4e58104cf..d18549d9bb64 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -5,10 +5,10 @@
* Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
*/
#include <linux/bits.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
#include "bmp280.h"
@@ -40,14 +40,10 @@ static int bmp380_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
struct spi_device *spi = to_spi_device(context);
- u8 rx_buf[4];
+ u8 rx_buf[BME280_BURST_READ_BYTES + 1];
ssize_t status;
- /*
- * Maximum number of consecutive bytes read for a temperature or
- * pressure measurement is 3.
- */
- if (val_size > 3)
+ if (val_size > BME280_BURST_READ_BYTES)
return -EINVAL;
/*
@@ -64,14 +60,14 @@ static int bmp380_regmap_spi_read(void *context, const void *reg,
return 0;
}
-static struct regmap_bus bmp280_regmap_bus = {
+static const struct regmap_bus bmp280_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp280_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
-static struct regmap_bus bmp380_regmap_bus = {
+static const struct regmap_bus bmp380_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp380_regmap_spi_read,
.read_flag_mask = BIT(7),
@@ -83,7 +79,7 @@ static int bmp280_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct bmp280_chip_info *chip_info;
- struct regmap_bus *bmp_regmap_bus;
+ struct regmap_bus const *bmp_regmap_bus;
struct regmap *regmap;
int ret;
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 7c30e4d523be..ccacc67c1473 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -1,10 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bitops.h>
#include <linux/device.h>
-#include <linux/iio/iio.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
/* BMP580 specific registers */
#define BMP580_REG_CMD 0x7E
@@ -304,6 +304,16 @@
#define BMP280_PRESS_SKIPPED 0x80000
#define BMP280_HUMIDITY_SKIPPED 0x8000
+/* Number of bytes for each value */
+#define BMP280_NUM_PRESS_BYTES 3
+#define BMP280_NUM_TEMP_BYTES 3
+#define BME280_NUM_HUMIDITY_BYTES 2
+#define BMP280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \
+ BMP280_NUM_TEMP_BYTES)
+#define BME280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \
+ BMP280_NUM_TEMP_BYTES + \
+ BME280_NUM_HUMIDITY_BYTES)
+
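(For reference, not part of the patch: the byte counts above describe how the trigger handlers slice data->buf after a burst read.)

    /*
     * Burst reads starting at the pressure registers (BMP280/BME280/BMP380):
     *   buf[0..2] pressure, buf[3..5] temperature, buf[6..7] humidity (BME280)
     * The BMP580 burst starts at BMP580_REG_TEMP_XLSB, so temperature comes
     * first there (buf[0..2]) and pressure second (buf[3..5]).
     * BMP280_BURST_READ_BYTES == 6, BME280_BURST_READ_BYTES == 8.
     */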
/* Core exported structs */
static const char *const bmp280_supply_names[] = {
@@ -398,12 +408,18 @@ struct bmp280_data {
int sampling_freq;
/*
+ * Data to push to userspace triggered buffer. Up to 3 channels and
+ * s64 timestamp, aligned.
+ */
+ s32 sensor_data[6] __aligned(8);
+
+ /*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
/* Sensor data buffer */
- u8 buf[3];
+ u8 buf[BME280_BURST_READ_BYTES];
/* Calibration data buffers */
__le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
__be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
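(For reference, not part of the patch: a sketch of the scan layout pushed by the trigger handlers into the new sensor_data array, assuming iio_push_to_buffers_with_timestamp() places the s64 timestamp at the next 8-byte boundary after the enabled channels.)

    /*
     * BME280, all channels enabled:
     *   sensor_data[0] pressure, [1] temperature, [2] humidity,
     *   [3] padding, [4..5] s64 timestamp
     * BMP280/BMP380/BMP580 (two channels):
     *   sensor_data[0] pressure, [1] temperature, [2..3] s64 timestamp
     * __aligned(8) keeps the timestamp naturally aligned.
     */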
@@ -425,6 +441,7 @@ struct bmp280_chip_info {
const struct iio_chan_spec *channels;
int num_channels;
unsigned int start_up_time;
+ const unsigned long *avail_scan_masks;
const int *oversampling_temp_avail;
int num_oversampling_temp_avail;
@@ -446,12 +463,21 @@ struct bmp280_chip_info {
int num_sampling_freq_avail;
int sampling_freq_default;
+ const int *temp_coeffs;
+ const int temp_coeffs_type;
+ const int *press_coeffs;
+ const int press_coeffs_type;
+ const int *humid_coeffs;
+ const int humid_coeffs_type;
+
int (*chip_config)(struct bmp280_data *data);
- int (*read_temp)(struct bmp280_data *data, int *val, int *val2);
- int (*read_press)(struct bmp280_data *data, int *val, int *val2);
- int (*read_humid)(struct bmp280_data *data, int *val, int *val2);
+ int (*read_temp)(struct bmp280_data *data, s32 *adc_temp);
+ int (*read_press)(struct bmp280_data *data, u32 *adc_press);
+ int (*read_humid)(struct bmp280_data *data, u32 *adc_humidity);
int (*read_calib)(struct bmp280_data *data);
int (*preinit)(struct bmp280_data *data);
+
+ irqreturn_t (*trigger_handler)(int irq, void *p);
};
/* Chip infos for each variant */
@@ -464,6 +490,7 @@ extern const struct bmp280_chip_info bmp580_chip_info;
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
+extern const struct regmap_config bme280_regmap_config;
extern const struct regmap_config bmp380_regmap_config;
extern const struct regmap_config bmp580_regmap_config;
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 0bba4c5a8d40..c1cea9d40424 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -256,8 +256,7 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
if (ret)
goto out;
- for_each_set_bit(chn, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, chn) {
memcpy(&tmp_buf[i++],
&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
DLH_NUM_DATA_BYTES);
diff --git a/drivers/iio/pressure/sdp500.c b/drivers/iio/pressure/sdp500.c
new file mode 100644
index 000000000000..6ff32e3fa637
--- /dev/null
+++ b/drivers/iio/pressure/sdp500.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Sensirion sdp500 and sdp510 pressure sensors
+ *
+ * Datasheet: https://sensirion.com/resource/datasheet/sdp600
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc8.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#define SDP500_CRC8_POLYNOMIAL 0x31 /* x8+x5+x4+1 (normalized to 0x31) */
+#define SDP500_READ_SIZE 3
+
+#define SDP500_I2C_START_MEAS 0xF1
+
+struct sdp500_data {
+ struct device *dev;
+};
+
+DECLARE_CRC8_TABLE(sdp500_crc8_table);
+
+static int sdp500_start_measurement(struct sdp500_data *data)
+{
+ struct i2c_client *client = to_i2c_client(data->dev);
+
+ return i2c_smbus_write_byte(client, SDP500_I2C_START_MEAS);
+}
+
+static const struct iio_chan_spec sdp500_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int sdp500_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ u8 rxbuf[SDP500_READ_SIZE];
+ u8 received_crc, calculated_crc;
+ struct sdp500_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = to_i2c_client(data->dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_master_recv(client, rxbuf, SDP500_READ_SIZE);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to receive data");
+ return ret;
+ }
+ if (ret != SDP500_READ_SIZE) {
+ dev_err(data->dev, "Data is received wrongly");
+ return -EIO;
+ }
+
+ received_crc = rxbuf[2];
+ calculated_crc = crc8(sdp500_crc8_table, rxbuf,
+ sizeof(rxbuf) - 1, 0x00);
+ if (received_crc != calculated_crc) {
+ dev_err(data->dev,
+ "calculated crc = 0x%.2X, received 0x%.2X",
+ calculated_crc, received_crc);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be16(rxbuf);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = 60;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
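(For reference, not part of the patch: with the SCALE above reported as IIO_VAL_FRACTIONAL, userspace computes the pressure as raw * 1/60; the divisor presumably reflects the SDP500 datasheet scale factor of 60 counts per unit of pressure.)

    /* pressure = in_pressure_raw * in_pressure_scale = raw / 60 */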
+
+static const struct iio_info sdp500_info = {
+ .read_raw = &sdp500_read_raw,
+};
+
+static int sdp500_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct sdp500_data *data;
+ struct device *dev = &client->dev;
+ int ret;
+ u8 rxbuf[SDP500_READ_SIZE];
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable regulator\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ /* has to be done before the first i2c communication */
+ crc8_populate_msb(sdp500_crc8_table, SDP500_CRC8_POLYNOMIAL);
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+
+ indio_dev->name = "sdp500";
+ indio_dev->channels = sdp500_channels;
+ indio_dev->info = &sdp500_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(sdp500_channels);
+
+ ret = sdp500_start_measurement(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to start measurement");
+
+ /* First measurement is not correct, read it out to get rid of it */
+ i2c_master_recv(client, rxbuf, SDP500_READ_SIZE);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register indio_dev");
+
+ return 0;
+}
+
+static const struct i2c_device_id sdp500_id[] = {
+ { "sdp500" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sdp500_id);
+
+static const struct of_device_id sdp500_of_match[] = {
+ { .compatible = "sensirion,sdp500" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdp500_of_match);
+
+static struct i2c_driver sdp500_driver = {
+ .driver = {
+ .name = "sensirion,sdp500",
+ .of_match_table = sdp500_of_match,
+ },
+ .probe = sdp500_probe,
+ .id_table = sdp500_id,
+};
+module_i2c_driver(sdp500_driver);
+
+MODULE_AUTHOR("Thomas Sioutas <thomas.sioutas@prodrive-technologies.com>");
+MODULE_DESCRIPTION("Driver for Sensirion SDP500 differential pressure sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 2ca3b0bc5eba..31c679074b25 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,20 @@ config CROS_EC_MKBP_PROXIMITY
To compile this driver as a module, choose M here: the
module will be called cros_ec_mkbp_proximity.
+config HX9023S
+ tristate "TYHX HX9023S SAR sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for the TYHX HX9023S capacitive SAR
+ sensor. This sensor is used for proximity detection applications.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hx9023s.
+
config IRSD200
tristate "Murata IRS-D200 PIR sensor"
select IIO_BUFFER
@@ -219,4 +233,15 @@ config VL53L0X_I2C
To compile this driver as a module, choose M here: the
module will be called vl53l0x-i2c.
+config AW96103
+ tristate "AW96103/AW96105 Awinic proximity sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Awinic's AW96103/AW96105 capacitive
+ proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aw96103.
+
endmenu
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index f36598380446..c5e76995764a 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_CROS_EC_MKBP_PROXIMITY) += cros_ec_mkbp_proximity.o
+obj-$(CONFIG_HX9023S) += hx9023s.o
obj-$(CONFIG_IRSD200) += irsd200.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
@@ -21,4 +22,5 @@ obj-$(CONFIG_SX_COMMON) += sx_common.o
obj-$(CONFIG_SX9500) += sx9500.o
obj-$(CONFIG_VCNL3020) += vcnl3020.o
obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o
+obj-$(CONFIG_AW96103) += aw96103.o
diff --git a/drivers/iio/proximity/aw96103.c b/drivers/iio/proximity/aw96103.c
new file mode 100644
index 000000000000..db9d78e961fd
--- /dev/null
+++ b/drivers/iio/proximity/aw96103.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AWINIC aw96103 proximity sensor driver
+ *
+ * Author: Wang Shuaijie <wangshuaijie@awinic.com>
+ *
+ * Copyright (c) 2024 awinic Technology CO., LTD
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define AW_DATA_PROCESS_FACTOR 1024
+#define AW96103_CHIP_ID 0xa961
+#define AW96103_BIN_VALID_DATA_OFFSET 64
+#define AW96103_BIN_DATA_LEN_OFFSET 16
+#define AW96103_BIN_DATA_REG_NUM_SIZE 4
+#define AW96103_BIN_CHIP_TYPE_SIZE 8
+#define AW96103_BIN_CHIP_TYPE_OFFSET 24
+
+#define AW96103_REG_SCANCTRL0 0x0000
+#define AW96103_REG_STAT0 0x0090
+#define AW96103_REG_BLFILT_CH0 0x00A8
+#define AW96103_REG_BLRSTRNG_CH0 0x00B4
+#define AW96103_REG_DIFF_CH0 0x0240
+#define AW96103_REG_FWVER2 0x0410
+#define AW96103_REG_CMD 0xF008
+#define AW96103_REG_IRQSRC 0xF080
+#define AW96103_REG_IRQEN 0xF084
+#define AW96103_REG_RESET 0xFF0C
+#define AW96103_REG_CHIPID 0xFF10
+#define AW96103_REG_EEDA0 0x0408
+#define AW96103_REG_EEDA1 0x040C
+#define AW96103_REG_PROXCTRL_CH0 0x00B0
+#define AW96103_REG_PROXTH0_CH0 0x00B8
+#define AW96103_PROXTH_CH_STEP 0x3C
+#define AW96103_THHYST_MASK GENMASK(13, 12)
+#define AW96103_INDEB_MASK GENMASK(11, 10)
+#define AW96103_OUTDEB_MASK GENMASK(9, 8)
+#define AW96103_INITOVERIRQ_MASK BIT(0)
+#define AW96103_BLFILT_CH_STEP 0x3C
+#define AW96103_BLRSTRNG_MASK GENMASK(5, 0)
+#define AW96103_CHIPID_MASK GENMASK(31, 16)
+#define AW96103_BLERRTRIG_MASK BIT(25)
+#define AW96103_CHAN_EN_MASK GENMASK(5, 0)
+#define AW96103_REG_PROXCTRL_CH(x) \
+ (AW96103_REG_PROXCTRL_CH0 + (x) * AW96103_PROXTH_CH_STEP)
+
+#define AW96103_REG_PROXTH0_CH(x) \
+ (AW96103_REG_PROXTH0_CH0 + (x) * AW96103_PROXTH_CH_STEP)
+
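(For reference, not part of the patch: per-channel registers use a fixed stride of AW96103_PROXTH_CH_STEP. Worked addresses for channel 2:)

    /*
     * AW96103_REG_PROXCTRL_CH(2) == 0x00B0 + 2 * 0x3C == 0x0128
     * AW96103_REG_PROXTH0_CH(2)  == 0x00B8 + 2 * 0x3C == 0x0130
     */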
+/**
+ * struct aw_bin - Store the data obtained from parsing the configuration file.
+ * @chip_type: Frame header information - chip type
+ * @valid_data_len: Length of valid data obtained after parsing
+ * @valid_data_addr: The offset address of the valid data obtained
+ * after parsing relative to info
+ * @len: The size of the bin file obtained from the firmware
+ * @data: Store the bin file obtained from the firmware
+ */
+struct aw_bin {
+ unsigned char chip_type[8];
+ unsigned int valid_data_len;
+ unsigned int valid_data_addr;
+ unsigned int len;
+ unsigned char data[] __counted_by(len);
+};
+
+enum aw96103_sar_vers {
+ AW96103 = 2,
+ AW96103A = 6,
+ AW96103B = 0xa,
+};
+
+enum aw96103_operation_mode {
+ AW96103_ACTIVE_MODE = 1,
+ AW96103_SLEEP_MODE = 2,
+ AW96103_DEEPSLEEP_MODE = 3,
+ AW96103B_DEEPSLEEP_MODE = 4,
+};
+
+enum aw96103_sensor_type {
+ AW96103_VAL,
+ AW96105_VAL,
+};
+
+struct aw_channels_info {
+ bool used;
+ unsigned int old_irq_status;
+};
+
+struct aw_chip_info {
+ const char *name;
+ struct iio_chan_spec const *channels;
+ int num_channels;
+};
+
+struct aw96103 {
+ unsigned int hostirqen;
+ struct regmap *regmap;
+ struct device *dev;
+ /*
+ * There is one more logical channel than the actual channels,
+ * and the extra logical channel is used for temperature detection
+ * but not for status detection. The specific channel used for
+ * temperature detection is determined by the register configuration.
+ */
+ struct aw_channels_info channels_arr[6];
+ unsigned int max_channels;
+ unsigned int chan_en;
+};
+
+static const unsigned int aw96103_reg_default[] = {
+ 0x0000, 0x00003f3f, 0x0004, 0x00000064, 0x0008, 0x0017c11e,
+ 0x000c, 0x05000000, 0x0010, 0x00093ffd, 0x0014, 0x19240009,
+ 0x0018, 0xd81c0207, 0x001c, 0xff000000, 0x0020, 0x00241900,
+ 0x0024, 0x00093ff7, 0x0028, 0x58020009, 0x002c, 0xd81c0207,
+ 0x0030, 0xff000000, 0x0034, 0x00025800, 0x0038, 0x00093fdf,
+ 0x003c, 0x7d3b0009, 0x0040, 0xd81c0207, 0x0044, 0xff000000,
+ 0x0048, 0x003b7d00, 0x004c, 0x00093f7f, 0x0050, 0xe9310009,
+ 0x0054, 0xd81c0207, 0x0058, 0xff000000, 0x005c, 0x0031e900,
+ 0x0060, 0x00093dff, 0x0064, 0x1a0c0009, 0x0068, 0xd81c0207,
+ 0x006c, 0xff000000, 0x0070, 0x000c1a00, 0x0074, 0x80093fff,
+ 0x0078, 0x043d0009, 0x007c, 0xd81c0207, 0x0080, 0xff000000,
+ 0x0084, 0x003d0400, 0x00a0, 0xe6400000, 0x00a4, 0x00000000,
+ 0x00a8, 0x010408d2, 0x00ac, 0x00000000, 0x00b0, 0x00000000,
+ 0x00b8, 0x00005fff, 0x00bc, 0x00000000, 0x00c0, 0x00000000,
+ 0x00c4, 0x00000000, 0x00c8, 0x00000000, 0x00cc, 0x00000000,
+ 0x00d0, 0x00000000, 0x00d4, 0x00000000, 0x00d8, 0x00000000,
+ 0x00dc, 0xe6447800, 0x00e0, 0x78000000, 0x00e4, 0x010408d2,
+ 0x00e8, 0x00000000, 0x00ec, 0x00000000, 0x00f4, 0x00005fff,
+ 0x00f8, 0x00000000, 0x00fc, 0x00000000, 0x0100, 0x00000000,
+ 0x0104, 0x00000000, 0x0108, 0x00000000, 0x010c, 0x02000000,
+ 0x0110, 0x00000000, 0x0114, 0x00000000, 0x0118, 0xe6447800,
+ 0x011c, 0x78000000, 0x0120, 0x010408d2, 0x0124, 0x00000000,
+ 0x0128, 0x00000000, 0x0130, 0x00005fff, 0x0134, 0x00000000,
+ 0x0138, 0x00000000, 0x013c, 0x00000000, 0x0140, 0x00000000,
+ 0x0144, 0x00000000, 0x0148, 0x02000000, 0x014c, 0x00000000,
+ 0x0150, 0x00000000, 0x0154, 0xe6447800, 0x0158, 0x78000000,
+ 0x015c, 0x010408d2, 0x0160, 0x00000000, 0x0164, 0x00000000,
+ 0x016c, 0x00005fff, 0x0170, 0x00000000, 0x0174, 0x00000000,
+ 0x0178, 0x00000000, 0x017c, 0x00000000, 0x0180, 0x00000000,
+ 0x0184, 0x02000000, 0x0188, 0x00000000, 0x018c, 0x00000000,
+ 0x0190, 0xe6447800, 0x0194, 0x78000000, 0x0198, 0x010408d2,
+ 0x019c, 0x00000000, 0x01a0, 0x00000000, 0x01a8, 0x00005fff,
+ 0x01ac, 0x00000000, 0x01b0, 0x00000000, 0x01b4, 0x00000000,
+ 0x01b8, 0x00000000, 0x01bc, 0x00000000, 0x01c0, 0x02000000,
+ 0x01c4, 0x00000000, 0x01c8, 0x00000000, 0x01cc, 0xe6407800,
+ 0x01d0, 0x78000000, 0x01d4, 0x010408d2, 0x01d8, 0x00000000,
+ 0x01dc, 0x00000000, 0x01e4, 0x00005fff, 0x01e8, 0x00000000,
+ 0x01ec, 0x00000000, 0x01f0, 0x00000000, 0x01f4, 0x00000000,
+ 0x01f8, 0x00000000, 0x01fc, 0x02000000, 0x0200, 0x00000000,
+ 0x0204, 0x00000000, 0x0208, 0x00000008, 0x020c, 0x0000000d,
+ 0x41fc, 0x00000000, 0x4400, 0x00000000, 0x4410, 0x00000000,
+ 0x4420, 0x00000000, 0x4430, 0x00000000, 0x4440, 0x00000000,
+ 0x4450, 0x00000000, 0x4460, 0x00000000, 0x4470, 0x00000000,
+ 0xf080, 0x00003018, 0xf084, 0x00000fff, 0xf800, 0x00000000,
+ 0xf804, 0x00002e00, 0xf8d0, 0x00000001, 0xf8d4, 0x00000000,
+ 0xff00, 0x00000301, 0xff0c, 0x01000000, 0xffe0, 0x00000000,
+ 0xfff4, 0x00004011, 0x0090, 0x00000000, 0x0094, 0x00000000,
+ 0x0098, 0x00000000, 0x009c, 0x3f3f3f3f,
+};
+
+static const struct iio_event_spec aw_common_events[3] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_VALUE),
+ }
+};
+
+#define AW_IIO_CHANNEL(idx) \
+{ \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = aw_common_events, \
+ .num_event_specs = ARRAY_SIZE(aw_common_events), \
+} \
+
+static const struct iio_chan_spec aw96103_channels[] = {
+ AW_IIO_CHANNEL(0),
+ AW_IIO_CHANNEL(1),
+ AW_IIO_CHANNEL(2),
+ AW_IIO_CHANNEL(3),
+};
+
+static const struct iio_chan_spec aw96105_channels[] = {
+ AW_IIO_CHANNEL(0),
+ AW_IIO_CHANNEL(1),
+ AW_IIO_CHANNEL(2),
+ AW_IIO_CHANNEL(3),
+ AW_IIO_CHANNEL(4),
+ AW_IIO_CHANNEL(5),
+};
+
+static const struct aw_chip_info aw_chip_info_tbl[] = {
+ [AW96103_VAL] = {
+ .name = "aw96103_sensor",
+ .channels = aw96103_channels,
+ .num_channels = ARRAY_SIZE(aw96103_channels),
+ },
+ [AW96105_VAL] = {
+ .name = "aw96105_sensor",
+ .channels = aw96105_channels,
+ .num_channels = ARRAY_SIZE(aw96105_channels),
+ },
+};
+
+static void aw96103_parsing_bin_file(struct aw_bin *bin)
+{
+ bin->valid_data_addr = AW96103_BIN_VALID_DATA_OFFSET;
+ bin->valid_data_len =
+ *(unsigned int *)(bin->data + AW96103_BIN_DATA_LEN_OFFSET) -
+ AW96103_BIN_DATA_REG_NUM_SIZE;
+ memcpy(bin->chip_type, bin->data + AW96103_BIN_CHIP_TYPE_OFFSET,
+ AW96103_BIN_CHIP_TYPE_SIZE);
+}
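(For reference, not part of the patch: the offsets used above imply the following bin file layout, relative to the start of bin->data; the payload itself is presumably register address/value pairs.)

    /*
     * bytes 16..19  total data length (valid_data_len = this value - 4)
     * bytes 24..31  chip type string (8 bytes)
     * byte  64..    start of the valid data (valid_data_addr)
     */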
+
+static const struct regmap_config aw96103_regmap_confg = {
+ .reg_bits = 16,
+ .val_bits = 32,
+};
+
+static int aw96103_get_diff_raw(struct aw96103 *aw96103, unsigned int chan,
+ int *buf)
+{
+ u32 data;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_DIFF_CH0 + chan * 4, &data);
+ if (ret)
+ return ret;
+ *buf = (int)(data / AW_DATA_PROCESS_FACTOR);
+
+ return 0;
+}
+
+static int aw96103_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = aw96103_get_diff_raw(aw96103, chan->channel, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_read_thresh(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXTH0_CH(chan->channel), val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_out_debounce(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_OUTDEB_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_in_debounce(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_INDEB_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_hysteresis(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_THHYST_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return aw96103_read_thresh(aw96103, chan, val);
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return aw96103_read_out_debounce(aw96103, chan, val);
+ case IIO_EV_DIR_FALLING:
+ return aw96103_read_in_debounce(aw96103, chan, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_HYSTERESIS:
+ return aw96103_read_hysteresis(aw96103, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return regmap_write(aw96103->regmap,
+ AW96103_REG_PROXTH0_CH(chan->channel), val);
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_OUTDEB_MASK,
+ FIELD_PREP(AW96103_OUTDEB_MASK, val));
+
+ case IIO_EV_DIR_FALLING:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_INDEB_MASK,
+ FIELD_PREP(AW96103_INDEB_MASK, val));
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_HYSTERESIS:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_THHYST_MASK,
+ FIELD_PREP(AW96103_THHYST_MASK, val));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ return aw96103->channels_arr[chan->channel].used;
+}
+
+static int aw96103_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ aw96103->channels_arr[chan->channel].used = !!state;
+
+ return regmap_update_bits(aw96103->regmap, AW96103_REG_SCANCTRL0,
+ BIT(chan->channel),
+ state ? BIT(chan->channel) : 0);
+}
+
+static const struct iio_info iio_info = {
+ .read_raw = aw96103_read_raw,
+ .read_event_value = aw96103_read_event_val,
+ .write_event_value = aw96103_write_event_val,
+ .read_event_config = aw96103_read_event_config,
+ .write_event_config = aw96103_write_event_config,
+};
+
+static int aw96103_channel_scan_start(struct aw96103 *aw96103)
+{
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_CMD,
+ AW96103_ACTIVE_MODE);
+ if (ret)
+ return ret;
+
+ return regmap_write(aw96103->regmap, AW96103_REG_IRQEN,
+ aw96103->hostirqen);
+}
+
+static int aw96103_reg_version_comp(struct aw96103 *aw96103,
+ struct aw_bin *aw_bin)
+{
+ u32 blfilt1_data, fw_ver;
+ unsigned char i;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_FWVER2, &fw_ver);
+ if (ret)
+ return ret;
+ /*
+ * If the chip version is AW96103A and the loaded register
+ * configuration file is for AW96103, special handling of the
+ * AW96103_REG_BLRSTRNG_CH0 register is required.
+ */
+ if ((fw_ver != AW96103A) || (aw_bin->chip_type[7] != '\0'))
+ return 0;
+
+ for (i = 0; i < aw96103->max_channels; i++) {
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_BLFILT_CH0 + (AW96103_BLFILT_CH_STEP * i),
+ &blfilt1_data);
+ if (ret)
+ return ret;
+ if (FIELD_GET(AW96103_BLERRTRIG_MASK, blfilt1_data) != 1)
+ return 0;
+
+ ret = regmap_update_bits(aw96103->regmap,
+ AW96103_REG_BLRSTRNG_CH0 + (AW96103_BLFILT_CH_STEP * i),
+ AW96103_BLRSTRNG_MASK, 1 << i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aw96103_bin_valid_loaded(struct aw96103 *aw96103,
+ struct aw_bin *aw_bin_data_s)
+{
+ unsigned int start_addr = aw_bin_data_s->valid_data_addr;
+ u32 i, reg_data;
+ u16 reg_addr;
+ int ret;
+
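+ /*
+ * The bin payload is a sequence of 6-byte records: a little-endian
+ * 16-bit register address followed by a 32-bit register value.
+ */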
+ for (i = 0; i < aw_bin_data_s->valid_data_len;
+ i += 6, start_addr += 6) {
+ reg_addr = get_unaligned_le16(aw_bin_data_s->data + start_addr);
+ reg_data = get_unaligned_le32(aw_bin_data_s->data +
+ start_addr + 2);
+ if ((reg_addr == AW96103_REG_EEDA0) ||
+ (reg_addr == AW96103_REG_EEDA1))
+ continue;
+ if (reg_addr == AW96103_REG_IRQEN) {
+ aw96103->hostirqen = reg_data;
+ continue;
+ }
+ if (reg_addr == AW96103_REG_SCANCTRL0)
+ aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK,
+ reg_data);
+
+ ret = regmap_write(aw96103->regmap, reg_addr, reg_data);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = aw96103_reg_version_comp(aw96103, aw_bin_data_s);
+ if (ret)
+ return ret;
+
+ return aw96103_channel_scan_start(aw96103);
+}
+
+static int aw96103_para_loaded(struct aw96103 *aw96103)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aw96103_reg_default); i += 2) {
+ ret = regmap_write(aw96103->regmap,
+ (u16)aw96103_reg_default[i],
+ (u32)aw96103_reg_default[i + 1]);
+ if (ret)
+ return ret;
+ if (aw96103_reg_default[i] == AW96103_REG_IRQEN)
+ aw96103->hostirqen = aw96103_reg_default[i + 1];
+ else if (aw96103_reg_default[i] == AW96103_REG_SCANCTRL0)
+ aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK,
+ aw96103_reg_default[i + 1]);
+ }
+
+ return aw96103_channel_scan_start(aw96103);
+}
+
+static int aw96103_cfg_all_loaded(const struct firmware *cont,
+ struct aw96103 *aw96103)
+{
+ if (!cont)
+ return -EINVAL;
+
+ struct aw_bin *aw_bin __free(kfree) =
+ kzalloc(cont->size + sizeof(*aw_bin), GFP_KERNEL);
+ if (!aw_bin)
+ return -ENOMEM;
+
+ aw_bin->len = cont->size;
+ memcpy(aw_bin->data, cont->data, cont->size);
+ release_firmware(cont);
+ aw96103_parsing_bin_file(aw_bin);
+
+ return aw96103_bin_valid_loaded(aw96103, aw_bin);
+}
+
+static void aw96103_cfg_update(const struct firmware *fw, void *data)
+{
+ struct aw96103 *aw96103 = data;
+ int ret, i;
+
+ if (!fw || !fw->data) {
+ dev_err(aw96103->dev, "No firmware.\n");
+ return;
+ }
+
+ ret = aw96103_cfg_all_loaded(fw, aw96103);
+ /*
+ * If loading the register configuration file fails,
+ * load the default register configuration in the driver to
+ * ensure the basic functionality of the device.
+ */
+ if (ret) {
+ ret = aw96103_para_loaded(aw96103);
+ if (ret) {
+ dev_err(aw96103->dev, "Failed to load default parameters\n");
+ return;
+ }
+ }
+
+ for (i = 0; i < aw96103->max_channels; i++) {
+ if ((aw96103->chan_en >> i) & 0x01)
+ aw96103->channels_arr[i].used = true;
+ else
+ aw96103->channels_arr[i].used = false;
+ }
+}
+
+static int aw96103_sw_reset(struct aw96103 *aw96103)
+{
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_RESET, 0);
+ /*
+ * After reset, the chip runs its internal initialization sequence,
+ * which takes about 20 ms.
+ */
+ msleep(20);
+
+ return ret;
+}
+
+enum aw96103_irq_trigger_position {
+ FAR = 0,
+ TRIGGER_TH0 = 0x01,
+ TRIGGER_TH1 = 0x03,
+ TRIGGER_TH2 = 0x07,
+ TRIGGER_TH3 = 0x0f,
+};
+
+static irqreturn_t aw96103_irq(int irq, void *data)
+{
+ unsigned int irq_status, curr_status_val, curr_status;
+ struct iio_dev *indio_dev = data;
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ int ret, i;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status);
+ if (ret)
+ return IRQ_HANDLED;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_STAT0, &curr_status_val);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /*
+ * Iteratively analyze the interrupt status of different channels,
+ * with each channel having 4 interrupt states.
+ */
+ for (i = 0; i < aw96103->max_channels; i++) {
+ if (!aw96103->channels_arr[i].used)
+ continue;
+
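+ /*
+ * STAT0 packs four per-channel status bits, one in each byte;
+ * gather bit i of every byte into a 4-bit state for channel i.
+ */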
+ curr_status = (((curr_status_val >> (24 + i)) & 0x1)) |
+ (((curr_status_val >> (16 + i)) & 0x1) << 1) |
+ (((curr_status_val >> (8 + i)) & 0x1) << 2) |
+ (((curr_status_val >> i) & 0x1) << 3);
+ if (aw96103->channels_arr[i].old_irq_status == curr_status)
+ continue;
+
+ switch (curr_status) {
+ case FAR:
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ break;
+ case TRIGGER_TH0:
+ case TRIGGER_TH1:
+ case TRIGGER_TH2:
+ case TRIGGER_TH3:
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ break;
+ default:
+ return IRQ_HANDLED;
+ }
+ aw96103->channels_arr[i].old_irq_status = curr_status;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int aw96103_interrupt_init(struct iio_dev *indio_dev,
+ struct i2c_client *i2c)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ unsigned int irq_status;
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_IRQEN, 0);
+ if (ret)
+ return ret;
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status);
+ if (ret)
+ return ret;
+ ret = devm_request_threaded_irq(aw96103->dev, i2c->irq, NULL,
+ aw96103_irq, IRQF_ONESHOT,
+ "aw96103_irq", indio_dev);
+ if (ret)
+ return ret;
+
+ return regmap_write(aw96103->regmap, AW96103_REG_IRQEN,
+ aw96103->hostirqen);
+}
+
+static int aw96103_wait_chip_init(struct aw96103 *aw96103)
+{
+ unsigned int cnt = 20;
+ u32 reg_data;
+ int ret;
+
+ while (cnt--) {
+ /*
+ * The device should generate an initialization completion
+ * interrupt within 20ms.
+ */
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC,
+ &reg_data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(AW96103_INITOVERIRQ_MASK, reg_data))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int aw96103_read_chipid(struct aw96103 *aw96103)
+{
+ unsigned char cnt = 0;
+ u32 reg_val = 0;
+ int ret;
+
+ while (cnt < 3) {
+ /*
+ * Retry the read a few times, with a short delay between attempts,
+ * so that an occasional communication failure does not make the
+ * chip ID check fail outright.
+ */
+ ret = regmap_read(aw96103->regmap, AW96103_REG_CHIPID,
+ &reg_val);
+ if (ret < 0) {
+ cnt++;
+ fsleep(2000);
+ continue;
+ }
+ break;
+ }
+ if (cnt == 3)
+ return -ETIMEDOUT;
+
+ if (FIELD_GET(AW96103_CHIPID_MASK, reg_val) != AW96103_CHIP_ID)
+ dev_info(aw96103->dev,
+ "unexpected chipid, id=0x%08X\n", reg_val);
+
+ return 0;
+}
+
+static int aw96103_i2c_probe(struct i2c_client *i2c)
+{
+ const struct aw_chip_info *chip_info;
+ struct iio_dev *indio_dev;
+ struct aw96103 *aw96103;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&i2c->dev, sizeof(*aw96103));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ aw96103 = iio_priv(indio_dev);
+ aw96103->dev = &i2c->dev;
+ chip_info = i2c_get_match_data(i2c);
+ aw96103->max_channels = chip_info->num_channels;
+
+ aw96103->regmap = devm_regmap_init_i2c(i2c, &aw96103_regmap_config);
+ if (IS_ERR(aw96103->regmap))
+ return PTR_ERR(aw96103->regmap);
+
+ ret = devm_regulator_get_enable(aw96103->dev, "vcc");
+ if (ret < 0)
+ return ret;
+
+ ret = aw96103_read_chipid(aw96103);
+ if (ret)
+ return ret;
+
+ ret = aw96103_sw_reset(aw96103);
+ if (ret)
+ return ret;
+
+ ret = aw96103_wait_chip_init(aw96103);
+ if (ret)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, "aw96103_0.bin",
+ aw96103->dev, GFP_KERNEL, aw96103,
+ aw96103_cfg_update);
+ if (ret)
+ return ret;
+
+ ret = aw96103_interrupt_init(indio_dev, i2c);
+ if (ret)
+ return ret;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = chip_info->num_channels;
+ indio_dev->channels = chip_info->channels;
+ indio_dev->info = &iio_info;
+ indio_dev->name = chip_info->name;
+
+ return devm_iio_device_register(aw96103->dev, indio_dev);
+}
+
+static const struct of_device_id aw96103_dt_match[] = {
+ {
+ .compatible = "awinic,aw96103",
+ .data = &aw_chip_info_tbl[AW96103_VAL]
+ },
+ {
+ .compatible = "awinic,aw96105",
+ .data = &aw_chip_info_tbl[AW96105_VAL]
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aw96103_dt_match);
+
+static const struct i2c_device_id aw96103_i2c_id[] = {
+ { "aw96103", (kernel_ulong_t)&aw_chip_info_tbl[AW96103_VAL] },
+ { "aw96105", (kernel_ulong_t)&aw_chip_info_tbl[AW96105_VAL] },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw96103_i2c_id);
+
+static struct i2c_driver aw96103_i2c_driver = {
+ .driver = {
+ .name = "aw96103_sensor",
+ .of_match_table = aw96103_dt_match,
+ },
+ .probe = aw96103_i2c_probe,
+ .id_table = aw96103_i2c_id,
+};
+module_i2c_driver(aw96103_i2c_driver);
+
+MODULE_AUTHOR("Wang Shuaijie <wangshuaijie@awinic.com>");
+MODULE_DESCRIPTION("Driver for Awinic AW96103 proximity sensor");
+MODULE_LICENSE("GPL v2");
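As a quick illustration of the interface the driver above exposes (not part of the patch), the read_raw() and event-value handlers surface as standard IIO sysfs attributes. A minimal userspace sketch follows; the device index and attribute paths are assumptions for illustration and depend on probe order:

/* Hypothetical userspace reader for the AW96103 IIO sysfs attributes. */
#include <stdio.h>

static int read_attr(const char *path, long *val)
{
	FILE *f = fopen(path, "r");
	int ok;

	if (!f)
		return -1;
	ok = fscanf(f, "%ld", val) == 1;
	fclose(f);
	return ok ? 0 : -1;
}

int main(void)
{
	/* "iio:device0" is an assumption; scan /sys/bus/iio/devices for the real index. */
	const char *raw = "/sys/bus/iio/devices/iio:device0/in_proximity0_raw";
	const char *thresh =
		"/sys/bus/iio/devices/iio:device0/events/in_proximity0_thresh_either_value";
	long val;

	if (!read_attr(raw, &val))
		printf("channel 0 diff: %ld\n", val);
	if (!read_attr(thresh, &val))
		printf("channel 0 threshold: %ld\n", val);

	return 0;
}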
diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
index 4df506bb8b38..cff57d851762 100644
--- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c
+++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
@@ -6,10 +6,10 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
new file mode 100644
index 000000000000..8b9f84400e00
--- /dev/null
+++ b/drivers/iio/proximity/hx9023s.c
@@ -0,0 +1,1144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 NanjingTianyihexin Electronics Ltd.
+ * http://www.tianyihexin.com
+ *
+ * Driver for NanjingTianyihexin HX9023S Cap Sensor.
+ * Datasheet available at:
+ * http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/types.h>
+
+#define HX9023S_CHIP_ID 0x1D
+#define HX9023S_CH_NUM 5
+#define HX9023S_POS 0x03
+#define HX9023S_NEG 0x02
+#define HX9023S_NOT_CONNECTED 16
+
+#define HX9023S_GLOBAL_CTRL0 0x00
+#define HX9023S_PRF_CFG 0x02
+#define HX9023S_CH0_CFG_7_0 0x03
+#define HX9023S_CH4_CFG_9_8 0x0C
+#define HX9023S_RANGE_7_0 0x0D
+#define HX9023S_RANGE_9_8 0x0E
+#define HX9023S_RANGE_18_16 0x0F
+#define HX9023S_AVG0_NOSR0_CFG 0x10
+#define HX9023S_NOSR12_CFG 0x11
+#define HX9023S_NOSR34_CFG 0x12
+#define HX9023S_AVG12_CFG 0x13
+#define HX9023S_AVG34_CFG 0x14
+#define HX9023S_OFFSET_DAC0_7_0 0x15
+#define HX9023S_OFFSET_DAC4_9_8 0x1E
+#define HX9023S_SAMPLE_NUM_7_0 0x1F
+#define HX9023S_INTEGRATION_NUM_7_0 0x21
+#define HX9023S_CH_NUM_CFG 0x24
+#define HX9023S_LP_ALP_4_CFG 0x29
+#define HX9023S_LP_ALP_1_0_CFG 0x2A
+#define HX9023S_LP_ALP_3_2_CFG 0x2B
+#define HX9023S_UP_ALP_1_0_CFG 0x2C
+#define HX9023S_UP_ALP_3_2_CFG 0x2D
+#define HX9023S_DN_UP_ALP_0_4_CFG 0x2E
+#define HX9023S_DN_ALP_2_1_CFG 0x2F
+#define HX9023S_DN_ALP_4_3_CFG 0x30
+#define HX9023S_RAW_BL_RD_CFG 0x38
+#define HX9023S_INTERRUPT_CFG 0x39
+#define HX9023S_INTERRUPT_CFG1 0x3A
+#define HX9023S_CALI_DIFF_CFG 0x3B
+#define HX9023S_DITHER_CFG 0x3C
+#define HX9023S_DEVICE_ID 0x60
+#define HX9023S_PROX_STATUS 0x6B
+#define HX9023S_PROX_INT_HIGH_CFG 0x6C
+#define HX9023S_PROX_INT_LOW_CFG 0x6D
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 0x80
+#define HX9023S_PROX_LOW_DIFF_CFG_CH0_0 0x88
+#define HX9023S_PROX_LOW_DIFF_CFG_CH3_1 0x8F
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 0x9E
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_1 0x9F
+#define HX9023S_PROX_LOW_DIFF_CFG_CH4_0 0xA2
+#define HX9023S_PROX_LOW_DIFF_CFG_CH4_1 0xA3
+#define HX9023S_CAP_INI_CH4_0 0xB3
+#define HX9023S_LP_DIFF_CH4_2 0xBA
+#define HX9023S_RAW_BL_CH4_0 0xB5
+#define HX9023S_LP_DIFF_CH4_0 0xB8
+#define HX9023S_DSP_CONFIG_CTRL1 0xC8
+#define HX9023S_CAP_INI_CH0_0 0xE0
+#define HX9023S_RAW_BL_CH0_0 0xE8
+#define HX9023S_LP_DIFF_CH0_0 0xF4
+#define HX9023S_LP_DIFF_CH3_2 0xFF
+
+#define HX9023S_DATA_LOCK_MASK BIT(4)
+#define HX9023S_INTERRUPT_MASK GENMASK(9, 0)
+#define HX9023S_PROX_DEBOUNCE_MASK GENMASK(3, 0)
+
+struct hx9023s_ch_data {
+ s16 raw; /* Raw Data */
+ s16 lp; /* Low Pass Filter Data */
+ s16 bl; /* Base Line Data */
+ s16 diff; /* Difference of Low Pass Data and Base Line Data */
+
+ struct {
+ unsigned int near;
+ unsigned int far;
+ } thres;
+
+ u16 dac;
+ u8 channel_positive;
+ u8 channel_negative;
+ bool sel_bl;
+ bool sel_raw;
+ bool sel_diff;
+ bool sel_lp;
+ bool enable;
+};
+
+struct hx9023s_data {
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ unsigned long chan_prox_stat;
+ unsigned long chan_read;
+ unsigned long chan_event;
+ unsigned long ch_en_stat;
+ unsigned long chan_in_use;
+ unsigned int prox_state_reg;
+ bool trigger_enabled;
+
+ struct {
+ __le16 channels[HX9023S_CH_NUM];
+ s64 ts __aligned(8);
+ } buffer;
+
+ /*
+ * Serialize access to the registers below:
+ * HX9023S_PROX_INT_LOW_CFG,
+ * HX9023S_PROX_INT_HIGH_CFG,
+ * HX9023S_INTERRUPT_CFG,
+ * HX9023S_CH_NUM_CFG
+ * and to the channel configuration used by
+ * hx9023s_push_events() and hx9023s_trigger_handler().
+ */
+ struct mutex mutex;
+ struct hx9023s_ch_data ch_data[HX9023S_CH_NUM];
+};
+
+static const struct reg_sequence hx9023s_reg_init_list[] = {
+ /* scan period */
+ REG_SEQ0(HX9023S_PRF_CFG, 0x17),
+
+ /* full scale of conversion phase of each channel */
+ REG_SEQ0(HX9023S_RANGE_7_0, 0x11),
+ REG_SEQ0(HX9023S_RANGE_9_8, 0x02),
+ REG_SEQ0(HX9023S_RANGE_18_16, 0x00),
+
+ /* ADC average number and OSR number of each channel */
+ REG_SEQ0(HX9023S_AVG0_NOSR0_CFG, 0x71),
+ REG_SEQ0(HX9023S_NOSR12_CFG, 0x44),
+ REG_SEQ0(HX9023S_NOSR34_CFG, 0x00),
+ REG_SEQ0(HX9023S_AVG12_CFG, 0x33),
+ REG_SEQ0(HX9023S_AVG34_CFG, 0x00),
+
+ /* sample & integration frequency of the ADC */
+ REG_SEQ0(HX9023S_SAMPLE_NUM_7_0, 0x65),
+ REG_SEQ0(HX9023S_INTEGRATION_NUM_7_0, 0x65),
+
+ /* coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_LP_ALP_1_0_CFG, 0x22),
+ REG_SEQ0(HX9023S_LP_ALP_3_2_CFG, 0x22),
+ REG_SEQ0(HX9023S_LP_ALP_4_CFG, 0x02),
+
+ /* up coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_UP_ALP_1_0_CFG, 0x88),
+ REG_SEQ0(HX9023S_UP_ALP_3_2_CFG, 0x88),
+ REG_SEQ0(HX9023S_DN_UP_ALP_0_4_CFG, 0x18),
+
+ /* down coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_DN_ALP_2_1_CFG, 0x11),
+ REG_SEQ0(HX9023S_DN_ALP_4_3_CFG, 0x11),
+
+ /* select which data the Data Mux register outputs */
+ REG_SEQ0(HX9023S_RAW_BL_RD_CFG, 0xF0),
+
+ /* enable the interrupt function */
+ REG_SEQ0(HX9023S_INTERRUPT_CFG, 0xFF),
+ REG_SEQ0(HX9023S_INTERRUPT_CFG1, 0x3B),
+ REG_SEQ0(HX9023S_DITHER_CFG, 0x21),
+
+ /* threshold of the offset compensation */
+ REG_SEQ0(HX9023S_CALI_DIFF_CFG, 0x07),
+
+ /* proximity persistence count (near & far) */
+ REG_SEQ0(HX9023S_PROX_INT_HIGH_CFG, 0x01),
+ REG_SEQ0(HX9023S_PROX_INT_LOW_CFG, 0x01),
+
+ /* disable the data lock */
+ REG_SEQ0(HX9023S_DSP_CONFIG_CTRL1, 0x00),
+};
+
+static const struct iio_event_spec hx9023s_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define HX9023S_CHANNEL(idx) \
+{ \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .indexed = 1, \
+ .channel = idx, \
+ .address = 0, \
+ .event_spec = hx9023s_events, \
+ .num_event_specs = ARRAY_SIZE(hx9023s_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec hx9023s_channels[] = {
+ HX9023S_CHANNEL(0),
+ HX9023S_CHANNEL(1),
+ HX9023S_CHANNEL(2),
+ HX9023S_CHANNEL(3),
+ HX9023S_CHANNEL(4),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
+
+static const unsigned int hx9023s_samp_freq_table[] = {
+ 2, 2, 4, 6, 8, 10, 14, 18, 22, 26,
+ 30, 34, 38, 42, 46, 50, 56, 62, 68, 74,
+ 80, 90, 100, 200, 300, 400, 600, 800, 1000, 2000,
+ 3000, 4000,
+};
+
+static const struct regmap_range hx9023s_rd_reg_ranges[] = {
+ regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2),
+};
+
+static const struct regmap_range hx9023s_wr_reg_ranges[] = {
+ regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2),
+};
+
+static const struct regmap_range hx9023s_volatile_reg_ranges[] = {
+ regmap_reg_range(HX9023S_CAP_INI_CH4_0, HX9023S_LP_DIFF_CH4_2),
+ regmap_reg_range(HX9023S_CAP_INI_CH0_0, HX9023S_LP_DIFF_CH3_2),
+ regmap_reg_range(HX9023S_PROX_STATUS, HX9023S_PROX_STATUS),
+};
+
+static const struct regmap_access_table hx9023s_rd_regs = {
+ .yes_ranges = hx9023s_rd_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_rd_reg_ranges),
+};
+
+static const struct regmap_access_table hx9023s_wr_regs = {
+ .yes_ranges = hx9023s_wr_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_wr_reg_ranges),
+};
+
+static const struct regmap_access_table hx9023s_volatile_regs = {
+ .yes_ranges = hx9023s_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_volatile_reg_ranges),
+};
+
+static const struct regmap_config hx9023s_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_MAPLE,
+ .rd_table = &hx9023s_rd_regs,
+ .wr_table = &hx9023s_wr_regs,
+ .volatile_table = &hx9023s_volatile_regs,
+};
+
+static int hx9023s_interrupt_enable(struct hx9023s_data *data)
+{
+ return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG,
+ HX9023S_INTERRUPT_MASK, HX9023S_INTERRUPT_MASK);
+}
+
+static int hx9023s_interrupt_disable(struct hx9023s_data *data)
+{
+ return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG,
+ HX9023S_INTERRUPT_MASK, 0x00);
+}
+
+static int hx9023s_data_lock(struct hx9023s_data *data, bool locked)
+{
+ if (locked)
+ return regmap_update_bits(data->regmap,
+ HX9023S_DSP_CONFIG_CTRL1,
+ HX9023S_DATA_LOCK_MASK,
+ HX9023S_DATA_LOCK_MASK);
+ else
+ return regmap_update_bits(data->regmap,
+ HX9023S_DSP_CONFIG_CTRL1,
+ HX9023S_DATA_LOCK_MASK, 0);
+}
+
+static int hx9023s_ch_cfg(struct hx9023s_data *data)
+{
+ __le16 reg_list[HX9023S_CH_NUM];
+ u8 ch_pos[HX9023S_CH_NUM];
+ u8 ch_neg[HX9023S_CH_NUM];
+ /* Bit positions corresponding to input pin connections */
+ u8 conn_cs[HX9023S_CH_NUM] = { 0, 2, 4, 6, 8 };
+ unsigned int i;
+ u16 reg;
+
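+ /*
+ * Each 16-bit channel config packs a 2-bit selector per input pin:
+ * 0x3 marks the positive input, 0x2 the negative input.
+ */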
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ ch_pos[i] = data->ch_data[i].channel_positive == HX9023S_NOT_CONNECTED ?
+ HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_positive];
+ ch_neg[i] = data->ch_data[i].channel_negative == HX9023S_NOT_CONNECTED ?
+ HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_negative];
+
+ reg = (HX9023S_POS << ch_pos[i]) | (HX9023S_NEG << ch_neg[i]);
+ reg_list[i] = cpu_to_le16(reg);
+ }
+
+ return regmap_bulk_write(data->regmap, HX9023S_CH0_CFG_7_0, reg_list,
+ sizeof(reg_list));
+}
+
+static int hx9023s_write_far_debounce(struct hx9023s_data *data, int val)
+{
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, HX9023S_PROX_INT_LOW_CFG,
+ HX9023S_PROX_DEBOUNCE_MASK,
+ FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val));
+}
+
+static int hx9023s_write_near_debounce(struct hx9023s_data *data, int val)
+{
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, HX9023S_PROX_INT_HIGH_CFG,
+ HX9023S_PROX_DEBOUNCE_MASK,
+ FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val));
+}
+
+static int hx9023s_read_far_debounce(struct hx9023s_data *data, int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_PROX_INT_LOW_CFG, val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val);
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_read_near_debounce(struct hx9023s_data *data, int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_PROX_INT_HIGH_CFG, val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val);
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_get_thres_near(struct hx9023s_data *data, u8 ch, int *val)
+{
+ int ret;
+ __le16 buf;
+ unsigned int reg, tmp;
+
+ reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2);
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32;
+ data->ch_data[ch].thres.near = tmp;
+ *val = tmp;
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_get_thres_far(struct hx9023s_data *data, u8 ch, int *val)
+{
+ int ret;
+ __le16 buf;
+ unsigned int reg, tmp;
+
+ reg = (ch == 4) ? HX9023S_PROX_LOW_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2);
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32;
+ data->ch_data[ch].thres.far = tmp;
+ *val = tmp;
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_set_thres_near(struct hx9023s_data *data, u8 ch, int val)
+{
+ __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0));
+ unsigned int reg;
+
+ data->ch_data[ch].thres.near = ((val / 32) & GENMASK(9, 0)) * 32;
+ reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2);
+
+ return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16));
+}
+
+static int hx9023s_set_thres_far(struct hx9023s_data *data, u8 ch, int val)
+{
+ __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0));
+ unsigned int reg;
+
+ data->ch_data[ch].thres.far = ((val / 32) & GENMASK(9, 0)) * 32;
+ reg = (ch == 4) ? HX9023S_PROX_LOW_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2);
+
+ return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16));
+}
+
+static int hx9023s_get_prox_state(struct hx9023s_data *data)
+{
+ return regmap_read(data->regmap, HX9023S_PROX_STATUS, &data->prox_state_reg);
+}
+
+static int hx9023s_data_select(struct hx9023s_data *data)
+{
+ int ret;
+ unsigned int i, buf;
+ unsigned long tmp;
+
+ ret = regmap_read(data->regmap, HX9023S_RAW_BL_RD_CFG, &buf);
+ if (ret)
+ return ret;
+
+ tmp = buf;
+ for (i = 0; i < 4; i++) {
+ data->ch_data[i].sel_diff = test_bit(i, &tmp);
+ data->ch_data[i].sel_lp = !data->ch_data[i].sel_diff;
+ data->ch_data[i].sel_bl = test_bit(i + 4, &tmp);
+ data->ch_data[i].sel_raw = !data->ch_data[i].sel_bl;
+ }
+
+ ret = regmap_read(data->regmap, HX9023S_INTERRUPT_CFG1, &buf);
+ if (ret)
+ return ret;
+
+ tmp = buf;
+ data->ch_data[4].sel_diff = test_bit(2, &tmp);
+ data->ch_data[4].sel_lp = !data->ch_data[4].sel_diff;
+ data->ch_data[4].sel_bl = test_bit(3, &tmp);
+ data->ch_data[4].sel_raw = !data->ch_data[4].sel_bl;
+
+ return 0;
+}
+
+static int hx9023s_sample(struct hx9023s_data *data)
+{
+ int ret;
+ unsigned int i;
+ u8 buf[HX9023S_CH_NUM * 3];
+ u16 value;
+
+ ret = hx9023s_data_lock(data, true);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_data_select(data);
+ if (ret)
+ goto err;
+
+ /* 3 bytes for each of channels 0 to 3 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH0_0, buf, 12);
+ if (ret)
+ goto err;
+
+ /* 3 bytes for channel 4 */
+ ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH4_0, buf + 12, 3);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 3 + 1]);
+ data->ch_data[i].raw = 0;
+ data->ch_data[i].bl = 0;
+ if (data->ch_data[i].sel_raw)
+ data->ch_data[i].raw = value;
+ if (data->ch_data[i].sel_bl)
+ data->ch_data[i].bl = value;
+ }
+
+ /* 3 bytes for each of channels 0 to 3 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH0_0, buf, 12);
+ if (ret)
+ goto err;
+
+ /* 3 bytes for channel 4 */
+ ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH4_0, buf + 12, 3);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 3 + 1]);
+ data->ch_data[i].lp = 0;
+ data->ch_data[i].diff = 0;
+ if (data->ch_data[i].sel_lp)
+ data->ch_data[i].lp = value;
+ if (data->ch_data[i].sel_diff)
+ data->ch_data[i].diff = value;
+ }
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ if (data->ch_data[i].sel_lp && data->ch_data[i].sel_bl)
+ data->ch_data[i].diff = data->ch_data[i].lp - data->ch_data[i].bl;
+ }
+
+ /* 2 bytes for each of channels 0 to 4 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_OFFSET_DAC0_7_0, buf, 10);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 2]);
+ value = FIELD_GET(GENMASK(11, 0), value);
+ data->ch_data[i].dac = value;
+ }
+
+err:
+ return hx9023s_data_lock(data, false);
+}
+
+static int hx9023s_ch_en(struct hx9023s_data *data, u8 ch_id, bool en)
+{
+ int ret;
+ unsigned int buf;
+
+ ret = regmap_read(data->regmap, HX9023S_CH_NUM_CFG, &buf);
+ if (ret)
+ return ret;
+
+ data->ch_en_stat = buf;
+ if (en && data->ch_en_stat == 0)
+ data->prox_state_reg = 0;
+
+ data->ch_data[ch_id].enable = en;
+ __assign_bit(ch_id, &data->ch_en_stat, en);
+
+ return regmap_write(data->regmap, HX9023S_CH_NUM_CFG, data->ch_en_stat);
+}
+
+static int hx9023s_property_get(struct hx9023s_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 array[2];
+ u32 i, reg, temp;
+ int ret;
+
+ data->chan_in_use = 0;
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ data->ch_data[i].channel_positive = HX9023S_NOT_CONNECTED;
+ data->ch_data[i].channel_negative = HX9023S_NOT_CONNECTED;
+ }
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= HX9023S_CH_NUM)
+ return dev_err_probe(dev, ret < 0 ? ret : -EINVAL,
+ "Failed to read reg\n");
+ __set_bit(reg, &data->chan_in_use);
+
+ ret = fwnode_property_read_u32(child, "single-channel", &temp);
+ if (ret == 0) {
+ data->ch_data[reg].channel_positive = temp;
+ data->ch_data[reg].channel_negative = HX9023S_NOT_CONNECTED;
+ } else {
+ ret = fwnode_property_read_u32_array(child, "diff-channels",
+ array, ARRAY_SIZE(array));
+ if (ret == 0) {
+ data->ch_data[reg].channel_positive = array[0];
+ data->ch_data[reg].channel_negative = array[1];
+ } else {
+ return dev_err_probe(dev, ret,
+ "Property read failed: %d\n",
+ reg);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int hx9023s_update_chan_en(struct hx9023s_data *data,
+ unsigned long chan_read,
+ unsigned long chan_event)
+{
+ unsigned int i;
+ unsigned long channels = chan_read | chan_event;
+
+ if ((data->chan_read | data->chan_event) != channels) {
+ for_each_set_bit(i, &channels, HX9023S_CH_NUM)
+ hx9023s_ch_en(data, i, test_bit(i, &data->chan_in_use));
+ for_each_clear_bit(i, &channels, HX9023S_CH_NUM)
+ hx9023s_ch_en(data, i, false);
+ }
+
+ data->chan_read = chan_read;
+ data->chan_event = chan_event;
+
+ return 0;
+}
+
+static int hx9023s_get_proximity(struct hx9023s_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ ret = hx9023s_sample(data);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret)
+ return ret;
+
+ *val = data->ch_data[chan->channel].diff;
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_get_samp_freq(struct hx9023s_data *data, int *val, int *val2)
+{
+ int ret;
+ unsigned int odr, index;
+
+ ret = regmap_read(data->regmap, HX9023S_PRF_CFG, &index);
+ if (ret)
+ return ret;
+
+ odr = hx9023s_samp_freq_table[index];
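+ /* Table entries are scan periods in ms; report the rate as Hz plus micro-Hz. */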
+ *val = KILO / odr;
+ *val2 = div_u64((KILO % odr) * MICRO, odr);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int hx9023s_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_get_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hx9023s_get_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_set_samp_freq(struct hx9023s_data *data, int val, int val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int i, period_ms;
+
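+ /*
+ * val/val2 encode the requested rate in Hz plus micro-Hz;
+ * NANO divided by the rate in micro-Hz gives the period in ms.
+ */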
+ period_ms = div_u64(NANO, (val * MEGA + val2));
+
+ for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) {
+ if (period_ms == hx9023s_samp_freq_table[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(hx9023s_samp_freq_table)) {
+ dev_err(dev, "Period %ums not supported\n", period_ms);
+ return -EINVAL;
+ }
+
+ return regmap_write(data->regmap, HX9023S_PRF_CFG, i);
+}
+
+static int hx9023s_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return hx9023s_set_samp_freq(data, val, val2);
+}
+
+static irqreturn_t hx9023s_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void hx9023s_push_events(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ unsigned long prox_changed;
+ unsigned int chan;
+ int ret;
+
+ ret = hx9023s_sample(data);
+ if (ret)
+ return;
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret)
+ return;
+
+ prox_changed = (data->chan_prox_stat ^ data->prox_state_reg) & data->chan_event;
+ for_each_set_bit(chan, &prox_changed, HX9023S_CH_NUM) {
+ unsigned int dir;
+
+ dir = (data->prox_state_reg & BIT(chan)) ?
+ IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir),
+ timestamp);
+ }
+ data->chan_prox_stat = data->prox_state_reg;
+}
+
+static irqreturn_t hx9023s_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ hx9023s_push_events(indio_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int hx9023s_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_get_thres_far(data, chan->channel, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_get_thres_near(data, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_read_far_debounce(data, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_read_near_debounce(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_set_thres_far(data, chan->channel, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_set_thres_near(data, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_write_far_debounce(data, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_write_near_debounce(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ return test_bit(chan->channel, &data->chan_event);
+}
+
+static int hx9023s_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (test_bit(chan->channel, &data->chan_in_use)) {
+ hx9023s_ch_en(data, chan->channel, !!state);
+ __assign_bit(chan->channel, &data->chan_event,
+ data->ch_data[chan->channel].enable);
+ }
+
+ return 0;
+}
+
+static const struct iio_info hx9023s_info = {
+ .read_raw = hx9023s_read_raw,
+ .write_raw = hx9023s_write_raw,
+ .read_event_value = hx9023s_read_event_val,
+ .write_event_value = hx9023s_write_event_val,
+ .read_event_config = hx9023s_read_event_config,
+ .write_event_config = hx9023s_write_event_config,
+};
+
+static int hx9023s_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ if (state)
+ hx9023s_interrupt_enable(data);
+ else if (!data->chan_read)
+ hx9023s_interrupt_disable(data);
+ data->trigger_enabled = state;
+
+ return 0;
+}
+
+static const struct iio_trigger_ops hx9023s_trigger_ops = {
+ .set_trigger_state = hx9023s_set_trigger_state,
+};
+
+static irqreturn_t hx9023s_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int bit, index, i = 0;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = hx9023s_sample(data);
+ if (ret) {
+ dev_warn(dev, "sampling failed\n");
+ goto out;
+ }
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret) {
+ dev_warn(dev, "get prox failed\n");
+ goto out;
+ }
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ index = indio_dev->channels[bit].channel;
+ data->buffer.channels[i++] = cpu_to_le16(data->ch_data[index].diff);
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int hx9023s_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ unsigned long channels = 0;
+ unsigned int bit;
+
+ guard(mutex)(&data->mutex);
+ iio_for_each_active_channel(indio_dev, bit)
+ __set_bit(indio_dev->channels[bit].channel, &channels);
+
+ hx9023s_update_chan_en(data, channels, data->chan_event);
+
+ return 0;
+}
+
+static int hx9023s_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ hx9023s_update_chan_en(data, 0, data->chan_event);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops hx9023s_buffer_setup_ops = {
+ .preenable = hx9023s_buffer_preenable,
+ .postdisable = hx9023s_buffer_postdisable,
+};
+
+static int hx9023s_id_check(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_DEVICE_ID, &id);
+ if (ret)
+ return ret;
+
+ if (id != HX9023S_CHIP_ID)
+ dev_warn(dev, "Unexpected chip ID, assuming compatible\n");
+
+ return 0;
+}
+
+static int hx9023s_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct hx9023s_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ mutex_init(&data->mutex);
+
+ data->regmap = devm_regmap_init_i2c(client, &hx9023s_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "regmap init failed\n");
+
+ ret = hx9023s_property_get(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "DT property parsing failed\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "regulator get failed\n");
+
+ ret = hx9023s_id_check(indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "id check failed\n");
+
+ indio_dev->name = "hx9023s";
+ indio_dev->channels = hx9023s_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hx9023s_channels);
+ indio_dev->info = &hx9023s_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
+ ARRAY_SIZE(hx9023s_reg_init_list));
+ if (ret)
+ return dev_err_probe(dev, ret, "device init failed\n");
+
+ ret = hx9023s_ch_cfg(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "channel config failed\n");
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "regcache sync failed\n");
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq,
+ hx9023s_irq_handler,
+ hx9023s_irq_thread_handler,
+ IRQF_ONESHOT,
+ "hx9023s_event", indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "irq request failed\n");
+
+ data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "iio trigger alloc failed\n");
+
+ data->trig->ops = &hx9023s_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(dev, data->trig);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio trigger register failed\n");
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ hx9023s_trigger_handler,
+ &hx9023s_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio triggered buffer setup failed\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int hx9023s_suspend(struct device *dev)
+{
+ struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev));
+
+ guard(mutex)(&data->mutex);
+ hx9023s_interrupt_disable(data);
+
+ return 0;
+}
+
+static int hx9023s_resume(struct device *dev)
+{
+ struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev));
+
+ guard(mutex)(&data->mutex);
+ if (data->trigger_enabled)
+ hx9023s_interrupt_enable(data);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(hx9023s_pm_ops, hx9023s_suspend,
+ hx9023s_resume);
+
+static const struct of_device_id hx9023s_of_match[] = {
+ { .compatible = "tyhx,hx9023s" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hx9023s_of_match);
+
+static const struct i2c_device_id hx9023s_id[] = {
+ { "hx9023s" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, hx9023s_id);
+
+static struct i2c_driver hx9023s_driver = {
+ .driver = {
+ .name = "hx9023s",
+ .of_match_table = hx9023s_of_match,
+ .pm = &hx9023s_pm_ops,
+
+ /*
+ * Writing hx9023s_reg_init_list and running hx9023s_ch_cfg() at
+ * probe time involve many I2C transfers. Prefer asynchronous
+ * probing so we don't delay boot when built into the kernel.
+ */
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = hx9023s_probe,
+ .id_table = hx9023s_id,
+};
+module_i2c_driver(hx9023s_driver);
+
+MODULE_AUTHOR("Yasin Lee <yasin.lee.x@gmail.com>");
+MODULE_DESCRIPTION("Driver for TYHX HX9023S SAR sensor");
+MODULE_LICENSE("GPL");
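One non-obvious detail in hx9023s_set_thres_near()/hx9023s_set_thres_far() above: thresholds are stored as a 10-bit field in units of 32 counts, so whatever userspace writes is quantized (and wraps, rather than clamps, above 32 * 1023). A standalone sketch of that arithmetic, lifted out purely for illustration:

/* Standalone illustration of the HX9023S threshold quantization. */
#include <stdio.h>

#define HX9023S_THRES_FIELD_MASK 0x3ffU	/* 10-bit register field, i.e. GENMASK(9, 0) */

/* Same expression the driver applies before writing the threshold registers. */
static unsigned int hx9023s_effective_thres(unsigned int requested)
{
	return ((requested / 32) & HX9023S_THRES_FIELD_MASK) * 32;
}

int main(void)
{
	const unsigned int req[] = { 100, 1000, 32736, 40000 };

	for (unsigned int i = 0; i < sizeof(req) / sizeof(req[0]); i++)
		printf("requested %5u -> effective %5u\n",
		       req[i], hx9023s_effective_thres(req[i]));

	return 0;
}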
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 92630812ece2..3f4eace05cfc 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -654,8 +654,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = sx9500_read_prox_data(data, &indio_dev->channels[bit],
&val);
if (ret < 0)
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index a95e9814aaf2..71aa6dced7d3 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -369,8 +369,7 @@ static irqreturn_t sx_common_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = data->chip_info->ops.read_prox_data(data,
&indio_dev->channels[bit],
&val);
@@ -398,8 +397,7 @@ static int sx_common_buffer_preenable(struct iio_dev *indio_dev)
int bit, ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, bit)
__set_bit(indio_dev->channels[bit].channel, &channels);
ret = sx_common_update_chan_en(data, channels, data->chan_event);
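The sx9500 and sx_common hunks above (like the iio_for_each_active_channel() uses in the new drivers earlier) are mechanical conversions: the open-coded for_each_set_bit() walk over active_scan_mask is replaced by a helper so drivers stop reaching into masklength directly. As a sketch of the equivalence only (the real macro is provided by <linux/iio/iio.h>), the helper stands in for the loop being deleted here:

#define iio_for_each_active_channel_sketch(indio_dev, bit) \
	for_each_set_bit((bit), (indio_dev)->active_scan_mask, \
			 (indio_dev)->masklength)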
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dc57d07a1f45..5dbb248e9625 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1817,7 +1817,6 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
- .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index f760dfffa188..fd67fc9fe85a 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1082,7 +1082,6 @@ static const struct file_operations umad_fops = {
#endif
.open = ib_umad_open,
.release = ib_umad_close,
- .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -1150,7 +1149,6 @@ static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
.release = ib_umad_sm_close,
- .llseek = no_llseek,
};
static struct ib_umad_port *get_port(struct ib_device *ibdev,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index bc099287de9a..94454186ed81 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -353,7 +353,6 @@ const struct file_operations uverbs_event_fops = {
.poll = ib_uverbs_comp_event_poll,
.release = uverbs_uobject_fd_release,
.fasync = ib_uverbs_comp_event_fasync,
- .llseek = no_llseek,
};
const struct file_operations uverbs_async_event_fops = {
@@ -362,7 +361,6 @@ const struct file_operations uverbs_async_event_fops = {
.poll = ib_uverbs_async_event_poll,
.release = uverbs_async_event_release,
.fasync = ib_uverbs_async_event_fasync,
- .llseek = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -991,7 +989,6 @@ static const struct file_operations uverbs_fops = {
.write = ib_uverbs_write,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1002,7 +999,6 @@ static const struct file_operations uverbs_mmap_fops = {
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 35d2382ee618..ec9ee59fcf0c 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -203,7 +203,6 @@ static const struct file_operations __fault_opcodes_fops = {
.open = fault_opcodes_open,
.read = fault_opcodes_read,
.write = fault_opcodes_write,
- .llseek = no_llseek
};
void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 253fea374a72..69999d8d24f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2673,7 +2673,6 @@ static const struct file_operations devx_async_cmd_event_fops = {
.read = devx_async_cmd_event_read,
.poll = devx_async_cmd_event_poll,
.release = uverbs_uobject_fd_release,
- .llseek = no_llseek,
};
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
@@ -2788,7 +2787,6 @@ static const struct file_operations devx_async_event_fops = {
.read = devx_async_event_read,
.poll = devx_async_event_poll,
.release = uverbs_uobject_fd_release,
- .llseek = no_llseek,
};
static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index eb4906552ac8..b5cbb57ee5f6 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -1299,7 +1299,6 @@ static const struct file_operations evdev_fops = {
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 5824bca02e5a..ba2b17288bcd 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -718,7 +718,6 @@ static const struct file_operations joydev_fops = {
.compat_ioctl = joydev_compat_ioctl,
#endif
.fasync = joydev_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index cf25177b4830..707c5a8ae736 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -1007,7 +1007,6 @@ static const struct file_operations applespi_tp_dim_fops = {
.owner = THIS_MODULE,
.open = applespi_tp_dim_open,
.read = applespi_tp_dim_read,
- .llseek = no_llseek,
};
static void report_finger_data(struct input_dev *input, int slot,
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 6b811d6bf625..dcbc50304a5a 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -6,20 +6,13 @@
*
* Based on the pxa27x matrix keypad controller by Rodolfo Giometti.
*
- * NOTE:
- *
- * The 3-key reset is triggered by pressing the 3 keys in
- * Row 0, Columns 2, 4, and 7 at the same time. This action can
- * be disabled by setting the EP93XX_KEYPAD_DISABLE_3_KEY flag.
- *
- * Normal operation for the matrix does not autorepeat the key press.
- * This action can be enabled by setting the EP93XX_KEYPAD_AUTOREPEAT
- * flag.
*/
#include <linux/bits.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,7 +20,6 @@
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/soc/cirrus/ep93xx.h>
-#include <linux/platform_data/keypad-ep93xx.h>
#include <linux/pm_wakeirq.h>
/*
@@ -61,12 +53,16 @@
#define KEY_REG_KEY1_MASK GENMASK(5, 0)
#define KEY_REG_KEY1_SHIFT 0
+#define EP93XX_MATRIX_ROWS (8)
+#define EP93XX_MATRIX_COLS (8)
+
#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
struct ep93xx_keypad {
- struct ep93xx_keypad_platform_data *pdata;
struct input_dev *input_dev;
struct clk *clk;
+ unsigned int debounce;
+ u16 prescale;
void __iomem *mmio_base;
@@ -133,23 +129,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id)
static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
{
- struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- clk_set_rate(keypad->clk, pdata->clk_rate);
-
- if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
- val |= KEY_INIT_DIS3KY;
- if (pdata->flags & EP93XX_KEYPAD_DIAG_MODE)
- val |= KEY_INIT_DIAG;
- if (pdata->flags & EP93XX_KEYPAD_BACK_DRIVE)
- val |= KEY_INIT_BACK;
- if (pdata->flags & EP93XX_KEYPAD_TEST_MODE)
- val |= KEY_INIT_T2;
-
- val |= ((pdata->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK);
+ val |= (keypad->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK;
- val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK);
+ val |= (keypad->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK;
__raw_writel(val, keypad->mmio_base + KEY_INIT);
}
@@ -220,17 +204,10 @@ static int ep93xx_keypad_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
-static void ep93xx_keypad_release_gpio_action(void *_pdev)
-{
- struct platform_device *pdev = _pdev;
-
- ep93xx_keypad_release_gpio(pdev);
-}
-
static int ep93xx_keypad_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ep93xx_keypad *keypad;
- const struct matrix_keymap_data *keymap_data;
struct input_dev *input_dev;
int err;
@@ -238,14 +215,6 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
- keypad->pdata = dev_get_platdata(&pdev->dev);
- if (!keypad->pdata)
- return -EINVAL;
-
- keymap_data = keypad->pdata->keymap_data;
- if (!keymap_data)
- return -EINVAL;
-
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0)
return keypad->irq;
@@ -254,19 +223,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (IS_ERR(keypad->mmio_base))
return PTR_ERR(keypad->mmio_base);
- err = ep93xx_keypad_acquire_gpio(pdev);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(&pdev->dev,
- ep93xx_keypad_release_gpio_action, pdev);
- if (err)
- return err;
-
keypad->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
+ device_property_read_u32(dev, "debounce-delay-ms", &keypad->debounce);
+ device_property_read_u16(dev, "cirrus,prescale", &keypad->prescale);
+
input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev)
return -ENOMEM;
@@ -278,13 +241,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
input_dev->open = ep93xx_keypad_open;
input_dev->close = ep93xx_keypad_close;
- err = matrix_keypad_build_keymap(keymap_data, NULL,
+ err = matrix_keypad_build_keymap(NULL, NULL,
EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS,
keypad->keycodes, input_dev);
if (err)
return err;
- if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT)
+ if (device_property_read_bool(&pdev->dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_drvdata(input_dev, keypad);
@@ -313,10 +276,17 @@ static void ep93xx_keypad_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(&pdev->dev);
}
+static const struct of_device_id ep93xx_keypad_of_ids[] = {
+ { .compatible = "cirrus,ep9307-keypad" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_keypad_of_ids);
+
static struct platform_driver ep93xx_keypad_driver = {
.driver = {
.name = "ep93xx-keypad",
.pm = pm_sleep_ptr(&ep93xx_keypad_pm_ops),
+ .of_match_table = ep93xx_keypad_of_ids,
},
.probe = ep93xx_keypad_probe,
.remove_new = ep93xx_keypad_remove,
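
With the platform data removed, the probe above relies entirely on generic firmware properties ("debounce-delay-ms", "cirrus,prescale", "autorepeat") and on the new "cirrus,ep9307-keypad" compatible. As a reminder of the pattern it depends on, below is a minimal sketch (not part of the patch; the struct and function names are only illustrative) of how the device_property_*() helpers behave: they return 0 on success and leave the destination untouched on error, which is why the converted probe can call them without checking the return value and keep the zero-initialized defaults when a property is absent.

/*
 * Illustrative only -- not part of the patch above. Shows the usual
 * device_property_*() pattern the converted probe relies on.
 */
#include <linux/property.h>

struct demo_keypad_cfg {
	u32 debounce;	/* stays 0 if the property is absent */
	u16 prescale;
	bool autorepeat;
};

static void demo_read_props(struct device *dev, struct demo_keypad_cfg *cfg)
{
	/* Best-effort reads: absent properties keep the defaults. */
	device_property_read_u32(dev, "debounce-delay-ms", &cfg->debounce);
	device_property_read_u16(dev, "cirrus,prescale", &cfg->prescale);

	/* Boolean properties are simply present-or-absent. */
	cfg->autorepeat = device_property_read_bool(dev, "autorepeat");
}
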
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 445856c9127a..2c51ea9d01d7 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -1132,7 +1132,6 @@ static const struct file_operations uinput_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = uinput_compat_ioctl,
#endif
- .llseek = no_llseek,
};
static struct miscdevice uinput_misc = {
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index a88e2eee55c3..1ab12b247f98 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -267,7 +267,6 @@ static const struct file_operations userio_fops = {
.read = userio_char_read,
.write = userio_char_write,
.poll = userio_char_poll,
- .llseek = no_llseek,
};
static struct miscdevice userio_misc = {
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index f788db15cd76..b956e4050f38 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL);
if (!onecell)
return ERR_PTR(-ENOMEM);
+ onecell->num_nodes = 2 * num_clocks;
qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL);
if (!qp)
@@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev,
onecell->nodes[j++] = node;
}
- onecell->num_nodes = j;
-
ret = icc_provider_register(provider);
if (ret)
goto err;
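
The onecell descriptor now advertises its node count as soon as it is allocated instead of after the registration loop; the count handed to of_icc_xlate_onecell() must always describe the same array that was sized with struct_size(). A minimal sketch of that allocate-then-set pattern follows (illustrative only, the helper name is hypothetical and this is not the driver's code path):

/*
 * Illustrative only: the onecell xlate data must advertise the same
 * element count that was allocated for its flexible nodes[] array,
 * and it is simplest to set that immediately after allocation.
 */
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct icc_onecell_data *demo_alloc_onecell(struct device *dev,
						   unsigned int count)
{
	struct icc_onecell_data *data;

	data = devm_kzalloc(dev, struct_size(data, nodes, count), GFP_KERNEL);
	if (!data)
		return NULL;

	data->num_nodes = count;	/* matches the allocation above */
	return data;
}
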
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 9b84cd8becef..de96d4661340 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -26,6 +26,15 @@ config INTERCONNECT_QCOM_MSM8916
This is a driver for the Qualcomm Network-on-Chip on msm8916-based
platforms.
+config INTERCONNECT_QCOM_MSM8937
+ tristate "Qualcomm MSM8937 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8937-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8939
tristate "Qualcomm MSM8939 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -53,6 +62,15 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_MSM8976
+ tristate "Qualcomm MSM8976 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8976-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8996
tristate "Qualcomm MSM8996 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 7a7b6a71876f..bfeea8416fcf 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -6,9 +6,11 @@ interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
+qnoc-msm8937-objs := msm8937.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8953-objs := msm8953.o
qnoc-msm8974-objs := msm8974.o
+qnoc-msm8976-objs := msm8976.o
qnoc-msm8996-objs := msm8996.o
icc-osm-l3-objs := osm-l3.o
qnoc-qcm2290-objs := qcm2290.o
@@ -41,9 +43,11 @@ icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8937) += qnoc-msm8937.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8953) += qnoc-msm8953.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8976) += qnoc-msm8976.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o
diff --git a/drivers/interconnect/qcom/msm8937.c b/drivers/interconnect/qcom/msm8937.c
new file mode 100644
index 000000000000..052b14c28ef8
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8937.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8937-bus.dtsi in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8937.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_SNOC_BIMC_0_MAS,
+ QNOC_SNOC_BIMC_2_MAS,
+ QNOC_SNOC_BIMC_1_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_BLSP_2,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_XM_USB_HS1,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_JPEG,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VFE,
+ QNOC_MASTER_VFE1,
+ QNOC_MASTER_CPP,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_6,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_SLV_8,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_SPDM_WRAPPER,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_BLSP_2,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_TCU,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_SLAVE_WCSS,
+ QNOC_SNOC_BIMC_0_SLV,
+ QNOC_SNOC_BIMC_1_SLV,
+ QNOC_SNOC_BIMC_2_SLV,
+ QNOC_SLAVE_OCIMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_LPASS,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_snoc_bimc_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_snoc_bimc_0 = {
+ .name = "mas_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_0_links),
+ .links = mas_snoc_bimc_0_links,
+};
+
+static const u16 mas_snoc_bimc_2_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_snoc_bimc_2 = {
+ .name = "mas_snoc_bimc_2",
+ .id = QNOC_SNOC_BIMC_2_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 108,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_2_links),
+ .links = mas_snoc_bimc_2_links,
+};
+
+static const u16 mas_snoc_bimc_1_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_1 = {
+ .name = "mas_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
+ .links = mas_snoc_bimc_1_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 8,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = 50,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = QNOC_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_usb_hs1 = {
+ .name = "mas_usb_hs1",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_usb_hs1_links),
+ .links = mas_usb_hs1_links,
+};
+
+static const u16 mas_xi_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_xi_usb_hs1 = {
+ .name = "mas_xi_usb_hs1",
+ .id = QNOC_MASTER_XM_USB_HS1,
+ .buswidth = 8,
+ .mas_rpm_id = 138,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_xi_usb_hs1_links),
+ .links = mas_xi_usb_hs1_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_INT_3
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = QNOC_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_links),
+ .links = mas_venus_links,
+};
+
+static const u16 mas_vfe0_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_vfe0 = {
+ .name = "mas_vfe0",
+ .id = QNOC_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe0_links),
+ .links = mas_vfe0_links,
+};
+
+static const u16 mas_vfe1_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_vfe1 = {
+ .name = "mas_vfe1",
+ .id = QNOC_MASTER_VFE1,
+ .buswidth = 16,
+ .mas_rpm_id = 133,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 13,
+ .num_links = ARRAY_SIZE(mas_vfe1_links),
+ .links = mas_vfe1_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = QNOC_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 4,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 4,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 85,
+ .slv_rpm_id = 114,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_int_2_links[] = {
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_6,
+ QNOC_PNOC_SLV_8
+};
+
+static struct qcom_icc_node pcnoc_int_2 = {
+ .name = "pcnoc_int_2",
+ .id = QNOC_PNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 124,
+ .slv_rpm_id = 184,
+ .num_links = ARRAY_SIZE(pcnoc_int_2_links),
+ .links = pcnoc_int_2_links,
+};
+
+static const u16 pcnoc_int_3_links[] = {
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_4,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_TCU
+};
+
+static struct qcom_icc_node pcnoc_int_3 = {
+ .name = "pcnoc_int_3",
+ .id = QNOC_PNOC_INT_3,
+ .buswidth = 8,
+ .mas_rpm_id = 125,
+ .slv_rpm_id = 185,
+ .num_links = ARRAY_SIZE(pcnoc_int_3_links),
+ .links = pcnoc_int_3_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ QNOC_SLAVE_SPDM_WRAPPER,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_SDCC_2
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = QNOC_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = 89,
+ .slv_rpm_id = 118,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_TCSR
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_SNOC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_MESSAGE_RAM
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_6_links[] = {
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_BLSP_2
+};
+
+static struct qcom_icc_node pcnoc_s_6 = {
+ .name = "pcnoc_s_6",
+ .id = QNOC_PNOC_SLV_6,
+ .buswidth = 4,
+ .mas_rpm_id = 94,
+ .slv_rpm_id = 123,
+ .num_links = ARRAY_SIZE(pcnoc_s_6_links),
+ .links = pcnoc_s_6_links,
+};
+
+static const u16 pcnoc_s_7_links[] = {
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_7 = {
+ .name = "pcnoc_s_7",
+ .id = QNOC_PNOC_SLV_7,
+ .buswidth = 4,
+ .mas_rpm_id = 95,
+ .slv_rpm_id = 124,
+ .num_links = ARRAY_SIZE(pcnoc_s_7_links),
+ .links = pcnoc_s_7_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_CRYPTO_0_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = QNOC_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = 96,
+ .slv_rpm_id = 125,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_LPASS,
+ QNOC_SLAVE_WCSS,
+ QNOC_SLAVE_APPSS
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_OCIMEM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_2_links[] = {
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_2 = {
+ .name = "snoc_int_2",
+ .id = QNOC_SNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 134,
+ .slv_rpm_id = 197,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_2_links),
+ .links = snoc_int_2_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = QNOC_SLAVE_SPDM_WRAPPER,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 60,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = QNOC_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QNOC_SLAVE_TCU,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_wcss = {
+ .name = "slv_wcss",
+ .id = QNOC_SLAVE_WCSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 23,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_snoc_bimc_0_links[] = {
+ QNOC_SNOC_BIMC_0_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_0 = {
+ .name = "slv_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_0_links),
+ .links = slv_snoc_bimc_0_links,
+};
+
+static const u16 slv_snoc_bimc_1_links[] = {
+ QNOC_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_1 = {
+ .name = "slv_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
+ .links = slv_snoc_bimc_1_links,
+};
+
+static const u16 slv_snoc_bimc_2_links[] = {
+ QNOC_SNOC_BIMC_2_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_2 = {
+ .name = "slv_snoc_bimc_2",
+ .id = QNOC_SNOC_BIMC_2_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 137,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_2_links),
+ .links = slv_snoc_bimc_2_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_OCIMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QNOC_SLAVE_LPASS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node *msm8937_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_SNOC_BIMC_0] = &mas_snoc_bimc_0,
+ [MAS_SNOC_BIMC_2] = &mas_snoc_bimc_2,
+ [MAS_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8937_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5A000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8937_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8937_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node *msm8937_pcnoc_nodes[] = {
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_BLSP_2] = &mas_blsp_2,
+ [MAS_USB_HS1] = &mas_usb_hs1,
+ [MAS_XI_USB_HS1] = &mas_xi_usb_hs1,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_INT_3] = &pcnoc_int_3,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_6] = &pcnoc_s_6,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_SPDM] = &slv_spdm,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_BLSP_2] = &slv_blsp_2,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_TCU] = &slv_tcu,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8937_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &msm8937_pcnoc_regmap_config,
+};
+
+static struct qcom_icc_node *msm8937_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_WCSS] = &slv_wcss,
+ [SLV_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_1] = &slv_cats_1,
+ [SLV_LPASS] = &slv_lpass,
+};
+
+static const struct regmap_config msm8937_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8937_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node *msm8937_snoc_mm_nodes[] = {
+ [MAS_JPEG] = &mas_jpeg,
+ [MAS_MDP] = &mas_mdp,
+ [MAS_VENUS] = &mas_venus,
+ [MAS_VFE0] = &mas_vfe0,
+ [MAS_VFE1] = &mas_vfe1,
+ [MAS_CPP] = &mas_cpp,
+ [SLV_SNOC_BIMC_0] = &slv_snoc_bimc_0,
+ [SLV_SNOC_BIMC_2] = &slv_snoc_bimc_2,
+ [SLV_CATS_0] = &slv_cats_0,
+};
+
+static const struct qcom_icc_desc msm8937_snoc_mm = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_snoc_mm_nodes),
+ .bus_clk_desc = &bus_2_clk,
+ .regmap_cfg = &msm8937_snoc_regmap_config,
+ .qos_offset = 0x7000,
+ .ab_coeff = 154,
+};
+
+static const struct of_device_id msm8937_noc_of_match[] = {
+ { .compatible = "qcom,msm8937-bimc", .data = &msm8937_bimc },
+ { .compatible = "qcom,msm8937-pcnoc", .data = &msm8937_pcnoc },
+ { .compatible = "qcom,msm8937-snoc", .data = &msm8937_snoc },
+ { .compatible = "qcom,msm8937-snoc-mm", .data = &msm8937_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8937_noc_of_match);
+
+static struct platform_driver msm8937_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8937",
+ .of_match_table = msm8937_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8937_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8937 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/msm8953.c b/drivers/interconnect/qcom/msm8953.c
index 9e8867c07692..62f8c0774b3e 100644
--- a/drivers/interconnect/qcom/msm8953.c
+++ b/drivers/interconnect/qcom/msm8953.c
@@ -1169,6 +1169,7 @@ static const struct qcom_icc_desc msm8953_bimc = {
.nodes = msm8953_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8953_bimc_nodes),
.qos_offset = 0x8000,
+ .ab_coeff = 153,
.regmap_cfg = &msm8953_bimc_regmap_config
};
@@ -1295,6 +1296,7 @@ static const struct qcom_icc_desc msm8953_snoc_mm = {
.nodes = msm8953_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8953_snoc_mm_nodes),
.qos_offset = 0x7000,
+ .ab_coeff = 153,
.regmap_cfg = &msm8953_snoc_regmap_config,
};
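
For context on the ab_coeff values added above and in the msm8937 descriptors: my understanding, stated here as an assumption rather than something this hunk shows, is that the icc-rpm layer applies this coefficient as a percentage multiplier to the aggregated average bandwidth (mirroring the downstream bus driver's utilization factor), so 153 inflates the requested AB by roughly 1.53x before it reaches the RPM. The helper below only illustrates that arithmetic under the percentage assumption; it is not the driver's actual code path.

/*
 * Assumption: ab_coeff is a percentage applied to the aggregated
 * average bandwidth (153 => ~1.53x). Illustrative arithmetic only.
 */
#include <linux/math.h>
#include <linux/types.h>

static u32 demo_scale_ab(u32 agg_avg_kbps, u16 ab_coeff)
{
	/* e.g. 100000 kB/s with ab_coeff == 153 becomes 153000 kB/s */
	return mult_frac(agg_avg_kbps, ab_coeff, 100);
}
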
diff --git a/drivers/interconnect/qcom/msm8976.c b/drivers/interconnect/qcom/msm8976.c
new file mode 100644
index 000000000000..ab963def77c3
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8976.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8976-bus.dtsi in Qualcomm's msm-3.10 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8976.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MNOC_BIMC_MAS,
+ QNOC_SNOC_BIMC_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_USB_HS2,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_BLSP_2,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_MASTER_SDCC_3,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_LPASS_AHB,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_DEHR,
+ QNOC_MASTER_XM_USB_HS1,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_JPEG,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_MASTER_MDP_PORT1,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VIDEO_P1,
+ QNOC_MASTER_VFE0,
+ QNOC_MASTER_VFE1,
+ QNOC_MASTER_CPP,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_MASTER_LPASS_PROC,
+ QNOC_MASTER_IPA,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_8,
+ QNOC_PNOC_SLV_9,
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_DCC_CFG,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_SDCC_3,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_USB_HS2,
+ QNOC_SLAVE_BLSP_2,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_MNOC_BIMC_SLV,
+ QNOC_SNOC_BIMC_SLV,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_LPASS,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 16,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_smmnoc_bimc_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_smmnoc_bimc = {
+ .name = "mas_smmnoc_bimc",
+ .id = QNOC_MNOC_BIMC_MAS,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = 135,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_smmnoc_bimc_links),
+ .links = mas_smmnoc_bimc_links,
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = QNOC_SNOC_BIMC_MAS,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 16,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_usb_hs2_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_usb_hs2 = {
+ .name = "mas_usb_hs2",
+ .id = QNOC_MASTER_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = 57,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs2_links),
+ .links = mas_usb_hs2_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_usb_hs1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_usb_hs1 = {
+ .name = "mas_usb_hs1",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs1_links),
+ .links = mas_usb_hs1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = QNOC_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_sdcc_3_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_3 = {
+ .name = "mas_sdcc_3",
+ .id = QNOC_MASTER_SDCC_3,
+ .buswidth = 8,
+ .mas_rpm_id = 34,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_sdcc_3_links),
+ .links = mas_sdcc_3_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_lpass_ahb_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_lpass_ahb = {
+ .name = "mas_lpass_ahb",
+ .id = QNOC_MASTER_LPASS_AHB,
+ .buswidth = 8,
+ .mas_rpm_id = 18,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_lpass_ahb_links),
+ .links = mas_lpass_ahb_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = QNOC_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_xm_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_xm_usb_hs1 = {
+ .name = "mas_xm_usb_hs1",
+ .id = QNOC_MASTER_XM_USB_HS1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_xm_usb_hs1_links),
+ .links = mas_xm_usb_hs1_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = QNOC_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_MNOC_BIMC_SLV,
+ QNOC_SNOC_MM_INT_0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 16, /* [16, 17] */
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_mdp0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_mdp0 = {
+ .name = "mas_mdp0",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .ib_coeff = 50,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp0_links),
+ .links = mas_mdp0_links,
+};
+
+static const u16 mas_mdp1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_mdp1 = {
+ .name = "mas_mdp1",
+ .id = QNOC_MASTER_MDP_PORT1,
+ .buswidth = 16,
+ .ib_coeff = 50,
+ .mas_rpm_id = 61,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 13,
+ .num_links = ARRAY_SIZE(mas_mdp1_links),
+ .links = mas_mdp1_links,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_venus_0 = {
+ .name = "mas_venus_0",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_0_links),
+ .links = mas_venus_0_links,
+};
+
+static const u16 mas_venus_1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_venus_1 = {
+ .name = "mas_venus_1",
+ .id = QNOC_MASTER_VIDEO_P1,
+ .buswidth = 16,
+ .mas_rpm_id = 10,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 14,
+ .num_links = ARRAY_SIZE(mas_venus_1_links),
+ .links = mas_venus_1_links,
+};
+
+static const u16 mas_vfe_0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_vfe_0 = {
+ .name = "mas_vfe_0",
+ .id = QNOC_MASTER_VFE0,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_0_links),
+ .links = mas_vfe_0_links,
+};
+
+static const u16 mas_vfe_1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_vfe_1 = {
+ .name = "mas_vfe_1",
+ .id = QNOC_MASTER_VFE1,
+ .buswidth = 16,
+ .mas_rpm_id = 133,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 15,
+ .num_links = ARRAY_SIZE(mas_vfe_1_links),
+ .links = mas_vfe_1_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = QNOC_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_lpass_proc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_lpass_proc = {
+ .name = "mas_lpass_proc",
+ .id = QNOC_MASTER_LPASS_PROC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 19,
+ .num_links = ARRAY_SIZE(mas_lpass_proc_links),
+ .links = mas_lpass_proc_links,
+};
+
+static const u16 mas_ipa_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .name = "mas_ipa",
+ .id = QNOC_MASTER_IPA,
+ .buswidth = 8,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 18,
+ .num_links = ARRAY_SIZE(mas_ipa_links),
+ .links = mas_ipa_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 4,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 4,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 86,
+ .slv_rpm_id = 115,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_int_2_links[] = {
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_8,
+ QNOC_PNOC_SLV_9,
+ QNOC_PNOC_SLV_3
+};
+
+static struct qcom_icc_node pcnoc_int_2 = {
+ .name = "pcnoc_int_2",
+ .id = QNOC_PNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 124,
+ .slv_rpm_id = 184,
+ .num_links = ARRAY_SIZE(pcnoc_int_2_links),
+ .links = pcnoc_int_2_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_MESSAGE_RAM
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_SDCC_3,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_SDCC_1
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = QNOC_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = 96,
+ .slv_rpm_id = 125,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 pcnoc_s_9_links[] = {
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_USB_HS2,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_BLSP_2
+};
+
+static struct qcom_icc_node pcnoc_s_9 = {
+ .name = "pcnoc_s_9",
+ .id = QNOC_PNOC_SLV_9,
+ .buswidth = 4,
+ .mas_rpm_id = 97,
+ .slv_rpm_id = 126,
+ .num_links = ARRAY_SIZE(pcnoc_s_9_links),
+ .links = pcnoc_s_9_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ QNOC_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = QNOC_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = 79,
+ .slv_rpm_id = 108,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_LPASS,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_APPSS
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_2_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node snoc_int_2 = {
+ .name = "snoc_int_2",
+ .id = QNOC_SNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 134,
+ .slv_rpm_id = 197,
+ .num_links = ARRAY_SIZE(snoc_int_2_links),
+ .links = snoc_int_2_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+ .name = "slv_dcc_cfg",
+ .id = QNOC_SLAVE_DCC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 155,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_sdcc_3 = {
+ .name = "slv_sdcc_3",
+ .id = QNOC_SLAVE_SDCC_3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 32,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb_hs2 = {
+ .name = "slv_usb_hs2",
+ .id = QNOC_SLAVE_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 79,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = QNOC_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_smmnoc_bimc_links[] = {
+ QNOC_MNOC_BIMC_MAS
+};
+
+static struct qcom_icc_node slv_smmnoc_bimc = {
+ .name = "slv_smmnoc_bimc",
+ .id = QNOC_MNOC_BIMC_SLV,
+ .channels = 2,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 198,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_smmnoc_bimc_links),
+ .links = slv_smmnoc_bimc_links,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ QNOC_SNOC_BIMC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = QNOC_SNOC_BIMC_SLV,
+ .channels = 2,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_SYSTEM_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QNOC_SLAVE_LPASS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node * const msm8976_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_SMMNOC_BIMC] = &mas_smmnoc_bimc,
+ [MAS_SNOC_BIMC] = &mas_snoc_bimc,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8976_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8976_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8976_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node * const msm8976_pcnoc_nodes[] = {
+ [MAS_USB_HS2] = &mas_usb_hs2,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_USB_HS1] = &mas_usb_hs1,
+ [MAS_BLSP_2] = &mas_blsp_2,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SDCC_3] = &mas_sdcc_3,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_DEHR] = &mas_dehr,
+ [MAS_XM_USB_HS1] = &mas_xm_usb_hs1,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [PCNOC_S_9] = &pcnoc_s_9,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_DCC_CFG] = &slv_dcc_cfg,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_SDCC_3] = &slv_sdcc_3,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_USB_HS2] = &slv_usb_hs2,
+ [SLV_BLSP_2] = &slv_blsp_2,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8976_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &msm8976_pcnoc_regmap_config,
+};
+
+static struct qcom_icc_node * const msm8976_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [MAS_LPASS_PROC] = &mas_lpass_proc,
+ [MAS_IPA] = &mas_ipa,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_0] = &slv_cats_0,
+ [SLV_CATS_1] = &slv_cats_1,
+ [SLV_LPASS] = &slv_lpass,
+};
+
+static const struct regmap_config msm8976_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1A000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8976_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node * const msm8976_snoc_mm_nodes[] = {
+ [MAS_JPEG] = &mas_jpeg,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_MDP0] = &mas_mdp0,
+ [MAS_MDP1] = &mas_mdp1,
+ [MAS_VENUS_0] = &mas_venus_0,
+ [MAS_VENUS_1] = &mas_venus_1,
+ [MAS_VFE_0] = &mas_vfe_0,
+ [MAS_VFE_1] = &mas_vfe_1,
+ [MAS_CPP] = &mas_cpp,
+ [MM_INT_0] = &mm_int_0,
+ [SLV_SMMNOC_BIMC] = &slv_smmnoc_bimc,
+};
+
+static const struct qcom_icc_desc msm8976_snoc_mm = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_snoc_mm_nodes),
+ .bus_clk_desc = &bus_2_clk,
+ .regmap_cfg = &msm8976_snoc_regmap_config,
+ .qos_offset = 0x7000,
+ .ab_coeff = 154,
+};
+
+static const struct of_device_id msm8976_noc_of_match[] = {
+ { .compatible = "qcom,msm8976-bimc", .data = &msm8976_bimc },
+ { .compatible = "qcom,msm8976-pcnoc", .data = &msm8976_pcnoc },
+ { .compatible = "qcom,msm8976-snoc", .data = &msm8976_snoc },
+ { .compatible = "qcom,msm8976-snoc-mm", .data = &msm8976_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8976_noc_of_match);
+
+static struct platform_driver msm8976_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8976",
+ .of_match_table = msm8976_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8976_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8976 NoC driver");
+MODULE_LICENSE("GPL");
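A consumer-side sketch (illustrative only, not part of this patch): provider tables like the ones above are exercised through the generic interconnect API, where a client device requests a path between two endpoints and votes bandwidth on it. The "sdhc-mem" path name and the bandwidth figures below are made-up examples; a real driver takes them from its DT binding and workload.

#include <linux/err.h>
#include <linux/interconnect.h>

/* Hypothetical consumer: vote bandwidth on a path, then drop the vote. */
static int example_vote_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* "sdhc-mem" must match an interconnect-names entry in the DT node. */
	path = of_icc_get(dev, "sdhc-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Average and peak bandwidth in kBps; values are illustrative. */
	ret = icc_set_bw(path, 150000, 300000);

	/* Drop the vote and release the path once the transfer is done. */
	icc_set_bw(path, 0, 0);
	icc_put(path);

	return ret;
}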
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 11b49a89c03d..63e9ff223ac4 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "icc-rpm.h"
@@ -101,6 +102,11 @@ static struct qcom_icc_node mas_apps_proc = {
.buswidth = 8,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_apps_proc_links),
.links = mas_apps_proc_links,
};
@@ -116,6 +122,11 @@ static struct qcom_icc_node mas_oxili = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_oxili_links),
.links = mas_oxili_links,
};
@@ -131,6 +142,11 @@ static struct qcom_icc_node mas_mdp = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_mdp_links),
.links = mas_mdp_links,
};
@@ -145,6 +161,10 @@ static struct qcom_icc_node mas_snoc_bimc_1 = {
.buswidth = 8,
.mas_rpm_id = 76,
.slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
.num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
.links = mas_snoc_bimc_1_links,
};
@@ -160,6 +180,11 @@ static struct qcom_icc_node mas_tcu_0 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_tcu_0_links),
.links = mas_tcu_0_links,
};
@@ -174,6 +199,8 @@ static struct qcom_icc_node mas_spdm = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_spdm_links),
.links = mas_spdm_links,
};
@@ -231,6 +258,11 @@ static struct qcom_icc_node mas_crypto = {
.buswidth = 8,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_crypto_links),
.links = mas_crypto_links,
};
@@ -287,6 +319,11 @@ static struct qcom_icc_node mas_qpic = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 14,
.num_links = ARRAY_SIZE(mas_qpic_links),
.links = mas_qpic_links,
};
@@ -301,6 +338,11 @@ static struct qcom_icc_node mas_qdss_bam = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
@@ -348,6 +390,11 @@ static struct qcom_icc_node mas_qdss_etr = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
@@ -363,6 +410,11 @@ static struct qcom_icc_node mas_emac = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 17,
.num_links = ARRAY_SIZE(mas_emac_links),
.links = mas_emac_links,
};
@@ -378,6 +430,11 @@ static struct qcom_icc_node mas_pcie = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 8,
.num_links = ARRAY_SIZE(mas_pcie_links),
.links = mas_pcie_links,
};
@@ -393,6 +450,11 @@ static struct qcom_icc_node mas_usb3 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 16,
.num_links = ARRAY_SIZE(mas_usb3_links),
.links = mas_usb3_links,
};
@@ -491,6 +553,8 @@ static struct qcom_icc_node pcnoc_s_2 = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(pcnoc_s_2_links),
.links = pcnoc_s_2_links,
};
@@ -626,6 +690,8 @@ static struct qcom_icc_node qdss_int = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(qdss_int_links),
.links = qdss_int_links,
};
@@ -704,6 +770,8 @@ static struct qcom_icc_node slv_spdm = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pdm = {
@@ -752,6 +820,8 @@ static struct qcom_icc_node slv_disp_ss_cfg = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_gpu_cfg = {
@@ -760,6 +830,8 @@ static struct qcom_icc_node slv_gpu_cfg = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_1 = {
@@ -784,6 +856,8 @@ static struct qcom_icc_node slv_pcie = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_ethernet = {
@@ -792,6 +866,8 @@ static struct qcom_icc_node slv_ethernet = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_2 = {
@@ -816,6 +892,8 @@ static struct qcom_icc_node slv_tcu = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pmic_arb = {
@@ -894,6 +972,8 @@ static struct qcom_icc_node slv_kpss_ahb = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_wcss = {
@@ -954,6 +1034,8 @@ static struct qcom_icc_node slv_cats_0 = {
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_cats_1 = {
@@ -962,6 +1044,8 @@ static struct qcom_icc_node slv_cats_1 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_lpass = {
@@ -970,6 +1054,8 @@ static struct qcom_icc_node slv_lpass = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
@@ -982,10 +1068,22 @@ static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
+static const struct regmap_config qcs404_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_bimc = {
- .bus_clk_desc = &bimc_clk,
+ .type = QCOM_ICC_BIMC,
.nodes = qcs404_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &qcs404_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 153,
};
static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
@@ -1037,10 +1135,22 @@ static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
[SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
};
+static const struct regmap_config qcs404_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x15080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_pcnoc = {
- .bus_clk_desc = &bus_0_clk,
+ .type = QCOM_ICC_NOC,
.nodes = qcs404_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &qcs404_pcnoc_regmap_config,
};
static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
@@ -1066,10 +1176,21 @@ static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
[SLAVE_LPASS] = &slv_lpass,
};
+static const struct regmap_config qcs404_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x23080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_snoc = {
- .bus_clk_desc = &bus_1_clk,
+ .type = QCOM_ICC_NOC,
.nodes = qcs404_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .qos_offset = 0x11000,
+ .regmap_cfg = &qcs404_snoc_regmap_config,
};
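A sketch of how a regmap_config like the ones added above is typically bound to the NoC's MMIO window (illustrative only; the shared icc-rpm core performs the real QoS programming relative to the provider's qos_offset). The register offset and value in the final write are hypothetical.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_map_noc(struct platform_device *pdev,
			   const struct regmap_config *cfg)
{
	void __iomem *base;
	struct regmap *regmap;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(&pdev->dev, base, cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Hypothetical write of a priority value into a QoS register. */
	return regmap_write(regmap, 0x7000 + 0x8, 0x1);
}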
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index b321c3009acb..4236a43dc256 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -628,60 +628,6 @@ static struct qcom_icc_node xm_gic = {
.links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = SM8350_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_disp = {
- .name = "qnm_mnoc_sf_disp",
- .id = SM8350_MASTER_MNOC_SF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = SM8350_MASTER_LLCC_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8350_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qxm_mdp0_disp = {
- .name = "qxm_mdp0_disp",
- .id = SM8350_MASTER_MDP0_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qxm_mdp1_disp = {
- .name = "qxm_mdp1_disp",
- .id = SM8350_MASTER_MDP1_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qxm_rot_disp = {
- .name = "qxm_rot_disp",
- .id = SM8350_MASTER_ROTATOR_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8350_SLAVE_A1NOC_SNOC,
@@ -1320,40 +1266,6 @@ static struct qcom_icc_node srvc_snoc = {
.buswidth = 4,
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = SM8350_SLAVE_LLCC_DISP,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8350_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = SM8350_SLAVE_EBI1_DISP,
- .channels = 4,
- .buswidth = 4,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_disp = {
- .name = "qns_mem_noc_sf_disp",
- .id = SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_MASTER_MNOC_SF_MEM_NOC_DISP },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = BIT(3),
@@ -1583,55 +1495,6 @@ static struct qcom_icc_bcm bcm_sn14 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm1_disp = {
- .name = "MM1",
- .keepalive = false,
- .num_nodes = 2,
- .nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm4_disp = {
- .name = "MM4",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_sf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm5_disp = {
- .name = "MM5",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qxm_rot_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
@@ -1785,7 +1648,6 @@ static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
- &bcm_sh0_disp,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -1808,9 +1670,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
static const struct qcom_icc_desc sm8350_gem_noc = {
@@ -1843,15 +1702,11 @@ static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
};
static const struct qcom_icc_desc sm8350_mc_virt = {
@@ -1866,10 +1721,6 @@ static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
- &bcm_mm0_disp,
- &bcm_mm1_disp,
- &bcm_mm4_disp,
- &bcm_mm5_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -1886,11 +1737,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP0_DISP] = &qxm_mdp0_disp,
- [MASTER_MDP1_DISP] = &qxm_mdp1_disp,
- [MASTER_ROTATOR_DISP] = &qxm_rot_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
- [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
static const struct qcom_icc_desc sm8350_mmss_noc = {
@@ -1965,6 +1811,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h
index 328d15238a0d..074c6131ab36 100644
--- a/drivers/interconnect/qcom/sm8350.h
+++ b/drivers/interconnect/qcom/sm8350.h
@@ -154,15 +154,5 @@
#define SM8350_SLAVE_PCIE_1 143
#define SM8350_SLAVE_QDSS_STM 144
#define SM8350_SLAVE_TCU 145
-#define SM8350_MASTER_LLCC_DISP 146
-#define SM8350_MASTER_MNOC_HF_MEM_NOC_DISP 147
-#define SM8350_MASTER_MNOC_SF_MEM_NOC_DISP 148
-#define SM8350_MASTER_MDP0_DISP 149
-#define SM8350_MASTER_MDP1_DISP 150
-#define SM8350_MASTER_ROTATOR_DISP 151
-#define SM8350_SLAVE_EBI1_DISP 152
-#define SM8350_SLAVE_LLCC_DISP 153
-#define SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP 154
-#define SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP 155
#endif
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index 8c8226f0dffd..e590973ce5cf 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -360,7 +360,6 @@ static const struct file_operations iommufd_fault_fops = {
.write = iommufd_fault_fops_write,
.poll = iommufd_fault_fops_poll,
.release = iommufd_fault_fops_release,
- .llseek = no_llseek,
};
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 3ed257334562..70dee9ad4bae 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1024,7 +1024,6 @@ static int capi_release(struct inode *inode, struct file *file)
static const struct file_operations capi_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 83d6b484d3c6..7cfa8c61dba0 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -266,7 +266,6 @@ static const struct file_operations mISDN_fops = {
.unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
- .llseek = no_llseek,
};
static struct miscdevice mISDNtimer = {
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 3d361c920030..374a841f18c3 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -200,7 +200,6 @@ static const struct file_operations uleds_fops = {
.read = uleds_read,
.write = uleds_write,
.poll = uleds_poll,
- .llseek = no_llseek,
};
static struct miscdevice uleds_misc = {
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index b0407c5fadb2..88adee42ba82 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -842,7 +842,6 @@ out:
static const struct file_operations adb_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = adb_read,
.write = adb_write,
.open = adb_open,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index b2b78a53e532..a01bc5090cdf 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1314,7 +1314,6 @@ static int smu_release(struct inode *inode, struct file *file)
static const struct file_operations smu_device_fops = {
- .llseek = no_llseek,
.read = smu_read,
.write = smu_write,
.poll = smu_fpoll,
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 4eed97295927..6fb995778636 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -25,6 +25,7 @@ config ARM_MHU_V2
config ARM_MHU_V3
tristate "ARM MHUv3 Mailbox"
+ depends on ARM64 || COMPILE_TEST
depends on HAS_IOMEM || COMPILE_TEST
depends on OF
help
@@ -73,7 +74,7 @@ config ARMADA_37XX_RWTM_MBOX
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
help
Mailbox implementation for OMAP family chips with hardware for
interprocessor communication involving DSP, IVA1.0 and IVA2 in
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index fbfd0202047c..ea12fb8d2401 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -145,7 +145,8 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
spin_lock_init(&mbox->lock);
ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
- bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev),
+ mbox);
if (ret) {
dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
ret);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index d17efb1dd0cb..f815dab3be50 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -30,7 +30,7 @@
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
-#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_CHAN_NAME_SIZE 32
#define IMX_MU_V2_PAR_OFF 0x4
#define IMX_MU_V2_TR_MASK GENMASK(7, 0)
@@ -782,7 +782,7 @@ static int imx_mu_init_generic(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = IMX_MU_CHANS;
@@ -819,7 +819,7 @@ static int imx_mu_init_specific(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = num_chans;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index ebff3baf3045..d3d26a2c9895 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -450,30 +450,20 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
struct device_node *np = cl->dev->of_node;
- struct property *prop;
- const char *mbox_name;
- int index = 0;
+ int index;
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
return ERR_PTR(-EINVAL);
}
- if (!of_get_property(np, "mbox-names", NULL)) {
- dev_err(cl->dev,
- "%s() requires an \"mbox-names\" property\n", __func__);
+ index = of_property_match_string(np, "mbox-names", name);
+ if (index < 0) {
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
return ERR_PTR(-EINVAL);
}
-
- of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
- if (!strncmp(name, mbox_name, strlen(name)))
- return mbox_request_channel(cl, index);
- index++;
- }
-
- dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
- __func__, name);
- return ERR_PTR(-EINVAL);
+ return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
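Client-side usage of the helper simplified above (illustrative, not part of this patch): of_property_match_string() returns the index of the matching "mbox-names" entry or a negative errno, so the lookup and the error case now come from a single call. The "tx" channel name and the timeout are example values.

#include <linux/mailbox_client.h>

static struct mbox_chan *example_get_tx_channel(struct device *dev,
						struct mbox_client *cl)
{
	cl->dev = dev;
	cl->tx_block = true;
	cl->tx_tout = 500;	/* ms, illustrative */
	cl->knows_txdone = false;

	/* Resolved via of_property_match_string() against "mbox-names". */
	return mbox_request_channel_byname(cl, "tx");
}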
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 7a87424657a1..6797770474a5 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -603,7 +603,7 @@ static struct platform_driver omap_mbox_driver = {
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
- .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ .of_match_table = omap_mailbox_of_match,
},
};
module_platform_driver(omap_mbox_driver);
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index 8ffad059e898..4d966cb2ed03 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -159,7 +159,7 @@ static const struct of_device_id rockchip_mbox_of_match[] = {
{ .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
{ },
};
-MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match);
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
static int rockchip_mbox_probe(struct platform_device *pdev)
{
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index 9ae57de77d4d..ee8539dfcef5 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -62,7 +62,6 @@ struct sprd_mbox_priv {
void __iomem *outbox_base;
/* Base register address for supplementary outbox */
void __iomem *supp_base;
- struct clk *clk;
u32 outbox_fifo_depth;
struct mutex lock;
@@ -291,19 +290,13 @@ static const struct mbox_chan_ops sprd_mbox_ops = {
.shutdown = sprd_mbox_shutdown,
};
-static void sprd_mbox_disable(void *data)
-{
- struct sprd_mbox_priv *priv = data;
-
- clk_disable_unprepare(priv->clk);
-}
-
static int sprd_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sprd_mbox_priv *priv;
int ret, inbox_irq, outbox_irq, supp_irq;
unsigned long id, supp;
+ struct clk *clk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -331,20 +324,10 @@ static int sprd_mbox_probe(struct platform_device *pdev)
if (IS_ERR(priv->outbox_base))
return PTR_ERR(priv->outbox_base);
- priv->clk = devm_clk_get(dev, "enable");
- if (IS_ERR(priv->clk)) {
+ clk = devm_clk_get_enabled(dev, "enable");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get mailbox clock\n");
- return PTR_ERR(priv->clk);
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
- if (ret) {
- dev_err(dev, "failed to add mailbox disable action\n");
- return ret;
+ return PTR_ERR(clk);
}
inbox_irq = platform_get_irq_byname(pdev, "inbox");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 098bf526136c..d478aafa02c9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -529,9 +529,6 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
{
struct lru_entry *le = list_entry(l, struct lru_entry, list);
- if (!le)
- return NULL;
-
return le_to_buffer(le);
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 17f0fab1e254..aaeeabfab09b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1368,7 +1368,7 @@ static void mg_copy(struct work_struct *ws)
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
- BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
+ BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
mg_full_copy(ws);
@@ -3200,8 +3200,6 @@ static int parse_cblock_range(struct cache *cache, const char *str,
* Try and parse form (ii) first.
*/
r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
- if (r < 0)
- return r;
if (r == 2) {
result->begin = to_cblock(b);
@@ -3213,8 +3211,6 @@ static int parse_cblock_range(struct cache *cache, const char *str,
* That didn't work, try form (i).
*/
r = sscanf(str, "%llu%c", &b, &dummy);
- if (r < 0)
- return r;
if (r == 1) {
result->begin = to_cblock(b);
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 2db84cd2202b..14c5c28d938b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -530,10 +530,7 @@ static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
return r;
for (i = 0; ; i++) {
- if (dm_bitset_cursor_get_value(&c))
- __set_bit(i, cmd->region_map);
- else
- __clear_bit(i, cmd->region_map);
+ __assign_bit(i, cmd->region_map, dm_bitset_cursor_get_value(&c));
if (i >= (cmd->nr_regions - 1))
break;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 348b4b26c272..5228b03b6fe0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -147,6 +147,7 @@ enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
+ CRYPT_KEY_MAC_SIZE_SET, /* The integrity_key_size option was used */
};
/*
@@ -2613,35 +2614,31 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
key = request_key(type, key_desc + 1, NULL);
if (IS_ERR(key)) {
- kfree_sensitive(new_key_string);
- return PTR_ERR(key);
+ ret = PTR_ERR(key);
+ goto free_new_key_string;
}
down_read(&key->sem);
-
ret = set_key(cc, key);
- if (ret < 0) {
- up_read(&key->sem);
- key_put(key);
- kfree_sensitive(new_key_string);
- return ret;
- }
-
up_read(&key->sem);
key_put(key);
+ if (ret < 0)
+ goto free_new_key_string;
/* clear the flag since following operations may invalidate previously valid key */
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
ret = crypt_setkey(cc);
+ if (ret)
+ goto free_new_key_string;
- if (!ret) {
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- kfree_sensitive(cc->key_string);
- cc->key_string = new_key_string;
- } else
- kfree_sensitive(new_key_string);
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kfree_sensitive(cc->key_string);
+ cc->key_string = new_key_string;
+ return 0;
+free_new_key_string:
+ kfree_sensitive(new_key_string);
return ret;
}
@@ -2937,7 +2934,8 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
if (IS_ERR(mac))
return PTR_ERR(mac);
- cc->key_mac_size = crypto_ahash_digestsize(mac);
+ if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
+ cc->key_mac_size = crypto_ahash_digestsize(mac);
crypto_free_ahash(mac);
cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
@@ -3219,6 +3217,13 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
if (!cc->cipher_auth)
return -ENOMEM;
+ } else if (sscanf(opt_string, "integrity_key_size:%u%c", &val, &dummy) == 1) {
+ if (!val) {
+ ti->error = "Invalid integrity_key_size argument";
+ return -EINVAL;
+ }
+ cc->key_mac_size = val;
+ set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
if (cc->sector_size < (1 << SECTOR_SHIFT) ||
cc->sector_size > 4096 ||
@@ -3607,10 +3612,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
+ num_feature_args += !!cc->used_tag_size;
num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
- if (cc->used_tag_size)
- num_feature_args++;
+ num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
if (num_feature_args) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
@@ -3631,6 +3636,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sector_size:%d", cc->sector_size);
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
DMEMIT(" iv_large_sectors");
+ if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
+ DMEMIT(" integrity_key_size:%u", cc->key_mac_size);
}
break;
@@ -3758,7 +3765,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 27, 0},
+ .version = {1, 28, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
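The new integrity_key_size option is parsed with the usual device-mapper sscanf idiom: the trailing %c conversion only succeeds if characters follow the number, so requiring a return value of exactly 1 accepts "integrity_key_size:32" and rejects "integrity_key_size:32x". A stand-alone sketch of the idiom (hypothetical helper that mirrors, but is not, the target's code):

#include <linux/errno.h>
#include <linux/kernel.h>

static int example_parse_key_size(const char *opt, unsigned int *out)
{
	unsigned int val;
	char dummy;

	/* Exactly one conversion means a number with nothing after it. */
	if (sscanf(opt, "integrity_key_size:%u%c", &val, &dummy) != 1 || !val)
		return -EINVAL;

	*out = val;
	return 0;
}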
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index acff2f64f251..ee9f7cecd78e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -284,6 +284,7 @@ struct dm_integrity_c {
mempool_t recheck_pool;
struct bio_set recheck_bios;
+ struct bio_set recalc_bios;
struct notifier_block reboot_notifier;
};
@@ -321,7 +322,9 @@ struct dm_integrity_io {
struct dm_bio_details bio_details;
char *integrity_payload;
+ unsigned payload_len;
bool integrity_payload_from_mempool;
+ bool integrity_range_locked;
};
struct journal_completion {
@@ -359,7 +362,7 @@ static struct kmem_cache *journal_io_cache;
#endif
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
-static int dm_integrity_map_inline(struct dm_integrity_io *dio);
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
@@ -491,7 +494,8 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
__u8 *sb = (__u8 *)ic->sb;
__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;
- if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
+ if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT ||
+ mac_size > HASH_MAX_DIGESTSIZE) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
return -EINVAL;
}
@@ -1500,15 +1504,15 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
- fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
- fr.io_req.mem.type = DM_IO_KMEM,
- fr.io_req.mem.ptr.addr = NULL,
- fr.io_req.notify.fn = flush_notify,
+ fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
fr.io_req.notify.context = &fr;
- fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
- fr.io_reg.bdev = ic->dev->bdev,
- fr.io_reg.sector = 0,
- fr.io_reg.count = 0,
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
fr.ic = ic;
init_completion(&fr.comp);
r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
@@ -1946,8 +1950,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->bi_status = 0;
dio->op = bio_op(bio);
- if (ic->mode == 'I')
- return dm_integrity_map_inline(dio);
+ if (ic->mode == 'I') {
+ bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
+ dio->integrity_payload = NULL;
+ dio->integrity_payload_from_mempool = false;
+ dio->integrity_range_locked = false;
+ return dm_integrity_map_inline(dio, true);
+ }
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
@@ -2397,15 +2406,13 @@ journal_read_write:
do_endio_flush(ic, dio);
}
-static int dm_integrity_map_inline(struct dm_integrity_io *dio)
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map)
{
struct dm_integrity_c *ic = dio->ic;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
struct bio_integrity_payload *bip;
- unsigned payload_len, digest_size, extra_size, ret;
-
- dio->integrity_payload = NULL;
- dio->integrity_payload_from_mempool = false;
+ unsigned ret;
+ sector_t recalc_sector;
if (unlikely(bio_integrity(bio))) {
bio->bi_status = BLK_STS_NOTSUPP;
@@ -2418,28 +2425,67 @@ static int dm_integrity_map_inline(struct dm_integrity_io *dio)
return DM_MAPIO_REMAPPED;
retry:
- payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
- digest_size = crypto_shash_digestsize(ic->internal_hash);
- extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- payload_len += extra_size;
- dio->integrity_payload = kmalloc(payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (unlikely(!dio->integrity_payload)) {
- const unsigned x_size = PAGE_SIZE << 1;
- if (payload_len > x_size) {
- unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block;
- if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
- bio->bi_status = BLK_STS_NOTSUPP;
- bio_endio(bio);
- return DM_MAPIO_SUBMITTED;
+ if (!dio->integrity_payload) {
+ unsigned digest_size, extra_size;
+ dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
+ digest_size = crypto_shash_digestsize(ic->internal_hash);
+ extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ dio->payload_len += extra_size;
+ dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (unlikely(!dio->integrity_payload)) {
+ const unsigned x_size = PAGE_SIZE << 1;
+ if (dio->payload_len > x_size) {
+ unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block;
+ if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+ dm_accept_partial_bio(bio, sectors);
+ goto retry;
}
- dm_accept_partial_bio(bio, sectors);
- goto retry;
}
+ }
+
+ dio->range.logical_sector = bio->bi_iter.bi_sector;
+ dio->range.n_sectors = bio_sectors(bio);
+
+ if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)))
+ goto skip_spinlock;
+#ifdef CONFIG_64BIT
+ /*
+ * On 64-bit CPUs we can optimize the lock away (so that it won't cause
+ * cache line bouncing) and use acquire/release barriers instead.
+ *
+ * Paired with smp_store_release in integrity_recalc_inline.
+ */
+ recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector));
+ if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
+ goto skip_spinlock;
+#endif
+ spin_lock_irq(&ic->endio_wait.lock);
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
+ goto skip_unlock;
+ if (unlikely(!add_new_range(ic, &dio->range, true))) {
+ if (from_map) {
+ spin_unlock_irq(&ic->endio_wait.lock);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ return DM_MAPIO_SUBMITTED;
+ }
+ wait_and_add_new_range(ic, &dio->range);
+ }
+ dio->integrity_range_locked = true;
+skip_unlock:
+ spin_unlock_irq(&ic->endio_wait.lock);
+skip_spinlock:
+
+ if (unlikely(!dio->integrity_payload)) {
dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
dio->integrity_payload_from_mempool = true;
}
- bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
dio->bio_details.bi_iter = bio->bi_iter;
if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) {
@@ -2449,7 +2495,7 @@ retry:
bio->bi_iter.bi_sector += ic->start + SB_SECTORS;
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
- if (unlikely(IS_ERR(bip))) {
+ if (IS_ERR(bip)) {
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
@@ -2470,8 +2516,8 @@ retry:
}
ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
- payload_len, offset_in_page(dio->integrity_payload));
- if (unlikely(ret != payload_len)) {
+ dio->payload_len, offset_in_page(dio->integrity_payload));
+ if (unlikely(ret != dio->payload_len)) {
bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
@@ -2522,7 +2568,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
}
bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1);
- if (unlikely(IS_ERR(bip))) {
+ if (IS_ERR(bip)) {
bio_put(outgoing_bio);
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
bio_endio(bio);
@@ -2579,6 +2625,9 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
unsigned pos = 0;
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ unlikely(dio->integrity_range_locked))
+ goto skip_check;
while (dio->bio_details.bi_iter.bi_size) {
char digest[HASH_MAX_DIGESTSIZE];
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
@@ -2598,9 +2647,10 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
}
}
- if (likely(dio->op == REQ_OP_READ) || likely(dio->op == REQ_OP_WRITE)) {
- dm_integrity_free_payload(dio);
- }
+skip_check:
+ dm_integrity_free_payload(dio);
+ if (unlikely(dio->integrity_range_locked))
+ remove_range(ic, &dio->range);
}
return DM_ENDIO_DONE;
}
@@ -2608,8 +2658,26 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
static void integrity_bio_wait(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
- dm_integrity_map_continue(dio, false);
+ if (ic->mode == 'I') {
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ int r = dm_integrity_map_inline(dio, false);
+ switch (r) {
+ case DM_MAPIO_KILL:
+ bio->bi_status = BLK_STS_IOERR;
+ fallthrough;
+ case DM_MAPIO_REMAPPED:
+ submit_bio_noacct(bio);
+ fallthrough;
+ case DM_MAPIO_SUBMITTED:
+ return;
+ default:
+ BUG();
+ }
+ } else {
+ dm_integrity_map_continue(dio, false);
+ }
}
static void pad_uncommitted(struct dm_integrity_c *ic)
@@ -3081,6 +3149,133 @@ free_ret:
kvfree(recalc_tags);
}
+static void integrity_recalc_inline(struct work_struct *w)
+{
+ struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
+ size_t recalc_tags_size;
+ u8 *recalc_buffer = NULL;
+ u8 *recalc_tags = NULL;
+ struct dm_integrity_range range;
+ struct bio *bio;
+ struct bio_integrity_payload *bip;
+ __u8 *t;
+ unsigned int i;
+ int r;
+ unsigned ret;
+ unsigned int super_counter = 0;
+ unsigned recalc_sectors = RECALC_SECTORS;
+
+retry:
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_buffer) {
+oom:
+ recalc_sectors >>= 1;
+ if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
+ goto retry;
+ DMCRIT("out of memory for recalculate buffer - recalculation disabled");
+ goto free_ret;
+ }
+
+ recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_tags) {
+ kfree(recalc_buffer);
+ recalc_buffer = NULL;
+ goto oom;
+ }
+
+ spin_lock_irq(&ic->endio_wait.lock);
+
+next_chunk:
+ if (unlikely(dm_post_suspending(ic->ti)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ goto unlock_ret;
+ range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
+
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
+ recalc_write_super(ic);
+ super_counter = 0;
+ }
+
+ if (unlikely(dm_integrity_failed(ic)))
+ goto err;
+
+ DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors);
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
+
+ t = recalc_tags;
+ for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ memset(t, 0, ic->tuple_size);
+ integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tuple_size;
+ }
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ if (unlikely(IS_ERR(bip))) {
+ bio_put(bio);
+ DMCRIT("out of memory for bio integrity payload - recalculation disabled");
+ goto err;
+ }
+ ret = bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags));
+ if (unlikely(ret != t - recalc_tags)) {
+ bio_put(bio);
+ dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM);
+ goto err;
+ }
+
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "writing data", r);
+ goto err;
+ }
+
+ cond_resched();
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+#ifdef CONFIG_64BIT
+ /* Paired with smp_load_acquire in dm_integrity_map_inline. */
+ smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors));
+#else
+ ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
+#endif
+ goto next_chunk;
+
+err:
+ remove_range(ic, &range);
+ goto free_ret;
+
+unlock_ret:
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ recalc_write_super(ic);
+
+free_ret:
+ kfree(recalc_buffer);
+ kfree(recalc_tags);
+}
+
static void bitmap_block_work(struct work_struct *w)
{
struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
@@ -4619,6 +4814,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
r = -ENOMEM;
goto bad;
}
+ r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS);
+ if (r) {
+ ti->error = "Cannot allocate bio set";
+ goto bad;
+ }
+ r = bioset_integrity_create(&ic->recalc_bios, 1);
+ if (r) {
+ ti->error = "Cannot allocate bio integrity set";
+ r = -ENOMEM;
+ goto bad;
+ }
}
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
@@ -4717,13 +4923,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
- if (!le32_to_cpu(ic->sb->journal_sections) != (ic->mode == 'I')) {
- r = -EINVAL;
- if (ic->mode != 'I')
+ if (ic->mode != 'I') {
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
ti->error = "Corrupted superblock, journal_sections is 0";
- else
+ goto bad;
+ }
+ } else {
+ if (le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
ti->error = "Corrupted superblock, journal_sections is not 0";
- goto bad;
+ goto bad;
+ }
}
/* make sure that ti->max_io_len doesn't overflow */
if (!ic->meta_dev) {
@@ -4830,7 +5041,7 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- INIT_WORK(&ic->recalc_work, integrity_recalc);
+ INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4847,17 +5058,15 @@ try_smaller_buffer:
goto bad;
}
- if (ic->mode != 'I') {
- ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
- if (IS_ERR(ic->bufio)) {
- r = PTR_ERR(ic->bufio);
- ti->error = "Cannot initialize dm-bufio";
- ic->bufio = NULL;
- goto bad;
- }
- dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
+ ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
+ if (IS_ERR(ic->bufio)) {
+ r = PTR_ERR(ic->bufio);
+ ti->error = "Cannot initialize dm-bufio";
+ ic->bufio = NULL;
+ goto bad;
}
+ dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
if (ic->mode != 'R' && ic->mode != 'I') {
r = create_journal(ic, &ti->error);
@@ -4979,6 +5188,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ bioset_exit(&ic->recalc_bios);
bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
@@ -5033,7 +5243,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 12, 0},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 63682d27fc8d..1e0d3b9b75d6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2519,7 +2519,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
rdev->saved_raid_disk = rdev->raid_disk;
}
- /* Reshape support -> restore repective data offsets */
+ /* Reshape support -> restore respective data offsets */
rdev->data_offset = le64_to_cpu(sb->data_offset);
rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index f7e9a3632eb3..499f8cc8a39f 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
dm_put_live_table(md, srcu_idx);
- return BLK_STS_RESOURCE;
+ return BLK_STS_IOERR;
}
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a0c1620e90c8..89632ce97760 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2948,7 +2948,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
if (IS_ERR(pmd)) {
*error = "Error creating metadata object";
- return (struct pool *)pmd;
+ return ERR_CAST(pmd);
}
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index ab3ea8337809..0d502f6a86ad 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -501,6 +501,7 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+ vdo_reset_completion(&data_vio->decrement_completion);
vdo_reset_completion(completion);
completion->error_handler = handle_data_vio_error;
set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
@@ -1273,12 +1274,14 @@ static void clean_hash_lock(struct vdo_completion *completion)
static void finish_cleanup(struct data_vio *data_vio)
{
struct vdo_completion *completion = &data_vio->vio.completion;
+ u32 discard_size = min_t(u32, data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset);
VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
"complete data_vio has no allocation lock");
VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
"complete data_vio has no hash lock");
- if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+ if ((data_vio->remaining_discard <= discard_size) ||
(completion->result != VDO_SUCCESS)) {
struct data_vio_pool *pool = completion->vdo->data_vio_pool;
@@ -1287,12 +1290,12 @@ static void finish_cleanup(struct data_vio *data_vio)
return;
}
- data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
- VDO_BLOCK_SIZE - data_vio->offset);
+ data_vio->remaining_discard -= discard_size;
data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
data_vio->read = data_vio->is_partial;
data_vio->offset = 0;
completion->requeue = true;
+ data_vio->first_reference_operation_complete = false;
launch_data_vio(data_vio, data_vio->logical.lbn + 1);
}
@@ -1965,7 +1968,8 @@ static void allocate_block(struct vdo_completion *completion)
.state = VDO_MAPPING_STATE_UNCOMPRESSED,
};
- if (data_vio->fua) {
+ if (data_vio->fua ||
+ data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
prepare_for_dedupe(data_vio);
return;
}
@@ -2042,7 +2046,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
return;
}
-
/*
* We don't need to write any data, so skip allocation and just update the block map and
* reference counts (via the journal).
@@ -2051,7 +2054,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
if (data_vio->is_zero)
data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
- if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+ if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
/* This is not the final block of a discard so we can't acknowledge it yet. */
update_metadata_for_data_vio_write(data_vio, NULL);
return;
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 39ac68614419..80628ae93fba 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -729,6 +729,7 @@ static void process_update_result(struct data_vio *agent)
!change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE))
return;
+ agent->dedupe_context = NULL;
release_context(context);
}
@@ -1648,6 +1649,7 @@ static void process_query_result(struct data_vio *agent)
if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) {
agent->is_duplicate = decode_uds_advice(context);
+ agent->dedupe_context = NULL;
release_context(context);
}
}
@@ -2321,6 +2323,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion)
* send its requestor on its way.
*/
list_del_init(&context->list_entry);
+ context->requestor->dedupe_context = NULL;
continue_data_vio(context->requestor);
timed_out++;
}
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
index dd05691e4097..0e04c2021682 100644
--- a/drivers/md/dm-vdo/dm-vdo-target.c
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -1105,6 +1105,9 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv,
if ((argc == 1) && (strcasecmp(argv[0], "stats") == 0)) {
vdo_write_stats(vdo, result_buffer, maxlen);
result = 1;
+ } else if ((argc == 1) && (strcasecmp(argv[0], "config") == 0)) {
+ vdo_write_config(vdo, &result_buffer, &maxlen);
+ result = 1;
} else {
result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv));
}
@@ -2293,6 +2296,14 @@ static void handle_load_error(struct vdo_completion *completion)
return;
}
+ if ((completion->result == VDO_UNSUPPORTED_VERSION) &&
+ (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
+ vdo_log_error("Aborting load due to unsupported version");
+ vdo->admin.phase = LOAD_PHASE_FINISHED;
+ load_callback(completion);
+ return;
+ }
+
vdo_log_error_strerror(completion->result,
"Entering read-only mode due to load error");
vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY;
@@ -2737,6 +2748,19 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo)
vdo_log_info("starting device '%s'", device_name);
result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback,
handle_load_error, "load");
+ if (result == VDO_UNSUPPORTED_VERSION) {
+ /*
+ * A component version is not supported. This can happen when the
+ * recovery journal metadata is in an old version format. Abort the
+ * load without saving the state.
+ */
+ vdo->suspend_type = VDO_ADMIN_STATE_SUSPENDING;
+ perform_admin_operation(vdo, SUSPEND_PHASE_START,
+ suspend_callback, suspend_callback,
+ "suspend");
+ return result;
+ }
+
if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
/*
* Something has gone very wrong. Make sure everything has drained and
@@ -2808,7 +2832,8 @@ static int vdo_preresume(struct dm_target *ti)
vdo_register_thread_device_id(&instance_thread, &vdo->instance);
result = vdo_preresume_registered(ti, vdo);
- if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE))
+ if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE) ||
+ (result == VDO_UNSUPPORTED_VERSION))
result = -EINVAL;
vdo_unregister_thread_device_id();
return vdo_status_to_errno(result);
@@ -2832,7 +2857,7 @@ static void vdo_resume(struct dm_target *ti)
static struct target_type vdo_target_bio = {
.features = DM_TARGET_SINGLETON,
.name = "vdo",
- .version = { 9, 0, 0 },
+ .version = { 9, 1, 0 },
.module = THIS_MODULE,
.ctr = vdo_ctr,
.dtr = vdo_dtr,
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c
index 7e32a25d3f2f..fb1db41c794b 100644
--- a/drivers/md/dm-vdo/indexer/chapter-index.c
+++ b/drivers/md/dm-vdo/indexer/chapter-index.c
@@ -177,7 +177,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
if (list_number < 0)
return UDS_OVERFLOW;
- next_list = first_list + list_number--,
+ next_list = first_list + list_number--;
result = uds_start_delta_index_search(delta_index, next_list, 0,
&entry);
if (result != UDS_SUCCESS)
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
index 9a3716bb3c05..ab62abe18827 100644
--- a/drivers/md/dm-vdo/io-submitter.c
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -346,7 +346,6 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
- VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
vdo_reset_completion(completion);
completion->error_handler = error_handler;
diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c
index 2802cf92922b..75dfcd7c5f63 100644
--- a/drivers/md/dm-vdo/message-stats.c
+++ b/drivers/md/dm-vdo/message-stats.c
@@ -4,6 +4,7 @@
*/
#include "dedupe.h"
+#include "indexer.h"
#include "logger.h"
#include "memory-alloc.h"
#include "message-stats.h"
@@ -430,3 +431,50 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
vdo_free(stats);
return VDO_SUCCESS;
}
+
+static void write_index_memory(u32 mem, char **buf, unsigned int *maxlen)
+{
+ char *prefix = "memorySize : ";
+
+ /* Convert index memory to fractional value */
+ if (mem == (u32)UDS_MEMORY_CONFIG_256MB)
+ write_string(prefix, "0.25, ", NULL, buf, maxlen);
+ else if (mem == (u32)UDS_MEMORY_CONFIG_512MB)
+ write_string(prefix, "0.50, ", NULL, buf, maxlen);
+ else if (mem == (u32)UDS_MEMORY_CONFIG_768MB)
+ write_string(prefix, "0.75, ", NULL, buf, maxlen);
+ else
+ write_u32(prefix, mem, ", ", buf, maxlen);
+}
+
+static void write_index_config(struct index_config *config, char **buf,
+ unsigned int *maxlen)
+{
+ write_string("index : ", "{ ", NULL, buf, maxlen);
+ /* index mem size */
+ write_index_memory(config->mem, buf, maxlen);
+ /* whether the index is sparse or not */
+ write_bool("isSparse : ", config->sparse, ", ", buf, maxlen);
+ write_string(NULL, "}", ", ", buf, maxlen);
+}
+
+int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen)
+{
+ struct vdo_config *config = &vdo->states.vdo.config;
+
+ write_string(NULL, "{ ", NULL, buf, maxlen);
+ /* version */
+ write_u32("version : ", 1, ", ", buf, maxlen);
+ /* physical size */
+ write_block_count_t("physicalSize : ", config->physical_blocks * VDO_BLOCK_SIZE, ", ",
+ buf, maxlen);
+ /* logical size */
+ write_block_count_t("logicalSize : ", config->logical_blocks * VDO_BLOCK_SIZE, ", ",
+ buf, maxlen);
+ /* slab size */
+ write_block_count_t("slabSize : ", config->slab_size, ", ", buf, maxlen);
+ /* index config */
+ write_index_config(&vdo->geometry.index_config, buf, maxlen);
+ write_string(NULL, "}", NULL, buf, maxlen);
+ return VDO_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/message-stats.h b/drivers/md/dm-vdo/message-stats.h
index f7fceca9acab..f9c95eff569d 100644
--- a/drivers/md/dm-vdo/message-stats.h
+++ b/drivers/md/dm-vdo/message-stats.h
@@ -8,6 +8,7 @@
#include "types.h"
+int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen);
int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
#endif /* VDO_MESSAGE_STATS_H */
diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c
index 7e0009d2f67d..ffff2c999518 100644
--- a/drivers/md/dm-vdo/repair.c
+++ b/drivers/md/dm-vdo/repair.c
@@ -1202,17 +1202,14 @@ static bool __must_check is_valid_recovery_journal_block(const struct recovery_j
* @journal: The journal to use.
* @header: The unpacked block header to check.
* @sequence: The expected sequence number.
- * @type: The expected metadata type.
*
* Return: True if the block matches.
*/
static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal,
const struct recovery_block_header *header,
- sequence_number_t sequence,
- enum vdo_metadata_type type)
+ sequence_number_t sequence)
{
- return ((header->metadata_type == type) &&
- (header->sequence_number == sequence) &&
+ return ((header->sequence_number == sequence) &&
(is_valid_recovery_journal_block(journal, header, true)));
}
@@ -1371,7 +1368,8 @@ static void extract_entries_from_block(struct repair_completion *repair,
get_recovery_journal_block_header(journal, repair->journal_data,
sequence);
- if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) {
+ if (!is_exact_recovery_journal_block(journal, &header, sequence) ||
+ (header.metadata_type != format)) {
/* This block is invalid, so skip it. */
return;
}
@@ -1557,10 +1555,13 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
sequence_number_t i, head;
bool found_entries = false;
struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+ struct recovery_block_header header;
+ enum vdo_metadata_type expected_format;
head = min(repair->block_map_head, repair->slab_journal_head);
+ header = get_recovery_journal_block_header(journal, repair->journal_data, head);
+ expected_format = header.metadata_type;
for (i = head; i <= repair->highest_tail; i++) {
- struct recovery_block_header header;
journal_entry_count_t block_entries;
u8 j;
@@ -1572,19 +1573,15 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
};
header = get_recovery_journal_block_header(journal, repair->journal_data, i);
- if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
- /* This is an old format block, so we need to upgrade */
- vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
- "Recovery journal is in the old format, a read-only rebuild is required.");
- vdo_enter_read_only_mode(repair->completion.vdo,
- VDO_UNSUPPORTED_VERSION);
- return VDO_UNSUPPORTED_VERSION;
- }
-
- if (!is_exact_recovery_journal_block(journal, &header, i,
- VDO_METADATA_RECOVERY_JOURNAL_2)) {
+ if (!is_exact_recovery_journal_block(journal, &header, i)) {
/* A bad block header was found so this must be the end of the journal. */
break;
+ } else if (header.metadata_type != expected_format) {
+ /* There is a mix of old and new format blocks, so we need to rebuild. */
+ vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Recovery journal is in an invalid format, a read-only rebuild is required.");
+ vdo_enter_read_only_mode(repair->completion.vdo, VDO_CORRUPT_JOURNAL);
+ return VDO_CORRUPT_JOURNAL;
}
block_entries = header.entry_count;
@@ -1620,8 +1617,14 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
break;
}
- if (!found_entries)
+ if (!found_entries) {
return validate_heads(repair);
+ } else if (expected_format == VDO_METADATA_RECOVERY_JOURNAL) {
+ /* All journal blocks have the old format, so we need to upgrade. */
+ vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "Recovery journal is in the old format. Downgrade and complete recovery, then upgrade with a clean volume");
+ return VDO_UNSUPPORTED_VERSION;
+ }
/* Set the tail to the last valid tail block, if there is one. */
if (repair->tail_recovery_point.sector_count == 0)
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
index d3493450b169..dd252d660b6d 100644
--- a/drivers/md/dm-vdo/status-codes.c
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -28,7 +28,7 @@ const struct error_info vdo_status_list[] = {
{ "VDO_LOCK_ERROR", "A lock is held incorrectly" },
{ "VDO_READ_ONLY", "The device is in read-only mode" },
{ "VDO_SHUTTING_DOWN", "The device is shutting down" },
- { "VDO_CORRUPT_JOURNAL", "Recovery journal entries corrupted" },
+ { "VDO_CORRUPT_JOURNAL", "Recovery journal corrupted" },
{ "VDO_TOO_MANY_SLABS", "Exceeds maximum number of slabs supported" },
{ "VDO_INVALID_FRAGMENT", "Compressed block fragment is invalid" },
{ "VDO_RETRY_AFTER_REBUILD", "Retry operation after rebuilding finishes" },
diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h
index 72da04159f88..426dc8e2ca5d 100644
--- a/drivers/md/dm-vdo/status-codes.h
+++ b/drivers/md/dm-vdo/status-codes.h
@@ -52,7 +52,7 @@ enum vdo_status_codes {
VDO_READ_ONLY,
/* the VDO is shutting down */
VDO_SHUTTING_DOWN,
- /* the recovery journal has corrupt entries */
+ /* the recovery journal has corrupt entries or corrupt metadata */
VDO_CORRUPT_JOURNAL,
/* exceeds maximum number of slabs supported */
VDO_TOO_MANY_SLABS,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 24ba9a10444c..36e4ddfe2d15 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -273,8 +273,10 @@ out:
if (v->mode == DM_VERITY_MODE_LOGGING)
return 0;
- if (v->mode == DM_VERITY_MODE_RESTART)
- kernel_restart("dm-verity device corrupted");
+ if (v->mode == DM_VERITY_MODE_RESTART) {
+ pr_emerg("dm-verity device corrupted\n");
+ emergency_restart();
+ }
if (v->mode == DM_VERITY_MODE_PANIC)
panic("dm-verity device corrupted");
@@ -597,6 +599,23 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
verity_fec_finish_io(io);
+ if (unlikely(status != BLK_STS_OK) &&
+ unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
+ !verity_is_system_shutting_down()) {
+ if (v->mode == DM_VERITY_MODE_RESTART ||
+ v->mode == DM_VERITY_MODE_PANIC)
+ DMERR_LIMIT("%s has error: %s", v->data_dev->name,
+ blk_status_to_str(status));
+
+ if (v->mode == DM_VERITY_MODE_RESTART) {
+ pr_emerg("dm-verity device corrupted\n");
+ emergency_restart();
+ }
+
+ if (v->mode == DM_VERITY_MODE_PANIC)
+ panic("dm-verity device corrupted");
+ }
+
bio_endio(bio);
}
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index d351d7d39c60..a9e2c6c0a33c 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -127,7 +127,7 @@ int verity_verify_root_hash(const void *root_hash, size_t root_hash_len,
#endif
VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL);
#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING
- if (ret == -ENOKEY)
+ if (ret == -ENOKEY || ret == -EKEYREJECTED)
ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data,
sig_len,
VERIFY_USE_PLATFORM_KEYRING,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 87bb90303435..ff4a6b570b76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2030,10 +2030,15 @@ static void dm_submit_bio(struct bio *bio)
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
+ bio_io_error(bio);
+ goto out;
+ }
- /* If suspended, or map not yet available, queue this IO for later */
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
- unlikely(!map)) {
+ /* If suspended, queue this IO for later */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cc466ad5cb1d..8ad782249af8 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -109,7 +109,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
-int dm_zone_map_bio(struct dm_target_io *io);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset);
@@ -119,10 +118,6 @@ static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
return false;
}
-static inline int dm_zone_map_bio(struct dm_target_io *tio)
-{
- return DM_MAPIO_KILL;
-}
#endif
/*
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index c75a4057f00e..c50299246fc4 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -698,5 +698,4 @@ const struct file_operations cec_devnode_fops = {
.compat_ioctl = cec_ioctl,
.release = cec_release,
.poll = cec_poll,
- .llseek = no_llseek,
};
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 318e267e798e..56444edaf136 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -204,7 +204,6 @@ static const struct file_operations media_devnode_fops = {
#endif /* CONFIG_COMPAT */
.release = media_release,
.poll = media_poll,
- .llseek = no_llseek,
};
int __must_check media_devnode_register(struct media_device *mdev,
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index b8dfd530fab7..f042f3f14afa 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -706,7 +706,6 @@ static const struct file_operations lirc_fops = {
.poll = lirc_poll,
.open = lirc_open,
.release = lirc_close,
- .llseek = no_llseek,
};
static void lirc_release_device(struct device *ld)
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index 1a1258d4ffca..14fa41cb8148 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -59,7 +59,6 @@ static int uvc_debugfs_stats_release(struct inode *inode, struct file *file)
static const struct file_operations uvc_debugfs_stats_fops = {
.owner = THIS_MODULE,
.open = uvc_debugfs_stats_open,
- .llseek = no_llseek,
.read = uvc_debugfs_stats_read,
.release = uvc_debugfs_stats_release,
};
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 570ba00e00b3..3d7711cc42bc 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -483,7 +483,6 @@ static const struct file_operations v4l2_fops = {
#endif
.release = v4l2_release,
.poll = v4l2_poll,
- .llseek = no_llseek,
};
/**
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 9f3999750c23..77fa55df70d0 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1609,7 +1609,7 @@ mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
- max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
+ max = min(maxEvents, MPTCTL_EVENT_LOG_SIZE);
/* If fewer than 1 event is requested, there must have
* been some type of error.
@@ -2691,7 +2691,6 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
static const struct file_operations mptctl_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index bcc005dff1c0..03633cccd043 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -7,65 +7,12 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "cxl.h"
-
-static const __be32 *read_prop_string(const struct device_node *np,
- const char *prop_name)
-{
- const __be32 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (cxl_verbose && prop)
- pr_info("%s: %s\n", prop_name, (char *) prop);
- return prop;
-}
-
-static const __be32 *read_prop_dword(const struct device_node *np,
- const char *prop_name, u32 *val)
-{
- const __be32 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (prop)
- *val = be32_to_cpu(prop[0]);
- if (cxl_verbose && prop)
- pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
- return prop;
-}
-
-static const __be64 *read_prop64_dword(const struct device_node *np,
- const char *prop_name, u64 *val)
-{
- const __be64 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (prop)
- *val = be64_to_cpu(prop[0]);
- if (cxl_verbose && prop)
- pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
- return prop;
-}
-
-
-static int read_handle(struct device_node *np, u64 *handle)
-{
- const __be32 *prop;
- u64 size;
-
- /* Get address and size of the node */
- prop = of_get_address(np, 0, &size, NULL);
- if (size)
- return -EINVAL;
-
- /* Helper to read a big number; size is in cells (not bytes) */
- *handle = of_read_number(prop, of_n_addr_cells(np));
- return 0;
-}
-
static int read_phys_addr(struct device_node *np, char *prop_name,
struct cxl_afu *afu)
{
@@ -100,9 +47,6 @@ static int read_phys_addr(struct device_node *np, char *prop_name,
type, prop_name);
return -EINVAL;
}
- if (cxl_verbose)
- pr_info("%s: %#x %#llx (size %#llx)\n",
- prop_name, type, addr, size);
}
}
return 0;
@@ -130,36 +74,17 @@ static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
{
- if (read_handle(afu_np, &afu->guest->handle))
- return -EINVAL;
- pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
-
- return 0;
+ return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL);
}
int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
{
- int i, len, rc;
- char *p;
- const __be32 *prop;
+ int i, rc;
u16 device_id, vendor_id;
u32 val = 0, class_code;
/* Properties are read in the same order as listed in PAPR */
- if (cxl_verbose) {
- pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");
-
- prop = of_get_property(np, "compatible", &len);
- i = 0;
- while (i < len) {
- p = (char *) prop + i;
- pr_info("compatible: %s\n", p);
- i += strlen(p) + 1;
- }
- read_prop_string(np, "name");
- }
-
rc = read_phys_addr(np, "reg", afu);
if (rc)
return rc;
@@ -173,25 +98,15 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
else
afu->psa = true;
- if (cxl_verbose) {
- read_prop_string(np, "ibm,loc-code");
- read_prop_string(np, "device_type");
- }
+ of_property_read_u32(np, "ibm,#processes", &afu->max_procs_virtualised);
- read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);
-
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,scratchpad-size", &val);
- read_prop_dword(np, "ibm,programmable", &val);
- read_prop_string(np, "ibm,phandle");
+ if (cxl_verbose)
read_vpd(NULL, afu);
- }
- read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
+ of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
afu->irqs_max = afu->guest->max_ints;
- prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
- if (prop) {
+ if (!of_property_read_u32(np, "ibm,min-ints-per-process", &afu->pp_irqs)) {
/* One extra interrupt for the PSL interrupt is already
* included. Remove it now to keep only AFU interrupts and
* match the native case.
@@ -199,21 +114,13 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
afu->pp_irqs--;
}
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,max-ints", &val);
- read_prop_dword(np, "ibm,vpd-size", &val);
- }
-
- read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
+ of_property_read_u64(np, "ibm,error-buffer-size", &afu->eb_len);
afu->eb_offset = 0;
- if (cxl_verbose)
- read_prop_dword(np, "ibm,config-record-type", &val);
-
- read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
+ of_property_read_u64(np, "ibm,config-record-size", &afu->crs_len);
afu->crs_offset = 0;
- read_prop_dword(np, "ibm,#config-records", &afu->crs_num);
+ of_property_read_u32(np, "ibm,#config-records", &afu->crs_num);
if (cxl_verbose) {
for (i = 0; i < afu->crs_num; i++) {
@@ -235,35 +142,18 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
i, class_code);
}
}
-
- read_prop_dword(np, "ibm,function-number", &val);
- read_prop_dword(np, "ibm,privileged-function", &val);
- read_prop_dword(np, "vendor-id", &val);
- read_prop_dword(np, "device-id", &val);
- read_prop_dword(np, "revision-id", &val);
- read_prop_dword(np, "class-code", &val);
- read_prop_dword(np, "subsystem-vendor-id", &val);
- read_prop_dword(np, "subsystem-id", &val);
}
/*
* if "ibm,process-mmio" doesn't exist then per-process mmio is
* not supported
*/
val = 0;
- prop = read_prop_dword(np, "ibm,process-mmio", &val);
- if (prop && val == 1)
+ if (!of_property_read_u32(np, "ibm,process-mmio", &val) && val == 1)
afu->pp_psa = true;
else
afu->pp_psa = false;
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,supports-aur", &val);
- read_prop_dword(np, "ibm,supports-csrp", &val);
- read_prop_dword(np, "ibm,supports-prr", &val);
- }
-
- prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
- if (prop)
+ if (!of_property_read_u32(np, "ibm,function-error-interrupt", &val))
afu->serr_hwirq = val;
pr_devel("AFU handle: %#llx\n", afu->guest->handle);
@@ -334,95 +224,44 @@ err:
int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
{
- if (read_handle(np, &adapter->guest->handle))
- return -EINVAL;
- pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
-
- return 0;
+ return of_property_read_reg(np, 0, &adapter->guest->handle, NULL);
}
int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
{
- int rc, len, naddr, i;
- char *p;
- const __be32 *prop;
+ int rc;
+ const char *p;
u32 val = 0;
/* Properties are read in the same order as listed in PAPR */
- naddr = of_n_addr_cells(np);
-
- if (cxl_verbose) {
- pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");
-
- read_prop_dword(np, "#address-cells", &val);
- read_prop_dword(np, "#size-cells", &val);
-
- prop = of_get_property(np, "compatible", &len);
- i = 0;
- while (i < len) {
- p = (char *) prop + i;
- pr_info("compatible: %s\n", p);
- i += strlen(p) + 1;
- }
- read_prop_string(np, "name");
- read_prop_string(np, "model");
-
- prop = of_get_property(np, "reg", NULL);
- if (prop) {
- pr_info("reg: addr:%#llx size:%#x\n",
- of_read_number(prop, naddr),
- be32_to_cpu(prop[naddr]));
- }
-
- read_prop_string(np, "ibm,loc-code");
- }
-
if ((rc = read_adapter_irq_config(adapter, np)))
return rc;
- if (cxl_verbose) {
- read_prop_string(np, "device_type");
- read_prop_string(np, "ibm,phandle");
- }
-
- prop = read_prop_dword(np, "ibm,caia-version", &val);
- if (prop) {
+ if (!of_property_read_u32(np, "ibm,caia-version", &val)) {
adapter->caia_major = (val & 0xFF00) >> 8;
adapter->caia_minor = val & 0xFF;
}
- prop = read_prop_dword(np, "ibm,psl-revision", &val);
- if (prop)
+ if (!of_property_read_u32(np, "ibm,psl-revision", &val))
adapter->psl_rev = val;
- prop = read_prop_string(np, "status");
- if (prop) {
- adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
+ if (!of_property_read_string(np, "status", &p)) {
+ adapter->guest->status = kasprintf(GFP_KERNEL, "%s", p);
if (adapter->guest->status == NULL)
return -ENOMEM;
}
- prop = read_prop_dword(np, "vendor-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "vendor-id", &val))
adapter->guest->vendor = val;
- prop = read_prop_dword(np, "device-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "device-id", &val))
adapter->guest->device = val;
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,privileged-facility", &val);
- read_prop_dword(np, "revision-id", &val);
- read_prop_dword(np, "class-code", &val);
- }
-
- prop = read_prop_dword(np, "subsystem-vendor-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "subsystem-vendor-id", &val))
adapter->guest->subsystem_vendor = val;
- prop = read_prop_dword(np, "subsystem-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "subsystem-id", &val))
adapter->guest->subsystem = val;
if (cxl_verbose)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4cf9e7c42a24..3d52f9b92d0d 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -363,17 +363,17 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
{
int rc;
struct device_node *np;
- const __be32 *prop;
+ u32 id;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
- while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
+ while (np && of_property_read_u32(np, "ibm,chip-id", &id))
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
- *chipid = be32_to_cpup(prop);
+ *chipid = id;
rc = get_phb_index(np, phb_index);
if (rc) {
@@ -398,32 +398,26 @@ static DEFINE_MUTEX(indications_mutex);
static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
u64 *nbwind)
{
- static u64 nbw, asn, capi = 0;
+ static u32 val[3];
struct device_node *np;
- const __be32 *prop;
mutex_lock(&indications_mutex);
- if (!capi) {
+ if (!val[0]) {
if (!(np = pnv_pci_get_phb_node(dev))) {
mutex_unlock(&indications_mutex);
return -ENODEV;
}
- prop = of_get_property(np, "ibm,phb-indications", NULL);
- if (!prop) {
- nbw = 0x0300UL; /* legacy values */
- asn = 0x0400UL;
- capi = 0x0200UL;
- } else {
- nbw = (u64)be32_to_cpu(prop[2]);
- asn = (u64)be32_to_cpu(prop[1]);
- capi = (u64)be32_to_cpu(prop[0]);
+ if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) {
+ val[2] = 0x0300UL; /* legacy values */
+ val[1] = 0x0400UL;
+ val[0] = 0x0200UL;
}
of_node_put(np);
}
- *capiind = capi;
- *asnind = asn;
- *nbwind = nbw;
+ *capiind = val[0];
+ *asnind = val[1];
+ *nbwind = val[2];
mutex_unlock(&indications_mutex);
return 0;
}
@@ -605,7 +599,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
/* Do not fail when CAPP timebase sync is not supported by OPAL */
of_node_get(np);
- if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+ if (!of_property_present(np, "ibm,capp-timebase-sync")) {
of_node_put(np);
dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
return;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 315c43f17dd3..409bd1c39663 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -579,7 +579,7 @@ static void release_afu_config_record(struct kobject *kobj)
kfree(cr);
}
-static struct kobj_type afu_config_record_type = {
+static const struct kobj_type afu_config_record_type = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_afu_config_record,
.default_groups = afu_cr_groups,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index da87abe93daf..74181b8c386b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -27,7 +27,8 @@
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
-#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
+#define CDSP1_DOMAIN_ID (4)
+#define FASTRPC_DEV_MAX 5 /* adsp, mdsp, slpi, cdsp, cdsp1 */
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
@@ -106,7 +107,7 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
- "sdsp", "cdsp"};
+ "sdsp", "cdsp", "cdsp1" };
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
@@ -2269,7 +2270,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return err;
}
- for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
+ for (i = 0; i < FASTRPC_DEV_MAX; i++) {
if (!strcmp(domains[i], domain)) {
domain_id = i;
break;
@@ -2327,13 +2328,14 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
- /* Unsigned PD offloading is only supported on CDSP*/
+ /* Unsigned PD offloading is only supported on CDSP and CDSP1 */
data->unsigned_support = false;
err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
if (err)
goto fdev_error;
break;
case CDSP_DOMAIN_ID:
+ case CDSP1_DOMAIN_ID:
data->unsigned_support = true;
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 88b91ad8e541..0cf31164b470 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -95,6 +95,7 @@
#include <linux/kallsyms.h>
#include <asm/sections.h>
+#include <asm/rwonce.h>
#define v1printk(a...) do { \
if (verbose) \
@@ -126,7 +127,6 @@ static int final_ack;
static int force_hwbrks;
static int hwbreaks_ok;
static int hw_break_val;
-static int hw_break_val2;
static int cont_instead_of_sstep;
static unsigned long cont_thread_id;
static unsigned long sstep_thread_id;
@@ -284,7 +284,7 @@ static void hw_rem_access_break(char *arg)
static void hw_break_val_access(void)
{
- hw_break_val2 = hw_break_val;
+ READ_ONCE(hw_break_val);
}
static void hw_break_val_write(void)
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 49868a45c0ad..4233dc4cc7d6 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -669,7 +669,6 @@ static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
static const struct file_operations lis3lv02d_misc_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = lis3lv02d_misc_read,
.open = lis3lv02d_misc_open,
.release = lis3lv02d_misc_release,
@@ -1038,7 +1037,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup-z-hi"))
pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
- if (of_get_property(np, "st,wakeup-threshold", &val))
+ if (!of_property_read_u32(np, "st,wakeup-threshold", &val))
pdata->wakeup_thresh = val;
if (of_property_read_bool(np, "st,wakeup2-x-lo"))
@@ -1053,7 +1052,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup2-z-hi"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
- if (of_get_property(np, "st,wakeup2-threshold", &val))
+ if (!of_property_read_u32(np, "st,wakeup2-threshold", &val))
pdata->wakeup_thresh2 = val;
if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 40c3fe26f76d..1f5aaf16e300 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1176,7 +1176,6 @@ static const struct file_operations mei_fops = {
.poll = mei_poll,
.fsync = mei_fsync,
.fasync = mei_fasync,
- .llseek = no_llseek
};
/**
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 3c2f743c58b0..4954553b7baa 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -126,7 +126,6 @@ static const struct file_operations ntsync_obj_fops = {
.release = ntsync_obj_release,
.unlocked_ioctl = ntsync_obj_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
@@ -233,7 +232,6 @@ static const struct file_operations ntsync_fops = {
.release = ntsync_char_release,
.unlocked_ioctl = ntsync_char_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice ntsync_misc = {
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 10125a22d5a5..d2028d6c6f08 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -97,8 +97,6 @@ struct ocxl_process_element {
__be32 software_state;
};
-int ocxl_create_cdev(struct ocxl_afu *afu);
-void ocxl_destroy_cdev(struct ocxl_afu *afu);
int ocxl_file_register_afu(struct ocxl_afu *afu);
void ocxl_file_unregister_afu(struct ocxl_afu *afu);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30bd7c39c261..701db2c5859b 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -279,7 +279,6 @@ static const struct file_operations phantom_file_ops = {
.unlocked_ioctl = phantom_ioctl,
.compat_ioctl = phantom_compat_ioctl,
.poll = phantom_poll,
- .llseek = no_llseek,
};
static irqreturn_t phantom_isr(int irq, void *data)
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 2ad4387c9837..1a7796ab3fad 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -185,10 +185,10 @@ static ssize_t tsl2550_store_power_state(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int ret;
- if (val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -217,10 +217,10 @@ static ssize_t tsl2550_store_operating_mode(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int ret;
- if (val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
if (data->power_state == 0)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f58bea534004..ef06a4d5d65b 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2734,7 +2734,6 @@ static const struct file_operations mmc_rpmb_fileops = {
.release = mmc_rpmb_chrdev_release,
.open = mmc_rpmb_chrdev_open,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_rpmb_ioctl_compat,
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 614257308516..d0aaccf72d78 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -448,6 +448,12 @@ config MTD_NAND_RENESAS
Enables support for the NAND controller found on Renesas R-Car
Gen3 and RZ/N1 SoC families.
+config MTD_NAND_TS72XX
+ tristate "ts72xx NAND controller"
+ depends on ARCH_EP93XX && HAS_IOMEM
+ help
+ Enables support for NAND controller on ts72xx SBCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 25120a4afada..d0b0e6b83568 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TS72XX) += technologic-nand-controller.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
diff --git a/drivers/mtd/nand/raw/technologic-nand-controller.c b/drivers/mtd/nand/raw/technologic-nand-controller.c
new file mode 100644
index 000000000000..0e45a6fd91dd
--- /dev/null
+++ b/drivers/mtd/nand/raw/technologic-nand-controller.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Technologic Systems TS72xx NAND controller driver
+ *
+ * Copyright (C) 2023 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Derived from: plat_nand.c
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+
+#define TS72XX_NAND_CONTROL_ADDR_LINE BIT(22) /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE BIT(23) /* 0xN0800000 */
+
+#define TS72XX_NAND_ALE BIT(0)
+#define TS72XX_NAND_CLE BIT(1)
+#define TS72XX_NAND_NCE BIT(2)
+
+#define TS72XX_NAND_CTRL_CLE (TS72XX_NAND_NCE | TS72XX_NAND_CLE)
+#define TS72XX_NAND_CTRL_ALE (TS72XX_NAND_NCE | TS72XX_NAND_ALE)
+
+struct ts72xx_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ void __iomem *base;
+ void __iomem *ctrl;
+ void __iomem *busy;
+};
+
+static inline struct ts72xx_nand_data *chip_to_ts72xx(struct nand_chip *chip)
+{
+ return container_of(chip, struct ts72xx_nand_data, chip);
+}
+
+static int ts72xx_nand_attach_chip(struct nand_chip *chip)
+{
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ return -EINVAL;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+
+static void ts72xx_nand_ctrl(struct nand_chip *chip, u8 value)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned char bits = ioread8(data->ctrl) & ~GENMASK(2, 0);
+
+ iowrite8(bits | value, data->ctrl);
+}
+
+static int ts72xx_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned int timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_CLE);
+ iowrite8(instr->ctx.cmd.opcode, data->base);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_ALE);
+ iowrite8_rep(data->base, instr->ctx.addr.addrs, instr->ctx.addr.naddrs);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ioread8_rep(data->base, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ iowrite8_rep(data->base, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = readb_poll_timeout(data->busy, status, status & BIT(5), 0, timeout_us);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int ts72xx_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ts72xx_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops ts72xx_nand_ops = {
+ .attach_chip = ts72xx_nand_attach_chip,
+ .exec_op = ts72xx_nand_exec_op,
+};
+
+static int ts72xx_nand_probe(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data;
+ struct fwnode_handle *child;
+ struct mtd_info *mtd;
+ int err;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ nand_controller_init(&data->controller);
+ data->controller.ops = &ts72xx_nand_ops;
+ data->chip.controller = &data->controller;
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+ data->ctrl = data->base + TS72XX_NAND_CONTROL_ADDR_LINE;
+ data->busy = data->base + TS72XX_NAND_BUSY_ADDR_LINE;
+
+ child = fwnode_get_next_child_node(dev_fwnode(&pdev->dev), NULL);
+ if (!child)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "ts72xx controller node should have exactly one child\n");
+
+ nand_set_flash_node(&data->chip, to_of_node(child));
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, 1);
+ if (err)
+ goto err_handle_put;
+
+ err = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ if (err)
+ goto err_clean_nand;
+
+ return 0;
+
+err_clean_nand:
+ nand_cleanup(&data->chip);
+err_handle_put:
+ fwnode_handle_put(child);
+ return err;
+}
+
+static void ts72xx_nand_remove(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data = platform_get_drvdata(pdev);
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ fwnode_handle_put(fwnode);
+}
+
+static const struct of_device_id ts72xx_id_table[] = {
+ { .compatible = "technologic,ts7200-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ts72xx_id_table);
+
+static struct platform_driver ts72xx_nand_driver = {
+ .driver = {
+ .name = "ts72xx-nand",
+ .of_match_table = ts72xx_id_table,
+ },
+ .probe = ts72xx_nand_probe,
+ .remove_new = ts72xx_nand_remove,
+};
+module_platform_driver(ts72xx_nand_driver);
+
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Technologic Systems TS72xx NAND controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 0d8f04cf03c5..6bb80d7714bc 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1095,7 +1095,6 @@ const struct file_operations ubi_vol_cdev_operations = {
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1105,5 +1104,4 @@ const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9ec3b8b6a0aa..d2a53961d8e2 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -470,7 +470,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.open = simple_open,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b560644ee1b1..b1bffd8e9a95 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5610,9 +5610,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1f495cfd7959..c2007cd86416 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -16,13 +16,12 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/platform_data/eth-ep93xx.h>
-
#define DRV_MODULE_NAME "ep93xx-eth"
#define RX_QUEUE_ENTRIES 64
@@ -738,25 +737,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
-{
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct ep93xx_priv));
- if (dev == NULL)
- return NULL;
-
- eth_hw_addr_set(dev, data->dev_addr);
-
- dev->ethtool_ops = &ep93xx_ethtool_ops;
- dev->netdev_ops = &ep93xx_netdev_ops;
-
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
- return dev;
-}
-
-
static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -786,27 +766,49 @@ static void ep93xx_eth_remove(struct platform_device *pdev)
static int ep93xx_eth_probe(struct platform_device *pdev)
{
- struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
+ void __iomem *base_addr;
+ struct device_node *np;
+ u8 addr[ETH_ALEN];
+ u32 phy_id;
int irq;
int err;
if (pdev == NULL)
return -ENODEV;
- data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENXIO;
- dev = ep93xx_dev_alloc(data);
+ base_addr = ioremap(mem->start, resource_size(mem));
+ if (!base_addr)
+ return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");
+
+ err = of_property_read_u32(np, "reg", &phy_id);
+ of_node_put(np);
+ if (err)
+ return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
if (dev == NULL) {
err = -ENOMEM;
goto err_out;
}
+
+ memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -822,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- ep->base_addr = ioremap(mem->start, resource_size(mem));
- if (ep->base_addr == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_out;
- }
+ ep->base_addr = base_addr;
ep->irq = irq;
- ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id = phy_id;
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
ep->mii.dev = dev;
@@ -857,12 +854,18 @@ err_out:
return err;
}
+static const struct of_device_id ep93xx_eth_of_ids[] = {
+ { .compatible = "cirrus,ep9301-eth" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
.remove_new = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
+ .of_match_table = ep93xx_eth_of_ids,
},
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 9af8ddb4a78f..a64d96effb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1887,10 +1887,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (callback) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem))
+ return -EBUSY;
+ } else {
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
@@ -2096,10 +2098,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ u16 opcode;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ opcode = work->opcode;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (mlx5_cmd_is_throttle_opcode(opcode))
+ up(&dev->cmd.vars.throttle_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 45ac8befba29..305ec19ccef1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -579,6 +579,33 @@ struct rtl8169_counters {
__le32 rx_multicast;
__le16 tx_aborted;
__le16 tx_underrun;
+ /* new since RTL8125 */
+ __le64 tx_octets;
+ __le64 rx_octets;
+ __le64 rx_multicast64;
+ __le64 tx_unicast64;
+ __le64 tx_broadcast64;
+ __le64 tx_multicast64;
+ __le32 tx_pause_on;
+ __le32 tx_pause_off;
+ __le32 tx_pause_all;
+ __le32 tx_deferred;
+ __le32 tx_late_collision;
+ __le32 tx_all_collision;
+ __le32 tx_aborted32;
+ __le32 align_errors32;
+ __le32 rx_frame_too_long;
+ __le32 rx_runt;
+ __le32 rx_pause_on;
+ __le32 rx_pause_off;
+ __le32 rx_pause_all;
+ __le32 rx_unknown_opcode;
+ __le32 rx_mac_error;
+ __le32 tx_underrun32;
+ __le32 rx_mac_missed;
+ __le32 rx_tcam_dropped;
+ __le32 tdu;
+ __le32 rdu;
};
struct rtl8169_tc_offsets {
@@ -681,6 +708,7 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9893c91af105..a7de5cf6b317 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1052,6 +1052,7 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
u32 tccr_mask;
+ u32 tx_max_frame_size;
u32 rx_max_frame_size;
u32 rx_buffer_size;
u32 rx_desc_size;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c7ec23688d56..d2a6518532f3 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However, only the frame data,
+ * excluding the CRC, is transferred to memory. To allow for the
+ * largest frames, add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -2674,6 +2682,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2696,6 +2705,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2721,6 +2731,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2770,6 +2781,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
+ .tx_max_frame_size = 1522,
.rx_max_frame_size = SZ_8K,
.rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
@@ -2981,7 +2993,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size -
+ ndev->max_mtu = info->tx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
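
The RFLR comment and the new tx_max_frame_size fields above reduce to simple arithmetic. Roughly, for the R-Car entries (both limits SZ_2K) and for gbeth (Tx limited to 1522 bytes):

/* RFLR    = rx_max_frame_size + ETH_FCS_LEN
 *         = 2048 + 4 = 2052 bytes written to the register
 *
 * max_mtu = tx_max_frame_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
 *         = 2048 - (14 + 4 + 4) = 2026 bytes on R-Car
 *         = 1522 - (14 + 4 + 4) = 1500 bytes on gbeth (the standard MTU)
 */
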
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c672f92d65e9..9319a2675e7b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
+ del_timer_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3895d7eecfc..e2140482270a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
rx_q->queue_index = queue;
rx_q->priv_data = priv;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0);
pp_params.pool_size = dma_conf->dma_rx_size;
num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
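
PP_FLAG_DMA_SYNC_DEV is now requested only when an XDP program is attached, i.e. only when the CPU may have dirtied a recycled buffer that the device will reuse; xdp_prog is not visible in this hunk and is presumably set up earlier in the function by the rest of the patch. A hedged sketch of the resulting page_pool setup (names and sizes are illustrative, not the stmmac code):

static struct page_pool *example_create_pool(struct device *dma_dev,
                                             unsigned int ring_size,
                                             bool has_xdp)
{
        struct page_pool_params pp = {
                .flags          = PP_FLAG_DMA_MAP |
                                  (has_xdp ? PP_FLAG_DMA_SYNC_DEV : 0),
                .pool_size      = ring_size,
                .dev            = dma_dev,
                .dma_dir        = has_xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
                .max_len        = PAGE_SIZE,    /* required when SYNC_DEV is set */
        };

        return page_pool_create(&pp);
}
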
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 832998bc020b..75ad2da1a37f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -386,6 +386,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return ret;
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ return 0;
}
/* Final adjustments for HW */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ea7d7c03f48e..fc35fcb22d94 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -736,15 +736,15 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of descriptors handled.
+ * Returns the number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
{
struct axidma_bd *cur_p;
unsigned int status;
+ int i, packets = 0;
dma_addr_t phys;
- int i;
for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
@@ -763,8 +763,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
napi_consume_skb(cur_p->skb, budget);
+ packets++;
+ }
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -780,7 +782,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- return i;
+ if (!force) {
+ lp->tx_bd_ci += i;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+ }
+
+ return packets;
}
/**
@@ -953,13 +961,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
u32 size = 0;
int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
+ &size, budget);
if (packets) {
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci %= lp->tx_bd_num;
-
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -1282,9 +1287,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_tx);
+ if (napi_schedule_prep(&lp->napi_tx)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_tx);
+ }
}
return IRQ_HANDLED;
@@ -1326,9 +1332,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_rx);
+ if (napi_schedule_prep(&lp->napi_rx)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_rx);
+ }
}
return IRQ_HANDLED;
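
Both interrupt handlers above now mask the DMA interrupt only when NAPI was actually armed, so a racing or spurious interrupt can no longer leave the source masked with no poll scheduled to unmask it. The general shape of the pattern, sketched with a hypothetical device (example_priv and example_mask_irq are placeholders):

static irqreturn_t example_irq(int irq, void *data)
{
        struct example_priv *priv = data;

        if (napi_schedule_prep(&priv->napi)) {
                /* Mask the source only once a poll is guaranteed to run;
                 * the poll routine unmasks it after napi_complete_done().
                 */
                example_mask_irq(priv);
                __napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}
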
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6ed38a3cdd73..3bf6785f9057 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -37,8 +37,6 @@
#include <linux/semaphore.h>
#include <linux/refcount.h>
-#define SIXPACK_VERSION "Revision: 0.3.0"
-
/* sixpack priority commands */
#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
#define SIXP_TX_URUN 0x48 /* transmit overrun */
@@ -88,22 +86,18 @@ struct sixpack {
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
- unsigned char raw_buf[4];
- unsigned char cooked_buf[400];
+ u8 raw_buf[4];
+ u8 cooked_buf[400];
unsigned int rx_count;
unsigned int rx_count_cooked;
spinlock_t rxlock;
- int mtu; /* Our mtu (to spot changes!) */
- int buffsize; /* Max buffers sizes */
-
unsigned long flags; /* Flag values/ mode etc */
unsigned char mode; /* 6pack mode */
@@ -113,8 +107,8 @@ struct sixpack {
unsigned char slottime;
unsigned char duplex;
unsigned char led_state;
- unsigned char status;
- unsigned char status1;
+ u8 status;
+ u8 status1;
unsigned char status2;
unsigned char tx_enable;
unsigned char tnc_state;
@@ -128,7 +122,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, const unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const u8 *, size_t);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -167,7 +161,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
unsigned char *msg, *p = icp;
int actual, count;
- if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
+ if (len > AX25_MTU + 73) {
msg = "oversized transmit packet!";
goto out_drop;
}
@@ -333,7 +327,7 @@ static void sp_bump(struct sixpack *sp, char cmd)
{
struct sk_buff *skb;
int count;
- unsigned char *ptr;
+ u8 *ptr;
count = sp->rcount + 1;
@@ -431,7 +425,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
- int count1;
+ size_t count1;
if (!count)
return;
@@ -544,7 +538,7 @@ static inline int tnc_init(struct sixpack *sp)
*/
static int sixpack_open(struct tty_struct *tty)
{
- char *rbuff = NULL, *xbuff = NULL;
+ char *xbuff = NULL;
struct net_device *dev;
struct sixpack *sp;
unsigned long len;
@@ -574,10 +568,8 @@ static int sixpack_open(struct tty_struct *tty)
len = dev->mtu * 2;
- rbuff = kmalloc(len + 4, GFP_KERNEL);
xbuff = kmalloc(len + 4, GFP_KERNEL);
-
- if (rbuff == NULL || xbuff == NULL) {
+ if (xbuff == NULL) {
err = -ENOBUFS;
goto out_free;
}
@@ -586,11 +578,8 @@ static int sixpack_open(struct tty_struct *tty)
sp->tty = tty;
- sp->rbuff = rbuff;
sp->xbuff = xbuff;
- sp->mtu = AX25_MTU + 73;
- sp->buffsize = len;
sp->rcount = 0;
sp->rx_count = 0;
sp->rx_count_cooked = 0;
@@ -631,7 +620,6 @@ static int sixpack_open(struct tty_struct *tty)
out_free:
kfree(xbuff);
- kfree(rbuff);
free_netdev(dev);
@@ -676,7 +664,6 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
- kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
@@ -756,21 +743,14 @@ static struct tty_ldisc_ops sp_ldisc = {
/* Initialize 6pack control device -- register 6pack line discipline */
-static const char msg_banner[] __initconst = KERN_INFO \
- "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
-static const char msg_regfail[] __initconst = KERN_ERR \
- "6pack: can't register line discipline (err = %d)\n";
-
static int __init sixpack_init_driver(void)
{
int status;
- printk(msg_banner);
-
/* Register the provided line protocol discipline */
status = tty_register_ldisc(&sp_ldisc);
if (status)
- printk(msg_regfail, status);
+ pr_err("6pack: can't register line discipline (err = %d)\n", status);
return status;
}
@@ -820,9 +800,9 @@ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
/* decode 4 sixpack-encoded bytes into 3 data bytes */
-static void decode_data(struct sixpack *sp, unsigned char inbyte)
+static void decode_data(struct sixpack *sp, u8 inbyte)
{
- unsigned char *buf;
+ u8 *buf;
if (sp->rx_count != 3) {
sp->raw_buf[sp->rx_count++] = inbyte;
@@ -848,9 +828,9 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
/* identify and execute a 6pack priority command byte */
-static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
+static void decode_prio_command(struct sixpack *sp, u8 cmd)
{
- int actual;
+ ssize_t actual;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
@@ -898,9 +878,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
/* identify and execute a standard 6pack command byte */
-static void decode_std_command(struct sixpack *sp, unsigned char cmd)
+static void decode_std_command(struct sixpack *sp, u8 cmd)
{
- unsigned char checksum = 0, rest = 0;
+ u8 checksum = 0, rest = 0;
short i;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
@@ -948,10 +928,10 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count)
{
- unsigned char inbyte;
- int count1;
+ size_t count1;
+ u8 inbyte;
for (count1 = 0; count1 < count; count1++) {
inbyte = pre_rbuff[count1];
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index f39bbe255497..e63720ec3238 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -64,18 +64,18 @@ struct mctp_serial {
u16 txfcs, rxfcs, rxfcs_rcvd;
unsigned int txlen, rxlen;
unsigned int txpos, rxpos;
- unsigned char txbuf[BUFSIZE],
+ u8 txbuf[BUFSIZE],
rxbuf[BUFSIZE];
};
-static bool needs_escape(unsigned char c)
+static bool needs_escape(u8 c)
{
return c == BYTE_ESC || c == BYTE_FRAME;
}
-static int next_chunk_len(struct mctp_serial *dev)
+static unsigned int next_chunk_len(struct mctp_serial *dev)
{
- int i;
+ unsigned int i;
/* either we have no bytes to send ... */
if (dev->txpos == dev->txlen)
@@ -99,7 +99,7 @@ static int next_chunk_len(struct mctp_serial *dev)
return i;
}
-static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
{
return dev->tty->ops->write(dev->tty, buf, len);
}
@@ -108,9 +108,10 @@ static void mctp_serial_tx_work(struct work_struct *work)
{
struct mctp_serial *dev = container_of(work, struct mctp_serial,
tx_work);
- unsigned char c, buf[3];
unsigned long flags;
- int len, txlen;
+ ssize_t txlen;
+ unsigned int len;
+ u8 c, buf[3];
spin_lock_irqsave(&dev->lock, flags);
@@ -293,7 +294,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
dev->netdev->stats.rx_bytes += dev->rxlen;
}
-static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -323,7 +324,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -347,7 +348,7 @@ static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push(struct mctp_serial *dev, u8 c)
{
switch (dev->rxstate) {
case STATE_IDLE:
@@ -394,7 +395,7 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
- int i;
+ size_t i;
if (!netif_running(dev->netdev))
return;
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index a1f91ff8ec56..41e80f78b316 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -1414,7 +1414,6 @@ out:
static const struct file_operations nsim_nexthop_bucket_activity_fops = {
.open = simple_open,
.write = nsim_nexthop_bucket_activity_write,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 524627a36c6f..dac6464b5fe2 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
- ret = aqr_wait_reset_complete(phydev);
- if (ret)
- return ret;
-
- /* Check if the firmware is not already loaded by pooling
- * the current version returned by the PHY. If 0 is returned,
- * no firmware is loaded.
+ /* Check if the firmware is not already loaded by polling
+ * the current version returned by the PHY.
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (ret > 0)
- goto exit;
-
- ret = aqr_firmware_load_nvmem(phydev);
- if (!ret)
- goto exit;
-
- ret = aqr_firmware_load_fs(phydev);
- if (ret)
+ ret = aqr_wait_reset_complete(phydev);
+ switch (ret) {
+ case 0:
+ /* Some firmware is loaded => do nothing */
+ return 0;
+ case -ETIMEDOUT:
+ /* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling.
+ * We don't have full confidence that no firmware is loaded (in
+ * theory it might just not have loaded yet), but we will
+ * assume that, and load a new image.
+ */
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ return ret;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+ break;
+ default:
+ /* PHY read error, propagate it to the caller */
return ret;
+ }
-exit:
return 0;
}
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 0516ac02c3f8..201c8df93fad 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
{
return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
- VEND1_GLOBAL_LED_DRIVE_VDD, enable);
+ VEND1_GLOBAL_LED_DRIVE_VDD,
+ enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0);
}
int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e982e9ce44a5..4d156d406bab 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev,
}
}
+#define AQR_FW_WAIT_SLEEP_US 20000
+#define AQR_FW_WAIT_TIMEOUT_US 2000000
+
/* If we configure settings whilst firmware is still initializing the chip,
* then these settings may be overwritten. Therefore make sure chip
* initialization has completed. Use presence of the firmware ID as
@@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
int aqr_wait_reset_complete(struct phy_device *phydev)
{
- int val;
+ int ret, val;
+
+ ret = read_poll_timeout(phy_read_mmd, val, val != 0,
+ AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US,
+ false, phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID);
+ if (val < 0) {
+ phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n",
+ ERR_PTR(val));
+ return val;
+ }
- return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_FW_ID, val, val != 0,
- 20000, 2000000, false);
+ return ret;
}
static void aqr107_chip_info(struct phy_device *phydev)
@@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_active_low;
- int ret, index = 0;
+ int ret;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Restore LED polarity state after reset */
for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
- ret = aqr_phy_led_active_low_set(phydev, index, led_active_low);
+ ret = aqr_phy_led_active_low_set(phydev, led_active_low, true);
if (ret)
return ret;
- index++;
}
return 0;
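
The config_init fix above relies on for_each_set_bit() already producing the LED index for every bit set in leds_active_low, so that index is passed straight to aqr_phy_led_active_low_set() and the polarity is forced to active-low (true); the separate counter was redundant. A small illustration of the iterator with made-up names:

#include <linux/bitops.h>

#define EXAMPLE_MAX_LEDS        3       /* stand-in for the driver's LED count */

static void example_restore_polarity(unsigned long active_low_mask)
{
        unsigned int index;

        /* Visits only the set bits; 'index' is the bit position (LED number). */
        for_each_set_bit(index, &active_low_mask, EXAMPLE_MAX_LEDS)
                pr_info("LED %u: restore active-low polarity\n", index);
}
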
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 77574f7a3bd4..5aa41d5f7765 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1162,7 +1162,6 @@ static const struct file_operations tap_fops = {
.read_iter = tap_read_iter,
.write_iter = tap_write_iter,
.poll = tap_poll,
- .llseek = no_llseek,
.unlocked_ioctl = tap_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5f77faef0ff1..9a0f6eb32016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3543,7 +3543,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = tun_chr_read_iter,
.write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 18eb5ba436df..2506aa8c603e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
- else
- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
+ if (!usbnet_going_away(dev)) {
+ if (!schedule_work(&dev->kevent))
+ netdev_dbg(dev->net,
+ "kevent %s may have been dropped\n",
+ usbnet_event_names[work]);
+ else
+ netdev_dbg(dev->net,
+ "kevent %s scheduled\n", usbnet_event_names[work]);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
tasklet_schedule (&dev->bh);
break;
case 0:
- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ if (!usbnet_going_away(dev))
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
- del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ del_timer_sync(&dev->delay);
+ tasklet_kill(&dev->bh);
cancel_work_sync(&dev->kevent);
+
+ /* We have cyclic dependencies. These calls are needed
+ * to break the cycle. We cannot fall into the gaps because
+ * we have a flag.
+ */
+ tasklet_kill(&dev->bh);
+ del_timer_sync(&dev->delay);
+ cancel_work_sync(&dev->kevent);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1171,7 +1186,8 @@ fail_halt:
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1196,7 +1212,8 @@ fail_halt:
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
+ !usbnet_going_away(dev) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
@@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (!dev)
return;
+ usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
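
Every hunk in this file gates scheduling (kevent work, rx submission, bh tasklet) on a going-away state that usbnet_disconnect() sets first, so teardown can cancel deferred work without it being re-armed behind its back. The bare pattern, sketched with a hypothetical device structure rather than the usbnet internals:

struct example_dev {
        unsigned long flags;
        struct work_struct kevent;
};

enum { EXAMPLE_GOING_AWAY };            /* hypothetical flag bit */

static void example_defer_kevent(struct example_dev *dev)
{
        if (test_bit(EXAMPLE_GOING_AWAY, &dev->flags))
                return;                 /* tearing down: do not re-arm */
        schedule_work(&dev->kevent);
}

static void example_disconnect(struct example_dev *dev)
{
        set_bit(EXAMPLE_GOING_AWAY, &dev->flags);       /* stop new scheduling first */
        cancel_work_sync(&dev->kevent);                 /* then flush what is pending */
}
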
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6f4781ec2b36..f8131f92a392 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1807,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct page *page = virt_to_head_page(buf);
struct sk_buff *skb;
+ /* We passed the address of virtnet header to virtio-core,
+ * so truncate the padding.
+ */
+ buf -= VIRTNET_RX_PAD + xdp_headroom;
+
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
@@ -2422,8 +2427,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!buf))
return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ buf += VIRTNET_RX_PAD + xdp_headroom;
+
+ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
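
The two virtio_net hunks are two halves of one bookkeeping change: the fill path now advances the buffer pointer past the header padding before handing it to the virtio core, so the completion path must step back by the same offset to recover the start of the allocation. In sketch form, reusing the driver's constants but hypothetical local names (exposed, base):

/* fill path: expose only the payload area to the device */
void *exposed = buf + VIRTNET_RX_PAD + xdp_headroom;

/* completion path: 'exposed' is what comes back from the virtqueue;
 * undo the same offset to get back to the buffer (and head page) base.
 */
void *base = exposed - (VIRTNET_RX_PAD + xdp_headroom);
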
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index df53dd1d7e74..da72fd2d541f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1184,7 +1184,6 @@ static ssize_t bus_reset_write(struct file *file, const char __user *user_buf,
static const struct file_operations bus_reset_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = bus_reset_write,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 99a541d442bb..49a6aff42376 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3768,7 +3768,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
}
const struct file_operations iwl_dbgfs_d3_test_ops = {
- .llseek = no_llseek,
.open = iwl_mvm_d3_test_open,
.read = iwl_mvm_d3_test_read,
.release = iwl_mvm_d3_test_release,
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 35c8fbbba10e..f55d60922b87 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
unsigned long flags;
int err, err1;
+ /*
+ * Don't bother to submit the request to the device if the device is
+ * not activated.
+ */
+ if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
+ dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
+ return -EIO;
+ }
+
might_sleep();
req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
if (!req_data)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ca9959a8fb9e..ba6508455e18 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2468,11 +2468,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- /* Flush write to device (required if transport is PCI) */
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
- if (ret)
- return ret;
-
/* CAP value may change after initial CC write */
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
if (ret)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 1d769c842fbf..b9b79ccfabf8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,7 +3,6 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
-#include <linux/bio-integrity.h>
#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
@@ -153,11 +152,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
bio_set_dev(bio, bdev);
if (has_metadata) {
- ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
- meta_seed);
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
+ meta_seed);
if (ret)
goto out_unmap;
- req->cmd_flags |= REQ_INTEGRITY;
}
return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 518e22dd4f9b..48e7a8906d01 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -421,6 +421,9 @@ static bool nvme_available_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns;
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ return false;
+
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
@@ -648,7 +651,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
rc = device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_attr_groups);
if (rc) {
- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
return;
}
nvme_add_ns_head_cdev(head);
@@ -969,11 +972,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- kblockd_schedule_work(&head->requeue_work);
- if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
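
The ordering in nvme_mpath_shutdown_disk() above is deliberate: clear NVME_NSHEAD_DISK_LIVE, wait out in-flight submitters with synchronize_srcu(), and only then kick the requeue work, so requeued bios observe the cleared flag in nvme_available_path() and are failed rather than requeued forever. A compressed sketch of that sequence with placeholder names (DISK_LIVE and the head fields mirror the driver but are not the full code):

/* teardown side */
clear_bit(DISK_LIVE, &head->flags);     /* 1: submitters will now see "no path" */
synchronize_srcu(&head->srcu);          /* 2: wait for submitters that already checked */
schedule_work(&head->requeue_work);     /* 3: requeued I/O can now be failed */

/* submission side, under srcu_read_lock(&head->srcu) */
if (!test_bit(DISK_LIVE, &head->flags))
        return false;                   /* no usable path left */
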
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 15b5e06039a5..c8fd0e8f0237 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1496,7 +1496,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
req->metadata_sgl->sg_table.sgl =
(struct scatterlist *)(req->metadata_sgl + 1);
ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
- blk_rq_count_integrity_sg(rq->q, rq->bio),
+ rq->nr_integrity_segments,
req->metadata_sgl->sg_table.sgl,
NVME_INLINE_METADATA_SG_CNT);
if (unlikely(ret)) {
@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
goto out_unmap_sg;
}
- req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
- rq->bio, req->metadata_sgl->sg_table.sgl);
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+ req->metadata_sgl->sg_table.sgl);
*pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index eb345551d6fe..b68a9e5f1ea3 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -767,6 +767,7 @@ static struct attribute *nvme_tls_attrs[] = {
&dev_attr_tls_key.attr,
&dev_attr_tls_configured_key.attr,
&dev_attr_tls_keyring.attr,
+ NULL,
};
static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 283134498fbc..d2c384f58028 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -363,8 +363,7 @@ config NVMEM_SUNXI_SID
config NVMEM_U_BOOT_ENV
tristate "U-Boot environment variables support"
depends on OF && MTD
- select CRC32
- select GENERIC_NET_UTILS
+ select NVMEM_LAYOUT_U_BOOT_ENV
help
U-Boot stores its setup as environment variables. This driver adds
support for verifying & exporting such data. It also exposes variables
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index cf920542f939..1ba494497698 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -14,8 +14,9 @@
#include <linux/slab.h>
enum fuse_type {
- FUSE_FSB = 1,
- FUSE_ELE = 2,
+ FUSE_FSB = BIT(0),
+ FUSE_ELE = BIT(1),
+ FUSE_ECC = BIT(2),
FUSE_INVALID = -1
};
@@ -93,7 +94,10 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
continue;
}
- *buf++ = readl_relaxed(reg + (i << 2));
+ if (type & FUSE_ECC)
+ *buf++ = readl_relaxed(reg + (i << 2)) & GENMASK(15, 0);
+ else
+ *buf++ = readl_relaxed(reg + (i << 2));
}
memcpy(val, (u8 *)p, bytes);
@@ -155,8 +159,30 @@ static const struct ocotp_devtype_data imx93_ocotp_data = {
},
};
+static const struct ocotp_devtype_data imx95_ocotp_data = {
+ .reg_off = 0x8000,
+ .reg_read = imx_ocotp_reg_read,
+ .size = 2048,
+ .num_entry = 12,
+ .entry = {
+ { 0, 1, FUSE_FSB | FUSE_ECC },
+ { 7, 1, FUSE_FSB | FUSE_ECC },
+ { 9, 3, FUSE_FSB | FUSE_ECC },
+ { 12, 24, FUSE_FSB },
+ { 36, 2, FUSE_FSB | FUSE_ECC },
+ { 38, 14, FUSE_FSB },
+ { 63, 1, FUSE_ELE },
+ { 128, 16, FUSE_ELE },
+ { 188, 1, FUSE_ELE },
+ { 317, 2, FUSE_FSB | FUSE_ECC },
+ { 320, 7, FUSE_FSB },
+ { 328, 184, FUSE_FSB }
+ },
+};
+
static const struct of_device_id imx_ele_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, },
+ { .compatible = "fsl,imx95-ocotp", .data = &imx95_ocotp_data, },
{},
};
MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids);
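
Turning fuse_type into bit flags lets one table row combine attributes (the i.MX95 rows use FUSE_FSB | FUSE_ECC), and the read path tests each flag on its own. A rough sketch of the per-word handling, mirroring only what the hunk above does:

u32 word = readl_relaxed(reg + (i << 2));

/* ECC-protected rows: keep only the low 16 bits of fuse data,
 * matching the GENMASK(15, 0) masking added above.
 */
if (type & FUSE_ECC)
        word &= GENMASK(15, 0);
*buf++ = word;
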
diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig
index 9c6e672fc350..5e586dfebe47 100644
--- a/drivers/nvmem/layouts/Kconfig
+++ b/drivers/nvmem/layouts/Kconfig
@@ -26,6 +26,17 @@ config NVMEM_LAYOUT_ONIE_TLV
If unsure, say N.
+config NVMEM_LAYOUT_U_BOOT_ENV
+ tristate "U-Boot environment variables layout"
+ select CRC32
+ select GENERIC_NET_UTILS
+ help
+ U-Boot stores its setup as environment variables. This driver adds
+ support for verifying & exporting such data. It also exposes variables
+ as NVMEM cells so they can be referenced by other drivers.
+
+ If unsure, say N.
+
endmenu
endif
diff --git a/drivers/nvmem/layouts/Makefile b/drivers/nvmem/layouts/Makefile
index 2974bd7d33ed..4940c9db0665 100644
--- a/drivers/nvmem/layouts/Makefile
+++ b/drivers/nvmem/layouts/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_NVMEM_LAYOUT_SL28_VPD) += sl28vpd.o
obj-$(CONFIG_NVMEM_LAYOUT_ONIE_TLV) += onie-tlv.o
+obj-$(CONFIG_NVMEM_LAYOUT_U_BOOT_ENV) += u-boot-env.o
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
new file mode 100644
index 000000000000..731e6f4f12b2
--- /dev/null
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 - 2023 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <linux/if_ether.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "u-boot-env.h"
+
+struct u_boot_env_image_single {
+ __le32 crc32;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+ __le32 crc32;
+ u8 mark;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_broadcom {
+ __le32 magic;
+ __le32 len;
+ __le32 crc32;
+ DECLARE_FLEX_ARRAY(uint8_t, data);
+} __packed;
+
+static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ u8 mac[ETH_ALEN];
+
+ if (bytes != 3 * ETH_ALEN - 1)
+ return -EINVAL;
+
+ if (!mac_pton(buf, mac))
+ return -EINVAL;
+
+ if (index)
+ eth_addr_add(mac, index);
+
+ ether_addr_copy(buf, mac);
+
+ return 0;
+}
+
+static int u_boot_env_parse_cells(struct device *dev, struct nvmem_device *nvmem, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+{
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+
+ for (var = data;
+ var < data + data_len && *var;
+ var = value + strlen(value) + 1) {
+ struct nvmem_cell_info info = {};
+
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+ info.name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!info.name)
+ return -ENOMEM;
+ info.offset = data_offset + value - data;
+ info.bytes = strlen(value);
+ info.np = of_get_child_by_name(dev->of_node, info.name);
+ if (!strcmp(var, "ethaddr")) {
+ info.raw_len = strlen(value);
+ info.bytes = ETH_ALEN;
+ info.read_post_process = u_boot_env_read_post_process_ethaddr;
+ }
+
+ nvmem_add_one_cell(nvmem, &info);
+ }
+
+ return 0;
+}
+
+int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ enum u_boot_env_format format)
+{
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+ __le32 *crc32_addr;
+ size_t data_offset;
+ size_t data_len;
+ size_t dev_size;
+ uint32_t crc32;
+ uint32_t calc;
+ uint8_t *buf;
+ int bytes;
+ int err;
+
+ dev_size = nvmem_dev_size(nvmem);
+
+ buf = kzalloc(dev_size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
+ if (bytes < 0) {
+ err = bytes;
+ goto err_kfree;
+ } else if (bytes != dev_size) {
+ err = -EIO;
+ goto err_kfree;
+ }
+
+ switch (format) {
+ case U_BOOT_FORMAT_SINGLE:
+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+ data_offset = offsetof(struct u_boot_env_image_single, data);
+ break;
+ case U_BOOT_FORMAT_REDUNDANT:
+ crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ break;
+ case U_BOOT_FORMAT_BROADCOM:
+ crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ break;
+ }
+
+ if (dev_size < data_offset) {
+ dev_err(dev, "Device too small for u-boot-env\n");
+ err = -EIO;
+ goto err_kfree;
+ }
+
+ crc32_addr = (__le32 *)(buf + crc32_offset);
+ crc32 = le32_to_cpu(*crc32_addr);
+ crc32_data_len = dev_size - crc32_data_offset;
+ data_len = dev_size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+ err = -EINVAL;
+ goto err_kfree;
+ }
+
+ buf[dev_size - 1] = '\0';
+ err = u_boot_env_parse_cells(dev, nvmem, buf, data_offset, data_len);
+
+err_kfree:
+ kfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(u_boot_env_parse);
+
+static int u_boot_env_add_cells(struct nvmem_layout *layout)
+{
+ struct device *dev = &layout->dev;
+ enum u_boot_env_format format;
+
+ format = (uintptr_t)device_get_match_data(dev);
+
+ return u_boot_env_parse(dev, layout->nvmem, format);
+}
+
+static int u_boot_env_probe(struct nvmem_layout *layout)
+{
+ layout->add_cells = u_boot_env_add_cells;
+
+ return nvmem_layout_register(layout);
+}
+
+static void u_boot_env_remove(struct nvmem_layout *layout)
+{
+ nvmem_layout_unregister(layout);
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, },
+ {},
+};
+
+static struct nvmem_layout_driver u_boot_env_layout = {
+ .driver = {
+ .name = "u-boot-env-layout",
+ .of_match_table = u_boot_env_of_match_table,
+ },
+ .probe = u_boot_env_probe,
+ .remove = u_boot_env_remove,
+};
+module_nvmem_layout_driver(u_boot_env_layout);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
+MODULE_DESCRIPTION("NVMEM layout driver for U-Boot environment variables");
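
u_boot_env_parse() above assumes the usual U-Boot environment image: a header carrying a CRC32 (plus a mark byte or the Broadcom magic/len fields, depending on the format) followed by NUL-terminated "name=value" strings ending with an empty string. A small illustration of the data walked by u_boot_env_parse_cells(), with made-up variables:

/* After the header, the blob looks like:
 *
 *   "bootcmd=run distro_bootcmd\0"
 *   "baudrate=115200\0"
 *   "ethaddr=aa:bb:cc:dd:ee:ff\0"
 *   "\0"                 <- empty string ends the list
 *
 * Each name becomes an NVMEM cell whose offset/bytes point at the value;
 * "ethaddr" additionally gets the post-process hook, which expects the
 * 17-character MAC string (3 * ETH_ALEN - 1) and rewrites it in place
 * as 6 binary bytes.
 */
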
diff --git a/drivers/nvmem/layouts/u-boot-env.h b/drivers/nvmem/layouts/u-boot-env.h
new file mode 100644
index 000000000000..dd5f280ac047
--- /dev/null
+++ b/drivers/nvmem/layouts/u-boot-env.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H
+#define _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H
+
+enum u_boot_env_format {
+ U_BOOT_FORMAT_SINGLE,
+ U_BOOT_FORMAT_REDUNDANT,
+ U_BOOT_FORMAT_BROADCOM,
+};
+
+int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ enum u_boot_env_format format);
+
+#endif /* ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H */
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index 38f5d9df39cd..30d55b111a64 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -159,7 +159,6 @@ static int sp_ocotp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct nvmem_device *nvmem;
struct sp_ocotp_priv *otp;
- struct resource *res;
int ret;
otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
@@ -168,13 +167,11 @@ static int sp_ocotp_probe(struct platform_device *pdev)
otp->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio");
- otp->base[HB_GPIO] = devm_ioremap_resource(dev, res);
+ otp->base[HB_GPIO] = devm_platform_ioremap_resource_byname(pdev, "hb_gpio");
if (IS_ERR(otp->base[HB_GPIO]))
return PTR_ERR(otp->base[HB_GPIO]);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx");
- otp->base[OTPRX] = devm_ioremap_resource(dev, res);
+ otp->base[OTPRX] = devm_platform_ioremap_resource_byname(pdev, "otprx");
if (IS_ERR(otp->base[OTPRX]))
return PTR_ERR(otp->base[OTPRX]);
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index 593f0bf4a395..ced414fc9e60 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -3,23 +3,15 @@
* Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
*/
-#include <linux/crc32.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-enum u_boot_env_format {
- U_BOOT_FORMAT_SINGLE,
- U_BOOT_FORMAT_REDUNDANT,
- U_BOOT_FORMAT_BROADCOM,
-};
+#include "layouts/u-boot-env.h"
struct u_boot_env {
struct device *dev;
@@ -29,24 +21,6 @@ struct u_boot_env {
struct mtd_info *mtd;
};
-struct u_boot_env_image_single {
- __le32 crc32;
- uint8_t data[];
-} __packed;
-
-struct u_boot_env_image_redundant {
- __le32 crc32;
- u8 mark;
- uint8_t data[];
-} __packed;
-
-struct u_boot_env_image_broadcom {
- __le32 magic;
- __le32 len;
- __le32 crc32;
- DECLARE_FLEX_ARRAY(uint8_t, data);
-} __packed;
-
static int u_boot_env_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
@@ -69,141 +43,6 @@ static int u_boot_env_read(void *context, unsigned int offset, void *val,
return 0;
}
-static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
- unsigned int offset, void *buf, size_t bytes)
-{
- u8 mac[ETH_ALEN];
-
- if (bytes != 3 * ETH_ALEN - 1)
- return -EINVAL;
-
- if (!mac_pton(buf, mac))
- return -EINVAL;
-
- if (index)
- eth_addr_add(mac, index);
-
- ether_addr_copy(buf, mac);
-
- return 0;
-}
-
-static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
- size_t data_offset, size_t data_len)
-{
- struct nvmem_device *nvmem = priv->nvmem;
- struct device *dev = priv->dev;
- char *data = buf + data_offset;
- char *var, *value, *eq;
-
- for (var = data;
- var < data + data_len && *var;
- var = value + strlen(value) + 1) {
- struct nvmem_cell_info info = {};
-
- eq = strchr(var, '=');
- if (!eq)
- break;
- *eq = '\0';
- value = eq + 1;
-
- info.name = devm_kstrdup(dev, var, GFP_KERNEL);
- if (!info.name)
- return -ENOMEM;
- info.offset = data_offset + value - data;
- info.bytes = strlen(value);
- info.np = of_get_child_by_name(dev->of_node, info.name);
- if (!strcmp(var, "ethaddr")) {
- info.raw_len = strlen(value);
- info.bytes = ETH_ALEN;
- info.read_post_process = u_boot_env_read_post_process_ethaddr;
- }
-
- nvmem_add_one_cell(nvmem, &info);
- }
-
- return 0;
-}
-
-static int u_boot_env_parse(struct u_boot_env *priv)
-{
- struct nvmem_device *nvmem = priv->nvmem;
- struct device *dev = priv->dev;
- size_t crc32_data_offset;
- size_t crc32_data_len;
- size_t crc32_offset;
- __le32 *crc32_addr;
- size_t data_offset;
- size_t data_len;
- size_t dev_size;
- uint32_t crc32;
- uint32_t calc;
- uint8_t *buf;
- int bytes;
- int err;
-
- dev_size = nvmem_dev_size(nvmem);
-
- buf = kzalloc(dev_size, GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
-
- bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
- if (bytes < 0) {
- err = bytes;
- goto err_kfree;
- } else if (bytes != dev_size) {
- err = -EIO;
- goto err_kfree;
- }
-
- switch (priv->format) {
- case U_BOOT_FORMAT_SINGLE:
- crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
- data_offset = offsetof(struct u_boot_env_image_single, data);
- break;
- case U_BOOT_FORMAT_REDUNDANT:
- crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
- data_offset = offsetof(struct u_boot_env_image_redundant, data);
- break;
- case U_BOOT_FORMAT_BROADCOM:
- crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
- data_offset = offsetof(struct u_boot_env_image_broadcom, data);
- break;
- }
-
- if (dev_size < data_offset) {
- dev_err(dev, "Device too small for u-boot-env\n");
- err = -EIO;
- goto err_kfree;
- }
-
- crc32_addr = (__le32 *)(buf + crc32_offset);
- crc32 = le32_to_cpu(*crc32_addr);
- crc32_data_len = dev_size - crc32_data_offset;
- data_len = dev_size - data_offset;
-
- calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
- if (calc != crc32) {
- dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
- err = -EINVAL;
- goto err_kfree;
- }
-
- buf[dev_size - 1] = '\0';
- err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
-
-err_kfree:
- kfree(buf);
-err_out:
- return err;
-}
-
static int u_boot_env_probe(struct platform_device *pdev)
{
struct nvmem_config config = {
@@ -235,7 +74,7 @@ static int u_boot_env_probe(struct platform_device *pdev)
if (IS_ERR(priv->nvmem))
return PTR_ERR(priv->nvmem);
- return u_boot_env_parse(priv);
+ return u_boot_env_parse(dev, priv->nvmem, priv->format);
}
static const struct of_device_id u_boot_env_of_match_table[] = {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 18306569ef50..354536de564b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -194,6 +194,13 @@ config PINCTRL_DIGICOLOR
select PINMUX
select GENERIC_PINCONF
+config PINCTRL_EP93XX
+ bool
+ depends on ARCH_EP93XX || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+
config PINCTRL_EQUILIBRIUM
tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
depends on OF && HAS_IOMEM
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 3c2355150961..97823f52b972 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
+obj-$(CONFIG_PINCTRL_EP93XX) += pinctrl-ep93xx.o
obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
diff --git a/drivers/pinctrl/pinctrl-ep93xx.c b/drivers/pinctrl/pinctrl-ep93xx.c
new file mode 100644
index 000000000000..abafbbb8fd6a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ep93xx.c
@@ -0,0 +1,1434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the EP93xx pin controller
+ * based on linux/drivers/pinctrl/pinmux-gemini.c
+ *
+ * Copyright (C) 2022 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * This is a group-only pin controller.
+ */
+#include <linux/array_size.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME "pinctrl-ep93xx"
+
+enum ep93xx_pinctrl_model {
+ EP93XX_9301_PINCTRL,
+ EP93XX_9307_PINCTRL,
+ EP93XX_9312_PINCTRL,
+};
+
+struct ep93xx_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct ep93xx_regmap_adev *aux_dev;
+ struct regmap *map;
+ enum ep93xx_pinctrl_model model;
+};
+
+static void ep93xx_pinctrl_update_bits(struct ep93xx_pmx *pmx, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct ep93xx_regmap_adev *aux = pmx->aux_dev;
+
+ aux->update_bits(aux->map, aux->lock, reg, mask, val);
+}
+
+struct ep93xx_pin_group {
+ struct pingroup grp;
+ u32 mask;
+ u32 value;
+};
+
+#define PMX_GROUP(_name, _pins, _mask, _value) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name, _pins, ARRAY_SIZE(_pins)), \
+ .mask = _mask, \
+ .value = _value, \
+ }
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+
+/*
+ * There are several system configuration options selectable by the DeviceCfg and SysCfg
+ * registers. These registers provide the selection of several pin multiplexing options and also
+ * provide software access to the system reset configuration options. Please refer to the
+ * descriptions of the registers, “DeviceCfg” on page 5-25 and “SysCfg” on page 5-34, for a
+ * detailed explanation.
+ */
+#define EP93XX_SYSCON_DEVCFG_D1ONG BIT(30)
+#define EP93XX_SYSCON_DEVCFG_D0ONG BIT(29)
+#define EP93XX_SYSCON_DEVCFG_IONU2 BIT(28)
+#define EP93XX_SYSCON_DEVCFG_GONK BIT(27)
+#define EP93XX_SYSCON_DEVCFG_TONG BIT(26)
+#define EP93XX_SYSCON_DEVCFG_MONG BIT(25)
+#define EP93XX_SYSCON_DEVCFG_A2ONG BIT(22)
+#define EP93XX_SYSCON_DEVCFG_A1ONG BIT(21)
+#define EP93XX_SYSCON_DEVCFG_HONIDE BIT(11)
+#define EP93XX_SYSCON_DEVCFG_GONIDE BIT(10)
+#define EP93XX_SYSCON_DEVCFG_PONG BIT(9)
+#define EP93XX_SYSCON_DEVCFG_EONIDE BIT(8)
+#define EP93XX_SYSCON_DEVCFG_I2SONSSP BIT(7)
+#define EP93XX_SYSCON_DEVCFG_I2SONAC97 BIT(6)
+#define EP93XX_SYSCON_DEVCFG_RASONP3 BIT(4)
+
+#define PADS_MASK (GENMASK(30, 25) | BIT(22) | BIT(21) | GENMASK(11, 6) | BIT(4))
+#define PADS_MAXBIT 30
+
+/* Ordered by bit index */
+static const char * const ep93xx_padgroups[] = {
+ NULL, NULL, NULL, NULL,
+ "RasOnP3",
+ NULL,
+ "I2SonAC97",
+ "I2SonSSP",
+ "EonIDE",
+ "PonG",
+ "GonIDE",
+ "HonIDE",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "A1onG",
+ "A2onG",
+ NULL, NULL,
+ "MonG",
+ "TonG",
+ "GonK",
+ "IonU2",
+ "D0onG",
+ "D1onG",
+};
+
+/* ep9301, ep9302 */
+static const struct pinctrl_pin_desc ep9301_pins[] = {
+ PINCTRL_PIN(1, "CSn[7]"),
+ PINCTRL_PIN(2, "CSn[6]"),
+ PINCTRL_PIN(3, "CSn[3]"),
+ PINCTRL_PIN(4, "CSn[2]"),
+ PINCTRL_PIN(5, "CSn[1]"),
+ PINCTRL_PIN(6, "AD[25]"),
+ PINCTRL_PIN(7, "vdd_ring"),
+ PINCTRL_PIN(8, "gnd_ring"),
+ PINCTRL_PIN(9, "AD[24]"),
+ PINCTRL_PIN(10, "SDCLK"),
+ PINCTRL_PIN(11, "AD[23]"),
+ PINCTRL_PIN(12, "vdd_core"),
+ PINCTRL_PIN(13, "gnd_core"),
+ PINCTRL_PIN(14, "SDWEn"),
+ PINCTRL_PIN(15, "SDCSn[3]"),
+ PINCTRL_PIN(16, "SDCSn[2]"),
+ PINCTRL_PIN(17, "SDCSn[1]"),
+ PINCTRL_PIN(18, "SDCSn[0]"),
+ PINCTRL_PIN(19, "vdd_ring"),
+ PINCTRL_PIN(20, "gnd_ring"),
+ PINCTRL_PIN(21, "RASn"),
+ PINCTRL_PIN(22, "CASn"),
+ PINCTRL_PIN(23, "DQMn[1]"),
+ PINCTRL_PIN(24, "DQMn[0]"),
+ PINCTRL_PIN(25, "AD[22]"),
+ PINCTRL_PIN(26, "AD[21]"),
+ PINCTRL_PIN(27, "vdd_ring"),
+ PINCTRL_PIN(28, "gnd_ring"),
+ PINCTRL_PIN(29, "DA[15]"),
+ PINCTRL_PIN(30, "AD[7]"),
+ PINCTRL_PIN(31, "DA[14]"),
+ PINCTRL_PIN(32, "AD[6]"),
+ PINCTRL_PIN(33, "DA[13]"),
+ PINCTRL_PIN(34, "vdd_core"),
+ PINCTRL_PIN(35, "gnd_core"),
+ PINCTRL_PIN(36, "AD[5]"),
+ PINCTRL_PIN(37, "DA[12]"),
+ PINCTRL_PIN(38, "AD[4]"),
+ PINCTRL_PIN(39, "DA[11]"),
+ PINCTRL_PIN(40, "AD[3]"),
+ PINCTRL_PIN(41, "vdd_ring"),
+ PINCTRL_PIN(42, "gnd_ring"),
+ PINCTRL_PIN(43, "DA[10]"),
+ PINCTRL_PIN(44, "AD[2]"),
+ PINCTRL_PIN(45, "DA[9]"),
+ PINCTRL_PIN(46, "AD[1]"),
+ PINCTRL_PIN(47, "DA[8]"),
+ PINCTRL_PIN(48, "AD[0]"),
+ PINCTRL_PIN(49, "vdd_ring"),
+ PINCTRL_PIN(50, "gnd_ring"),
+ PINCTRL_PIN(51, "NC"),
+ PINCTRL_PIN(52, "NC"),
+ PINCTRL_PIN(53, "vdd_ring"),
+ PINCTRL_PIN(54, "gnd_ring"),
+ PINCTRL_PIN(55, "AD[15]"),
+ PINCTRL_PIN(56, "DA[7]"),
+ PINCTRL_PIN(57, "vdd_core"),
+ PINCTRL_PIN(58, "gnd_core"),
+ PINCTRL_PIN(59, "AD[14]"),
+ PINCTRL_PIN(60, "DA[6]"),
+ PINCTRL_PIN(61, "AD[13]"),
+ PINCTRL_PIN(62, "DA[5]"),
+ PINCTRL_PIN(63, "AD[12]"),
+ PINCTRL_PIN(64, "DA[4]"),
+ PINCTRL_PIN(65, "AD[11]"),
+ PINCTRL_PIN(66, "vdd_ring"),
+ PINCTRL_PIN(67, "gnd_ring"),
+ PINCTRL_PIN(68, "DA[3]"),
+ PINCTRL_PIN(69, "AD[10]"),
+ PINCTRL_PIN(70, "DA[2]"),
+ PINCTRL_PIN(71, "AD[9]"),
+ PINCTRL_PIN(72, "DA[1]"),
+ PINCTRL_PIN(73, "AD[8]"),
+ PINCTRL_PIN(74, "DA[0]"),
+ PINCTRL_PIN(75, "DSRn"),
+ PINCTRL_PIN(76, "DTRn"),
+ PINCTRL_PIN(77, "TCK"),
+ PINCTRL_PIN(78, "TDI"),
+ PINCTRL_PIN(79, "TDO"),
+ PINCTRL_PIN(80, "TMS"),
+ PINCTRL_PIN(81, "vdd_ring"),
+ PINCTRL_PIN(82, "gnd_ring"),
+ PINCTRL_PIN(83, "BOOT[1]"),
+ PINCTRL_PIN(84, "BOOT[0]"),
+ PINCTRL_PIN(85, "gnd_ring"),
+ PINCTRL_PIN(86, "NC"),
+ PINCTRL_PIN(87, "EECLK"),
+ PINCTRL_PIN(88, "EEDAT"),
+ PINCTRL_PIN(89, "ASYNC"),
+ PINCTRL_PIN(90, "vdd_core"),
+ PINCTRL_PIN(91, "gnd_core"),
+ PINCTRL_PIN(92, "ASDO"),
+ PINCTRL_PIN(93, "SCLK1"),
+ PINCTRL_PIN(94, "SFRM1"),
+ PINCTRL_PIN(95, "SSPRX1"),
+ PINCTRL_PIN(96, "SSPTX1"),
+ PINCTRL_PIN(97, "GRLED"),
+ PINCTRL_PIN(98, "RDLED"),
+ PINCTRL_PIN(99, "vdd_ring"),
+ PINCTRL_PIN(100, "gnd_ring"),
+ PINCTRL_PIN(101, "INT[3]"),
+ PINCTRL_PIN(102, "INT[1]"),
+ PINCTRL_PIN(103, "INT[0]"),
+ PINCTRL_PIN(104, "RTSn"),
+ PINCTRL_PIN(105, "USBm[0]"),
+ PINCTRL_PIN(106, "USBp[0]"),
+ PINCTRL_PIN(107, "ABITCLK"),
+ PINCTRL_PIN(108, "CTSn"),
+ PINCTRL_PIN(109, "RXD[0]"),
+ PINCTRL_PIN(110, "RXD[1]"),
+ PINCTRL_PIN(111, "vdd_ring"),
+ PINCTRL_PIN(112, "gnd_ring"),
+ PINCTRL_PIN(113, "TXD[0]"),
+ PINCTRL_PIN(114, "TXD[1]"),
+ PINCTRL_PIN(115, "CGPIO[0]"),
+ PINCTRL_PIN(116, "gnd_core"),
+ PINCTRL_PIN(117, "PLL_GND"),
+ PINCTRL_PIN(118, "XTALI"),
+ PINCTRL_PIN(119, "XTALO"),
+ PINCTRL_PIN(120, "PLL_VDD"),
+ PINCTRL_PIN(121, "vdd_core"),
+ PINCTRL_PIN(122, "gnd_ring"),
+ PINCTRL_PIN(123, "vdd_ring"),
+ PINCTRL_PIN(124, "RSTOn"),
+ PINCTRL_PIN(125, "PRSTn"),
+ PINCTRL_PIN(126, "CSn[0]"),
+ PINCTRL_PIN(127, "gnd_core"),
+ PINCTRL_PIN(128, "vdd_core"),
+ PINCTRL_PIN(129, "gnd_ring"),
+ PINCTRL_PIN(130, "vdd_ring"),
+ PINCTRL_PIN(131, "ADC[4]"),
+ PINCTRL_PIN(132, "ADC[3]"),
+ PINCTRL_PIN(133, "ADC[2]"),
+ PINCTRL_PIN(134, "ADC[1]"),
+ PINCTRL_PIN(135, "ADC[0]"),
+ PINCTRL_PIN(136, "ADC_VDD"),
+ PINCTRL_PIN(137, "RTCXTALI"),
+ PINCTRL_PIN(138, "RTCXTALO"),
+ PINCTRL_PIN(139, "ADC_GND"),
+ PINCTRL_PIN(140, "EGPIO[11]"),
+ PINCTRL_PIN(141, "EGPIO[10]"),
+ PINCTRL_PIN(142, "EGPIO[9]"),
+ PINCTRL_PIN(143, "EGPIO[8]"),
+ PINCTRL_PIN(144, "EGPIO[7]"),
+ PINCTRL_PIN(145, "EGPIO[6]"),
+ PINCTRL_PIN(146, "EGPIO[5]"),
+ PINCTRL_PIN(147, "EGPIO[4]"),
+ PINCTRL_PIN(148, "EGPIO[3]"),
+ PINCTRL_PIN(149, "gnd_ring"),
+ PINCTRL_PIN(150, "vdd_ring"),
+ PINCTRL_PIN(151, "EGPIO[2]"),
+ PINCTRL_PIN(152, "EGPIO[1]"),
+ PINCTRL_PIN(153, "EGPIO[0]"),
+ PINCTRL_PIN(154, "ARSTn"),
+ PINCTRL_PIN(155, "TRSTn"),
+ PINCTRL_PIN(156, "ASDI"),
+ PINCTRL_PIN(157, "USBm[2]"),
+ PINCTRL_PIN(158, "USBp[2]"),
+ PINCTRL_PIN(159, "WAITn"),
+ PINCTRL_PIN(160, "EGPIO[15]"),
+ PINCTRL_PIN(161, "gnd_ring"),
+ PINCTRL_PIN(162, "vdd_ring"),
+ PINCTRL_PIN(163, "EGPIO[14]"),
+ PINCTRL_PIN(164, "EGPIO[13]"),
+ PINCTRL_PIN(165, "EGPIO[12]"),
+ PINCTRL_PIN(166, "gnd_core"),
+ PINCTRL_PIN(167, "vdd_core"),
+ PINCTRL_PIN(168, "FGPIO[3]"),
+ PINCTRL_PIN(169, "FGPIO[2]"),
+ PINCTRL_PIN(170, "FGPIO[1]"),
+ PINCTRL_PIN(171, "gnd_ring"),
+ PINCTRL_PIN(172, "vdd_ring"),
+ PINCTRL_PIN(173, "CLD"),
+ PINCTRL_PIN(174, "CRS"),
+ PINCTRL_PIN(175, "TXERR"),
+ PINCTRL_PIN(176, "TXEN"),
+ PINCTRL_PIN(177, "MIITXD[0]"),
+ PINCTRL_PIN(178, "MIITXD[1]"),
+ PINCTRL_PIN(179, "MIITXD[2]"),
+ PINCTRL_PIN(180, "MIITXD[3]"),
+ PINCTRL_PIN(181, "TXCLK"),
+ PINCTRL_PIN(182, "RXERR"),
+ PINCTRL_PIN(183, "RXDVAL"),
+ PINCTRL_PIN(184, "MIIRXD[0]"),
+ PINCTRL_PIN(185, "MIIRXD[1]"),
+ PINCTRL_PIN(186, "MIIRXD[2]"),
+ PINCTRL_PIN(187, "gnd_ring"),
+ PINCTRL_PIN(188, "vdd_ring"),
+ PINCTRL_PIN(189, "MIIRXD[3]"),
+ PINCTRL_PIN(190, "RXCLK"),
+ PINCTRL_PIN(191, "MDIO"),
+ PINCTRL_PIN(192, "MDC"),
+ PINCTRL_PIN(193, "RDn"),
+ PINCTRL_PIN(194, "WRn"),
+ PINCTRL_PIN(195, "AD[16]"),
+ PINCTRL_PIN(196, "AD[17]"),
+ PINCTRL_PIN(197, "gnd_core"),
+ PINCTRL_PIN(198, "vdd_core"),
+ PINCTRL_PIN(199, "HGPIO[2]"),
+ PINCTRL_PIN(200, "HGPIO[3]"),
+ PINCTRL_PIN(201, "HGPIO[4]"),
+ PINCTRL_PIN(202, "HGPIO[5]"),
+ PINCTRL_PIN(203, "gnd_ring"),
+ PINCTRL_PIN(204, "vdd_ring"),
+ PINCTRL_PIN(205, "AD[18]"),
+ PINCTRL_PIN(206, "AD[19]"),
+ PINCTRL_PIN(207, "AD[20]"),
+ PINCTRL_PIN(208, "SDCLKEN"),
+};
+
+static const unsigned int ssp_ep9301_pins[] = {
+ 93, 94, 95, 96,
+};
+
+static const unsigned int ac97_ep9301_pins[] = {
+ 89, 92, 107, 154, 156,
+};
+
+/*
+ * The EP9307 processor has one PWM with a single output, PWMOUT.
+ * The EP9301, EP9302, EP9312 and EP9315 processors each have two PWMs
+ * with two outputs, PWMOUT and PWMO1. PWMO1 is an alternate function
+ * of EGPIO14.
+ */
+/* The EGPIO[14] pin (163) overlaps with pwm1 */
+static const unsigned int pwm_9301_pins[] = { 163 };
+
+static const unsigned int gpio1a_9301_pins[] = { 163 };
+
+/* ep9301/9302 have only pin 0 of GPIO Port C exposed */
+static const unsigned int gpio2a_9301_pins[] = { 115 };
+
+/* ep9301/9302 have only pins 4 and 5 of GPIO Port E exposed */
+static const unsigned int gpio4a_9301_pins[] = { 97, 98 };
+
+/* ep9301/9302 have only pins 4 and 5 of GPIO Port G exposed */
+static const unsigned int gpio6a_9301_pins[] = { 87, 88 };
+
+static const unsigned int gpio7a_9301_pins[] = { 199, 200, 201, 202 };
+
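+/*
+ * Each group carries a (mask, value) pair for the syscon DEVCFG
+ * register: set_mux() updates the masked bits to the given value, so
+ * two groups sharing the same pads (e.g. "ssp" and "i2s_on_ssp")
+ * select between functions by clearing or setting the same bit.
+ */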
+/* Groups for the ep9301/ep9302 SoC/package */
+static const struct ep93xx_pin_group ep9301_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("pwm1", pwm_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, EP93XX_SYSCON_DEVCFG_PONG),
+ PMX_GROUP("gpio1agrp", gpio1a_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, 0),
+ PMX_GROUP("gpio2agrp", gpio2a_9301_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio4agrp", gpio4a_9301_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9301_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9301_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+};
+
+static const struct pinctrl_pin_desc ep9307_pins[] = {
+ /* Row A */
+ PINCTRL_PIN(0, "CSn[1]"), /* A1 */
+ PINCTRL_PIN(1, "CSn[7]"), /* A2 */
+ PINCTRL_PIN(2, "SDCLKEN"), /* A3 */
+ PINCTRL_PIN(3, "DA[31]"), /* A4 */
+ PINCTRL_PIN(4, "DA[29]"), /* A5 */
+ PINCTRL_PIN(5, "DA[27]"), /* A6 */
+ PINCTRL_PIN(6, "HGPIO[2]"), /* A7 */
+ PINCTRL_PIN(7, "RDn"), /* A8 */
+ PINCTRL_PIN(8, "MIIRXD[3]"), /* A9 */
+ PINCTRL_PIN(9, "RXDVAL"), /* A10 */
+ PINCTRL_PIN(10, "MIITXD[1]"), /* A11 */
+ PINCTRL_PIN(11, "CRS"), /* A12 */
+ PINCTRL_PIN(12, "FGPIO[7]"), /* A13 */
+ PINCTRL_PIN(13, "FGPIO[0]"), /* A14 */
+ PINCTRL_PIN(14, "WAITn"), /* A15 */
+ PINCTRL_PIN(15, "USBm[2]"), /* A16 */
+ PINCTRL_PIN(16, "ASDI"), /* A17 */
+ /* Row B */
+ PINCTRL_PIN(17, "AD[25]"), /* B1 */
+ PINCTRL_PIN(18, "CSn[2]"), /* B2 */
+ PINCTRL_PIN(19, "CSn[6]"), /* B3 */
+ PINCTRL_PIN(20, "AD[20]"), /* B4 */
+ PINCTRL_PIN(21, "DA[30]"), /* B5 */
+ PINCTRL_PIN(22, "AD[18]"), /* B6 */
+ PINCTRL_PIN(23, "HGPIO[3]"), /* B7 */
+ PINCTRL_PIN(24, "AD[17]"), /* B8 */
+ PINCTRL_PIN(25, "RXCLK"), /* B9 */
+ PINCTRL_PIN(26, "MIIRXD[1]"), /* B10 */
+ PINCTRL_PIN(27, "MIITXD[2]"), /* B11 */
+ PINCTRL_PIN(28, "TXEN"), /* B12 */
+ PINCTRL_PIN(29, "FGPIO[5]"), /* B13 */
+ PINCTRL_PIN(30, "EGPIO[15]"), /* B14 */
+ PINCTRL_PIN(31, "USBp[2]"), /* B15 */
+ PINCTRL_PIN(32, "ARSTn"), /* B16 */
+ PINCTRL_PIN(33, "ADC_VDD"), /* B17 */
+ /* Row C */
+ PINCTRL_PIN(34, "AD[23]"), /* C1 */
+ PINCTRL_PIN(35, "DA[26]"), /* C2 */
+ PINCTRL_PIN(36, "CSn[3]"), /* C3 */
+ PINCTRL_PIN(37, "DA[25]"), /* C4 */
+ PINCTRL_PIN(38, "AD[24]"), /* C5 */
+ PINCTRL_PIN(39, "AD[19]"), /* C6 */
+ PINCTRL_PIN(40, "HGPIO[5]"), /* C7 */
+ PINCTRL_PIN(41, "WRn"), /* C8 */
+ PINCTRL_PIN(42, "MDIO"), /* C9 */
+ PINCTRL_PIN(43, "MIIRXD[2]"), /* C10 */
+ PINCTRL_PIN(44, "TXCLK"), /* C11 */
+ PINCTRL_PIN(45, "MIITXD[0]"), /* C12 */
+ PINCTRL_PIN(46, "CLD"), /* C13 */
+ PINCTRL_PIN(47, "EGPIO[13]"), /* C14 */
+ PINCTRL_PIN(48, "TRSTn"), /* C15 */
+ PINCTRL_PIN(49, "Xp"), /* C16 */
+ PINCTRL_PIN(50, "Xm"), /* C17 */
+ /* Row D */
+ PINCTRL_PIN(51, "SDCSn[3]"), /* D1 */
+ PINCTRL_PIN(52, "DA[23]"), /* D2 */
+ PINCTRL_PIN(53, "SDCLK"), /* D3 */
+ PINCTRL_PIN(54, "DA[24]"), /* D4 */
+ PINCTRL_PIN(55, "HGPIO[7]"), /* D5 */
+ PINCTRL_PIN(56, "HGPIO[6]"), /* D6 */
+ PINCTRL_PIN(57, "A[28]"), /* D7 */
+ PINCTRL_PIN(58, "HGPIO[4]"), /* D8 */
+ PINCTRL_PIN(59, "AD[16]"), /* D9 */
+ PINCTRL_PIN(60, "MDC"), /* D10 */
+ PINCTRL_PIN(61, "RXERR"), /* D11 */
+ PINCTRL_PIN(62, "MIITXD[3]"), /* D12 */
+ PINCTRL_PIN(63, "EGPIO[12]"), /* D13 */
+ PINCTRL_PIN(64, "EGPIO[1]"), /* D14 */
+ PINCTRL_PIN(65, "EGPIO[0]"), /* D15 */
+ PINCTRL_PIN(66, "Ym"), /* D16 */
+ PINCTRL_PIN(67, "Yp"), /* D17 */
+ /* Row E */
+ PINCTRL_PIN(68, "SDCSn[2]"), /* E1 */
+ PINCTRL_PIN(69, "SDWEN"), /* E2 */
+ PINCTRL_PIN(70, "DA[22]"), /* E3 */
+ PINCTRL_PIN(71, "AD[3]"), /* E4 */
+ PINCTRL_PIN(72, "DA[15]"), /* E5 */
+ PINCTRL_PIN(73, "AD[21]"), /* E6 */
+ PINCTRL_PIN(74, "DA[17]"), /* E7 */
+ PINCTRL_PIN(75, "vddr"), /* E8 */
+ PINCTRL_PIN(76, "vddr"), /* E9 */
+ PINCTRL_PIN(77, "vddr"), /* E10 */
+ PINCTRL_PIN(78, "MIIRXD[0]"), /* E11 */
+ PINCTRL_PIN(79, "TXERR"), /* E12 */
+ PINCTRL_PIN(80, "EGPIO[2]"), /* E13 */
+ PINCTRL_PIN(81, "EGPIO[4]"), /* E14 */
+ PINCTRL_PIN(82, "EGPIO[3]"), /* E15 */
+ PINCTRL_PIN(83, "sXp"), /* E16 */
+ PINCTRL_PIN(84, "sXm"), /* E17 */
+ /* Row F */
+ PINCTRL_PIN(85, "RASn"), /* F1 */
+ PINCTRL_PIN(86, "SDCSn[1]"), /* F2 */
+ PINCTRL_PIN(87, "SDCSn[0]"), /* F3 */
+ PINCTRL_PIN(88, "DQMn[3]"), /* F4 */
+ PINCTRL_PIN(89, "AD[5]"), /* F5 */
+ PINCTRL_PIN(90, "gndr"), /* F6 */
+ PINCTRL_PIN(91, "gndr"), /* F7 */
+ PINCTRL_PIN(92, "gndr"), /* F8 */
+ PINCTRL_PIN(93, "vddc"), /* F9 */
+ PINCTRL_PIN(94, "vddc"), /* F10 */
+ PINCTRL_PIN(95, "gndr"), /* F11 */
+ PINCTRL_PIN(96, "EGPIO[7]"), /* F12 */
+ PINCTRL_PIN(97, "EGPIO[5]"), /* F13 */
+ PINCTRL_PIN(98, "ADC GND"), /* F14 */
+ PINCTRL_PIN(99, "EGPIO[6]"), /* F15 */
+ PINCTRL_PIN(100, "sYm"), /* F16 */
+ PINCTRL_PIN(101, "syp"), /* F17 */
+ /* Row G */
+ PINCTRL_PIN(102, "DQMn[0]"), /* G1 */
+ PINCTRL_PIN(103, "CASn"), /* G2 */
+ PINCTRL_PIN(104, "DA[21]"), /* G3 */
+ PINCTRL_PIN(105, "AD[22]"), /* G4 */
+ PINCTRL_PIN(106, "vddr"), /* G5 */
+ PINCTRL_PIN(107, "gndr"), /* G6 */
+ PINCTRL_PIN(108, "gndr"), /* G12 */
+ PINCTRL_PIN(109, "EGPIO[9]"), /* G13 */
+ PINCTRL_PIN(110, "EGPIO[10]"), /* G14 */
+ PINCTRL_PIN(111, "EGPIO[11]"), /* G15 */
+ PINCTRL_PIN(112, "RTCXTALO"), /* G16 */
+ PINCTRL_PIN(113, "RTCXTALI"), /* G17 */
+ /* Row H */
+ PINCTRL_PIN(114, "DA[18]"), /* H1 */
+ PINCTRL_PIN(115, "DA[20]"), /* H2 */
+ PINCTRL_PIN(116, "DA[19]"), /* H3 */
+ PINCTRL_PIN(117, "DA[16]"), /* H4 */
+ PINCTRL_PIN(118, "vddr"), /* H5 */
+ PINCTRL_PIN(119, "vddc"), /* H6 */
+ PINCTRL_PIN(120, "gndc"), /* H7 */
+ PINCTRL_PIN(121, "gndc"), /* H9 */
+ PINCTRL_PIN(122, "gndc"), /* H10 */
+ PINCTRL_PIN(123, "gndr"), /* H12 */
+ PINCTRL_PIN(124, "vddr"), /* H13 */
+ PINCTRL_PIN(125, "EGPIO[8]"), /* H14 */
+ PINCTRL_PIN(126, "PRSTN"), /* H15 */
+ PINCTRL_PIN(127, "COL[7]"), /* H16 */
+ PINCTRL_PIN(128, "RSTON"), /* H17 */
+ /* Row J */
+ PINCTRL_PIN(129, "AD[6]"), /* J1 */
+ PINCTRL_PIN(130, "DA[14]"), /* J2 */
+ PINCTRL_PIN(131, "AD[7]"), /* J3 */
+ PINCTRL_PIN(132, "DA[13]"), /* J4 */
+ PINCTRL_PIN(133, "vddr"), /* J5 */
+ PINCTRL_PIN(134, "vddc"), /* J6 */
+ PINCTRL_PIN(135, "gndc"), /* J8 */
+ PINCTRL_PIN(136, "gndc"), /* J10 */
+ PINCTRL_PIN(137, "vddc"), /* J12 */
+ PINCTRL_PIN(138, "vddr"), /* J13 */
+ PINCTRL_PIN(139, "COL[5]"), /* J14 */
+ PINCTRL_PIN(140, "COL[6]"), /* J15 */
+ PINCTRL_PIN(141, "CSn[0]"), /* J16 */
+ PINCTRL_PIN(142, "COL[3]"), /* J17 */
+ /* Row K */
+ PINCTRL_PIN(143, "AD[4]"), /* K1 */
+ PINCTRL_PIN(144, "DA[12]"), /* K2 */
+ PINCTRL_PIN(145, "DA[10]"), /* K3 */
+ PINCTRL_PIN(146, "DA[11]"), /* K4 */
+ PINCTRL_PIN(147, "vddr"), /* K5 */
+ PINCTRL_PIN(148, "gndr"), /* K6 */
+ PINCTRL_PIN(149, "gndc"), /* K8 */
+ PINCTRL_PIN(150, "gndc"), /* K9 */
+ PINCTRL_PIN(151, "gndc"), /* K10 */
+ PINCTRL_PIN(152, "vddc"), /* K12 */
+ PINCTRL_PIN(153, "COL[4]"), /* K13 */
+ PINCTRL_PIN(154, "PLL_VDD"), /* K14 */
+ PINCTRL_PIN(155, "COL[2]"), /* K15 */
+ PINCTRL_PIN(156, "COL[1]"), /* K16 */
+ PINCTRL_PIN(157, "COL[0]"), /* K17 */
+ /* Row L */
+ PINCTRL_PIN(158, "DA[9]"), /* L1 */
+ PINCTRL_PIN(159, "AD[2]"), /* L2 */
+ PINCTRL_PIN(160, "AD[1]"), /* L3 */
+ PINCTRL_PIN(161, "DA[8]"), /* L4 */
+ PINCTRL_PIN(162, "BLANK"), /* L5 */
+ PINCTRL_PIN(163, "gndr"), /* L6 */
+ PINCTRL_PIN(164, "gndr"), /* L7 */
+ PINCTRL_PIN(165, "ROW[7]"), /* L8 */
+ PINCTRL_PIN(166, "ROW[5]"), /* L9 */
+ PINCTRL_PIN(167, "PLL GND"), /* L10 */
+ PINCTRL_PIN(168, "XTALI"), /* L11 */
+ PINCTRL_PIN(169, "XTALO"), /* L12 */
+ /* Row M */
+ PINCTRL_PIN(170, "BRIGHT"), /* M1 */
+ PINCTRL_PIN(171, "AD[0]"), /* M2 */
+ PINCTRL_PIN(172, "DQMn[1]"), /* M3 */
+ PINCTRL_PIN(173, "DQMn[2]"), /* M4 */
+ PINCTRL_PIN(174, "P[17]"), /* M5 */
+ PINCTRL_PIN(175, "gndr"), /* M6 */
+ PINCTRL_PIN(176, "gndr"), /* M7 */
+ PINCTRL_PIN(177, "vddc"), /* M8 */
+ PINCTRL_PIN(178, "vddc"), /* M9 */
+ PINCTRL_PIN(179, "gndr"), /* M10 */
+ PINCTRL_PIN(180, "gndr"), /* M11 */
+ PINCTRL_PIN(181, "ROW[6]"), /* M12 */
+ PINCTRL_PIN(182, "ROW[4]"), /* M13 */
+ PINCTRL_PIN(183, "ROW[1]"), /* M14 */
+ PINCTRL_PIN(184, "ROW[0]"), /* M15 */
+ PINCTRL_PIN(185, "ROW[3]"), /* M16 */
+ PINCTRL_PIN(186, "ROW[2]"), /* M17 */
+ /* Row N */
+ PINCTRL_PIN(187, "P[14]"), /* N1 */
+ PINCTRL_PIN(188, "P[16]"), /* N2 */
+ PINCTRL_PIN(189, "P[15]"), /* N3 */
+ PINCTRL_PIN(190, "P[13]"), /* N4 */
+ PINCTRL_PIN(191, "P[12]"), /* N5 */
+ PINCTRL_PIN(192, "DA[5]"), /* N6 */
+ PINCTRL_PIN(193, "vddr"), /* N7 */
+ PINCTRL_PIN(194, "vddr"), /* N8 */
+ PINCTRL_PIN(195, "vddr"), /* N9 */
+ PINCTRL_PIN(196, "vddr"), /* N10 */
+ PINCTRL_PIN(197, "EECLK"), /* N11 */
+ PINCTRL_PIN(198, "ASDO"), /* N12 */
+ PINCTRL_PIN(199, "CTSn"), /* N13 */
+ PINCTRL_PIN(200, "RXD[0]"), /* N14 */
+ PINCTRL_PIN(201, "TXD[0]"), /* N15 */
+ PINCTRL_PIN(202, "TXD[1]"), /* N16 */
+ PINCTRL_PIN(203, "TXD[2]"), /* N17 */
+ /* Row P */
+ PINCTRL_PIN(204, "SPCLK"), /* P1 */
+ PINCTRL_PIN(205, "P[10]"), /* P2 */
+ PINCTRL_PIN(206, "P[11]"), /* P3 */
+ PINCTRL_PIN(207, "P[3]"), /* P4 */
+ PINCTRL_PIN(208, "AD[15]"), /* P5 */
+ PINCTRL_PIN(209, "AD[13]"), /* P6 */
+ PINCTRL_PIN(210, "AD[12]"), /* P7 */
+ PINCTRL_PIN(211, "DA[2]"), /* P8 */
+ PINCTRL_PIN(212, "AD[8]"), /* P9 */
+ PINCTRL_PIN(213, "TCK"), /* P10 */
+ PINCTRL_PIN(214, "BOOT[1]"), /* P11 */
+ PINCTRL_PIN(215, "EEDAT"), /* P12 */
+ PINCTRL_PIN(216, "GRLED"), /* P13 */
+ PINCTRL_PIN(217, "RDLED"), /* P14 */
+ PINCTRL_PIN(218, "GGPIO[2]"), /* P15 */
+ PINCTRL_PIN(219, "RXD[1]"), /* P16 */
+ PINCTRL_PIN(220, "RXD[2]"), /* P17 */
+ /* Row R */
+ PINCTRL_PIN(221, "P[9]"), /* R1 */
+ PINCTRL_PIN(222, "HSYNC"), /* R2 */
+ PINCTRL_PIN(223, "P[6]"), /* R3 */
+ PINCTRL_PIN(224, "P[5]"), /* R4 */
+ PINCTRL_PIN(225, "P[0]"), /* R5 */
+ PINCTRL_PIN(226, "AD[14]"), /* R6 */
+ PINCTRL_PIN(227, "DA[4]"), /* R7 */
+ PINCTRL_PIN(228, "DA[1]"), /* R8 */
+ PINCTRL_PIN(229, "DTRn"), /* R9 */
+ PINCTRL_PIN(230, "TDI"), /* R10 */
+ PINCTRL_PIN(231, "BOOT[0]"), /* R11 */
+ PINCTRL_PIN(232, "ASYNC"), /* R12 */
+ PINCTRL_PIN(233, "SSPTX[1]"), /* R13 */
+ PINCTRL_PIN(234, "PWMOUT"), /* R14 */
+ PINCTRL_PIN(235, "USBm[0]"), /* R15 */
+ PINCTRL_PIN(236, "ABITCLK"), /* R16 */
+ PINCTRL_PIN(237, "USBp[0]"), /* R17 */
+ /* Row T */
+ PINCTRL_PIN(238, "NC"), /* T1 */
+ PINCTRL_PIN(239, "NC"), /* T2 */
+ PINCTRL_PIN(240, "V_CSYNC"), /* T3 */
+ PINCTRL_PIN(241, "P[7]"), /* T4 */
+ PINCTRL_PIN(242, "P[2]"), /* T5 */
+ PINCTRL_PIN(243, "DA[7]"), /* T6 */
+ PINCTRL_PIN(244, "AD[11]"), /* T7 */
+ PINCTRL_PIN(245, "AD[9]"), /* T8 */
+ PINCTRL_PIN(246, "DSRn"), /* T9 */
+ PINCTRL_PIN(247, "TMS"), /* T10 */
+ PINCTRL_PIN(248, "gndr"), /* T11 */
+ PINCTRL_PIN(249, "SFRM[1]"), /* T12 */
+ PINCTRL_PIN(250, "INT[2]"), /* T13 */
+ PINCTRL_PIN(251, "INT[0]"), /* T14 */
+ PINCTRL_PIN(252, "USBp[1]"), /* T15 */
+ PINCTRL_PIN(253, "NC"), /* T16 */
+ PINCTRL_PIN(254, "NC"), /* T17 */
+ /* Row U */
+ PINCTRL_PIN(255, "NC"), /* U1 */
+ PINCTRL_PIN(256, "NC"), /* U2 */
+ PINCTRL_PIN(257, "P[8]"), /* U3 */
+ PINCTRL_PIN(258, "P[4]"), /* U4 */
+ PINCTRL_PIN(259, "P[1]"), /* U5 */
+ PINCTRL_PIN(260, "DA[6]"), /* U6 */
+ PINCTRL_PIN(261, "DA[3]"), /* U7 */
+ PINCTRL_PIN(262, "AD[10]"), /* U8 */
+ PINCTRL_PIN(263, "DA[0]"), /* U9 */
+ PINCTRL_PIN(264, "TDO"), /* U10 */
+ PINCTRL_PIN(265, "NC"), /* U11 */
+ PINCTRL_PIN(266, "SCLK[1]"), /* U12 */
+ PINCTRL_PIN(267, "SSPRX[1]"), /* U13 */
+ PINCTRL_PIN(268, "INT[1]"), /* U14 */
+ PINCTRL_PIN(269, "RTSn"), /* U15 */
+ PINCTRL_PIN(270, "USBm[1]"), /* U16 */
+ PINCTRL_PIN(271, "NC"), /* U17 */
+};
+
+static const unsigned int ssp_ep9307_pins[] = {
+ 233, 249, 266, 267,
+};
+
+static const unsigned int ac97_ep9307_pins[] = {
+ 16, 32, 198, 232, 236,
+};
+
+/* No documentation found for these - they appear to be some internal state */
+static const unsigned int raster_on_sdram0_pins[] = {
+};
+
+static const unsigned int raster_on_sdram3_pins[] = {
+};
+
+/* ROW[N] */
+static const unsigned int gpio2a_9307_pins[] = {
+ 165, 166, 181, 182, 183, 184, 185, 186,
+};
+
+/* COL[N] */
+static const unsigned int gpio3a_9307_pins[] = {
+ 127, 139, 140, 142, 153, 155, 156, 157,
+};
+
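+/*
+ * The keypad matrix shares its ROW[n]/COL[n] pads with GPIO ports 2
+ * and 3; the GONK bit in DEVCFG selects which of the two owns them.
+ */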
+static const unsigned int keypad_9307_pins[] = {
+ 127, 139, 140, 142, 153, 155, 156, 157,
+ 165, 166, 181, 182, 183, 184, 185, 186,
+};
+
+/* ep9307 has only pins 4 and 5 of GPIO Port E exposed */
+static const unsigned int gpio4a_9307_pins[] = { 216, 217 };
+
+/* ep9307 has only pin 2 of GPIO Port G exposed */
+static const unsigned int gpio6a_9307_pins[] = { 219 };
+
+static const unsigned int gpio7a_9307_pins[] = { 7, 24, 41, 56, 57, 59 };
+
+static const struct ep93xx_pin_group ep9307_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("ac97", ac97_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0),
+ PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3,
+ EP93XX_SYSCON_DEVCFG_RASONP3),
+ PMX_GROUP("gpio2agrp", gpio2a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio3agrp", gpio3a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("keypadgrp", keypad_9307_pins, EP93XX_SYSCON_DEVCFG_GONK, 0),
+ PMX_GROUP("gpio4agrp", gpio4a_9307_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9307_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9307_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+};
+
+/* ep9312, ep9315 */
+static const struct pinctrl_pin_desc ep9312_pins[] = {
+ /* Row A */
+ PINCTRL_PIN(0, "CSN[7]"), /* A1 */
+ PINCTRL_PIN(1, "DA[28]"), /* A2 */
+ PINCTRL_PIN(2, "AD[18]"), /* A3 */
+ PINCTRL_PIN(3, "DD[8]"), /* A4 */
+ PINCTRL_PIN(4, "DD[4]"), /* A5 */
+ PINCTRL_PIN(5, "AD[17]"), /* A6 */
+ PINCTRL_PIN(6, "RDN"), /* A7 */
+ PINCTRL_PIN(7, "RXCLK"), /* A8 */
+ PINCTRL_PIN(8, "MIIRXD[0]"), /* A9 */
+ PINCTRL_PIN(9, "RXDVAL"), /* A10 */
+ PINCTRL_PIN(10, "MIITXD[2]"), /* A11 */
+ PINCTRL_PIN(11, "TXERR"), /* A12 */
+ PINCTRL_PIN(12, "CLD"), /* A13 */
+ PINCTRL_PIN(13, "NC"), /* A14 */
+ PINCTRL_PIN(14, "NC"), /* A15 */
+ PINCTRL_PIN(15, "NC"), /* A16 */
+ PINCTRL_PIN(16, "EGPIO[12]"), /* A17 */
+ PINCTRL_PIN(17, "EGPIO[15]"), /* A18 */
+ PINCTRL_PIN(18, "NC"), /* A19 */
+ PINCTRL_PIN(19, "NC"), /* A20 */
+ /* Row B */
+ PINCTRL_PIN(20, "CSN[2]"), /* B1 */
+ PINCTRL_PIN(21, "DA[31]"), /* B2 */
+ PINCTRL_PIN(22, "DA[30]"), /* B3 */
+ PINCTRL_PIN(23, "DA[27]"), /* B4 */
+ PINCTRL_PIN(24, "DD[7]"), /* B5 */
+ PINCTRL_PIN(25, "DD[3]"), /* B6 */
+ PINCTRL_PIN(26, "WRN"), /* B7 */
+ PINCTRL_PIN(27, "MDIO"), /* B8 */
+ PINCTRL_PIN(28, "MIIRXD[1]"), /* B9 */
+ PINCTRL_PIN(29, "RXERR"), /* B10 */
+ PINCTRL_PIN(30, "MIITXD[1]"), /* B11 */
+ PINCTRL_PIN(31, "CRS"), /* B12 */
+ PINCTRL_PIN(32, "NC"), /* B13 */
+ PINCTRL_PIN(33, "NC"), /* B14 */
+ PINCTRL_PIN(34, "NC"), /* B15 */
+ PINCTRL_PIN(35, "NC"), /* B16 */
+ PINCTRL_PIN(36, "EGPIO[13]"), /* B17 */
+ PINCTRL_PIN(37, "NC"), /* B18 */
+ PINCTRL_PIN(38, "WAITN"), /* B19 */
+ PINCTRL_PIN(39, "TRSTN"), /* B20 */
+ /* Row C */
+ PINCTRL_PIN(40, "CSN[1]"), /* C1 */
+ PINCTRL_PIN(41, "CSN[3]"), /* C2 */
+ PINCTRL_PIN(42, "AD[20]"), /* C3 */
+ PINCTRL_PIN(43, "DA[29]"), /* C4 */
+ PINCTRL_PIN(44, "DD[10]"), /* C5 */
+ PINCTRL_PIN(45, "DD[6]"), /* C6 */
+ PINCTRL_PIN(46, "DD[2]"), /* C7 */
+ PINCTRL_PIN(47, "MDC"), /* C8 */
+ PINCTRL_PIN(48, "MIIRXD[3]"), /* C9 */
+ PINCTRL_PIN(49, "TXCLK"), /* C10 */
+ PINCTRL_PIN(50, "MIITXD[0]"), /* C11 */
+ PINCTRL_PIN(51, "NC"), /* C12 */
+ PINCTRL_PIN(52, "NC"), /* C13 */
+ PINCTRL_PIN(53, "NC"), /* C14 */
+ PINCTRL_PIN(54, "NC"), /* C15 */
+ PINCTRL_PIN(55, "NC"), /* C16 */
+ PINCTRL_PIN(56, "NC"), /* C17 */
+ PINCTRL_PIN(57, "USBP[2]"), /* C18 */
+ PINCTRL_PIN(58, "IORDY"), /* C19 */
+ PINCTRL_PIN(59, "DMACKN"), /* C20 */
+ /* Row D */
+ PINCTRL_PIN(60, "AD[24]"), /* D1 */
+ PINCTRL_PIN(61, "DA[25]"), /* D2 */
+ PINCTRL_PIN(62, "DD[11]"), /* D3 */
+ PINCTRL_PIN(63, "SDCLKEN"), /* D4 */
+ PINCTRL_PIN(64, "AD[19]"), /* D5 */
+ PINCTRL_PIN(65, "DD[9]"), /* D6 */
+ PINCTRL_PIN(66, "DD[5]"), /* D7 */
+ PINCTRL_PIN(67, "AD[16]"), /* D8 */
+ PINCTRL_PIN(68, "MIIRXD[2]"), /* D9 */
+ PINCTRL_PIN(69, "MIITXD[3]"), /* D10 */
+ PINCTRL_PIN(70, "TXEN"), /* D11 */
+ PINCTRL_PIN(71, "NC"), /* D12 */
+ PINCTRL_PIN(72, "NC"), /* D13 */
+ PINCTRL_PIN(73, "NC"), /* D14 */
+ PINCTRL_PIN(74, "EGPIO[14]"), /* D15 */
+ PINCTRL_PIN(75, "NC"), /* D16 */
+ PINCTRL_PIN(76, "USBM[2]"), /* D17 */
+ PINCTRL_PIN(77, "ARSTN"), /* D18 */
+ PINCTRL_PIN(78, "DIORN"), /* D19 */
+ PINCTRL_PIN(79, "EGPIO[1]"), /* D20 */
+ /* Row E */
+ PINCTRL_PIN(80, "AD[23]"), /* E1 */
+ PINCTRL_PIN(81, "DA[23]"), /* E2 */
+ PINCTRL_PIN(82, "DA[26]"), /* E3 */
+ PINCTRL_PIN(83, "CSN[6]"), /* E4 */
+ PINCTRL_PIN(84, "GND"), /* E5 */
+ PINCTRL_PIN(85, "GND"), /* E6 */
+ PINCTRL_PIN(86, "CVDD"), /* E7 */
+ PINCTRL_PIN(87, "CVDD"), /* E8 */
+ PINCTRL_PIN(88, "RVDD"), /* E9 */
+ PINCTRL_PIN(89, "GND"), /* E10 */
+ PINCTRL_PIN(90, "GND"), /* E11 */
+ PINCTRL_PIN(91, "RVDD"), /* E12 */
+ PINCTRL_PIN(92, "CVDD"), /* E13 */
+ PINCTRL_PIN(93, "CVDD"), /* E14 */
+ PINCTRL_PIN(94, "GND"), /* E15 */
+ PINCTRL_PIN(95, "ASDI"), /* E16 */
+ PINCTRL_PIN(96, "DIOWN"), /* E17 */
+ PINCTRL_PIN(97, "EGPIO[0]"), /* E18 */
+ PINCTRL_PIN(98, "EGPIO[3]"), /* E19 */
+ PINCTRL_PIN(99, "EGPIO[5]"), /* E20 */
+ /* Row F */
+ PINCTRL_PIN(100, "SDCSN[3]"), /* F1 */
+ PINCTRL_PIN(101, "DA[22]"), /* F2 */
+ PINCTRL_PIN(102, "DA[24]"), /* F3 */
+ PINCTRL_PIN(103, "AD[25]"), /* F4 */
+ PINCTRL_PIN(104, "RVDD"), /* F5 */
+ PINCTRL_PIN(105, "GND"), /* F6 */
+ PINCTRL_PIN(106, "CVDD"), /* F7 */
+ PINCTRL_PIN(107, "CVDD"), /* F14 */
+ PINCTRL_PIN(108, "GND"), /* F15 */
+ PINCTRL_PIN(109, "GND"), /* F16 */
+ PINCTRL_PIN(110, "EGPIO[2]"), /* F17 */
+ PINCTRL_PIN(111, "EGPIO[4]"), /* F18 */
+ PINCTRL_PIN(112, "EGPIO[6]"), /* F19 */
+ PINCTRL_PIN(113, "EGPIO[8]"), /* F20 */
+ /* Row G */
+ PINCTRL_PIN(114, "SDCSN[0]"), /* G1 */
+ PINCTRL_PIN(115, "SDCSN[1]"), /* G2 */
+ PINCTRL_PIN(116, "SDWEN"), /* G3 */
+ PINCTRL_PIN(117, "SDCLK"), /* G4 */
+ PINCTRL_PIN(118, "RVDD"), /* G5 */
+ PINCTRL_PIN(119, "RVDD"), /* G6 */
+ PINCTRL_PIN(120, "RVDD"), /* G15 */
+ PINCTRL_PIN(121, "RVDD"), /* G16 */
+ PINCTRL_PIN(122, "EGPIO[7]"), /* G17 */
+ PINCTRL_PIN(123, "EGPIO[9]"), /* G18 */
+ PINCTRL_PIN(124, "EGPIO[10]"), /* G19 */
+ PINCTRL_PIN(125, "EGPIO[11]"), /* G20 */
+ /* Row H */
+ PINCTRL_PIN(126, "DQMN[3]"), /* H1 */
+ PINCTRL_PIN(127, "CASN"), /* H2 */
+ PINCTRL_PIN(128, "RASN"), /* H3 */
+ PINCTRL_PIN(129, "SDCSN[2]"), /* H4 */
+ PINCTRL_PIN(130, "CVDD"), /* H5 */
+ PINCTRL_PIN(131, "GND"), /* H8 */
+ PINCTRL_PIN(132, "GND"), /* H9 */
+ PINCTRL_PIN(133, "GND"), /* H10 */
+ PINCTRL_PIN(134, "GND"), /* H11 */
+ PINCTRL_PIN(135, "GND"), /* H12 */
+ PINCTRL_PIN(136, "GND"), /* H13 */
+ PINCTRL_PIN(137, "RVDD"), /* H16 */
+ PINCTRL_PIN(138, "RTCXTALO"), /* H17 */
+ PINCTRL_PIN(139, "ADC_VDD"), /* H18 */
+ PINCTRL_PIN(140, "ADC_GND"), /* H19 */
+ PINCTRL_PIN(141, "XP"), /* H20 */
+ /* Row J */
+ PINCTRL_PIN(142, "DA[21]"), /* J1 */
+ PINCTRL_PIN(143, "DQMN[0]"), /* J2 */
+ PINCTRL_PIN(144, "DQMN[1]"), /* J3 */
+ PINCTRL_PIN(145, "DQMN[2]"), /* J4 */
+ PINCTRL_PIN(146, "GND"), /* J5 */
+ PINCTRL_PIN(147, "GND"), /* J8 */
+ PINCTRL_PIN(148, "GND"), /* J9 */
+ PINCTRL_PIN(149, "GND"), /* J10 */
+ PINCTRL_PIN(150, "GND"), /* J11 */
+ PINCTRL_PIN(151, "GND"), /* J12 */
+ PINCTRL_PIN(152, "GND"), /* J13 */
+ PINCTRL_PIN(153, "CVDD"), /* J16 */
+ PINCTRL_PIN(154, "RTCXTALI"), /* J17 */
+ PINCTRL_PIN(155, "XM"), /* J18 */
+ PINCTRL_PIN(156, "YP"), /* J19 */
+ PINCTRL_PIN(157, "YM"), /* J20 */
+ /* Row K */
+ PINCTRL_PIN(158, "AD[22]"), /* K1 */
+ PINCTRL_PIN(159, "DA[20]"), /* K2 */
+ PINCTRL_PIN(160, "AD[21]"), /* K3 */
+ PINCTRL_PIN(161, "DA[19]"), /* K4 */
+ PINCTRL_PIN(162, "RVDD"), /* K5 */
+ PINCTRL_PIN(163, "GND"), /* K8 */
+ PINCTRL_PIN(164, "GND"), /* K9 */
+ PINCTRL_PIN(165, "GND"), /* K10 */
+ PINCTRL_PIN(166, "GND"), /* K11 */
+ PINCTRL_PIN(167, "GND"), /* K12 */
+ PINCTRL_PIN(168, "GND"), /* K13 */
+ PINCTRL_PIN(169, "CVDD"), /* K16 */
+ PINCTRL_PIN(170, "SYM"), /* K17 */
+ PINCTRL_PIN(171, "SYP"), /* K18 */
+ PINCTRL_PIN(172, "SXM"), /* K19 */
+ PINCTRL_PIN(173, "SXP"), /* K20 */
+ /* Row L */
+ PINCTRL_PIN(174, "DA[18]"), /* L1 */
+ PINCTRL_PIN(175, "DA[17]"), /* L2 */
+ PINCTRL_PIN(176, "DA[16]"), /* L3 */
+ PINCTRL_PIN(177, "DA[15]"), /* L4 */
+ PINCTRL_PIN(178, "GND"), /* L5 */
+ PINCTRL_PIN(179, "GND"), /* L8 */
+ PINCTRL_PIN(180, "GND"), /* L9 */
+ PINCTRL_PIN(181, "GND"), /* L10 */
+ PINCTRL_PIN(182, "GND"), /* L11 */
+ PINCTRL_PIN(183, "GND"), /* L12 */
+ PINCTRL_PIN(184, "GND"), /* L13 */
+ PINCTRL_PIN(185, "CVDD"), /* L16 */
+ PINCTRL_PIN(186, "COL[5]"), /* L17 */
+ PINCTRL_PIN(187, "COL[7]"), /* L18 */
+ PINCTRL_PIN(188, "RSTON"), /* L19 */
+ PINCTRL_PIN(189, "PRSTN"), /* L20 */
+ /* Row M */
+ PINCTRL_PIN(190, "AD[7]"), /* M1 */
+ PINCTRL_PIN(191, "DA[14]"), /* M2 */
+ PINCTRL_PIN(192, "AD[6]"), /* M3 */
+ PINCTRL_PIN(193, "AD[5]"), /* M4 */
+ PINCTRL_PIN(194, "CVDD"), /* M5 */
+ PINCTRL_PIN(195, "GND"), /* M8 */
+ PINCTRL_PIN(196, "GND"), /* M9 */
+ PINCTRL_PIN(197, "GND"), /* M10 */
+ PINCTRL_PIN(198, "GND"), /* M11 */
+ PINCTRL_PIN(199, "GND"), /* M12 */
+ PINCTRL_PIN(200, "GND"), /* M13 */
+ PINCTRL_PIN(201, "GND"), /* M16 */
+ PINCTRL_PIN(202, "COL[4]"), /* M17 */
+ PINCTRL_PIN(203, "COL[3]"), /* M18 */
+ PINCTRL_PIN(204, "COL[6]"), /* M19 */
+ PINCTRL_PIN(205, "CSN[0]"), /* M20 */
+ /* Row N */
+ PINCTRL_PIN(206, "DA[13]"), /* N1 */
+ PINCTRL_PIN(207, "DA[12]"), /* N2 */
+ PINCTRL_PIN(208, "DA[11]"), /* N3 */
+ PINCTRL_PIN(209, "AD[3]"), /* N4 */
+ PINCTRL_PIN(210, "CVDD"), /* N5 */
+ PINCTRL_PIN(211, "CVDD"), /* N6 */
+ PINCTRL_PIN(212, "GND"), /* N8 */
+ PINCTRL_PIN(213, "GND"), /* N9 */
+ PINCTRL_PIN(214, "GND"), /* N10 */
+ PINCTRL_PIN(215, "GND"), /* N11 */
+ PINCTRL_PIN(216, "GND"), /* N12 */
+ PINCTRL_PIN(217, "GND"), /* N13 */
+ PINCTRL_PIN(218, "GND"), /* N15 */
+ PINCTRL_PIN(219, "GND"), /* N16 */
+ PINCTRL_PIN(220, "XTALO"), /* N17 */
+ PINCTRL_PIN(221, "COL[0]"), /* N18 */
+ PINCTRL_PIN(222, "COL[1]"), /* N19 */
+ PINCTRL_PIN(223, "COL[2]"), /* N20 */
+ /* Row P */
+ PINCTRL_PIN(224, "AD[4]"), /* P1 */
+ PINCTRL_PIN(225, "DA[10]"), /* P2 */
+ PINCTRL_PIN(226, "DA[9]"), /* P3 */
+ PINCTRL_PIN(227, "BRIGHT"), /* P4 */
+ PINCTRL_PIN(228, "RVDD"), /* P5 */
+ PINCTRL_PIN(229, "RVDD"), /* P6 */
+ PINCTRL_PIN(230, "RVDD"), /* P15 */
+ PINCTRL_PIN(231, "RVDD"), /* P16 */
+ PINCTRL_PIN(232, "XTALI"), /* P17 */
+ PINCTRL_PIN(233, "PLL_VDD"), /* P18 */
+ PINCTRL_PIN(234, "ROW[6]"), /* P19 */
+ PINCTRL_PIN(235, "ROW[7]"), /* P20 */
+ /* Row R */
+ PINCTRL_PIN(236, "AD[2]"), /* R1 */
+ PINCTRL_PIN(237, "AD[1]"), /* R2 */
+ PINCTRL_PIN(238, "P[17]"), /* R3 */
+ PINCTRL_PIN(239, "P[14]"), /* R4 */
+ PINCTRL_PIN(240, "RVDD"), /* R5 */
+ PINCTRL_PIN(241, "RVDD"), /* R6 */
+ PINCTRL_PIN(242, "GND"), /* R7 */
+ PINCTRL_PIN(243, "CVDD"), /* R8 */
+ PINCTRL_PIN(244, "CVDD"), /* R13 */
+ PINCTRL_PIN(245, "GND"), /* R14 */
+ PINCTRL_PIN(246, "RVDD"), /* R15 */
+ PINCTRL_PIN(247, "RVDD"), /* R16 */
+ PINCTRL_PIN(248, "ROW[0]"), /* R17 */
+ PINCTRL_PIN(249, "ROW[3]"), /* R18 */
+ PINCTRL_PIN(250, "PLL_GND"), /* R19 */
+ PINCTRL_PIN(251, "ROW[5]"), /* R20 */
+ /* Row T */
+ PINCTRL_PIN(252, "DA[8]"), /* T1 */
+ PINCTRL_PIN(253, "BLANK"), /* T2 */
+ PINCTRL_PIN(254, "P[13]"), /* T3 */
+ PINCTRL_PIN(255, "SPCLK"), /* T4 */
+ PINCTRL_PIN(256, "V_CSYNC"), /* T5 */
+ PINCTRL_PIN(257, "DD[14]"), /* T6 */
+ PINCTRL_PIN(258, "GND"), /* T7 */
+ PINCTRL_PIN(259, "CVDD"), /* T8 */
+ PINCTRL_PIN(260, "RVDD"), /* T9 */
+ PINCTRL_PIN(261, "GND"), /* T10 */
+ PINCTRL_PIN(262, "GND"), /* T11 */
+ PINCTRL_PIN(263, "RVDD"), /* T12 */
+ PINCTRL_PIN(264, "CVDD"), /* T13 */
+ PINCTRL_PIN(265, "GND"), /* T14 */
+ PINCTRL_PIN(266, "INT[0]"), /* T15 */
+ PINCTRL_PIN(267, "USBM[1]"), /* T16 */
+ PINCTRL_PIN(268, "RXD[0]"), /* T17 */
+ PINCTRL_PIN(269, "TXD[2]"), /* T18 */
+ PINCTRL_PIN(270, "ROW[2]"), /* T19 */
+ PINCTRL_PIN(271, "ROW[4]"), /* T20 */
+ /* Row U */
+ PINCTRL_PIN(272, "AD[0]"), /* U1 */
+ PINCTRL_PIN(273, "P[15]"), /* U2 */
+ PINCTRL_PIN(274, "P[10]"), /* U3 */
+ PINCTRL_PIN(275, "P[7]"), /* U4 */
+ PINCTRL_PIN(276, "P[6]"), /* U5 */
+ PINCTRL_PIN(277, "P[4]"), /* U6 */
+ PINCTRL_PIN(278, "P[0]"), /* U7 */
+ PINCTRL_PIN(279, "AD[13]"), /* U8 */
+ PINCTRL_PIN(280, "DA[3]"), /* U9 */
+ PINCTRL_PIN(281, "DA[0]"), /* U10 */
+ PINCTRL_PIN(282, "DSRN"), /* U11 */
+ PINCTRL_PIN(283, "BOOT[1]"), /* U12 */
+ PINCTRL_PIN(284, "NC"), /* U13 */
+ PINCTRL_PIN(285, "SSPRX1"), /* U14 */
+ PINCTRL_PIN(286, "INT[1]"), /* U15 */
+ PINCTRL_PIN(287, "PWMOUT"), /* U16 */
+ PINCTRL_PIN(288, "USBM[0]"), /* U17 */
+ PINCTRL_PIN(289, "RXD[1]"), /* U18 */
+ PINCTRL_PIN(290, "TXD[1]"), /* U19 */
+ PINCTRL_PIN(291, "ROW[1]"), /* U20 */
+ /* Row V */
+ PINCTRL_PIN(292, "P[16]"), /* V1 */
+ PINCTRL_PIN(293, "P[11]"), /* V2 */
+ PINCTRL_PIN(294, "P[8]"), /* V3 */
+ PINCTRL_PIN(295, "DD[15]"), /* V4 */
+ PINCTRL_PIN(296, "DD[13]"), /* V5 */
+ PINCTRL_PIN(297, "P[1]"), /* V6 */
+ PINCTRL_PIN(298, "AD[14]"), /* V7 */
+ PINCTRL_PIN(299, "AD[12]"), /* V8 */
+ PINCTRL_PIN(300, "DA[2]"), /* V9 */
+ PINCTRL_PIN(301, "IDECS0N"), /* V10 */
+ PINCTRL_PIN(302, "IDEDA[2]"), /* V11 */
+ PINCTRL_PIN(303, "TDI"), /* V12 */
+ PINCTRL_PIN(304, "GND"), /* V13 */
+ PINCTRL_PIN(305, "ASYNC"), /* V14 */
+ PINCTRL_PIN(306, "SSPTX1"), /* V15 */
+ PINCTRL_PIN(307, "INT[2]"), /* V16 */
+ PINCTRL_PIN(308, "RTSN"), /* V17 */
+ PINCTRL_PIN(309, "USBP[0]"), /* V18 */
+ PINCTRL_PIN(310, "CTSN"), /* V19 */
+ PINCTRL_PIN(311, "TXD[0]"), /* V20 */
+ /* Row W */
+ PINCTRL_PIN(312, "P[12]"), /* W1 */
+ PINCTRL_PIN(313, "P[9]"), /* W2 */
+ PINCTRL_PIN(314, "DD[0]"), /* W3 */
+ PINCTRL_PIN(315, "P[5]"), /* W4 */
+ PINCTRL_PIN(316, "P[3]"), /* W5 */
+ PINCTRL_PIN(317, "DA[7]"), /* W6 */
+ PINCTRL_PIN(318, "DA[5]"), /* W7 */
+ PINCTRL_PIN(319, "AD[11]"), /* W8 */
+ PINCTRL_PIN(320, "AD[9]"), /* W9 */
+ PINCTRL_PIN(321, "IDECS1N"), /* W10 */
+ PINCTRL_PIN(322, "IDEDA[1]"), /* W11 */
+ PINCTRL_PIN(323, "TCK"), /* W12 */
+ PINCTRL_PIN(324, "TMS"), /* W13 */
+ PINCTRL_PIN(325, "EECLK"), /* W14 */
+ PINCTRL_PIN(326, "SCLK1"), /* W15 */
+ PINCTRL_PIN(327, "GRLED"), /* W16 */
+ PINCTRL_PIN(328, "INT[3]"), /* W17 */
+ PINCTRL_PIN(329, "SLA[1]"), /* W18 */
+ PINCTRL_PIN(330, "SLA[0]"), /* W19 */
+ PINCTRL_PIN(331, "RXD[2]"), /* W20 */
+ /* Row Y */
+ PINCTRL_PIN(332, "HSYNC"), /* Y1 */
+ PINCTRL_PIN(333, "DD[1]"), /* Y2 */
+ PINCTRL_PIN(334, "DD[12]"), /* Y3 */
+ PINCTRL_PIN(335, "P[2]"), /* Y4 */
+ PINCTRL_PIN(336, "AD[15]"), /* Y5 */
+ PINCTRL_PIN(337, "DA[6]"), /* Y6 */
+ PINCTRL_PIN(338, "DA[4]"), /* Y7 */
+ PINCTRL_PIN(339, "AD[10]"), /* Y8 */
+ PINCTRL_PIN(340, "DA[1]"), /* Y9 */
+ PINCTRL_PIN(341, "AD[8]"), /* Y10 */
+ PINCTRL_PIN(342, "IDEDA[0]"), /* Y11 */
+ PINCTRL_PIN(343, "DTRN"), /* Y12 */
+ PINCTRL_PIN(344, "TDO"), /* Y13 */
+ PINCTRL_PIN(345, "BOOT[0]"), /* Y14 */
+ PINCTRL_PIN(346, "EEDAT"), /* Y15 */
+ PINCTRL_PIN(347, "ASDO"), /* Y16 */
+ PINCTRL_PIN(348, "SFRM1"), /* Y17 */
+ PINCTRL_PIN(349, "RDLED"), /* Y18 */
+ PINCTRL_PIN(350, "USBP[1]"), /* Y19 */
+ PINCTRL_PIN(351, "ABITCLK"), /* Y20 */
+};
+
+static const unsigned int ssp_ep9312_pins[] = {
+ 285, 306, 326, 348,
+};
+
+static const unsigned int ac97_ep9312_pins[] = {
+ 77, 95, 305, 347, 351,
+};
+
+static const unsigned int pwm_ep9312_pins[] = { 74 };
+
+static const unsigned int gpio1a_ep9312_pins[] = { 74 };
+
+static const unsigned int gpio2a_9312_pins[] = {
+ 234, 235, 248, 249, 251, 270, 271, 291,
+};
+
+static const unsigned int gpio3a_9312_pins[] = {
+ 186, 187, 202, 203, 204, 221, 222, 223,
+};
+
+static const unsigned int keypad_9312_pins[] = {
+ 186, 187, 202, 203, 204, 221, 222, 223,
+ 234, 235, 248, 249, 251, 270, 271, 291,
+};
+
+static const unsigned int gpio4a_9312_pins[] = {
+ 78, 301, 302, 321, 322, 342,
+};
+
+static const unsigned int gpio6a_9312_pins[] = {
+ 257, 295, 296, 334,
+};
+
+static const unsigned int gpio7a_9312_pins[] = {
+ 4, 24, 25, 45, 46, 66, 314, 333,
+};
+
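+/*
+ * The IDE (PATA) interface reuses the pads of GPIO ports 4, 6 and 7;
+ * "idegrp" clears the EONIDE/GONIDE/HONIDE bits to hand all of them
+ * to the IDE controller at once.
+ */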
+static const unsigned int ide_9312_pins[] = {
+ 78, 301, 302, 321, 322, 342, 257, 295,
+ 296, 334, 4, 24, 25, 45, 46, 66,
+ 314, 333,
+};
+
+static const struct ep93xx_pin_group ep9312_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("pwm1", pwm_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG,
+ EP93XX_SYSCON_DEVCFG_PONG),
+ PMX_GROUP("gpio1agrp", gpio1a_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG, 0),
+ PMX_GROUP("ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0),
+ PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3,
+ EP93XX_SYSCON_DEVCFG_RASONP3),
+ PMX_GROUP("gpio2agrp", gpio2a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio3agrp", gpio3a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("keypadgrp", keypad_9312_pins, EP93XX_SYSCON_DEVCFG_GONK, 0),
+ PMX_GROUP("gpio4agrp", gpio4a_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9312_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9312_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+ PMX_GROUP("idegrp", ide_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE, 0),
+};
+
+static int ep93xx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ return ARRAY_SIZE(ep9301_pin_groups);
+ case EP93XX_9307_PINCTRL:
+ return ARRAY_SIZE(ep9307_pin_groups);
+ case EP93XX_9312_PINCTRL:
+ return ARRAY_SIZE(ep9312_pin_groups);
+ default:
+ return 0;
+ }
+}
+
+static const char *ep93xx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ return ep9301_pin_groups[selector].grp.name;
+ case EP93XX_9307_PINCTRL:
+ return ep9307_pin_groups[selector].grp.name;
+ case EP93XX_9312_PINCTRL:
+ return ep9312_pin_groups[selector].grp.name;
+ default:
+ return NULL;
+ }
+}
+
+static int ep93xx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ *pins = ep9301_pin_groups[selector].grp.pins;
+ *num_pins = ep9301_pin_groups[selector].grp.npins;
+ break;
+ case EP93XX_9307_PINCTRL:
+ *pins = ep9307_pin_groups[selector].grp.pins;
+ *num_pins = ep9307_pin_groups[selector].grp.npins;
+ break;
+ case EP93XX_9312_PINCTRL:
+ *pins = ep9312_pin_groups[selector].grp.pins;
+ *num_pins = ep9312_pin_groups[selector].grp.npins;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops ep93xx_pctrl_ops = {
+ .get_groups_count = ep93xx_get_groups_count,
+ .get_group_name = ep93xx_get_group_name,
+ .get_group_pins = ep93xx_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const char * const spigrps[] = { "ssp" };
+static const char * const ac97grps[] = { "ac97" };
+static const char * const i2sgrps[] = { "i2s_on_ssp", "i2s_on_ac97" };
+static const char * const pwm1grps[] = { "pwm1" };
+static const char * const gpiogrps[] = { "gpio1agrp", "gpio2agrp", "gpio3agrp",
+ "gpio4agrp", "gpio6agrp", "gpio7agrp" };
+static const char * const rastergrps[] = { "rasteronsdram0grp", "rasteronsdram3grp" };
+static const char * const keypadgrps[] = { "keypadgrp" };
+static const char * const idegrps[] = { "idegrp" };
+
+static const struct pinfunction ep93xx_pmx_functions[] = {
+ PINCTRL_PINFUNCTION("spi", spigrps, ARRAY_SIZE(spigrps)),
+ PINCTRL_PINFUNCTION("ac97", ac97grps, ARRAY_SIZE(ac97grps)),
+ PINCTRL_PINFUNCTION("i2s", i2sgrps, ARRAY_SIZE(i2sgrps)),
+ PINCTRL_PINFUNCTION("pwm", pwm1grps, ARRAY_SIZE(pwm1grps)),
+ PINCTRL_PINFUNCTION("keypad", keypadgrps, ARRAY_SIZE(keypadgrps)),
+ PINCTRL_PINFUNCTION("pata", idegrps, ARRAY_SIZE(idegrps)),
+ PINCTRL_PINFUNCTION("lcd", rastergrps, ARRAY_SIZE(rastergrps)),
+ PINCTRL_PINFUNCTION("gpio", gpiogrps, ARRAY_SIZE(gpiogrps)),
+};
+
+static int ep93xx_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned int group)
+{
+ struct ep93xx_pmx *pmx;
+ const struct pinfunction *func;
+ const struct ep93xx_pin_group *grp;
+ u32 before, after, expected;
+ unsigned long tmp;
+ int i;
+
+ pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ grp = &ep9301_pin_groups[group];
+ break;
+ case EP93XX_9307_PINCTRL:
+ grp = &ep9307_pin_groups[group];
+ break;
+ case EP93XX_9312_PINCTRL:
+ grp = &ep9312_pin_groups[group];
+ break;
+ default:
+ dev_err(pmx->dev, "invalid SoC type\n");
+ return -ENODEV;
+ }
+
+ func = &ep93xx_pmx_functions[selector];
+
+ dev_dbg(pmx->dev,
+ "ACTIVATE function \"%s\" with group \"%s\" (mask=0x%x, value=0x%x)\n",
+ func->name, grp->grp.name, grp->mask, grp->value);
+
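+ /*
+ * Apply the group's mask/value to DEVCFG, then read the register
+ * back: some bits may not take effect on a given part, so report
+ * any pad group that did not switch as requested.
+ */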
+ regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &before);
+ ep93xx_pinctrl_update_bits(pmx, EP93XX_SYSCON_DEVCFG,
+ grp->mask, grp->value);
+ regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &after);
+
+ dev_dbg(pmx->dev, "before=0x%x, after=0x%x, mask=0x%lx\n",
+ before, after, PADS_MASK);
+
+ /* Which bits changed */
+ before &= PADS_MASK;
+ after &= PADS_MASK;
+ expected = before & ~grp->mask;
+ expected |= grp->value;
+ expected &= PADS_MASK;
+
+ /* Print changed states */
+ tmp = expected ^ after;
+ for_each_set_bit(i, &tmp, PADS_MAXBIT) {
+ bool enabled = expected & BIT(i);
+
+ dev_err(pmx->dev,
+ "pin group %s could not be %s: probably a hardware limitation\n",
+ ep93xx_padgroups[i], str_enabled_disabled(enabled));
+ dev_err(pmx->dev,
+ "DeviceCfg before: %08x, after %08x, expected %08x\n",
+ before, after, expected);
+ }
+
+ return tmp ? -EINVAL : 0;
+}
+
+static int ep93xx_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(ep93xx_pmx_functions);
+}
+
+static const char *ep93xx_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return ep93xx_pmx_functions[selector].name;
+}
+
+static int ep93xx_pmx_get_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = ep93xx_pmx_functions[selector].groups;
+ *num_groups = ep93xx_pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static const struct pinmux_ops ep93xx_pmx_ops = {
+ .get_functions_count = ep93xx_pmx_get_funcs_count,
+ .get_function_name = ep93xx_pmx_get_func_name,
+ .get_function_groups = ep93xx_pmx_get_groups,
+ .set_mux = ep93xx_pmx_set_mux,
+};
+
+static struct pinctrl_desc ep93xx_pmx_desc = {
+ .name = DRIVER_NAME,
+ .pctlops = &ep93xx_pctrl_ops,
+ .pmxops = &ep93xx_pmx_ops,
+ .owner = THIS_MODULE,
+};
+
+static int ep93xx_pmx_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct device *dev = &adev->dev;
+ struct ep93xx_pmx *pmx;
+
+ /* Create state holders etc for this driver */
+ pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->dev = dev;
+ pmx->map = rdev->map;
+ pmx->aux_dev = rdev;
+ pmx->model = (enum ep93xx_pinctrl_model)(uintptr_t)id->driver_data;
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9301_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9301_pins);
+ dev_info(dev, "detected 9301/9302 chip variant\n");
+ break;
+ case EP93XX_9307_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9307_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9307_pins);
+ dev_info(dev, "detected 9307 chip variant\n");
+ break;
+ case EP93XX_9312_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9312_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9312_pins);
+ dev_info(dev, "detected 9312/9315 chip variant\n");
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "unknown pin control model: %u\n", pmx->model);
+ }
+
+ /* using parent of_node to match in get_pinctrl_dev_from_of_node() */
+ device_set_node(dev, dev_fwnode(adev->dev.parent));
+ pmx->pctl = devm_pinctrl_register(dev, &ep93xx_pmx_desc, pmx);
+ if (IS_ERR(pmx->pctl))
+ return dev_err_probe(dev, PTR_ERR(pmx->pctl), "could not register pinmux driver\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id ep93xx_pinctrl_ids[] = {
+ {
+ .name = "soc_ep93xx.pinctrl-ep9301",
+ .driver_data = (kernel_ulong_t)EP93XX_9301_PINCTRL,
+ },
+ {
+ .name = "soc_ep93xx.pinctrl-ep9307",
+ .driver_data = (kernel_ulong_t)EP93XX_9307_PINCTRL,
+ },
+ {
+ .name = "soc_ep93xx.pinctrl-ep9312",
+ .driver_data = (kernel_ulong_t)EP93XX_9312_PINCTRL,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_pinctrl_ids);
+
+static struct auxiliary_driver ep93xx_pmx_driver = {
+ .probe = ep93xx_pmx_probe,
+ .id_table = ep93xx_pinctrl_ids,
+};
+module_auxiliary_driver(ep93xx_pmx_driver);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4525ad1b59f4..839154c46e46 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -302,7 +302,6 @@ static const struct file_operations cros_ec_console_log_fops = {
.owner = THIS_MODULE,
.open = cros_ec_console_log_open,
.read = cros_ec_console_log_read,
- .llseek = no_llseek,
.poll = cros_ec_console_log_poll,
.release = cros_ec_console_log_release,
};
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 983f2fa44ba5..99486086af6a 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -156,7 +156,6 @@ static const struct file_operations fops_raw = {
.owner = THIS_MODULE,
.read = raw_read,
.write = raw_write,
- .llseek = no_llseek,
};
#define CMD_KB_CHROME 0x88
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index bd1fb53ba028..196e46a1d489 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -403,7 +403,6 @@ static const struct file_operations event_fops = {
.poll = event_poll,
.read = event_read,
.release = event_release,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 21d4cbbb009a..a87877e4300a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -330,7 +330,6 @@ static const struct file_operations telem_fops = {
.write = telem_write,
.read = telem_read,
.release = telem_release,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 07e065b9159f..165b1416230d 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -670,7 +670,6 @@ static const struct file_operations ssam_controller_fops = {
.fasync = ssam_cdev_fasync,
.unlocked_ioctl = ssam_cdev_device_ioctl,
.compat_ioctl = ssam_cdev_device_ioctl,
- .llseek = no_llseek,
};
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 2de843b7ea70..89ca6b50e812 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -555,7 +555,6 @@ static const struct file_operations surface_dtx_fops = {
.fasync = surface_dtx_fasync,
.unlocked_ioctl = surface_dtx_ioctl,
.compat_ioctl = surface_dtx_ioctl,
- .llseek = no_llseek,
};
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index fece990af4a7..389d5a193e5d 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -75,6 +75,16 @@ config POWER_RESET_BRCMSTB
Say Y here if you have a Broadcom STB board and you wish
to have restart support.
+config POWER_RESET_EP93XX
+ bool "Cirrus EP93XX reset driver" if COMPILE_TEST
+ depends on MFD_SYSCON
+ default ARCH_EP93XX
+ help
+ This driver provides restart support for Cirrus EP93XX SoCs.
+
+ Say Y here if you have a Cirrus EP93XX SoC and you wish
+ to have restart support.
+
config POWER_RESET_GEMINI_POWEROFF
bool "Cortina Gemini power-off driver"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index a95d1bd275d1..10782d32e1da 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_POWER_RESET_ATC260X) += atc260x-poweroff.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
+obj-$(CONFIG_POWER_RESET_EP93XX) += ep93xx-restart.o
obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
diff --git a/drivers/power/reset/ep93xx-restart.c b/drivers/power/reset/ep93xx-restart.c
new file mode 100644
index 000000000000..57cfb8620faf
--- /dev/null
+++ b/drivers/power/reset/ep93xx-restart.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cirrus EP93xx SoC reset driver
+ *
+ * Copyright (C) 2021 Nikita Shubin <nikita.shubin@maquefel.me>
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+#define EP93XX_SYSCON_DEVCFG_SWRST BIT(31)
+
+struct ep93xx_restart {
+ struct ep93xx_regmap_adev *aux_dev;
+ struct notifier_block restart_handler;
+};
+
+static int ep93xx_restart_handle(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct ep93xx_restart *priv =
+ container_of(this, struct ep93xx_restart, restart_handler);
+ struct ep93xx_regmap_adev *aux = priv->aux_dev;
+
+ /* Issue the reboot */
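+ /* Pulsing SWRST (set, then clear) triggers a software reset of the SoC */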
+ aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
+ EP93XX_SYSCON_DEVCFG_SWRST, EP93XX_SYSCON_DEVCFG_SWRST);
+ aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
+ EP93XX_SYSCON_DEVCFG_SWRST, 0);
+
+ return NOTIFY_DONE;
+}
+
+static int ep93xx_reboot_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct device *dev = &adev->dev;
+ struct ep93xx_restart *priv;
+ int err;
+
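+ /* The parent syscon must provide update_bits so we can poke DEVCFG */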
+ if (!rdev->update_bits)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->aux_dev = rdev;
+
+ priv->restart_handler.notifier_call = ep93xx_restart_handle;
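+ /* 128 is the default priority for restart handlers */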
+ priv->restart_handler.priority = 128;
+
+ err = register_restart_handler(&priv->restart_handler);
+ if (err)
+ return dev_err_probe(dev, err, "can't register restart notifier\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id ep93xx_reboot_ids[] = {
+ {
+ .name = "soc_ep93xx.reset-ep93xx",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_reboot_ids);
+
+static struct auxiliary_driver ep93xx_reboot_driver = {
+ .probe = ep93xx_reboot_probe,
+ .id_table = ep93xx_reboot_ids,
+};
+module_auxiliary_driver(ep93xx_reboot_driver);
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 63d03a0df5cc..abaffb4e1c1c 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -149,6 +149,9 @@ static void parport_attach(struct parport *port)
}
index = ida_alloc(&pps_client_index, GFP_KERNEL);
+ if (index < 0)
+ goto err_free_device;
+
memset(&pps_client_cb, 0, sizeof(pps_client_cb));
pps_client_cb.private = device;
pps_client_cb.irq_func = parport_irq;
@@ -159,7 +162,7 @@ static void parport_attach(struct parport *port)
index);
if (!device->pardev) {
pr_err("couldn't register with %s\n", port->name);
- goto err_free;
+ goto err_free_ida;
}
if (parport_claim_or_block(device->pardev) < 0) {
@@ -187,8 +190,9 @@ err_release_dev:
parport_release(device->pardev);
err_unregister_dev:
parport_unregister_device(device->pardev);
-err_free:
+err_free_ida:
ida_free(&pps_client_index, index);
+err_free_device:
kfree(device);
}
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 5d19baae6a38..25d47907db17 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -319,7 +319,6 @@ static int pps_cdev_release(struct inode *inode, struct file *file)
static const struct file_operations pps_cdev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.poll = pps_cdev_poll,
.fasync = pps_cdev_fasync,
.compat_ioctl = pps_cdev_compat_ioctl,
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 666f2954133c..994f89ac43b4 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -26,8 +27,6 @@
#include <asm/div64.h>
-#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
-
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
#define EP93XX_PWMx_ENABLE 0x08
@@ -43,20 +42,6 @@ static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
return pwmchip_get_drvdata(chip);
}
-static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
-
- return ep93xx_pwm_acquire_gpio(pdev);
-}
-
-static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
-
- ep93xx_pwm_release_gpio(pdev);
-}
-
static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -155,8 +140,6 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
static const struct pwm_ops ep93xx_pwm_ops = {
- .request = ep93xx_pwm_request,
- .free = ep93xx_pwm_free,
.apply = ep93xx_pwm_apply,
};
@@ -188,9 +171,16 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ep93xx_pwm_of_ids[] = {
+ { .compatible = "cirrus,ep9301-pwm" },
+ { /* sentinel */}
+};
+MODULE_DEVICE_TABLE(of, ep93xx_pwm_of_ids);
+
static struct platform_driver ep93xx_pwm_driver = {
.driver = {
.name = "ep93xx-pwm",
+ .of_match_table = ep93xx_pwm_of_ids,
},
.probe = ep93xx_pwm_probe,
};
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 7a80c92b785e..955e4e38477e 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -330,8 +330,7 @@ config STM32_RPROC
config TI_K3_DSP_REMOTEPROC
tristate "TI K3 DSP remoteproc support"
depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's C66x and C71x DSP remote processor
subsystems on various TI K3 family of SoCs through the remote
@@ -356,8 +355,7 @@ config TI_K3_M4_REMOTEPROC
config TI_K3_R5_REMOTEPROC
tristate "TI K3 R5 remoteproc support"
depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's R5F remote processor subsystems
on various TI K3 family of SoCs through the remote processor
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e87c3d74565c..66eb1122248b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -743,6 +743,16 @@ config RTC_DRV_S5M
This driver can also be built as a module. If so, the module
will be called rtc-s5m.
+config RTC_DRV_SD2405AL
+ tristate "DFRobot SD2405AL"
+ select REGMAP_I2C
+ help
+ If you say yes here you will get support for the
+ DFRobot SD2405AL I2C RTC Module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sd2405al.
+
config RTC_DRV_SD3078
tristate "ZXW Shenzhen whwave SD3078"
select REGMAP_I2C
@@ -1934,6 +1944,12 @@ config RTC_DRV_STM32
tristate "STM32 RTC"
select REGMAP_MMIO
depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ depends on COMMON_CLK
help
If you say yes here you get support for the STM32 On-Chip
Real Time Clock.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8ee79cb18322..f62340ecc534 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -163,6 +163,7 @@ obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
+obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o
obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 4aad9bb99868..c4a3ab53dcd4 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -523,7 +523,6 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
static const struct file_operations rtc_dev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = rtc_dev_read,
.poll = rtc_dev_poll,
.unlocked_ioctl = rtc_dev_ioctl,
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f93bee96e362..993c0878fb66 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -368,6 +368,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
return ret;
rtc->gpbr = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
rtc->gpbr_offset = args.args[0];
if (IS_ERR(rtc->gpbr)) {
dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 0013bff0447d..1f58ae8b151e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -850,7 +850,6 @@ static const struct file_operations wdt_fops = {
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
- .llseek = no_llseek,
};
static struct miscdevice wdt_dev = {
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index f0f6b9b6daec..5d30ce8e13ca 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -132,7 +132,7 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
M48T59_WRITE(bin2bcd(year % 100), M48T59_YEAR);
- if (pdata->type == M48T59RTC_TYPE_M48T59 && (year / 100))
+ if (pdata->type == M48T59RTC_TYPE_M48T59 && (year >= 100))
val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
val |= (bin2bcd(tm->tm_wday) & 0x07);
M48T59_WRITE(val, M48T59_WDAY);
@@ -458,6 +458,8 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, m48t59);
m48t59->rtc->ops = &m48t59_rtc_ops;
+ m48t59->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ m48t59->rtc->range_max = RTC_TIMESTAMP_END_2099;
nvmem_cfg.size = pdata->offset;
ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index e73102a39f1b..711f62eecd79 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -429,14 +429,23 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc->rtc);
}
+static const struct platform_device_id rc5t619_rtc_id[] = {
+ {
+ .name = "rc5t619-rtc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rc5t619_rtc_id);
+
static struct platform_driver rc5t619_rtc_driver = {
.driver = {
.name = "rc5t619-rtc",
},
.probe = rc5t619_rtc_probe,
+ .id_table = rc5t619_rtc_id,
};
-
module_platform_driver(rc5t619_rtc_driver);
-MODULE_ALIAS("platform:rc5t619-rtc");
+
MODULE_DESCRIPTION("RICOH RC5T619 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 2d6b655a4b25..e3dc18882f41 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -56,7 +56,6 @@ static const struct i2c_device_id s35390a_id[] = {
MODULE_DEVICE_TABLE(i2c, s35390a_id);
static const __maybe_unused struct of_device_id s35390a_of_match[] = {
- { .compatible = "s35390a" },
{ .compatible = "sii,s35390a" },
{ }
};
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
new file mode 100644
index 000000000000..d2568c3e3876
--- /dev/null
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RTC driver for the SD2405AL Real-Time Clock
+ *
+ * Datasheet:
+ * https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf
+ *
+ * Copyright (C) 2024 Tóth János <gomba007@gmail.com>
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Real time clock registers */
+#define SD2405AL_REG_T_SEC 0x00
+#define SD2405AL_REG_T_MIN 0x01
+#define SD2405AL_REG_T_HOUR 0x02
+# define SD2405AL_BIT_12H_PM BIT(5)
+# define SD2405AL_BIT_24H BIT(7)
+#define SD2405AL_REG_T_WEEK 0x03
+#define SD2405AL_REG_T_DAY 0x04
+#define SD2405AL_REG_T_MON 0x05
+#define SD2405AL_REG_T_YEAR 0x06
+
+#define SD2405AL_NUM_T_REGS (SD2405AL_REG_T_YEAR - SD2405AL_REG_T_SEC + 1)
+
+/* Control registers */
+#define SD2405AL_REG_CTR1 0x0F
+# define SD2405AL_BIT_WRTC2 BIT(2)
+# define SD2405AL_BIT_WRTC3 BIT(7)
+#define SD2405AL_REG_CTR2 0x10
+# define SD2405AL_BIT_WRTC1 BIT(7)
+#define SD2405AL_REG_CTR3 0x11
+#define SD2405AL_REG_TTF 0x12
+#define SD2405AL_REG_CNTDWN 0x13
+
+/* General RAM */
+#define SD2405AL_REG_M_START 0x14
+#define SD2405AL_REG_M_END 0x1F
+
+struct sd2405al {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
+
+static int sd2405al_enable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
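+ /* WRTC1 first, then WRTC2|WRTC3; disable clears them in reverse order */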
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, SD2405AL_BIT_WRTC1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_disable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_read_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS] = { 0 };
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_bulk_read(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ time->tm_sec = bcd2bin(data[SD2405AL_REG_T_SEC] & 0x7F);
+ time->tm_min = bcd2bin(data[SD2405AL_REG_T_MIN] & 0x7F);
+
+ if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_24H)
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x3F);
+ else
+ if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_12H_PM)
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR]
+ & 0x1F) + 12;
+ else /* 12 hour mode, AM */
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR]
+ & 0x1F);
+
+ time->tm_wday = bcd2bin(data[SD2405AL_REG_T_WEEK] & 0x07);
+ time->tm_mday = bcd2bin(data[SD2405AL_REG_T_DAY] & 0x3F);
+ time->tm_mon = bcd2bin(data[SD2405AL_REG_T_MON] & 0x1F) - 1;
+ time->tm_year = bcd2bin(data[SD2405AL_REG_T_YEAR]) + 100;
+
+ dev_dbg(sd2405al->dev, "read time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static int sd2405al_set_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS];
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ data[SD2405AL_REG_T_SEC] = bin2bcd(time->tm_sec);
+ data[SD2405AL_REG_T_MIN] = bin2bcd(time->tm_min);
+ data[SD2405AL_REG_T_HOUR] = bin2bcd(time->tm_hour) | SD2405AL_BIT_24H;
+ data[SD2405AL_REG_T_DAY] = bin2bcd(time->tm_mday);
+ data[SD2405AL_REG_T_WEEK] = bin2bcd(time->tm_wday);
+ data[SD2405AL_REG_T_MON] = bin2bcd(time->tm_mon + 1);
+ data[SD2405AL_REG_T_YEAR] = bin2bcd(time->tm_year - 100);
+
+ ret = sd2405al_enable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_write(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(sd2405al->regmap, SD2405AL_REG_TTF, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = sd2405al_disable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(sd2405al->dev, "set time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sd2405al_rtc_ops = {
+ .read_time = sd2405al_read_time,
+ .set_time = sd2405al_set_time,
+};
+
+static const struct regmap_config sd2405al_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SD2405AL_REG_M_END,
+};
+
+static int sd2405al_probe(struct i2c_client *client)
+{
+ struct sd2405al *sd2405al;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ sd2405al = devm_kzalloc(&client->dev, sizeof(*sd2405al), GFP_KERNEL);
+ if (!sd2405al)
+ return -ENOMEM;
+
+ sd2405al->dev = &client->dev;
+
+ sd2405al->regmap = devm_regmap_init_i2c(client, &sd2405al_regmap_conf);
+ if (IS_ERR(sd2405al->regmap))
+ return PTR_ERR(sd2405al->regmap);
+
+ sd2405al->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(sd2405al->rtc))
+ return PTR_ERR(sd2405al->rtc);
+
+ sd2405al->rtc->ops = &sd2405al_rtc_ops;
+ sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ dev_set_drvdata(&client->dev, sd2405al);
+
+ ret = devm_rtc_register_device(sd2405al->rtc);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct i2c_device_id sd2405al_id[] = {
+ { "sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sd2405al_id);
+
+static const __maybe_unused struct of_device_id sd2405al_of_match[] = {
+ { .compatible = "dfrobot,sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sd2405al_of_match);
+
+static struct i2c_driver sd2405al_driver = {
+ .driver = {
+ .name = "sd2405al",
+ .of_match_table = of_match_ptr(sd2405al_of_match),
+ },
+ .probe = sd2405al_probe,
+ .id_table = sd2405al_id,
+};
+
+module_i2c_driver(sd2405al_driver);
+
+MODULE_AUTHOR("Tóth János <gomba007@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SD2405AL RTC driver");
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 98b07969609d..3e4f2ee22b0b 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -7,12 +7,16 @@
#include <linux/bcd.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
@@ -42,6 +46,12 @@
#define STM32_RTC_CR_FMT BIT(6)
#define STM32_RTC_CR_ALRAE BIT(8)
#define STM32_RTC_CR_ALRAIE BIT(12)
+#define STM32_RTC_CR_OSEL GENMASK(22, 21)
+#define STM32_RTC_CR_OSEL_ALARM_A FIELD_PREP(STM32_RTC_CR_OSEL, 0x01)
+#define STM32_RTC_CR_COE BIT(23)
+#define STM32_RTC_CR_TAMPOE BIT(26)
+#define STM32_RTC_CR_TAMPALRM_TYPE BIT(30)
+#define STM32_RTC_CR_OUT2EN BIT(31)
/* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */
#define STM32_RTC_ISR_ALRAWF BIT(0)
@@ -78,6 +88,12 @@
/* STM32_RTC_SR/_SCR bit fields */
#define STM32_RTC_SR_ALRA BIT(0)
+/* STM32_RTC_CFGR bit fields */
+#define STM32_RTC_CFGR_OUT2_RMP BIT(0)
+#define STM32_RTC_CFGR_LSCOEN GENMASK(2, 1)
+#define STM32_RTC_CFGR_LSCOEN_OUT1 1
+#define STM32_RTC_CFGR_LSCOEN_OUT2_RMP 2
+
/* STM32_RTC_VERR bit fields */
#define STM32_RTC_VERR_MINREV_SHIFT 0
#define STM32_RTC_VERR_MINREV GENMASK(3, 0)
@@ -107,6 +123,14 @@
/* STM32 RTC driver time helpers */
#define SEC_PER_DAY (24 * 60 * 60)
+/* STM32 RTC pinctrl helpers */
+#define STM32_RTC_PINMUX(_name, _action, ...) { \
+ .name = (_name), \
+ .action = (_action), \
+ .groups = ((const char *[]){ __VA_ARGS__ }), \
+ .num_groups = ARRAY_SIZE(((const char *[]){ __VA_ARGS__ })), \
+}
+
struct stm32_rtc;
struct stm32_rtc_registers {
@@ -119,6 +143,7 @@ struct stm32_rtc_registers {
u16 wpr;
u16 sr;
u16 scr;
+ u16 cfgr;
u16 verr;
};
@@ -134,6 +159,8 @@ struct stm32_rtc_data {
bool need_dbp;
bool need_accuracy;
bool rif_protected;
+ bool has_lsco;
+ bool has_alarm_out;
};
struct stm32_rtc {
@@ -146,6 +173,7 @@ struct stm32_rtc {
struct clk *rtc_ck;
const struct stm32_rtc_data *data;
int irq_alarm;
+ struct clk *clk_lsco;
};
struct stm32_rtc_rif_resource {
@@ -171,6 +199,209 @@ static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc)
writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + regs->wpr);
}
+enum stm32_rtc_pin_name {
+ NONE,
+ OUT1,
+ OUT2,
+ OUT2_RMP
+};
+
+static const struct pinctrl_pin_desc stm32_rtc_pinctrl_pins[] = {
+ PINCTRL_PIN(OUT1, "out1"),
+ PINCTRL_PIN(OUT2, "out2"),
+ PINCTRL_PIN(OUT2_RMP, "out2_rmp"),
+};
+
+static int stm32_rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinctrl_pins);
+}
+
+static const char *stm32_rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return stm32_rtc_pinctrl_pins[selector].name;
+}
+
+static int stm32_rtc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &stm32_rtc_pinctrl_pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops stm32_rtc_pinctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = stm32_rtc_pinctrl_get_groups_count,
+ .get_group_name = stm32_rtc_pinctrl_get_group_name,
+ .get_group_pins = stm32_rtc_pinctrl_get_group_pins,
+};
+
+struct stm32_rtc_pinmux_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+ int (*action)(struct pinctrl_dev *pctl_dev, unsigned int pin);
+};
+
+static int stm32_rtc_pinmux_action_alarm(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ if (!rtc->data->has_alarm_out)
+ return -EPERM;
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr |= STM32_RTC_CR_OSEL_ALARM_A;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+
+ switch (pin) {
+ case OUT1:
+ cr &= ~STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2_RMP:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr |= STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ stm32_rtc_wpr_lock(rtc);
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_lsco_available(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+ unsigned int calib = STM32_RTC_CR_COE;
+ unsigned int tampalrm = STM32_RTC_CR_TAMPOE | STM32_RTC_CR_OSEL;
+
+ switch (pin) {
+ case OUT1:
+ if ((!(cr & STM32_RTC_CR_OUT2EN) &&
+ ((cr & calib) || cr & tampalrm)) ||
+ ((cr & calib) && (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ case OUT2_RMP:
+ if ((cr & STM32_RTC_CR_OUT2EN) &&
+ (cfgr & STM32_RTC_CFGR_OUT2_RMP) &&
+ ((cr & calib) || (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (clk_get_rate(rtc->rtc_ck) != 32768)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_action_lsco(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ struct device *dev = rtc->rtc_dev->dev.parent;
+ u8 lscoen;
+ int ret;
+
+ if (!rtc->data->has_lsco)
+ return -EPERM;
+
+ ret = stm32_rtc_pinmux_lsco_available(pctldev, pin);
+ if (ret)
+ return ret;
+
+ lscoen = (pin == OUT1) ? STM32_RTC_CFGR_LSCOEN_OUT1 : STM32_RTC_CFGR_LSCOEN_OUT2_RMP;
+
+ rtc->clk_lsco = clk_register_gate(dev, "rtc_lsco", __clk_get_name(rtc->rtc_ck),
+ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL,
+ rtc->base + regs.cfgr, lscoen, 0, NULL);
+ if (IS_ERR(rtc->clk_lsco))
+ return PTR_ERR(rtc->clk_lsco);
+
+ of_clk_add_provider(dev->of_node, of_clk_src_simple_get, rtc->clk_lsco);
+
+ return 0;
+}
+
+static const struct stm32_rtc_pinmux_func stm32_rtc_pinmux_functions[] = {
+ STM32_RTC_PINMUX("lsco", &stm32_rtc_pinmux_action_lsco, "out1", "out2_rmp"),
+ STM32_RTC_PINMUX("alarm-a", &stm32_rtc_pinmux_action_alarm, "out1", "out2", "out2_rmp"),
+};
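STM32_RTC_PINMUX relies on a compound-literal string array so that ARRAY_SIZE() can count the variadic group names at compile time. A small stand-alone sketch of that counting trick follows; the COUNT_STRINGS name is made up for illustration only.

#include <stdio.h>

#define COUNT_STRINGS(...) \
	(sizeof((const char *[]){ __VA_ARGS__ }) / sizeof(const char *))

int main(void)
{
	printf("%zu\n", COUNT_STRINGS("out1", "out2_rmp"));		/* prints 2 */
	printf("%zu\n", COUNT_STRINGS("out1", "out2", "out2_rmp"));	/* prints 3 */
	return 0;
}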
+
+static int stm32_rtc_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinmux_functions);
+}
+
+static const char *stm32_rtc_pinmux_get_fname(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return stm32_rtc_pinmux_functions[selector].name;
+}
+
+static int stm32_rtc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups, unsigned int * const num_groups)
+{
+ *groups = stm32_rtc_pinmux_functions[selector].groups;
+ *num_groups = stm32_rtc_pinmux_functions[selector].num_groups;
+ return 0;
+}
+
+static int stm32_rtc_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct stm32_rtc_pinmux_func selected_func = stm32_rtc_pinmux_functions[selector];
+ struct pinctrl_pin_desc pin = stm32_rtc_pinctrl_pins[group];
+
+ /* Call action */
+ if (selected_func.action)
+ return selected_func.action(pctldev, pin.number);
+
+ return -EINVAL;
+}
+
+static const struct pinmux_ops stm32_rtc_pinmux_ops = {
+ .get_functions_count = stm32_rtc_pinmux_get_functions_count,
+ .get_function_name = stm32_rtc_pinmux_get_fname,
+ .get_function_groups = stm32_rtc_pinmux_get_groups,
+ .set_mux = stm32_rtc_pinmux_set_mux,
+ .strict = true,
+};
+
+static struct pinctrl_desc stm32_rtc_pdesc = {
+ .name = DRIVER_NAME,
+ .pins = stm32_rtc_pinctrl_pins,
+ .npins = ARRAY_SIZE(stm32_rtc_pinctrl_pins),
+ .owner = THIS_MODULE,
+ .pctlops = &stm32_rtc_pinctrl_ops,
+ .pmxops = &stm32_rtc_pinmux_ops,
+};
+
static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
{
const struct stm32_rtc_registers *regs = &rtc->data->regs;
@@ -576,6 +807,8 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -586,6 +819,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -599,6 +833,8 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -609,6 +845,7 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -631,6 +868,8 @@ static const struct stm32_rtc_data stm32mp1_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = false,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -641,6 +880,7 @@ static const struct stm32_rtc_data stm32mp1_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -654,6 +894,8 @@ static const struct stm32_rtc_data stm32mp25_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = true,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -664,6 +906,7 @@ static const struct stm32_rtc_data stm32mp25_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -681,6 +924,30 @@ static const struct of_device_id stm32_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
+static void stm32_rtc_clean_outs(struct stm32_rtc *rtc)
+{
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+ cr &= ~STM32_RTC_CR_OUT2EN;
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ stm32_rtc_wpr_lock(rtc);
+
+ if (regs.cfgr != UNDEF_REG) {
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ cfgr &= ~STM32_RTC_CFGR_LSCOEN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ }
+}
+
static int stm32_rtc_check_rif(struct stm32_rtc *stm32_rtc,
struct stm32_rtc_rif_resource res)
{
@@ -791,6 +1058,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
+ struct pinctrl_dev *pctl;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -912,6 +1180,16 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
+ stm32_rtc_clean_outs(rtc);
+
+ ret = devm_pinctrl_register_and_init(&pdev->dev, &stm32_rtc_pdesc, rtc, &pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl register failed");
+
+ ret = pinctrl_enable(pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl enable failed");
+
/*
* If INITS flag is reset (calendar year field set to 0x00), calendar
* must be initialized
@@ -950,6 +1228,9 @@ static void stm32_rtc_remove(struct platform_device *pdev)
const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int cr;
+ if (!IS_ERR_OR_NULL(rtc->clk_lsco))
+ clk_unregister_gate(rtc->clk_lsco);
+
/* Disable interrupts */
stm32_rtc_wpr_unlock(rtc);
cr = readl_relaxed(rtc->base + regs->cr);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8e0c66906103..e681c1745866 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -402,6 +402,7 @@ CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
+ .has_auto_swt = 1,
};
static void __init sun8i_v3_rtc_clk_init(struct device_node *node)
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2cfacdd37e09..4e24c12004f1 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -591,8 +591,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
memset(&nvmem_cfg, 0, sizeof(nvmem_cfg));
nvmem_cfg.name = "twl-secured-";
nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED;
- nvmem_cfg.reg_read = twl_nvram_read,
- nvmem_cfg.reg_write = twl_nvram_write,
+ nvmem_cfg.reg_read = twl_nvram_read;
+ nvmem_cfg.reg_write = twl_nvram_write;
nvmem_cfg.word_size = 1;
nvmem_cfg.stride = 1;
if (twl_class_is_4030()) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 61515781c5dd..cfe7efd5b5da 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -515,7 +515,6 @@ static const struct file_operations fs3270_fops = {
.compat_ioctl = fs3270_ioctl, /* ioctl */
.open = fs3270_open, /* open */
.release = fs3270_close, /* release */
- .llseek = no_llseek,
};
static void fs3270_create_cb(int minor)
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 248b5db3eaa8..dd6051602070 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -115,7 +115,6 @@ static const struct file_operations sclp_ctl_fops = {
.open = nonseekable_open,
.unlocked_ioctl = sclp_ctl_ioctl,
.compat_ioctl = sclp_ctl_ioctl,
- .llseek = no_llseek,
};
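Several hunks in this series only drop the .llseek = no_llseek initializer. A hedged sketch of what an ioctl-only file_operations looks like after the cleanup, assuming the VFS refuses seeks when the file is opened with nonseekable_open(); names here are invented for illustration.

#include <linux/fs.h>
#include <linux/module.h>

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* no commands implemented in this sketch */
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= nonseekable_open,
	.unlocked_ioctl	= demo_ioctl,
	/* no .llseek: seeking is rejected for nonseekable files */
};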
/*
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index cc8237afeffa..89778d922d9f 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -52,7 +52,6 @@ static const struct file_operations tape_fops =
#endif
.open = tapechar_open,
.release = tapechar_release,
- .llseek = no_llseek,
};
static int tapechar_major = TAPECHAR_MAJOR;
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 42c9f77f8da0..f598edc5f251 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -448,7 +448,6 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations uvio_dev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = uvio_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice uvio_dev_miscdev = {
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index eb0520a9d4af..c6d58335beb4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -242,7 +242,6 @@ static const struct file_operations vmcp_fops = {
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
.compat_ioctl = vmcp_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice vmcp_dev = {
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c09e1e09fb66..bd5cecc44123 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -96,7 +96,6 @@ static const struct file_operations vmlogrdr_fops = {
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
- .llseek = no_llseek,
};
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 0969fa01df58..33cebb91b933 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -165,7 +165,6 @@ static const struct file_operations zcore_reipl_fops = {
.write = zcore_reipl_write,
.open = zcore_reipl_open,
.release = zcore_reipl_release,
- .llseek = no_llseek,
};
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
@@ -200,7 +199,6 @@ static const struct file_operations zcore_hsa_fops = {
.write = zcore_hsa_write,
.read = zcore_hsa_read,
.open = nonseekable_open,
- .llseek = no_llseek,
};
static int __init check_sdias(void)
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index e6c800653f98..1e58ee3cc87d 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -924,7 +924,6 @@ static const struct file_operations chsc_fops = {
.release = chsc_release,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice chsc_misc_device = {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 53b68f8c32f3..7b59d20bf785 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1332,7 +1332,6 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
static const struct proc_ops cio_settle_proc_ops = {
.proc_open = nonseekable_open,
.proc_write = cio_settle_write,
- .proc_lseek = no_llseek,
};
static int __init cio_settle_init(void)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index c20251e00cf9..3a39e167bdbf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -776,7 +776,6 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
- .llseek = no_llseek,
.unlocked_ioctl = pkey_unlocked_ioctl,
};
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4aeb3e1213c7..67a807e2e75b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -26,6 +26,18 @@ MODULE_LICENSE("GPL v2");
struct ap_matrix_dev *matrix_dev;
debug_info_t *vfio_ap_dbf_info;
+static ssize_t features_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "guest_matrix hotplug ap_config\n");
+}
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *matrix_dev_attrs[] = {
+ &dev_attr_features.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(matrix_dev);
+
/* Only type 10 adapters (CEX4 and later) are supported
* by the AP matrix device driver
*/
@@ -68,6 +80,7 @@ static struct device_driver matrix_driver = {
.name = "vfio_ap",
.bus = &matrix_bus,
.suppress_bind_attrs = true,
+ .dev_groups = matrix_dev_groups,
};
static int vfio_ap_matrix_dev_create(void)
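The new read-only features attribute is attached through dev_groups, so it should appear under the matrix device in sysfs once the driver is bound. A hypothetical user-space probe for it is sketched below; the exact sysfs path is an assumption and may differ between systems.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[128] = "";
	FILE *f = fopen("/sys/devices/vfio_ap/matrix/features", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("ap_config supported: %s\n",
		       strstr(buf, "ap_config") ? "yes" : "no");
	fclose(f);
	return 0;
}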
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index f9a47b54c51a..5020696f1379 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1908,7 +1908,6 @@ static const struct file_operations zcrypt_fops = {
#endif
.open = zcrypt_open,
.release = zcrypt_release,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index cc178874c4a6..8643947fee8e 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -687,7 +687,6 @@ static int openprom_release(struct inode * inode, struct file * file)
static const struct file_operations openprom_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = openprom_ioctl,
.compat_ioctl = openprom_compat_ioctl,
.open = openprom_open,
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 3c88f29f4c47..8bbed7a7afb7 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -221,7 +221,6 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id)
static const struct file_operations uctrl_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = uctrl_ioctl,
.open = uctrl_open,
};
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index d92cf1dccc2f..0909b03e2497 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -485,7 +485,6 @@ struct cxgbi_device {
unsigned char nmtus;
unsigned char nports;
struct pci_dev *pdev;
- struct dentry *debugfs_root;
struct iscsi_transport *itp;
struct module *owner;
@@ -499,7 +498,6 @@ struct cxgbi_device {
unsigned int rxq_idx_cntr;
struct cxgbi_ports_map pmap;
- void (*dev_ddp_cleanup)(struct cxgbi_device *);
struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
struct cxgbi_task_tag_info *);
@@ -512,7 +510,6 @@ struct cxgbi_device {
unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
- int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
void (*csk_send_abort_req)(struct cxgbi_sock *);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index feda9b54b443..4cd3a3eab6f1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2421,7 +2421,7 @@ out:
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
- dev_info(dev, "slot complete: task(%pK) ignored\n ",
+ dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
return;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index a3d1013c8307..e66c3ef74267 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 max_sectors = IBMVFC_MAX_SECTORS;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
@@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
+MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
"[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
@@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
memset(login_info, 0, sizeof(*login_info));
login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
- login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
login_info->partition_num = cpu_to_be32(vhost->partition_number);
@@ -5230,7 +5234,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
}
vhost->logged_in = 1;
- npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
@@ -6329,7 +6333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
shost->can_queue = scsi_qdepth;
shost->max_lun = max_lun;
shost->max_id = max_targets;
- shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_sectors = max_sectors;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
@@ -6556,6 +6560,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
**/
static int __init ibmvfc_module_init(void)
{
+ int min_max_sectors = PAGE_SIZE >> 9;
int rc;
if (!firmware_has_feature(FW_FEATURE_VIO))
@@ -6564,6 +6569,16 @@ static int __init ibmvfc_module_init(void)
printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+ /*
+ * Range check the max_sectors module parameter. The upper bound is
+ * implicitly checked since the parameter is a ushort.
+ */
+ if (max_sectors < min_max_sectors) {
+ printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
+ min_max_sectors);
+ max_sectors = min_max_sectors;
+ }
+
ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
if (!ibmvfc_transport_template)
return -ENOMEM;
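The new max_sectors parameter feeds both max_dma_len (in bytes) and the SCSI host limit, with a lower bound of one page worth of 512-byte sectors. A tiny stand-alone sketch of those conversions, assuming 4 KiB pages and the new 2048-sector default:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;			/* assumption: 4 KiB pages */
	unsigned short max_sectors = 2048;		/* IBMVFC_MAX_SECTORS default */
	unsigned int min_max_sectors = page_size >> 9;	/* 8 sectors */
	unsigned long long max_dma_len = (unsigned long long)max_sectors << 9;

	printf("min_max_sectors=%u max_dma_len=%llu bytes\n",
	       min_max_sectors, max_dma_len);
	return 0;
}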
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 745ad5ac7251..c73ed2314ad0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -32,7 +32,7 @@
#define IBMVFC_DEBUG 0
#define IBMVFC_MAX_TARGETS 1024
#define IBMVFC_MAX_LUN 0xffffffff
-#define IBMVFC_MAX_SECTORS 0xffffu
+#define IBMVFC_MAX_SECTORS 2048
#define IBMVFC_MAX_DISC_THREADS 4
#define IBMVFC_TGT_MEMPOOL_SZ 64
#define IBMVFC_MAX_CMDS_PER_LUN 64
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 4756a3f82531..85059b83ea6b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3208,6 +3208,9 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
cmdiocbq->num_bdes = num_bde;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ if (phba->cfg_vmid_app_header)
+ cmdiocbq->cmd_flag |= LPFC_IO_VMID;
+
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
cmdiocbq->bpl_dmabuf = txbmp;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 2dedd1493e5b..134bc96dd134 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1572,8 +1572,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
} else
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "3065 GFT_ID failed x%08x\n", ulp_status);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY,
+ "3065 GFT_ID status x%08x\n", ulp_status);
out:
lpfc_ct_free_iocb(phba, cmdiocb);
@@ -1647,6 +1647,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
+ /* If the caller wanted a synchronous DA_ID completion, signal the
+ * wait obj and clear flag to reset the vport.
+ */
+ if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+ if (ndlp->da_id_waitq)
+ wake_up(ndlp->da_id_waitq);
+ }
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
+
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
return;
@@ -2246,7 +2258,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0229 FDMI cmd %04x failed, latt = %d "
+ "0229 FDMI cmd %04x latt = %d "
"ulp_status: x%x, rid x%x\n",
be16_to_cpu(fdmi_cmd), latt, ulp_status,
ulp_word4);
@@ -2263,9 +2275,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for a CT LS_RJT response */
cmd = be16_to_cpu(fdmi_cmd);
if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
- /* FDMI rsp failed */
+ /* Log FDMI reject */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
- "0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
+ "0220 FDMI cmd FS_RJT Data: x%x", cmd);
/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f82615d87c4b..f5ae8cc15820 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags {
NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
/* wait for outstanding LOGO to cmpl */
NLP_WAIT_FOR_LOGO = 0x2,
+ /* wait for outstanding DA_ID to finish */
+ NLP_WAIT_FOR_DA_ID = 0x4
};
struct lpfc_nodelist {
@@ -159,7 +161,12 @@ struct lpfc_nodelist {
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
+
+ /* These wait objects are NPIV specific. These IOs must complete
+ * synchronously.
+ */
wait_queue_head_t *logo_waitq;
+ wait_queue_head_t *da_id_waitq;
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index de0ec945d2f1..d737b897ddd8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -979,7 +979,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fcoe_cvl_eventtag_attn =
phba->fcoe_cvl_eventtag;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
- "2611 FLOGI failed on FCF (x%x), "
+ "2611 FLOGI FCF (x%x), "
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
@@ -997,11 +997,11 @@ stop_rr_fcf_flogi:
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2858 FLOGI failure Status:x%x/x%x TMO"
- ":x%x Data x%lx x%x\n",
- ulp_status, ulp_word4, tmo,
- phba->hba_flag, phba->fcf.fcf_flag);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2858 FLOGI Status:x%x/x%x TMO"
+ ":x%x Data x%lx x%x\n",
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1023,7 +1023,7 @@ stop_rr_fcf_flogi:
lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x "
+ "0150 FLOGI Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
tmo, kref_read(&ndlp->kref));
@@ -1032,11 +1032,11 @@ stop_rr_fcf_flogi:
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE))) {
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0100 FLOGI failure Status:x%x/x%x "
- "TMO:x%x\n",
- ulp_status, ulp_word4, tmo);
+ /* Warn FLOGI status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0100 FLOGI Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
}
@@ -1964,16 +1964,16 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ulp_status) {
/* Check for retry */
- /* RRQ failed Don't print the vport to vport rjts */
+ /* Warn RRQ status. Don't print the vport to vport rjts */
if (ulp_status != IOSTAT_LS_RJT ||
(((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2881 RRQ failure DID:%06X Status:"
- "x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2881 RRQ DID:%06X Status:"
+ "x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
}
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
@@ -2077,16 +2077,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
goto out;
}
- /* PLOGI failed Don't print the vport to vport rjts */
+ /* Warn PLOGI status. Don't print the vport to vport rjts */
if (ulp_status != IOSTAT_LS_RJT ||
(((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2753 PLOGI failure DID:%06X "
- "Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2753 PLOGI DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2323,7 +2323,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_nodelist *ndlp;
char *mode;
- u32 loglevel;
u32 ulp_status;
u32 ulp_word4;
bool release_node = false;
@@ -2372,17 +2371,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* could be expected.
*/
if (test_bit(FC_FABRIC, &vport->fc_flag) ||
- vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) {
- mode = KERN_ERR;
- loglevel = LOG_TRACE_EVENT;
- } else {
+ vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)
+ mode = KERN_WARNING;
+ else
mode = KERN_INFO;
- loglevel = LOG_ELS;
- }
- /* PRLI failed */
- lpfc_printf_vlog(vport, mode, loglevel,
- "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+ /* Warn PRLI status */
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
+ "2754 PRLI DID:%06X Status:x%x/x%x, "
"data: x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status,
ulp_word4, ndlp->nlp_state,
@@ -2854,11 +2850,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
goto out;
}
- /* ADISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ /* Warn ADISC status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2755 ADISC DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
@@ -3045,12 +3041,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* discovery. The PLOGI will retry.
*/
if (ulp_status) {
- /* LOGO failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2756 LOGO failure, No Retry DID:%06X "
- "Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ /* Warn LOGO status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2756 LOGO, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
@@ -4837,11 +4833,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0125 FDISC Failed (x%x). "
- "Fabric out of resources\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0125 FDISC (x%x). "
+ "Fabric out of resources\n",
+ stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_NO_FABRIC_RSCS);
}
@@ -4877,11 +4872,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
LSEXP_NOTHING_MORE) {
vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
retry = 1;
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0820 FLOGI Failed (x%x). "
- "BBCredit Not Supported\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0820 FLOGI (x%x). "
+ "BBCredit Not Supported\n",
+ stat.un.lsRjtError);
}
break;
@@ -4891,11 +4885,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0122 FDISC Failed (x%x). "
- "Fabric Detected Bad WWN\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0122 FDISC (x%x). "
+ "Fabric Detected Bad WWN\n",
+ stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_FABRIC_REJ_WWN);
}
@@ -5355,8 +5348,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3177 ELS response failed\n");
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "3177 null vport in ELS rsp\n");
goto out;
}
if (cmdiocb->context_un.mbox)
@@ -9658,11 +9651,12 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
continue;
- /* On the ELS ring we can have ELS_REQUESTs or
- * GEN_REQUESTs waiting for a response.
+ /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs,
+ * or GEN_REQUESTs waiting for a CQE response.
*/
ulp_command = get_job_cmnd(phba, piocb);
- if (ulp_command == CMD_ELS_REQUEST64_CR) {
+ if (ulp_command == CMD_ELS_REQUEST64_WQE ||
+ ulp_command == CMD_XMIT_ELS_RSP64_WQE) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands
@@ -11327,10 +11321,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FDISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0126 FDISC failed. (x%x/x%x)\n",
- ulp_status, ulp_word4);
+ /* Warn FDISC status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0126 FDISC cmpl status: x%x/x%x)\n",
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 35c9181c6608..9241075f72fa 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -527,6 +527,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
* the following lpfc_nlp_put is necessary after fabric node is
* recovered.
*/
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
if (recovering) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE,
@@ -539,6 +542,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
* queue work is processed. Thus, ignore the
@@ -552,15 +556,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
return fcf_inuse;
}
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
return fcf_inuse;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2108b4cb7815..d5c15742f7f2 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -562,6 +562,27 @@ struct fc_vft_header {
#include <uapi/scsi/fc/fc_els.h>
/*
+ * Application Header
+ */
+struct fc_app_header {
+ uint32_t dst_app_id;
+ uint32_t src_app_id;
+#define LOOPBACK_SRC_APPID 0x4321
+ uint32_t word2;
+ uint32_t word3;
+};
+
+/*
+ * dfctl optional header definition
+ */
+enum lpfc_fc_dfctl {
+ LPFC_FC_NO_DEVICE_HEADER,
+ LPFC_FC_16B_DEVICE_HEADER,
+ LPFC_FC_32B_DEVICE_HEADER,
+ LPFC_FC_64B_DEVICE_HEADER,
+};
+
+/*
* Extended Link Service LS_COMMAND codes (Payload Word 0)
*/
#ifdef __BIG_ENDIAN_BITFIELD
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 500253007b1d..26e1313ebb21 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4847,6 +4847,7 @@ struct fcp_iwrite64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
@@ -4863,6 +4864,7 @@ struct fcp_iread64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
@@ -4879,6 +4881,7 @@ struct fcp_icmnd64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 50620918becd..0dd451009b07 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4699,6 +4699,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
uint64_t wwn;
bool use_no_reset_hba = false;
int rc;
+ u8 if_type;
if (lpfc_no_hba_reset_cnt) {
if (phba->sli_rev < LPFC_SLI_REV4 &&
@@ -4773,10 +4774,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
- if (phba->sli_rev == LPFC_SLI_REV4)
- shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
- else
+
+ /* Set max_cmd_len applicable to ASIC support */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ fallthrough;
+ case LPFC_SLI_INTF_IF_TYPE_6:
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
+ break;
+ default:
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+ break;
+ }
+ } else {
shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+ }
if (phba->sli_rev == LPFC_SLI_REV4) {
if (!phba->cfg_fcp_mq_threshold ||
@@ -10436,6 +10451,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *eqcpup;
struct lpfc_eq_intr_info *eqi;
+ u32 wqesize;
/*
* Create HBA Record arrays.
@@ -10655,9 +10671,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
* Create ELS Work Queues
*/
- /* Create slow-path ELS Work Queue */
+ /*
+ * Create slow-path ELS Work Queue.
+ * Increase the ELS WQ size when WQEs contain an embedded cdb
+ */
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
- phba->sli4_hba.wq_esize,
+ wqesize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 60cd60ebff38..0eaede8275da 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4760,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
/* Word 3 */
bf_set(payload_offset_len, &wqe->fcp_icmd,
- sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp));
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
/* Word 6 */
bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 332b8d2348e9..2ec6e55771b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1940,12 +1940,15 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+
/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
- phba->link_state == LPFC_LINK_DOWN)
- return 0;
+ phba->link_state < LPFC_LINK_UP) {
+ ret_val = 0;
+ goto out_unlock;
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
@@ -8818,7 +8821,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_queue_setup(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0381 Error %d during queue setup.\n ", rc);
+ "0381 Error %d during queue setup.\n", rc);
goto out_stop_timers;
}
/* Initialize the driver internal SLI layer lists. */
@@ -11090,9 +11093,17 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
/* Word 9 */
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
- /* Word 12 */
- if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
+ /* Word 10 */
+ if (cmdiocbq->cmd_flag & LPFC_IO_VMID) {
+ bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1);
+ wqe->words[31] = LOOPBACK_SRC_APPID;
+ }
+
+ /* Word 12 */
wqe->xmit_sequence.xmit_len = full_size;
+ }
else
wqe->xmit_sequence.xmit_len =
wqe->xmit_sequence.bde.tus.f.bdeSize;
@@ -18431,6 +18442,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
/* make rctl_names static to save stack space */
struct fc_vft_header *fc_vft_hdr;
+ struct fc_app_header *fc_app_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
#define FC_RCTL_MDS_DIAGS 0xF4
@@ -18486,6 +18498,32 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
goto drop;
}
+ if (unlikely(phba->link_flag == LS_LOOPBACK_MODE &&
+ phba->cfg_vmid_app_header)) {
+ /* Application header is 16B device header */
+ if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) {
+ fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1);
+ if (be32_to_cpu(fc_app_hdr->src_app_id) !=
+ LOOPBACK_SRC_APPID) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_LIBDFC,
+ "1932 Loopback src app id "
+ "not matched, app_id:x%x\n",
+ be32_to_cpu(fc_app_hdr->src_app_id));
+
+ goto drop;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_LIBDFC,
+ "1933 Loopback df_ctl bit not set, "
+ "df_ctl:x%x\n",
+ fc_hdr->fh_df_ctl);
+
+ goto drop;
+ }
+ }
+
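The loopback check above expects a 16-byte application header immediately after the 24-byte basic FC header whenever fh_df_ctl flags a 16B device header. A stand-alone sketch of that parse follows; the df_ctl bit value and the src_app_id offset are assumptions mirroring the definitions added in lpfc_hw.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static int loopback_appid_ok(const uint8_t *frame, uint8_t fh_df_ctl)
{
	uint32_t src_app_id;

	if (!(fh_df_ctl & 0x01))	/* assumed LPFC_FC_16B_DEVICE_HEADER */
		return 0;

	/* The application header follows the 24-byte basic FC header;
	 * src_app_id is its second big-endian word.
	 */
	memcpy(&src_app_id, frame + 24 + 4, sizeof(src_app_id));
	return ntohl(src_app_id) == 0x4321;	/* LOOPBACK_SRC_APPID */
}

int main(void)
{
	uint8_t frame[40] = { 0 };

	frame[30] = 0x43;	/* src_app_id = 0x00004321, big endian */
	frame[31] = 0x21;
	printf("%d\n", loopback_appid_ok(frame, 0x01));	/* prints 1 */
	return 0;
}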
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
@@ -21149,7 +21187,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (!piocbq) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2823 txq empty and txq_cnt is %d\n ",
+ "2823 txq empty and txq_cnt is %d\n",
txq_cnt);
break;
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2fe0386a1fee..e70f163fab90 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.4"
+#define LPFC_DRIVER_VERSION "14.4.0.5"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 4439167a5188..7a4d4d8e2ad5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -626,6 +626,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
int rc;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -679,21 +680,49 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
if (!ndlp)
goto skip_logo;
+ /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
- /* Send DA_ID and wait for a completion. */
+ /* Send DA_ID and wait for a completion. This is best
+ * effort. If the DA_ID fails, likely the fabric will
+ * "leak" NportIDs but at least the driver issued the
+ * command.
+ */
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp)
+ goto issue_logo;
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->da_id_waitq = &waitq;
+ ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
+
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
- if (rc) {
- lpfc_printf_log(vport->phba, KERN_WARNING,
- LOG_VPORT,
- "1829 CT command failed to "
- "delete objects on fabric, "
- "rc %d\n", rc);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
+ "1829 DA_ID issue status %d. "
+ "SFlag x%x NState x%x, NFlag x%x "
+ "Rpi x%x\n",
+ rc, ndlp->save_flags, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+
+ /* Remove the waitq and save_flags. It no
+ * longer matters if the wake happened.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->da_id_waitq = NULL;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
}
+issue_logo:
/*
* If the vpi is not registered, then a valid FDISC doesn't
* exist and there is no need for a ELS LOGO. Just cleanup
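The DA_ID handling above uses an on-stack waitqueue plus a save_flags bit that the CT completion clears. A generic, hedged sketch of the same issue-and-wait pattern is shown below, with made-up demo_* names standing in for the ndlp fields.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define DEMO_WAIT_FOR_CMPL	0x1

struct demo_node {
	spinlock_t lock;
	unsigned long flags;
	wait_queue_head_t *waitq;
};

static void demo_issue_and_wait(struct demo_node *n, unsigned int ratov)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	spin_lock_irq(&n->lock);
	n->waitq = &waitq;
	n->flags |= DEMO_WAIT_FOR_CMPL;
	spin_unlock_irq(&n->lock);

	/* ... issue the command here; on success, wait for the completion ... */

	wait_event_timeout(waitq, !(n->flags & DEMO_WAIT_FOR_CMPL),
			   msecs_to_jiffies(ratov * 2000));

	/* Detach the waitqueue whether or not the wake happened. */
	spin_lock_irq(&n->lock);
	n->waitq = NULL;
	n->flags &= ~DEMO_WAIT_FOR_CMPL;
	spin_unlock_irq(&n->lock);
}

/* Completion path: wake the issuer, then clear the waiting flag. */
static void demo_complete(struct demo_node *n)
{
	if (n->waitq)
		wake_up(n->waitq);

	spin_lock_irq(&n->lock);
	n->flags &= ~DEMO_WAIT_FOR_CMPL;
	spin_unlock_irq(&n->lock);
}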
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6c79c350a4d5..4ecf5284c0fc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6380,7 +6380,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
- "unable to allocate stream detect by LD\n ");
+ "unable to allocate stream detect by LD\n");
for (j = 0; j < i; ++j)
kfree(fusion->stream_detect_by_ld[j]);
kfree(fusion->stream_detect_by_ld);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 4b7a8f6314a3..00cd18edfad6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -67,6 +67,7 @@
#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
#define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8)
#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff)
struct mpi3_config_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -75,7 +76,8 @@ struct mpi3_config_request {
u8 ioc_use_only06;
u8 msg_flags;
__le16 change_count;
- __le16 reserved0a;
+ u8 proxy_ioc_number;
+ u8 reserved0b;
u8 page_version;
u8 page_number;
u8 page_type;
@@ -206,6 +208,9 @@ struct mpi3_config_page_header {
#define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5)
#define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6)
#define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1)
+#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -1074,6 +1079,8 @@ struct mpi3_io_unit_page8 {
#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+#define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08)
struct mpi3_io_unit_page9 {
struct mpi3_config_page_header header;
__le32 flags;
@@ -1089,6 +1096,8 @@ struct mpi3_io_unit_page9 {
#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe)
+
struct mpi3_io_unit_page10 {
struct mpi3_config_page_header header;
u8 flags;
@@ -1224,6 +1233,19 @@ struct mpi3_io_unit_page15 {
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02)
#define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00)
+
+struct mpi3_io_unit_page17 {
+ struct mpi3_config_page_header header;
+ u8 num_instances;
+ u8 instance;
+ __le16 reserved0a;
+ __le32 reserved0c[4];
+ __le16 key_length;
+ u8 encryption_algorithm;
+ u8 reserved1f;
+ __le32 current_key[];
+};
+#define MPI3_IOUNIT17_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -1311,7 +1333,7 @@ struct mpi3_driver_page0 {
u8 tur_interval;
u8 reserved10;
u8 security_key_timeout;
- __le16 reserved12;
+ __le16 first_device;
__le32 reserved14;
__le32 reserved18;
};
@@ -1324,10 +1346,13 @@ struct mpi3_driver_page0 {
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff)
struct mpi3_driver_page1 {
struct mpi3_config_page_header header;
__le32 flags;
- __le32 reserved0c;
+ u8 time_stamp_update;
+ u8 reserved0d[3];
__le16 host_diag_trace_max_size;
__le16 host_diag_trace_min_size;
__le16 host_diag_trace_decrement_size;
@@ -2347,6 +2372,10 @@ struct mpi3_device0_vd_format {
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002)
union mpi3_device0_dev_spec_format {
struct mpi3_device0_sas_sata_format sas_sata_format;
struct mpi3_device0_pcie_format pcie_format;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 7df242190135..2c6e548cbd0f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -205,13 +205,14 @@ struct mpi3_encrypted_hash_entry {
u8 hash_image_type;
u8 hash_algorithm;
u8 encryption_algorithm;
- u8 reserved03;
+ u8 flags;
__le16 public_key_size;
__le16 signature_size;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
};
-
-#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
@@ -230,6 +231,12 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+/* hierarchical signature system (hss) */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index c9fa0d69b75f..c374867f9ba0 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -39,6 +39,12 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_HOST_DRIVER (0x03)
#define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003)
+
struct mpi3_ioc_facts_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -140,6 +146,8 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index fdc3d1968e43..b2ab25a1cfeb 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (31)
+#define MPI3_VERSION_UNIT (34)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -158,6 +158,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
+#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -410,6 +411,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 1dc640de3efc..fcb0fa31536b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -57,8 +57,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.10.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "08-Aug-2024"
+#define MPI3MR_DRIVER_VERSION "8.12.0.0.50"
+#define MPI3MR_DRIVER_RELDATE "05-Sept-2024"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -178,7 +178,7 @@ extern atomic64_t event_counter;
#define MPI3MR_DEFAULT_SDEV_QD 32
/* Definitions for Threaded IRQ poll*/
-#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_SLEEP 20
#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
/* Definitions for the controller security status*/
@@ -1090,6 +1090,7 @@ struct scmd_priv {
* @evtack_cmds_bitmap: Event Ack bitmap
* @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
+ * @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
* @prev_reset_result: Result of previous reset
@@ -1277,7 +1278,8 @@ struct mpi3mr_ioc {
unsigned long *evtack_cmds_bitmap;
struct list_head delayed_evtack_cmds_list;
- u32 ts_update_counter;
+ u16 ts_update_counter;
+ u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
int prev_reset_result;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 2e1a92d306b2..f1ab76351bd8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -728,7 +728,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
mpi3mr_process_op_reply_q(mrioc,
intr_info->op_reply_q);
- usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
@@ -1362,6 +1362,10 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
int retval = 0;
enum mpi3mr_iocstate ioc_state;
u64 base_info;
+ u8 retry = 0;
+ u64 start_time, elapsed_time_sec;
+
+retry_bring_ioc_ready:
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
@@ -1380,26 +1384,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_info(mrioc, "controller is in %s state during detection\n",
mpi3mr_iocstate_name(ioc_state));
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = mrioc->ready_timeout * 10;
- do {
- msleep(100);
- } while (--timeout);
+ timeout = mrioc->ready_timeout * 10;
+
+ do {
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if (ioc_state != MRIOC_STATE_BECOMING_READY &&
+ ioc_state != MRIOC_STATE_RESET_REQUESTED)
+ break;
if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1;
- ioc_err(mrioc,
- "controller is not present while waiting to reset\n");
- retval = -1;
+ ioc_err(mrioc, "controller is not present while waiting to reset\n");
goto out_device_not_present;
}
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc,
- "controller is in %s state after waiting to reset\n",
- mpi3mr_iocstate_name(ioc_state));
- }
+ msleep(100);
+ } while (--timeout);
if (ioc_state == MRIOC_STATE_READY) {
ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
@@ -1460,6 +1461,9 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+ if (retry == 0)
+ start_time = jiffies;
+
timeout = mrioc->ready_timeout * 10;
do {
ioc_state = mpi3mr_get_iocstate(mrioc);
@@ -1469,6 +1473,12 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
mpi3mr_iocstate_name(ioc_state));
return 0;
}
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ goto out_failed;
+ }
if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1;
ioc_err(mrioc,
@@ -1477,9 +1487,19 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
goto out_device_not_present;
}
msleep(100);
- } while (--timeout);
+ elapsed_time_sec = jiffies_to_msecs(jiffies - start_time) / 1000;
+ } while (elapsed_time_sec < mrioc->ready_timeout);
out_failed:
+ elapsed_time_sec = jiffies_to_msecs(jiffies - start_time) / 1000;
+ if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
+ retry++;
+
+ ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
+ " elapsed time =%llu\n", retry, elapsed_time_sec);
+
+ goto retry_bring_ioc_ready;
+ }
ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_err(mrioc,
"failed to bring to ready state, current state: %s\n",
@@ -2671,7 +2691,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
@@ -3845,6 +3865,29 @@ static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_read_tsu_interval - Update time stamp interval
+ * @mrioc: Adapter instance reference
+ *
+ * Update the time stamp update interval if it is defined in driver page 1,
+ * otherwise use the default value.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_driver_page1 driver_pg1;
+ u16 pg_sz = sizeof(driver_pg1);
+ int retval = 0;
+
+ mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
+
+ retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
+ if (!retval && driver_pg1.time_stamp_update)
+ mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
+}
+
+/**
* mpi3mr_print_ioc_info - Display controller information
* @mrioc: Adapter instance reference
*
@@ -4140,6 +4183,7 @@ retry_init:
goto out_failed_noretry;
}
+ mpi3mr_read_tsu_interval(mrioc);
mpi3mr_print_ioc_info(mrioc);
if (!mrioc->cfg_page) {
@@ -4321,6 +4365,7 @@ retry_init:
goto out_failed_noretry;
}
+ mpi3mr_read_tsu_interval(mrioc);
mpi3mr_print_ioc_info(mrioc);
if (is_resume) {
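As a worked example of the conversion in mpi3mr_read_tsu_interval() above (the units are an editorial assumption inferred from the "* 60" factor, not stated in the patch): a driver page 1 time_stamp_update of 15 minutes gives ts_update_interval = 15 * 60 = 900, and since the watchdog work increments ts_update_counter roughly once per second, mpi3mr_sync_timestamp() would then run about every 15 minutes instead of at the fixed MPI3MR_TSUPDATE_INTERVAL default.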
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 9a24f7776d64..ed5046593fda 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -8898,9 +8898,8 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
if (!device_remove_in_progress) {
ioc_info(ioc,
- "Unable to allocate the memory for "
- "device_remove_in_progress of sz: %d\n "
- , pd_handles_sz);
+ "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
+ pd_handles_sz);
return -ENOMEM;
}
memset(device_remove_in_progress +
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 1e63cb6cd8e3..33e1eba62ca1 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -100,10 +100,12 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- if (pm8001_ha->number_of_intr > 1)
+ if (pm8001_ha->number_of_intr > 1) {
blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ return;
+ }
- return blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
}
/*
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 8fe886dc5e47..a9869cd8c4c0 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2037,7 +2037,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
atomic_dec(&pm8001_dev->running_req);
break;
}
- pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ",
+ pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n",
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 72a4c6e3d0c8..4c5881917d76 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1946,7 +1946,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
}
iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
- ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("Waiting for IOA to become operational %x:%x\n",
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 054a51713d55..fcfc3bed02c6 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -310,7 +310,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
if (!free_sqes) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
- "Returning NULL, free_sqes=%d.\n ",
+ "Returning NULL, free_sqes=%d.\n",
free_sqes);
goto out_failed;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a9d8a9c62663..d95f417e24c0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2760,7 +2760,6 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
else
bd_len = 0;
alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
- memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
if (0x3 == pcontrol) { /* Saving values not supported */
mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
return check_condition_result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7f0394c44920..0561b318dade 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
- int ivecs;
if (WARN_ON_ONCE(!prot_sdb)) {
/*
@@ -1175,20 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
goto out_free_sgtables;
}
- ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
-
- if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+ if (sg_alloc_table_chained(&prot_sdb->table,
+ rq->nr_integrity_segments,
prot_sdb->table.sgl,
SCSI_INLINE_PROT_SG_CNT)) {
ret = BLK_STS_RESOURCE;
goto out_free_sgtables;
}
- count = blk_rq_map_integrity_sg(rq->q, rq->bio,
- prot_sdb->table.sgl);
- BUG_ON(count > ivecs);
- BUG_ON(count > queue_max_integrity_segments(rq->q));
-
+ count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 76f488ef6a7e..41e2dfa2d67d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bio-integrity.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -3404,7 +3403,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp,
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!vpd || vpd->len < 8) {
+ if (!vpd || vpd->len <= 8) {
rcu_read_unlock();
return;
}
@@ -4093,9 +4092,38 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ /* Power on, reset, or bus device reset occurred */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 0,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ /* Power on occurred */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 1,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ /* SCSI bus reset */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 2,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
.req_flags = BLK_MQ_REQ_PM,
+ .failures = &failures,
};
struct scsi_device *sdp = sdkp->device;
int res;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index baf870a03ecf..f86be197fedd 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1424,7 +1424,6 @@ static const struct file_operations sg_fops = {
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
- .llseek = no_llseek,
};
static const struct class sg_sysfs_class = {
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 0d8ce1a92168..d50bad3a2ce9 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -834,6 +834,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
int backspace, result;
struct st_partstat *STps;
+ if (STp->ready != ST_READY)
+ return 0;
+
/*
* If there was a bus reset, block further access
* to this device.
@@ -841,8 +844,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
if (STp->pos_unknown)
return (-EIO);
- if (STp->ready != ST_READY)
- return 0;
STps = &(STp->ps[STp->partition]);
if (STps->rw == ST_WRITING) /* Writing */
return st_flush_write_buffer(STp);
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 22d412cab91d..15602ec862e3 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -139,7 +139,7 @@ zalon_probe(struct parisc_device *dev)
return -ENODEV;
if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
- dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
+ dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n",
dev->irq);
goto fail;
}
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index abe9091827cd..a363f77881d1 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -32,8 +32,11 @@ store_intc_userimask(struct device *dev,
const char *buf, size_t count)
{
unsigned long level;
+ int ret;
- level = simple_strtoul(buf, NULL, 10);
+ ret = kstrtoul(buf, 10, &level);
+ if (ret != 0)
+ return ret;
/*
* Minimal acceptable IRQ levels are in the 2 - 16 range, but
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 4ce0cb61e481..242570a5e565 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -111,7 +111,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
bool need_tid = false, clk_pause_msg = false;
- int ret, timeout;
+ int ret;
+ unsigned long time_left;
/*
* do not vote for runtime-PM if the transactions are part of clock
@@ -151,9 +152,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
- timeout = wait_for_completion_timeout(txn->comp,
- msecs_to_jiffies(ms));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(txn->comp,
+ msecs_to_jiffies(ms));
+ if (!time_left) {
ret = -ETIMEDOUT;
slim_free_txn_tid(ctrl, txn);
}
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index 0274bc285b60..e25f9bdd9b23 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -330,7 +330,8 @@ static int qcom_xfer_msg(struct slim_controller *sctrl,
void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
unsigned long ms = txn->rl + HZ;
u8 *puc;
- int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
+ int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES;
+ unsigned long time_left;
u8 la = txn->la;
u32 *head;
/* HW expects length field to be excluded */
@@ -374,9 +375,9 @@ static int qcom_xfer_msg(struct slim_controller *sctrl,
memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
- timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
+ time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
- if (!timeout) {
+ if (!time_left) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index e0b21f0f79c1..1ac8e2912fd1 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -788,7 +788,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
DECLARE_COMPLETION_ONSTACK(tx_sent);
DECLARE_COMPLETION_ONSTACK(done);
- int ret, timeout, i;
+ int ret, i;
+ unsigned long time_left;
u8 wbuf[SLIM_MSGQ_BUF_LEN];
u8 rbuf[SLIM_MSGQ_BUF_LEN];
u32 *pbuf;
@@ -890,8 +891,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
return ret;
}
- timeout = wait_for_completion_timeout(&tx_sent, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&tx_sent, HZ);
+ if (!time_left) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
mutex_unlock(&ctrl->tx_lock);
@@ -899,8 +900,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
}
if (usr_msg) {
- timeout = wait_for_completion_timeout(&done, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&done, HZ);
+ if (!time_left) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
txn->mc, txn->mt);
mutex_unlock(&ctrl->tx_lock);
@@ -916,7 +917,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
- int ret, timeout;
+ int ret;
+ unsigned long time_left;
ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0)
@@ -928,8 +930,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
if (ret)
goto pm_put;
- timeout = wait_for_completion_timeout(&done, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&done, HZ);
+ if (!time_left) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
@@ -1168,11 +1170,12 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
enum qcom_slim_ngd_state cur_state = ctrl->state;
struct qcom_slim_ngd *ngd = ctrl->ngd;
u32 laddr, rx_msgq;
- int timeout, ret = 0;
+ int ret = 0;
+ unsigned long time_left;
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
- timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
- if (!timeout)
+ time_left = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
+ if (!time_left)
return -EREMOTEIO;
}
@@ -1217,8 +1220,8 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
ngd->base + NGD_RX_MSGQ_CFG);
qcom_slim_ngd_setup(ctrl);
- timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&ctrl->reconf, HZ);
+ if (!time_left) {
dev_err(ctrl->dev, "capability exchange timed-out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 5d924e946507..6a8daeb8c4b9 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -7,6 +7,7 @@ source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/canaan/Kconfig"
+source "drivers/soc/cirrus/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/fujitsu/Kconfig"
source "drivers/soc/hisilicon/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 56f476a12847..2037a8695cb2 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -8,6 +8,7 @@ obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_CANAAN) += canaan/
+obj-$(CONFIG_EP93XX_SOC) += cirrus/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
diff --git a/drivers/soc/cirrus/Kconfig b/drivers/soc/cirrus/Kconfig
new file mode 100644
index 000000000000..d8b3b1e68998
--- /dev/null
+++ b/drivers/soc/cirrus/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_EP93XX
+
+config EP93XX_SOC
+ bool "Cirrus EP93xx chips SoC"
+ select SOC_BUS
+ select AUXILIARY_BUS
+ default y
+ help
+ Enable SoC support for Cirrus EP93xx chips.
+
+ Cirrus EP93xx chips have several swlocked registers;
+ this driver provides locked access for the reset, pinctrl
+ and clk devices implemented as auxiliary devices.
+
+endif
diff --git a/drivers/soc/cirrus/Makefile b/drivers/soc/cirrus/Makefile
new file mode 100644
index 000000000000..9e6608b67f76
--- /dev/null
+++ b/drivers/soc/cirrus/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += soc-ep93xx.o
diff --git a/drivers/soc/cirrus/soc-ep93xx.c b/drivers/soc/cirrus/soc-ep93xx.c
new file mode 100644
index 000000000000..3e79b3b13aef
--- /dev/null
+++ b/drivers/soc/cirrus/soc-ep93xx.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SoC driver for Cirrus EP93xx chips.
+ * Copyright (C) 2022 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/core.c
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * Thanks go to Michael Burian and Ray Lehtiniemi for their key
+ * role in the ep93xx Linux community.
+ */
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+
+#define EP93XX_SWLOCK_MAGICK 0xaa
+#define EP93XX_SYSCON_SWLOCK 0xc0
+#define EP93XX_SYSCON_SYSCFG 0x9c
+#define EP93XX_SYSCON_SYSCFG_REV_MASK GENMASK(31, 28)
+#define EP93XX_SYSCON_SYSCFG_REV_SHIFT 28
+
+struct ep93xx_map_info {
+ spinlock_t lock;
+ void __iomem *base;
+ struct regmap *map;
+};
+
+/*
+ * EP93xx System Controller software locked register write
+ *
+ * Logic safeguards are included to condition the control signals for
+ * power connection to the matrix to prevent part damage. In addition, a
+ * software lock register is included that must be written with 0xAA
+ * before each register write to change the values of the four switch
+ * matrix control registers.
+ */
+static void ep93xx_regmap_write(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ regmap_write(map, reg, val);
+}
+
+static void ep93xx_regmap_update_bits(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ /* force write is required to clear swlock if no changes are made */
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
+static void ep93xx_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void ep93xx_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+
+ kfree(rdev);
+}
+
+static struct auxiliary_device __init *ep93xx_adev_alloc(struct device *parent,
+ const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct ep93xx_regmap_adev *rdev __free(kfree) = NULL;
+ struct auxiliary_device *adev;
+ int ret;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+
+ rdev->map = info->map;
+ rdev->base = info->base;
+ rdev->lock = &info->lock;
+ rdev->write = ep93xx_regmap_write;
+ rdev->update_bits = ep93xx_regmap_update_bits;
+
+ adev = &rdev->adev;
+ adev->name = name;
+ adev->dev.parent = parent;
+ adev->dev.release = ep93xx_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &no_free_ptr(rdev)->adev;
+}
+
+static int __init ep93xx_controller_register(struct device *parent, const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = ep93xx_adev_alloc(parent, name, info);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, ep93xx_unregister_adev, adev);
+}
+
+static unsigned int __init ep93xx_soc_revision(struct regmap *map)
+{
+ unsigned int val;
+
+ regmap_read(map, EP93XX_SYSCON_SYSCFG, &val);
+ val &= EP93XX_SYSCON_SYSCFG_REV_MASK;
+ val >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
+ return val;
+}
+
+static const char __init *ep93xx_get_soc_rev(unsigned int rev)
+{
+ switch (rev) {
+ case EP93XX_CHIP_REV_D0:
+ return "D0";
+ case EP93XX_CHIP_REV_D1:
+ return "D1";
+ case EP93XX_CHIP_REV_E0:
+ return "E0";
+ case EP93XX_CHIP_REV_E1:
+ return "E1";
+ case EP93XX_CHIP_REV_E2:
+ return "E2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *pinctrl_names[] __initconst = {
+ "pinctrl-ep9301", /* EP93XX_9301_SOC */
+ "pinctrl-ep9307", /* EP93XX_9307_SOC */
+ "pinctrl-ep9312", /* EP93XX_9312_SOC */
+};
+
+static int __init ep93xx_syscon_probe(struct platform_device *pdev)
+{
+ enum ep93xx_soc_model model;
+ struct ep93xx_map_info *map_info;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ void __iomem *base;
+ unsigned int rev;
+ int ret;
+
+ model = (enum ep93xx_soc_model)(uintptr_t)device_get_match_data(dev);
+
+ map = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ attrs = devm_kzalloc(dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ rev = ep93xx_soc_revision(map);
+
+ attrs->machine = of_flat_dt_get_machine_name();
+ attrs->family = "Cirrus Logic EP93xx";
+ attrs->revision = ep93xx_get_soc_rev(rev);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ map_info = devm_kzalloc(dev, sizeof(*map_info), GFP_KERNEL);
+ if (!map_info)
+ return -ENOMEM;
+
+ spin_lock_init(&map_info->lock);
+ map_info->map = map;
+ map_info->base = base;
+
+ ret = ep93xx_controller_register(dev, pinctrl_names[model], map_info);
+ if (ret)
+ dev_err(dev, "registering pinctrl controller failed\n");
+
+ /*
+ * EP93xx SSP clock rate was doubled in version E2. For more information
+ * see section 6 "2x SSP (Synchronous Serial Port) Clock – Revision E2 only":
+ * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
+ */
+ if (rev == EP93XX_CHIP_REV_E2)
+ ret = ep93xx_controller_register(dev, "clk-ep93xx.e2", map_info);
+ else
+ ret = ep93xx_controller_register(dev, "clk-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering clock controller failed\n");
+
+ ret = ep93xx_controller_register(dev, "reset-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering reset controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id ep9301_syscon_of_device_ids[] = {
+ { .compatible = "cirrus,ep9301-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9302-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9307-syscon", .data = (void *)EP93XX_9307_SOC },
+ { .compatible = "cirrus,ep9312-syscon", .data = (void *)EP93XX_9312_SOC },
+ { .compatible = "cirrus,ep9315-syscon", .data = (void *)EP93XX_9312_SOC },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ep9301_syscon_driver = {
+ .driver = {
+ .name = "ep9301-syscon",
+ .of_match_table = ep9301_syscon_of_device_ids,
+ },
+};
+builtin_platform_driver_probe(ep9301_syscon_driver, ep93xx_syscon_probe);
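To make the swlock flow above concrete, here is a minimal, hypothetical consumer sketch: an auxiliary driver bound to one of the devices registered by ep93xx_controller_register() retrieves the shared regmap and lock through struct ep93xx_regmap_adev and issues a locked update via the update_bits callback installed above. The probe function, the register/bit values, and the match-id string are illustrative assumptions, not part of the patch.

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/soc/cirrus/ep93xx.h>

/* Illustrative swlocked register offset and bit, not taken from the patch. */
#define EXAMPLE_SYSCON_DEVCFG	0x80
#define EXAMPLE_DEVCFG_BIT	BIT(0)

static int example_adev_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);

	/*
	 * The callback installed by soc-ep93xx.c takes rdev->lock and writes
	 * EP93XX_SWLOCK_MAGICK before the actual register update, so the
	 * consumer never has to open-code the swlock sequence itself.
	 */
	rdev->update_bits(rdev->map, rdev->lock, EXAMPLE_SYSCON_DEVCFG,
			  EXAMPLE_DEVCFG_BIT, EXAMPLE_DEVCFG_BIT);
	return 0;
}

static const struct auxiliary_device_id example_adev_ids[] = {
	{ .name = "soc_ep93xx.reset-ep93xx" },	/* assumed match string */
	{ }
};

static struct auxiliary_driver example_adev_driver = {
	.probe = example_adev_probe,
	.id_table = example_adev_ids,
};
module_auxiliary_driver(example_adev_driver);

MODULE_LICENSE("GPL");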
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 936d57869493..4f288f07e38f 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
- if (aq->mr != QSPI_MR_SMM) {
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
}
/* Clear pending interrupts */
@@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (ret < 0)
return ret;
- aq->scr = QSPI_SCR_SCBR(scbr);
+ aq->scr &= ~QSPI_SCR_SCBR_MASK;
+ aq->scr |= QSPI_SCR_SCBR(scbr);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
@@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
if (ret < 0)
return ret;
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK;
aq->scr |= QSPI_SCR_DLYBS(cs_setup);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq)
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
@@ -721,6 +723,7 @@ static void atmel_qspi_remove(struct platform_device *pdev)
clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index 9d97ec98881c..94458df53eae 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -211,9 +211,6 @@ struct airoha_snand_dev {
u8 *txrx_buf;
dma_addr_t dma_addr;
-
- u64 cur_page_num;
- bool data_need_update;
};
struct airoha_snand_ctrl {
@@ -405,7 +402,7 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -427,7 +424,7 @@ static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
if (err)
return err;
@@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
u32 val, rd_mode;
int err;
- if (!as_dev->data_need_update)
- return len;
-
- as_dev->data_need_update = false;
-
switch (op->cmd.opcode) {
case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
rd_mode = 1;
@@ -739,8 +731,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_READ_FROM_CACHE_DONE);
+ /*
+ * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
+ * of dirmap_read operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_READ_FROM_CACHE_DONE,
+ SPI_NFI_READ_FROM_CACHE_DONE);
if (err)
return err;
@@ -870,8 +867,13 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_LOAD_TO_CACHE_DONE);
+ /*
+ * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
+ * of dirmap_write operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_LOAD_TO_CACHE_DONE,
+ SPI_NFI_LOAD_TO_CACHE_DONE);
if (err)
return err;
@@ -885,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
- if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
- op->addr.val == as_dev->cur_page_num) {
- as_dev->data_need_update = true;
- } else if (opcode == SPI_NAND_OP_PAGE_READ) {
- if (!as_dev->data_need_update &&
- op->addr.val == as_dev->cur_page_num)
- return 0;
-
- as_dev->data_need_update = true;
- as_dev->cur_page_num = op->addr.val;
- }
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
@@ -986,7 +976,6 @@ static int airoha_snand_setup(struct spi_device *spi)
if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
return -ENOMEM;
- as_dev->data_need_update = true;
spi_set_ctldata(spi, as_dev);
return 0;
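The two comments in the dirmap paths above hinge on the difference between regmap_set_bits() and regmap_write_bits(): the former is a read-modify-write that may skip the bus access when the bits already read back as set, while the latter passes force = true to regmap_update_bits_base() so the write always reaches the hardware. A minimal sketch of the pattern, using a hypothetical helper name, is:

#include <linux/regmap.h>

/*
 * Hypothetical helper mirroring the pattern above: the *_CACHE_DONE bit has
 * to be written back at the end of every dirmap operation, even when it
 * already reads as 1, so the forced-write variant is used instead of
 * regmap_set_bits(), whose read-modify-write may elide the bus access.
 */
static int snfi_ack_done(struct regmap *map, unsigned int reg, unsigned int done)
{
	return regmap_write_bits(map, reg, done, done);
}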
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index a1d60e51c053..dc6bdc74643d 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -18,18 +18,18 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
-#include <linux/platform_data/dma-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-
#define SSPCR0 0x0000
#define SSPCR0_SPO BIT(6)
#define SSPCR0_SPH BIT(7)
@@ -76,8 +76,6 @@
* frame decreases this level and sending one frame increases it.
* @dma_rx: RX DMA channel
* @dma_tx: TX DMA channel
- * @dma_rx_data: RX parameters passed to the DMA engine
- * @dma_tx_data: TX parameters passed to the DMA engine
* @rx_sgt: sg table for RX transfers
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
@@ -92,8 +90,6 @@ struct ep93xx_spi {
size_t fifo_level;
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
- struct ep93xx_dma_data dma_rx_data;
- struct ep93xx_dma_data dma_tx_data;
struct sg_table rx_sgt;
struct sg_table tx_sgt;
void *zeropage;
@@ -575,46 +571,23 @@ static int ep93xx_spi_unprepare_hardware(struct spi_controller *host)
return 0;
}
-static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+static int ep93xx_spi_setup_dma(struct device *dev, struct ep93xx_spi *espi)
{
- if (ep93xx_dma_chan_is_m2p(chan))
- return false;
-
- chan->private = filter_param;
- return true;
-}
-
-static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
-{
- dma_cap_mask_t mask;
int ret;
espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
if (!espi->zeropage)
return -ENOMEM;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- espi->dma_rx_data.port = EP93XX_DMA_SSP;
- espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
- espi->dma_rx_data.name = "ep93xx-spi-rx";
-
- espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
- &espi->dma_rx_data);
- if (!espi->dma_rx) {
- ret = -ENODEV;
+ espi->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(espi->dma_rx)) {
+ ret = dev_err_probe(dev, PTR_ERR(espi->dma_rx), "rx DMA setup failed");
goto fail_free_page;
}
- espi->dma_tx_data.port = EP93XX_DMA_SSP;
- espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
- espi->dma_tx_data.name = "ep93xx-spi-tx";
-
- espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
- &espi->dma_tx_data);
- if (!espi->dma_tx) {
- ret = -ENODEV;
+ espi->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(espi->dma_tx)) {
+ ret = dev_err_probe(dev, PTR_ERR(espi->dma_tx), "tx DMA setup failed");
goto fail_release_rx;
}
@@ -647,18 +620,11 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
- struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
int irq;
int error;
- info = dev_get_platdata(&pdev->dev);
- if (!info) {
- dev_err(&pdev->dev, "missing platform data\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -713,12 +679,17 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
goto fail_release_host;
}
- if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ error = ep93xx_spi_setup_dma(&pdev->dev, espi);
+ if (error == -EPROBE_DEFER)
+ goto fail_release_host;
+
+ if (error)
dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
/* make sure that the hardware is disabled */
writel(0, espi->mmio + SSPCR1);
+ device_set_node(&host->dev, dev_fwnode(&pdev->dev));
error = devm_spi_register_controller(&pdev->dev, host);
if (error) {
dev_err(&pdev->dev, "failed to register SPI host\n");
@@ -746,9 +717,16 @@ static void ep93xx_spi_remove(struct platform_device *pdev)
ep93xx_spi_release_dma(espi);
}
+static const struct of_device_id ep93xx_spi_of_ids[] = {
+ { .compatible = "cirrus,ep9301-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_spi_of_ids);
+
static struct platform_driver ep93xx_spi_driver = {
.driver = {
.name = "ep93xx-spi",
+ .of_match_table = ep93xx_spi_of_ids,
},
.probe = ep93xx_spi_probe,
.remove_new = ep93xx_spi_remove,
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 8ecb426be45c..977e8b55c82b 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -986,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
fsl_lpspi_dma_exit(controller);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5539c5d139d4..653f82984216 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -685,7 +685,6 @@ static const struct file_operations spidev_fops = {
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
- .llseek = no_llseek,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index db4a392841b1..3fb68d60dfc1 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -54,8 +54,6 @@ source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/most/Kconfig"
-source "drivers/staging/ks7010/Kconfig"
-
source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5390879b5d1b..c977aa13fad4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
-obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 0be7c2d51548..050fc2367c12 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
devcode = read_devicecode(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
- devcode);
if ((devcode != 0x0000) && (devcode != 0x9320))
dev_warn(par->info->device,
"Unrecognized Device code: 0x%04X (expected 0x9320)\n",
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 398bdbf53c9a..0ab1de6647d0 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -41,13 +41,6 @@ static int init_display(struct fbtft_par *par)
{
gpiod_set_value(par->gpio.dc, 1);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s()\n", __func__);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "display size %dx%d\n",
- par->info->var.xres,
- par->info->var.yres);
-
par->fbtftops.reset(par);
if ((par->info->var.xres == 320) && (par->info->var.yres == 240)) {
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 9685ca516a0e..e4c50c1ffed0 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -88,9 +88,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
write_reg(par, on ? 0xAE : 0xAF);
return 0;
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index f27bab38b3ec..255a6d21ca8e 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -93,9 +93,6 @@ static int set_var(struct fbtft_par *par)
{
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register 11h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 6cf9df579e88..478d710469b9 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -148,9 +148,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index 796a2ac3e194..256b0b87a930 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -72,10 +72,6 @@ static uint8_t rgb565_to_g16(u16 pixel)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
- "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe,
- ye);
-
write_reg(par, 0x75);
write_reg(par, 0x00);
write_reg(par, 0x3f);
@@ -86,9 +82,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xAE);
else
@@ -109,8 +102,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i;
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
-
for (i = 0; i < GAMMA_LEN; i++) {
if (i > 0 && curves[i] < 1) {
dev_err(par->info->device,
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index ec5eced7f8cb..06b7056d6c71 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -167,8 +167,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index ca2cba2185ae..f6db2933ebba 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -72,9 +72,6 @@ static int set_var(struct fbtft_par *par)
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register A0h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
@@ -213,7 +210,7 @@ static void register_onboard_backlight(struct fbtft_par *par)
struct backlight_properties bl_props = { 0, };
bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.power = BACKLIGHT_POWER_OFF;
bd = backlight_device_register(dev_driver_string(par->info->device),
par->info->device, par, &bl_ops,
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index f61e373c75e9..ca35b386a12d 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -135,9 +135,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xA8 | 0x00);
else
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 3d422bc11641..30e436ff19e4 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -129,9 +129,6 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
int ret = 0;
size_t startbyte_size = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
@@ -182,9 +179,6 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
int i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
if (!par->txbuf.buf) {
dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__);
return -1;
@@ -232,9 +226,6 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
vmem16 = (u16 *)(par->info->screen_buffer + offset);
/* no need for buffered write with 16-bit bus */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 8e2fd0c0fee2..4cfa494243b9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -152,7 +152,7 @@ static int fbtft_backlight_get_brightness(struct backlight_device *bd)
void fbtft_unregister_backlight(struct fbtft_par *par)
{
if (par->info->bl_dev) {
- par->info->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ par->info->bl_dev->props.power = BACKLIGHT_POWER_OFF;
backlight_update_status(par->info->bl_dev);
backlight_device_unregister(par->info->bl_dev);
par->info->bl_dev = NULL;
@@ -178,7 +178,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
bl_props.type = BACKLIGHT_RAW;
/* Assume backlight is off, get polarity from current state of pin */
- bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.power = BACKLIGHT_POWER_OFF;
if (!gpiod_get_value(par->gpio.led[0]))
par->polarity = true;
@@ -215,8 +215,6 @@ static void fbtft_reset(struct fbtft_par *par)
if (!par->gpio.reset)
return;
- fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
-
gpiod_set_value_cansleep(par->gpio.reset, 1);
usleep_range(20, 40);
gpiod_set_value_cansleep(par->gpio.reset, 0);
@@ -801,7 +799,7 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
/* Turn on backlight if available */
if (fb_info->bl_dev) {
- fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
+ fb_info->bl_dev->props.power = BACKLIGHT_POWER_ON;
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
}
@@ -1052,8 +1050,6 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
struct fbtft_platform_data *pdata = par->pdata;
int i;
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
!par->gpio.dc) {
dev_err(par->info->device,
@@ -1157,9 +1153,6 @@ int fbtft_probe_common(struct fbtft_display *display,
else
dev = &pdev->dev;
- if (unlikely(display->debug & DEBUG_DRIVER_INIT_FUNCTIONS))
- dev_info(dev, "%s()\n", __func__);
-
pdata = dev->platform_data;
if (!pdata) {
pdata = fbtft_properties_read(dev);
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 39e8d28066cb..e45c90a03a90 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -27,13 +27,9 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
int curve_counter, value_counter;
int _count;
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__);
-
if (!str || !curves)
return -EINVAL;
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s\n", str);
-
tmp = kmemdup(str, size + 1, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index f86ed9d470b8..3e00a26a29d5 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -202,6 +202,7 @@ struct fbtft_par {
u8 *buf;
u8 startbyte;
struct fbtft_ops fbtftops;
+ /* Spinlock to ensure thread-safe access to dirty_lines_start and dirty_lines_end */
spinlock_t dirty_lock;
unsigned int dirty_lines_start;
unsigned int dirty_lines_end;
@@ -218,6 +219,7 @@ struct fbtft_par {
} gpio;
const s16 *init_sequence;
struct {
+ /* Mutex to synchronize access to the gamma curves */
struct mutex lock;
u32 *curves;
int num_values;
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index 5fc469101fc1..3e09147435a5 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -92,8 +92,8 @@ struct gb_camera_ops {
unsigned int *flags, struct gb_camera_stream *streams,
struct gb_camera_csi_params *csi_params);
int (*capture)(void *priv, u32 request_id,
- unsigned int streams, unsigned int num_frames,
- size_t settings_size, const void *settings);
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings);
int (*flush)(void *priv, u32 *request_id);
};
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index 0e4ae01eb00f..24e9c909fa02 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -490,10 +490,10 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
int ret;
u8 i;
- /* Allocate master with space for data */
- ctlr = spi_alloc_master(dev, sizeof(*spi));
+ /* Allocate host with space for data */
+ ctlr = spi_alloc_host(dev, sizeof(*spi));
if (!ctlr) {
- dev_err(dev, "cannot alloc SPI master\n");
+ dev_err(dev, "cannot alloc SPI host\n");
return -ENOMEM;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index cd00d9607565..4ae1a7039418 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -547,7 +547,8 @@ static int ad5933_ring_preenable(struct iio_dev *indio_dev)
struct ad5933_state *st = iio_priv(indio_dev);
int ret;
- if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ if (bitmap_empty(indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev)))
return -EINVAL;
ret = ad5933_reset(st);
@@ -625,7 +626,7 @@ static void ad5933_work(struct work_struct *work)
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ad5933_i2c_read(st->client,
test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
deleted file mode 100644
index 8ea6c0928679..000000000000
--- a/drivers/staging/ks7010/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config KS7010
- tristate "KeyStream KS7010 SDIO support"
- depends on MMC && WIRELESS
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- select CRYPTO
- select CRYPTO_HASH
- select CRYPTO_MICHAEL_MIC
- help
- This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is
- found on at least later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K" only,
- sadly not FCC-ID "S2Y-WLAN-11B-G") and Spectec SDW-823 microSD cards.
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
deleted file mode 100644
index 009851a32310..000000000000
--- a/drivers/staging/ks7010/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_KS7010) += ks7010.o
-
-ks7010-y := ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
deleted file mode 100644
index 80c97543b977..000000000000
--- a/drivers/staging/ks7010/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
-KS7010 Linux driver
-===================
-
-This driver is based on source code from the Ben Nanonote extra repository [1]
-which is based on the original v007 release from Renesas [2]. Some more
-background info about the chipset can be found here [3] and here [4]. Thank
-you to all which already participated in cleaning up the driver so far!
-
-[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src
-[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2
-[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi
-[4] https://wikidevi.com/wiki/Renesas
-
-TODO
-----
-
-First a few words what not to do (at least not blindly):
-
-- don't be overly strict with the 80 char limit. Only if it REALLY makes the
- code more readable
-
-Now the TODOs:
-
-- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure
- that you are not only silencing the warning but really fixing code. You
- should understand the change you submit.
-- fix the 'card removal' event seen when the card is already inserted at boot
-- check what other upstream wireless mechanisms can be used instead of the
- custom ones here
-- Switch to use LIB80211.
-- Switch to use MAC80211.
-- Switch to use CFG80211.
-
-Please send any patches to:
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
deleted file mode 100644
index 1eee774319ad..000000000000
--- a/drivers/staging/ks7010/eap_packet.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef EAP_PACKET_H
-#define EAP_PACKET_H
-
-#include <linux/compiler.h>
-#include <linux/bitops.h>
-#include <uapi/linux/if_ether.h>
-
-struct ether_hdr {
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
- unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned char h_dest_snap;
- unsigned char h_source_snap;
- unsigned char h_command;
- unsigned char h_vendor_id[3];
- __be16 h_proto; /* packet type ID field */
- /* followed by length octets of data */
-} __packed;
-
-#define ETHER_HDR_SIZE sizeof(struct ether_hdr)
-
-struct ieee802_1x_hdr {
- unsigned char version;
- unsigned char type;
- unsigned short length;
- /* followed by length octets of data */
-} __packed;
-
-enum {
- IEEE802_1X_TYPE_EAP_PACKET = 0,
- IEEE802_1X_TYPE_EAPOL_START = 1,
- IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
- IEEE802_1X_TYPE_EAPOL_KEY = 3,
- IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
-};
-
-#define WPA_NONCE_LEN 32
-#define WPA_REPLAY_COUNTER_LEN 8
-
-struct wpa_eapol_key {
- unsigned char type;
- __be16 key_info;
- unsigned short key_length;
- unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
- unsigned char key_nonce[WPA_NONCE_LEN];
- unsigned char key_iv[16];
- unsigned char key_rsc[8];
- unsigned char key_id[8]; /* Reserved in IEEE 802.11i/RSN */
- unsigned char key_mic[16];
- unsigned short key_data_length;
- /* followed by key_data_length bytes of key_data */
-} __packed;
-
-#define WPA_KEY_INFO_TYPE_MASK GENMASK(2, 0)
-#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 BIT(0)
-#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES BIT(1)
-#define WPA_KEY_INFO_KEY_TYPE BIT(3) /* 1 = Pairwise, 0 = Group key */
-/* bits 4..5 are used in WPA, but are reserved in IEEE 802.11i/RSN */
-#define WPA_KEY_INFO_KEY_INDEX_MASK GENMASK(5, 4)
-#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4
-#define WPA_KEY_INFO_INSTALL BIT(6) /* pairwise */
-#define WPA_KEY_INFO_TXRX BIT(6) /* group */
-#define WPA_KEY_INFO_ACK BIT(7)
-#define WPA_KEY_INFO_MIC BIT(8)
-#define WPA_KEY_INFO_SECURE BIT(9)
-#define WPA_KEY_INFO_ERROR BIT(10)
-#define WPA_KEY_INFO_REQUEST BIT(11)
-#define WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
-
-#endif /* EAP_PACKET_H */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
deleted file mode 100644
index 8df0e77b57f6..000000000000
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ /dev/null
@@ -1,1143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream, KS7010 based SDIO cards.
- *
- * Copyright (C) 2006-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- */
-
-#include <linux/atomic.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-
-#define ROM_FILE "ks7010sd.rom"
-
-/* SDIO KeyStream vendor and device */
-#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
-#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
-
-/* Older sources suggest earlier versions were named 7910 or 79xx */
-#define SDIO_DEVICE_ID_KS_7010 0x7910
-
-/* Read/Write Status Register */
-#define READ_STATUS_REG 0x000000
-#define WRITE_STATUS_REG 0x00000C
-enum reg_status_type {
- REG_STATUS_BUSY,
- REG_STATUS_IDLE
-};
-
-/* Read Index Register */
-#define READ_INDEX_REG 0x000004
-
-/* Read Data Size Register */
-#define READ_DATA_SIZE_REG 0x000008
-
-/* Write Index Register */
-#define WRITE_INDEX_REG 0x000010
-
-/*
- * Write Status/Read Data Size Register
- * for network packet (less than 2048 bytes data)
- */
-#define WSTATUS_RSIZE_REG 0x000014
-
-/* Write Status Register value */
-#define WSTATUS_MASK 0x80
-
-/* Read Data Size Register value [10:4] */
-#define RSIZE_MASK 0x7F
-
-/* ARM to SD interrupt Enable */
-#define INT_ENABLE_REG 0x000020
-/* ARM to SD interrupt Pending */
-#define INT_PENDING_REG 0x000024
-
-#define INT_GCR_B BIT(7)
-#define INT_GCR_A BIT(6)
-#define INT_WRITE_STATUS BIT(5)
-#define INT_WRITE_INDEX BIT(4)
-#define INT_WRITE_SIZE BIT(3)
-#define INT_READ_STATUS BIT(2)
-#define INT_READ_INDEX BIT(1)
-#define INT_READ_SIZE BIT(0)
-
-/* General Communication Register A */
-#define GCR_A_REG 0x000028
-enum gen_com_reg_a {
- GCR_A_INIT,
- GCR_A_REMAP,
- GCR_A_RUN
-};
-
-/* General Communication Register B */
-#define GCR_B_REG 0x00002C
-enum gen_com_reg_b {
- GCR_B_ACTIVE,
- GCR_B_DOZE
-};
-
-/* Wakeup Register */
-#define WAKEUP_REG 0x008018
-#define WAKEUP_REQ 0x5a
-
-/* AHB Data Window 0x010000-0x01FFFF */
-#define DATA_WINDOW 0x010000
-#define WINDOW_SIZE (64 * 1024)
-
-#define KS7010_IRAM_ADDRESS 0x06000000
-
-#define KS7010_IO_BLOCK_SIZE 512
-
-/**
- * struct ks_sdio_card - SDIO device data.
- *
- * Structure is used as the &struct sdio_func private data.
- *
- * @func: Pointer to the SDIO function device.
- * @priv: Pointer to the &struct net_device private data.
- */
-struct ks_sdio_card {
- struct sdio_func *func;
- struct ks_wlan_private *priv;
-};
-
-static struct sdio_func *ks7010_to_func(struct ks_wlan_private *priv)
-{
- struct ks_sdio_card *ks_sdio = priv->if_hw;
-
- return ks_sdio->func;
-}
-
-/* Read single byte from device address into byte (CMD52) */
-static int ks7010_sdio_readb(struct ks_wlan_private *priv,
- u32 address, u8 *byte)
-{
- struct sdio_func *func = ks7010_to_func(priv);
- int ret;
-
- *byte = sdio_readb(func, address, &ret);
-
- return ret;
-}
-
-/* Read length bytes from device address into buffer (CMD53) */
-static int ks7010_sdio_read(struct ks_wlan_private *priv, u32 address,
- u8 *buffer, unsigned int length)
-{
- struct sdio_func *func = ks7010_to_func(priv);
-
- return sdio_memcpy_fromio(func, buffer, address, length);
-}
-
-/* Write single byte to device address (CMD52) */
-static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
- u32 address, u8 byte)
-{
- struct sdio_func *func = ks7010_to_func(priv);
- int ret;
-
- sdio_writeb(func, byte, address, &ret);
-
- return ret;
-}
-
-/* Write length bytes to device address from buffer (CMD53) */
-static int ks7010_sdio_write(struct ks_wlan_private *priv, u32 address,
- u8 *buffer, unsigned int length)
-{
- struct sdio_func *func = ks7010_to_func(priv);
-
- return sdio_memcpy_toio(func, address, buffer, length);
-}
-
-static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- /* clear request */
- atomic_set(&priv->sleepstatus.doze_request, 0);
-
- if (atomic_read(&priv->sleepstatus.status) == 0) {
- ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
- if (ret) {
- netdev_err(priv->net_dev, "write GCR_B_REG\n");
- goto set_sleep_mode;
- }
- atomic_set(&priv->sleepstatus.status, 1);
- priv->last_doze = jiffies;
- }
-
-set_sleep_mode:
- priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
-}
-
-static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- /* clear request */
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
-
- if (atomic_read(&priv->sleepstatus.status) == 1) {
- ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
- if (ret) {
- netdev_err(priv->net_dev, "write WAKEUP_REG\n");
- goto set_sleep_mode;
- }
- atomic_set(&priv->sleepstatus.status, 0);
- priv->last_wakeup = jiffies;
- ++priv->wakeup_count;
- }
-
-set_sleep_mode:
- priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
-}
-
-void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
- if (ret)
- netdev_err(priv->net_dev, "write WAKEUP_REG\n");
-
- priv->last_wakeup = jiffies;
- ++priv->wakeup_count;
- }
-}
-
-static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
-{
- u8 byte;
- int ret;
-
- if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE)
- return;
-
- if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
- return;
-
- if (!is_connect_status(priv->connect_status))
- return;
-
- if (priv->dev_state != DEVICE_STATE_SLEEP)
- return;
-
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE)
- return;
-
- netdev_dbg(priv->net_dev,
- "STATUS:\n"
- "- psstatus.status = %d\n"
- "- psstatus.confirm_wait = %d\n"
- "- psstatus.snooze_guard = %d\n"
- "- txq_count = %d\n",
- atomic_read(&priv->psstatus.status),
- atomic_read(&priv->psstatus.confirm_wait),
- atomic_read(&priv->psstatus.snooze_guard),
- txq_count(priv));
-
- if (atomic_read(&priv->psstatus.confirm_wait) ||
- atomic_read(&priv->psstatus.snooze_guard) ||
- txq_has_space(priv)) {
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
- return;
- }
-
- ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
- goto queue_delayed_work;
- }
- if (byte)
- goto queue_delayed_work;
-
- ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
- if (ret) {
- netdev_err(priv->net_dev, "write GCR_B_REG\n");
- goto queue_delayed_work;
- }
- atomic_set(&priv->psstatus.status, PS_SNOOZE);
-
- return;
-
-queue_delayed_work:
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
-}
-
-int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
-{
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return 0;
-}
-
-static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
- unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb)
-{
- struct tx_device_buffer *sp;
- int ret;
-
- if (priv->dev_state < DEVICE_STATE_BOOT) {
- ret = -EPERM;
- goto err_complete;
- }
-
- if ((TX_DEVICE_BUFF_SIZE - 1) <= txq_count(priv)) {
- netdev_err(priv->net_dev, "tx buffer overflow\n");
- ret = -EOVERFLOW;
- goto err_complete;
- }
-
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
- sp->sendp = p;
- sp->size = size;
- sp->complete_handler = complete_handler;
- sp->skb = skb;
- inc_txqtail(priv);
-
- return 0;
-
-err_complete:
- kfree(p);
- if (complete_handler)
- (*complete_handler)(priv, skb);
-
- return ret;
-}
-
-/* write data */
-static int write_to_device(struct ks_wlan_private *priv, u8 *buffer,
- unsigned long size)
-{
- struct hostif_hdr *hdr;
- int ret;
-
- hdr = (struct hostif_hdr *)buffer;
-
- if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
- le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
- netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event);
- return 0;
- }
-
- ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
- if (ret) {
- netdev_err(priv->net_dev, "write DATA_WINDOW\n");
- return ret;
- }
-
- ret = ks7010_sdio_writeb(priv, WRITE_STATUS_REG, REG_STATUS_BUSY);
- if (ret) {
- netdev_err(priv->net_dev, "write WRITE_STATUS_REG\n");
- return ret;
- }
-
- return 0;
-}
-
-static void tx_device_task(struct ks_wlan_private *priv)
-{
- struct tx_device_buffer *sp;
- int ret;
-
- if (!txq_has_space(priv) ||
- atomic_read(&priv->psstatus.status) == PS_SNOOZE)
- return;
-
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- if (priv->dev_state >= DEVICE_STATE_BOOT) {
- ret = write_to_device(priv, sp->sendp, sp->size);
- if (ret) {
- netdev_err(priv->net_dev,
- "write_to_device error !!(%d)\n", ret);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return;
- }
- }
- kfree(sp->sendp);
- if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler)(priv, sp->skb);
- inc_txqhead(priv);
-
- if (txq_has_space(priv))
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-}
-
-int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb)
-{
- int result;
- struct hostif_hdr *hdr;
-
- hdr = (struct hostif_hdr *)p;
-
- if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
- le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
- netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event);
- return 0;
- }
-
- /* add event to hostt buffer */
- priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
- priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
-
- spin_lock_bh(&priv->tx_dev.tx_dev_lock);
- result = enqueue_txdev(priv, p, size, complete_handler, skb);
- spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
-
- if (txq_has_space(priv))
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-
- return result;
-}
-
-static void rx_event_task(struct tasklet_struct *t)
-{
- struct ks_wlan_private *priv = from_tasklet(priv, t, rx_bh_task);
- struct rx_device_buffer *rp;
-
- if (rxq_has_space(priv) && priv->dev_state >= DEVICE_STATE_BOOT) {
- rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
- hostif_receive(priv, rp->data, rp->size);
- inc_rxqhead(priv);
-
- if (rxq_has_space(priv))
- tasklet_schedule(&priv->rx_bh_task);
- }
-}
-
-static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size)
-{
- int ret;
- struct rx_device_buffer *rx_buffer;
- struct hostif_hdr *hdr;
- u16 event = 0;
-
- /* receive data */
- if (rxq_count(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
- netdev_err(priv->net_dev, "rx buffer overflow\n");
- return;
- }
- rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
-
- ret = ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
- hif_align_size(size));
- if (ret)
- return;
-
- /* length check */
- if (size > 2046 || size == 0) {
-#ifdef DEBUG
- print_hex_dump_bytes("INVALID DATA dump: ",
- DUMP_PREFIX_OFFSET,
- rx_buffer->data, 32);
-#endif
- ret = ks7010_sdio_writeb(priv, READ_STATUS_REG,
- REG_STATUS_IDLE);
- if (ret)
- netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
-
- /* length check fail */
- return;
- }
-
- hdr = (struct hostif_hdr *)&rx_buffer->data[0];
- rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
- event = le16_to_cpu(hdr->event);
- inc_rxqtail(priv);
-
- ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
- if (ret)
- netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
-
- if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) {
- netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n");
- atomic_dec(&priv->psstatus.confirm_wait);
- }
-
- tasklet_schedule(&priv->rx_bh_task);
-}
-
-static void ks7010_rw_function(struct work_struct *work)
-{
- struct ks_wlan_private *priv = container_of(work,
- struct ks_wlan_private,
- rw_dwork.work);
- struct sdio_func *func = ks7010_to_func(priv);
- u8 byte;
- int ret;
-
- /* wait after DOZE */
- if (time_after(priv->last_doze + msecs_to_jiffies(30), jiffies)) {
- netdev_dbg(priv->net_dev, "wait after DOZE\n");
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return;
- }
-
- /* wait after WAKEUP */
- while (time_after(priv->last_wakeup + msecs_to_jiffies(30), jiffies)) {
- netdev_dbg(priv->net_dev, "wait after WAKEUP\n");
- dev_info(&func->dev, "wake: %lu %lu\n",
- priv->last_wakeup + msecs_to_jiffies(30), jiffies);
- msleep(30);
- }
-
- sdio_claim_host(func);
-
- /* power save wakeup */
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (txq_has_space(priv)) {
- ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- }
- goto release_host;
- }
-
- /* sleep mode doze */
- if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
- ks_wlan_hw_sleep_doze_request(priv);
- goto release_host;
- }
- /* sleep mode wakeup */
- if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
- ks_wlan_hw_sleep_wakeup_request(priv);
- goto release_host;
- }
-
- /* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG psstatus=%d\n",
- atomic_read(&priv->psstatus.status));
- goto release_host;
- }
-
- if (byte & RSIZE_MASK) { /* Read schedule */
- ks_wlan_hw_rx(priv, (size_t)((byte & RSIZE_MASK) << 4));
- }
- if ((byte & WSTATUS_MASK))
- tx_device_task(priv);
-
- _ks_wlan_hw_power_save(priv);
-
-release_host:
- sdio_release_host(func);
-}
-
-static void ks_sdio_interrupt(struct sdio_func *func)
-{
- int ret;
- struct ks_sdio_card *card;
- struct ks_wlan_private *priv;
- u8 status, rsize, byte;
-
- card = sdio_get_drvdata(func);
- priv = card->priv;
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- goto queue_delayed_work;
-
- ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &status);
- if (ret) {
- netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
- goto queue_delayed_work;
- }
-
- /* schedule task for interrupt status */
- /* bit7 -> Write General Communication B register */
- /* read (General Communication B register) */
- /* bit5 -> Write Status Idle */
- /* bit2 -> Read Status Busy */
- if (status & INT_GCR_B ||
- atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_readb(priv, GCR_B_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read GCR_B_REG\n");
- goto queue_delayed_work;
- }
- if (byte == GCR_B_ACTIVE) {
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- atomic_set(&priv->psstatus.status, PS_WAKEUP);
- priv->wakeup_count = 0;
- }
- complete(&priv->psstatus.wakeup_wait);
- }
- }
-
- do {
- /* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG\n");
- goto queue_delayed_work;
- }
- rsize = byte & RSIZE_MASK;
- if (rsize != 0) /* Read schedule */
- ks_wlan_hw_rx(priv, (size_t)(rsize << 4));
-
- if (byte & WSTATUS_MASK) {
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (txq_has_space(priv)) {
- ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->wq,
- &priv->rw_dwork, 1);
- return;
- }
- } else {
- tx_device_task(priv);
- }
- }
- } while (rsize);
-
-queue_delayed_work:
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-}
-
-static int trx_device_init(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qhead = 0;
- priv->tx_dev.qtail = 0;
-
- priv->rx_dev.qhead = 0;
- priv->rx_dev.qtail = 0;
-
- spin_lock_init(&priv->tx_dev.tx_dev_lock);
- spin_lock_init(&priv->rx_dev.rx_dev_lock);
-
- tasklet_setup(&priv->rx_bh_task, rx_event_task);
-
- return 0;
-}
-
-static void trx_device_exit(struct ks_wlan_private *priv)
-{
- struct tx_device_buffer *sp;
-
- /* tx buffer clear */
- while (txq_has_space(priv)) {
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- kfree(sp->sendp);
- if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler)(priv, sp->skb);
- inc_txqhead(priv);
- }
-
- tasklet_kill(&priv->rx_bh_task);
-}
-
-static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
-{
- int ret;
- unsigned char *data_buf;
-
- data_buf = kmemdup(&index, sizeof(u32), GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- ret = ks7010_sdio_write(priv, WRITE_INDEX_REG, data_buf, sizeof(index));
- if (ret)
- goto err_free_data_buf;
-
- ret = ks7010_sdio_write(priv, READ_INDEX_REG, data_buf, sizeof(index));
- if (ret)
- goto err_free_data_buf;
-
- return 0;
-
-err_free_data_buf:
- kfree(data_buf);
-
- return ret;
-}
-
-#define ROM_BUFF_SIZE (64 * 1024)
-static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
- u8 *data, unsigned int size)
-{
- int ret;
- u8 *read_buf;
-
- read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
-
- ret = ks7010_sdio_read(priv, address, read_buf, size);
- if (ret)
- goto err_free_read_buf;
-
- if (memcmp(data, read_buf, size) != 0) {
- ret = -EIO;
- netdev_err(priv->net_dev, "data compare error (%d)\n", ret);
- goto err_free_read_buf;
- }
-
- return 0;
-
-err_free_read_buf:
- kfree(read_buf);
-
- return ret;
-}
-
-static int ks7010_copy_firmware(struct ks_wlan_private *priv,
- const struct firmware *fw_entry)
-{
- unsigned int length;
- unsigned int size;
- unsigned int offset;
- unsigned int n = 0;
- u8 *rom_buf;
- int ret;
-
- rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!rom_buf)
- return -ENOMEM;
-
- length = fw_entry->size;
-
- do {
- if (length >= ROM_BUFF_SIZE) {
- size = ROM_BUFF_SIZE;
- length = length - ROM_BUFF_SIZE;
- } else {
- size = length;
- length = 0;
- }
- if (size == 0)
- break;
-
- memcpy(rom_buf, fw_entry->data + n, size);
-
- offset = n;
- ret = ks7010_sdio_update_index(priv,
- KS7010_IRAM_ADDRESS + offset);
- if (ret)
- goto free_rom_buf;
-
- ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
- if (ret)
- goto free_rom_buf;
-
- ret = ks7010_sdio_data_compare(priv,
- DATA_WINDOW, rom_buf, size);
- if (ret)
- goto free_rom_buf;
-
- n += size;
-
- } while (size);
-
- ret = ks7010_sdio_writeb(priv, GCR_A_REG, GCR_A_REMAP);
-
-free_rom_buf:
- kfree(rom_buf);
- return ret;
-}
-
-static int ks7010_upload_firmware(struct ks_sdio_card *card)
-{
- struct ks_wlan_private *priv = card->priv;
- struct sdio_func *func = ks7010_to_func(priv);
- unsigned int n;
- u8 byte = 0;
- int ret;
- const struct firmware *fw_entry = NULL;
-
- sdio_claim_host(func);
-
- /* Firmware running ? */
- ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
- if (ret)
- goto release_host;
- if (byte == GCR_A_RUN) {
- netdev_dbg(priv->net_dev, "MAC firmware running ...\n");
- ret = -EBUSY;
- goto release_host;
- }
-
- ret = request_firmware(&fw_entry, ROM_FILE,
- &func->dev);
- if (ret)
- goto release_host;
-
- ret = ks7010_copy_firmware(priv, fw_entry);
- if (ret)
- goto release_firmware;
-
- /* Firmware running check */
- for (n = 0; n < 50; ++n) {
- usleep_range(10000, 11000); /* wait_ms(10); */
- ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
- if (ret)
- goto release_firmware;
-
- if (byte == GCR_A_RUN)
- break;
- }
- if ((50) <= n) {
- netdev_err(priv->net_dev, "firmware can't start\n");
- ret = -EIO;
- goto release_firmware;
- }
-
- ret = 0;
-
- release_firmware:
- release_firmware(fw_entry);
- release_host:
- sdio_release_host(func);
-
- return ret;
-}
-
-static void ks7010_sme_enqueue_events(struct ks_wlan_private *priv)
-{
- static const u16 init_events[] = {
- SME_GET_EEPROM_CKSUM, SME_STOP_REQUEST,
- SME_RTS_THRESHOLD_REQUEST, SME_FRAGMENTATION_THRESHOLD_REQUEST,
- SME_WEP_INDEX_REQUEST, SME_WEP_KEY1_REQUEST,
- SME_WEP_KEY2_REQUEST, SME_WEP_KEY3_REQUEST,
- SME_WEP_KEY4_REQUEST, SME_WEP_FLAG_REQUEST,
- SME_RSN_ENABLED_REQUEST, SME_MODE_SET_REQUEST,
- SME_START_REQUEST
- };
- int ev;
-
- for (ev = 0; ev < ARRAY_SIZE(init_events); ev++)
- hostif_sme_enqueue(priv, init_events[ev]);
-}
-
-static void ks7010_card_init(struct ks_wlan_private *priv)
-{
- init_completion(&priv->confirm_wait);
-
- /* get mac address & firmware version */
- hostif_sme_enqueue(priv, SME_START);
-
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, 5 * HZ)) {
- netdev_dbg(priv->net_dev, "wait time out!! SME_START\n");
- }
-
- if (priv->mac_address_valid && priv->version_size != 0)
- priv->dev_state = DEVICE_STATE_PREINIT;
-
- ks7010_sme_enqueue_events(priv);
-
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, 5 * HZ)) {
- netdev_dbg(priv->net_dev, "wait time out!! wireless parameter set\n");
- }
-
- if (priv->dev_state >= DEVICE_STATE_PREINIT) {
- netdev_dbg(priv->net_dev, "DEVICE READY!!\n");
- priv->dev_state = DEVICE_STATE_READY;
- }
-}
-
-static void ks7010_init_defaults(struct ks_wlan_private *priv)
-{
- priv->reg.tx_rate = TX_RATE_AUTO;
- priv->reg.preamble = LONG_PREAMBLE;
- priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
- priv->reg.scan_type = ACTIVE_SCAN;
- priv->reg.beacon_lost_count = 20;
- priv->reg.rts = 2347UL;
- priv->reg.fragment = 2346UL;
- priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
- priv->reg.cts_mode = CTS_MODE_FALSE;
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 12;
-}
-
-static int ks7010_sdio_setup_irqs(struct sdio_func *func)
-{
- int ret;
-
- /* interrupt disable */
- sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
- if (ret)
- goto irq_error;
-
- sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
- if (ret)
- goto irq_error;
-
- /* setup interrupt handler */
- ret = sdio_claim_irq(func, ks_sdio_interrupt);
-
-irq_error:
- return ret;
-}
-
-static void ks7010_sdio_init_irqs(struct sdio_func *func,
- struct ks_wlan_private *priv)
-{
- u8 byte;
- int ret;
-
- /*
- * interrupt setting
- * clear Interrupt status write
- * (ARMtoSD_InterruptPending FN1:00_0024)
- */
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_PENDING_REG, 0xff);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, "write INT_PENDING_REG\n");
-
- /* enable ks7010sdio interrupt */
- byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_ENABLE_REG, byte);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, "write INT_ENABLE_REG\n");
-}
-
-static void ks7010_private_init(struct ks_wlan_private *priv,
- struct ks_sdio_card *card,
- struct net_device *netdev)
-{
- /* private memory initialize */
- priv->if_hw = card;
-
- priv->dev_state = DEVICE_STATE_PREBOOT;
- priv->net_dev = netdev;
- priv->firmware_version[0] = '\0';
- priv->version_size = 0;
- priv->last_doze = jiffies;
- priv->last_wakeup = jiffies;
- memset(&priv->nstats, 0, sizeof(priv->nstats));
- memset(&priv->wstats, 0, sizeof(priv->wstats));
-
- /* sleep mode */
- atomic_set(&priv->sleepstatus.status, 0);
- atomic_set(&priv->sleepstatus.doze_request, 0);
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
-
- trx_device_init(priv);
- hostif_init(priv);
- ks_wlan_net_start(netdev);
- ks7010_init_defaults(priv);
-}
-
-static int ks7010_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *device)
-{
- struct ks_wlan_private *priv = NULL;
- struct net_device *netdev = NULL;
- struct ks_sdio_card *card;
- int ret;
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->func = func;
-
- sdio_claim_host(func);
-
- ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
- if (ret)
- goto err_free_card;
-
- dev_dbg(&card->func->dev, "multi_block=%d sdio_set_block_size()=%d %d\n",
- func->card->cccr.multi_block, func->cur_blksize, ret);
-
- ret = sdio_enable_func(func);
- if (ret)
- goto err_free_card;
-
- ret = ks7010_sdio_setup_irqs(func);
- if (ret)
- goto err_disable_func;
-
- sdio_release_host(func);
-
- sdio_set_drvdata(func, card);
-
- dev_dbg(&card->func->dev, "class = 0x%X, vendor = 0x%X, device = 0x%X\n",
- func->class, func->vendor, func->device);
-
- /* private memory allocate */
- netdev = alloc_etherdev(sizeof(*priv));
- if (!netdev) {
- dev_err(&card->func->dev, "Unable to alloc new net device\n");
- goto err_release_irq;
- }
-
- ret = dev_alloc_name(netdev, "wlan%d");
- if (ret < 0) {
- dev_err(&card->func->dev, "Couldn't get name!\n");
- goto err_free_netdev;
- }
-
- priv = netdev_priv(netdev);
-
- card->priv = priv;
- SET_NETDEV_DEV(netdev, &card->func->dev);
-
- ks7010_private_init(priv, card, netdev);
-
- ret = ks7010_upload_firmware(card);
- if (ret) {
- netdev_err(priv->net_dev,
- "firmware load failed !! ret = %d\n", ret);
- goto err_free_netdev;
- }
-
- ks7010_sdio_init_irqs(func, priv);
-
- priv->dev_state = DEVICE_STATE_BOOT;
-
- priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1);
- if (!priv->wq) {
- netdev_err(priv->net_dev, "create_workqueue failed !!\n");
- goto err_free_netdev;
- }
-
- INIT_DELAYED_WORK(&priv->rw_dwork, ks7010_rw_function);
- ks7010_card_init(priv);
-
- ret = register_netdev(priv->net_dev);
- if (ret)
- goto err_destroy_wq;
-
- return 0;
-
- err_destroy_wq:
- destroy_workqueue(priv->wq);
- err_free_netdev:
- free_netdev(netdev);
- err_release_irq:
- sdio_claim_host(func);
- sdio_release_irq(func);
- err_disable_func:
- sdio_disable_func(func);
- err_free_card:
- sdio_release_host(func);
- sdio_set_drvdata(func, NULL);
- kfree(card);
-
- return -ENODEV;
-}
-
-/* send stop request to MAC */
-static int send_stop_request(struct sdio_func *func)
-{
- struct hostif_stop_request *pp;
- struct ks_sdio_card *card;
- size_t size;
-
- card = sdio_get_drvdata(func);
-
- pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- size = sizeof(*pp) - sizeof(pp->header.size);
- pp->header.size = cpu_to_le16(size);
- pp->header.event = cpu_to_le16(HIF_STOP_REQ);
-
- sdio_claim_host(func);
- write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp)));
- sdio_release_host(func);
-
- kfree(pp);
- return 0;
-}
-
-static void ks7010_sdio_remove(struct sdio_func *func)
-{
- int ret;
- struct ks_sdio_card *card;
- struct ks_wlan_private *priv;
-
- card = sdio_get_drvdata(func);
-
- if (!card)
- return;
-
- priv = card->priv;
- if (!priv)
- goto err_free_card;
-
- ks_wlan_net_stop(priv->net_dev);
-
- /* interrupt disable */
- sdio_claim_host(func);
- sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
- sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
- sdio_release_host(func);
-
- ret = send_stop_request(func);
- if (ret) /* memory allocation failure */
- goto err_free_card;
-
- if (priv->wq)
- destroy_workqueue(priv->wq);
-
- hostif_exit(priv);
-
- unregister_netdev(priv->net_dev);
-
- trx_device_exit(priv);
- free_netdev(priv->net_dev);
- card->priv = NULL;
-
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-err_free_card:
- sdio_set_drvdata(func, NULL);
- kfree(card);
-}
-
-static const struct sdio_device_id ks7010_sdio_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
- { /* all zero */ }
-};
-MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
-
-static struct sdio_driver ks7010_sdio_driver = {
- .name = "ks7010_sdio",
- .id_table = ks7010_sdio_ids,
- .probe = ks7010_sdio_probe,
- .remove = ks7010_sdio_remove,
-};
-
-module_sdio_driver(ks7010_sdio_driver);
-MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
-MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
-MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE(ROM_FILE);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
deleted file mode 100644
index af3825578d85..000000000000
--- a/drivers/staging/ks7010/ks_hostif.c
+++ /dev/null
@@ -1,2312 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream wireless LAN cards.
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#include <crypto/hash.h>
-#include <linux/circ_buf.h>
-#include <linux/if_arp.h>
-#include <net/iw_handler.h>
-#include <uapi/linux/llc.h>
-#include "eap_packet.h"
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-
-#define MICHAEL_MIC_KEY_LEN 8
-#define MICHAEL_MIC_LEN 8
-
-static inline void inc_smeqhead(struct ks_wlan_private *priv)
-{
- priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-static inline void inc_smeqtail(struct ks_wlan_private *priv)
-{
- priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-static inline unsigned int cnt_smeqbody(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->sme_i.qhead, priv->sme_i.qtail,
- SME_EVENT_BUFF_SIZE);
-}
-
-static inline u8 get_byte(struct ks_wlan_private *priv)
-{
- u8 data;
-
- data = *priv->rxp++;
- /* length check in advance ! */
- --(priv->rx_size);
- return data;
-}
-
-static inline u16 get_word(struct ks_wlan_private *priv)
-{
- u16 data;
-
- data = (get_byte(priv) & 0xff);
- data |= ((get_byte(priv) << 8) & 0xff00);
- return data;
-}
-
-static inline u32 get_dword(struct ks_wlan_private *priv)
-{
- u32 data;
-
- data = (get_byte(priv) & 0xff);
- data |= ((get_byte(priv) << 8) & 0x0000ff00);
- data |= ((get_byte(priv) << 16) & 0x00ff0000);
- data |= ((get_byte(priv) << 24) & 0xff000000);
- return data;
-}
-
-static void ks_wlan_hw_wakeup_task(struct work_struct *work)
-{
- struct ks_wlan_private *priv;
- int ps_status;
- long time_left;
-
- priv = container_of(work, struct ks_wlan_private, wakeup_work);
- ps_status = atomic_read(&priv->psstatus.status);
-
- if (ps_status == PS_SNOOZE) {
- ks_wlan_hw_wakeup_request(priv);
- time_left = wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait,
- msecs_to_jiffies(20));
- if (time_left <= 0) {
- netdev_dbg(priv->net_dev, "wake up timeout or interrupted !!!\n");
- schedule_work(&priv->wakeup_work);
- return;
- }
- }
-}
-
-static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
-{
- if (is_connect_status(priv->connect_status))
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
- else
- priv->dev_state = DEVICE_STATE_READY;
-}
-
-static
-int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info)
-{
- struct local_ap *ap;
- union iwreq_data wrqu;
- struct net_device *netdev = priv->net_dev;
- u8 size;
-
- ap = &priv->current_ap;
-
- if (is_disconnect_status(priv->connect_status)) {
- memset(ap, 0, sizeof(struct local_ap));
- return -EPERM;
- }
-
- ether_addr_copy(ap->bssid, ap_info->bssid);
- memcpy(ap->ssid.body, priv->reg.ssid.body,
- priv->reg.ssid.size);
- ap->ssid.size = priv->reg.ssid.size;
- memcpy(ap->rate_set.body, ap_info->rate_set.body,
- ap_info->rate_set.size);
- ap->rate_set.size = ap_info->rate_set.size;
- if (ap_info->ext_rate_set.size != 0) {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- ap_info->ext_rate_set.body,
- ap_info->ext_rate_set.size);
- ap->rate_set.size += ap_info->ext_rate_set.size;
- }
- ap->channel = ap_info->ds_parameter.channel;
- ap->rssi = ap_info->rssi;
- ap->sq = ap_info->sq;
- ap->noise = ap_info->noise;
- ap->capability = le16_to_cpu(ap_info->capability);
- size = (ap_info->rsn.size <= RSN_IE_BODY_MAX) ?
- ap_info->rsn.size : RSN_IE_BODY_MAX;
- if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
- ap->rsn_ie.id = RSN_INFO_ELEM_ID;
- ap->rsn_ie.size = size;
- memcpy(ap->rsn_ie.body, ap_info->rsn.body, size);
- } else if ((ap_info->rsn_mode & RSN_MODE_WPA) &&
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
- ap->wpa_ie.id = WPA_INFO_ELEM_ID;
- ap->wpa_ie.size = size;
- memcpy(ap->wpa_ie.body, ap_info->rsn.body, size);
- } else {
- ap->rsn_ie.id = 0;
- ap->rsn_ie.size = 0;
- ap->wpa_ie.id = 0;
- ap->wpa_ie.size = 0;
- }
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_connect_status(priv->connect_status)) {
- ether_addr_copy(wrqu.ap_addr.sa_data, priv->current_ap.bssid);
- netdev_dbg(priv->net_dev,
- "IWEVENT: connect bssid=%pM\n",
- wrqu.ap_addr.sa_data);
- wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
- }
- netdev_dbg(priv->net_dev, "Link AP\n"
- "- bssid=%pM\n"
- "- essid=%s\n"
- "- rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
- "- channel=%d\n"
- "- rssi=%d\n"
- "- sq=%d\n"
- "- capability=%04X\n"
- "- rsn.mode=%d\n"
- "- rsn.size=%d\n"
- "- ext_rate_set_size=%d\n"
- "- rate_set_size=%d\n",
- ap->bssid,
- &ap->ssid.body[0],
- ap->rate_set.body[0], ap->rate_set.body[1],
- ap->rate_set.body[2], ap->rate_set.body[3],
- ap->rate_set.body[4], ap->rate_set.body[5],
- ap->rate_set.body[6], ap->rate_set.body[7],
- ap->channel, ap->rssi, ap->sq, ap->capability,
- ap_info->rsn_mode, ap_info->rsn.size,
- ap_info->ext_rate_set.size, ap_info->rate_set.size);
-
- return 0;
-}
-
-static u8 read_ie(unsigned char *bp, u8 max, u8 *body)
-{
- u8 size = (*(bp + 1) <= max) ? *(bp + 1) : max;
-
- memcpy(body, bp + 2, size);
- return size;
-}
-
-static int
-michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
-{
- u8 pad_data[4] = { priority, 0, 0, 0 };
- struct crypto_shash *tfm = NULL;
- struct shash_desc *desc = NULL;
- int ret;
-
- tfm = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(tfm)) {
- ret = PTR_ERR(tfm);
- goto err;
- }
-
- ret = crypto_shash_setkey(tfm, key, MICHAEL_MIC_KEY_LEN);
- if (ret < 0)
- goto err_free_tfm;
-
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto err_free_tfm;
- }
-
- desc->tfm = tfm;
-
- ret = crypto_shash_init(desc);
- if (ret < 0)
- goto err_free_desc;
-
- // Compute the MIC value
- /*
- * IEEE802.11i page 47
- * Figure 43g TKIP MIC processing format
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- */
-
- ret = crypto_shash_update(desc, data, 12);
- if (ret < 0)
- goto err_free_desc;
-
- ret = crypto_shash_update(desc, pad_data, 4);
- if (ret < 0)
- goto err_free_desc;
-
- ret = crypto_shash_finup(desc, data + 12, len - 12, result);
-
-err_free_desc:
- kfree_sensitive(desc);
-
-err_free_tfm:
- crypto_free_shash(tfm);
-
-err:
- return ret;
-}
-
-static
-int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info,
- struct local_ap *ap)
-{
- unsigned char *bp;
- int bsize, offset;
-
- memset(ap, 0, sizeof(struct local_ap));
-
- ether_addr_copy(ap->bssid, ap_info->bssid);
- ap->rssi = ap_info->rssi;
- ap->sq = ap_info->sq;
- ap->noise = ap_info->noise;
- ap->capability = le16_to_cpu(ap_info->capability);
- ap->channel = ap_info->ch_info;
-
- bp = ap_info->body;
- bsize = le16_to_cpu(ap_info->body_size);
- offset = 0;
-
- while (bsize > offset) {
- switch (*bp) { /* Information Element ID */
- case WLAN_EID_SSID:
- ap->ssid.size = read_ie(bp, IEEE80211_MAX_SSID_LEN,
- ap->ssid.body);
- break;
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_EXT_SUPP_RATES:
- if ((*(bp + 1) + ap->rate_set.size) <=
- RATE_SET_MAX_SIZE) {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- bp + 2, *(bp + 1));
- ap->rate_set.size += *(bp + 1);
- } else {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- bp + 2,
- RATE_SET_MAX_SIZE - ap->rate_set.size);
- ap->rate_set.size +=
- (RATE_SET_MAX_SIZE - ap->rate_set.size);
- }
- break;
- case WLAN_EID_RSN:
- ap->rsn_ie.id = *bp;
- ap->rsn_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
- ap->rsn_ie.body);
- break;
- case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
- /* WPA OUI check */
- if (memcmp(bp + 2, CIPHER_ID_WPA_WEP40, 4) == 0) {
- ap->wpa_ie.id = *bp;
- ap->wpa_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
- ap->wpa_ie.body);
- }
- break;
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_ERP_INFO:
- break;
- default:
- netdev_err(priv->net_dev,
- "unknown Element ID=%d\n", *bp);
- break;
- }
-
- offset += 2; /* id & size field */
- offset += *(bp + 1); /* +size offset */
- bp += (*(bp + 1) + 2); /* pointer update */
- }
-
- return 0;
-}
-
-static
-int hostif_data_indication_wpa(struct ks_wlan_private *priv,
- unsigned short auth_type)
-{
- struct ether_hdr *eth_hdr;
- unsigned short eth_proto;
- unsigned char recv_mic[MICHAEL_MIC_LEN];
- char buf[128];
- unsigned long now;
- struct mic_failure *mic_failure;
- u8 mic[MICHAEL_MIC_LEN];
- union iwreq_data wrqu;
- unsigned int key_index = auth_type - 1;
- struct wpa_key *key = &priv->wpa.key[key_index];
-
- eth_hdr = (struct ether_hdr *)(priv->rxp);
- eth_proto = ntohs(eth_hdr->h_proto);
-
- if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
- netdev_err(priv->net_dev, "invalid data format\n");
- priv->nstats.rx_errors++;
- return -EINVAL;
- }
- if (((auth_type == TYPE_PMK1 &&
- priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) ||
- (auth_type == TYPE_GMK1 &&
- priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP) ||
- (auth_type == TYPE_GMK2 &&
- priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) &&
- key->key_len) {
- int ret;
-
- netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n",
- eth_proto, priv->rx_size);
- /* MIC save */
- memcpy(&recv_mic[0],
- (priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)),
- sizeof(recv_mic));
- priv->rx_size = priv->rx_size - sizeof(recv_mic);
-
- ret = michael_mic(key->rx_mic_key, priv->rxp, priv->rx_size,
- 0, mic);
- if (ret < 0)
- return ret;
- if (memcmp(mic, recv_mic, sizeof(mic)) != 0) {
- now = jiffies;
- mic_failure = &priv->wpa.mic_failure;
- /* MIC FAILURE */
- if (mic_failure->last_failure_time &&
- (now - mic_failure->last_failure_time) / HZ >= 60) {
- mic_failure->failure = 0;
- }
- netdev_err(priv->net_dev, "MIC FAILURE\n");
- if (mic_failure->failure == 0) {
- mic_failure->failure = 1;
- mic_failure->counter = 0;
- } else if (mic_failure->failure == 1) {
- mic_failure->failure = 2;
- mic_failure->counter =
- (u16)((now - mic_failure->last_failure_time) / HZ);
- /* range 1-60 */
- if (!mic_failure->counter)
- mic_failure->counter = 1;
- }
- priv->wpa.mic_failure.last_failure_time = now;
-
- /* needed parameters: count, keyid, key type, TSC */
- sprintf(buf,
- "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=%pM)",
- key_index,
- eth_hdr->h_dest[0] & 0x01 ? "broad" : "uni",
- eth_hdr->h_source);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu,
- buf);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static
-void hostif_data_indication(struct ks_wlan_private *priv)
-{
- unsigned int rx_ind_size; /* indicate data size */
- struct sk_buff *skb;
- u16 auth_type;
- unsigned char temp[256];
- struct ether_hdr *eth_hdr;
- struct ieee802_1x_hdr *aa1x_hdr;
- size_t size;
- int ret;
-
- /* min length check */
- if (priv->rx_size <= ETH_HLEN) {
- priv->nstats.rx_errors++;
- return;
- }
-
- auth_type = get_word(priv); /* AuthType */
- get_word(priv); /* Reserve Area */
-
- eth_hdr = (struct ether_hdr *)(priv->rxp);
-
- /* source address check */
- if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) {
- netdev_err(priv->net_dev, "invalid : source is own mac address !!\n");
- netdev_err(priv->net_dev, "eth_hdrernet->h_dest=%pM\n", eth_hdr->h_source);
- priv->nstats.rx_errors++;
- return;
- }
-
- /* for WPA */
- if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
- ret = hostif_data_indication_wpa(priv, auth_type);
- if (ret)
- return;
- }
-
- if ((priv->connect_status & FORCE_DISCONNECT) ||
- priv->wpa.mic_failure.failure == 2) {
- return;
- }
-
- /* check 13th byte at rx data */
- switch (*(priv->rxp + 12)) {
- case LLC_SAP_SNAP:
- rx_ind_size = priv->rx_size - 6;
- skb = dev_alloc_skb(rx_ind_size);
- if (!skb) {
- priv->nstats.rx_dropped++;
- return;
- }
- netdev_dbg(priv->net_dev, "SNAP, rx_ind_size = %d\n",
- rx_ind_size);
-
- size = ETH_ALEN * 2;
- skb_put_data(skb, priv->rxp, size);
-
- /* (SNAP+UI..) skip */
-
- size = rx_ind_size - (ETH_ALEN * 2);
- skb_put_data(skb, &eth_hdr->h_proto, size);
-
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
- break;
- case LLC_SAP_NETBEUI:
- rx_ind_size = (priv->rx_size + 2);
- skb = dev_alloc_skb(rx_ind_size);
- if (!skb) {
- priv->nstats.rx_dropped++;
- return;
- }
- netdev_dbg(priv->net_dev, "NETBEUI/NetBIOS rx_ind_size=%d\n",
- rx_ind_size);
-
- /* 8802/FDDI MAC copy */
- skb_put_data(skb, priv->rxp, 12);
-
- /* NETBEUI size add */
- temp[0] = (((rx_ind_size - 12) >> 8) & 0xff);
- temp[1] = ((rx_ind_size - 12) & 0xff);
- skb_put_data(skb, temp, 2);
-
- /* copy after Type */
- skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14);
-
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
- break;
- default: /* other rx data */
- netdev_err(priv->net_dev, "invalid data format\n");
- priv->nstats.rx_errors++;
- return;
- }
-
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY &&
- priv->wpa.rsn_enabled)
- atomic_set(&priv->psstatus.snooze_guard, 1);
-
- /* rx indication */
- skb->dev = priv->net_dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- priv->nstats.rx_packets++;
- priv->nstats.rx_bytes += rx_ind_size;
- netif_rx(skb);
-}
-
-static
-void hostif_mib_get_confirm(struct ks_wlan_private *priv)
-{
- struct net_device *dev = priv->net_dev;
- u32 mib_status;
- u32 mib_attribute;
-
- mib_status = get_dword(priv);
- mib_attribute = get_dword(priv);
- get_word(priv); /* mib_val_size */
- get_word(priv); /* mib_val_type */
-
- if (mib_status) {
- netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n",
- mib_attribute, mib_status);
- return;
- }
-
- switch (mib_attribute) {
- case DOT11_MAC_ADDRESS:
- hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
- ether_addr_copy(priv->eth_addr, priv->rxp);
- priv->mac_address_valid = true;
- eth_hw_addr_set(dev, priv->eth_addr);
- netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
- break;
- case DOT11_PRODUCT_VERSION:
- priv->version_size = priv->rx_size;
- memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
- priv->firmware_version[priv->rx_size] = '\0';
- netdev_info(dev, "firmware ver. = %s\n",
- priv->firmware_version);
- hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
- /* wake_up_interruptible_all(&priv->confirm_wait); */
- complete(&priv->confirm_wait);
- break;
- case LOCAL_GAIN:
- memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
- netdev_dbg(priv->net_dev, "tx_mode=%d, rx_mode=%d, tx_gain=%d, rx_gain=%d\n",
- priv->gain.tx_mode, priv->gain.rx_mode,
- priv->gain.tx_gain, priv->gain.rx_gain);
- break;
- case LOCAL_EEPROM_SUM:
- memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
- if (priv->eeprom_sum.type != 0 &&
- priv->eeprom_sum.type != 1) {
- netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
- return;
- }
- priv->eeprom_checksum = (priv->eeprom_sum.type == 0) ?
- EEPROM_CHECKSUM_NONE :
- (priv->eeprom_sum.result == 0) ?
- EEPROM_NG : EEPROM_OK;
- break;
- default:
- netdev_err(priv->net_dev, "mib_attribute=%08x\n",
- (unsigned int)mib_attribute);
- break;
- }
-}
-
-static
-void hostif_mib_set_confirm(struct ks_wlan_private *priv)
-{
- u32 mib_status;
- u32 mib_attribute;
-
- mib_status = get_dword(priv);
- mib_attribute = get_dword(priv);
-
- if (mib_status) {
- /* in case of error */
- netdev_err(priv->net_dev, "error :: attribute=%08X, status=%08X\n",
- mib_attribute, mib_status);
- }
-
- switch (mib_attribute) {
- case DOT11_RTS_THRESHOLD:
- hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
- break;
- case DOT11_FRAGMENTATION_THRESHOLD:
- hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_ID:
- if (!priv->wpa.wpa_enabled)
- hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE1:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE2:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE3:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE4:
- if (!priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
- break;
- case DOT11_PRIVACY_INVOKED:
- if (!priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
- break;
- case DOT11_RSN_ENABLED:
- hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
- break;
- case LOCAL_RSN_MODE:
- hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
- break;
- case LOCAL_MULTICAST_ADDRESS:
- hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
- break;
- case LOCAL_MULTICAST_FILTER:
- hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
- break;
- case LOCAL_CURRENTADDRESS:
- priv->mac_address_valid = true;
- break;
- case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
- hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
- break;
- case DOT11_RSN_CONFIG_UNICAST_CIPHER:
- hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
- break;
- case DOT11_RSN_CONFIG_AUTH_SUITE:
- hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
- break;
- case DOT11_GMK1_TSC:
- if (atomic_read(&priv->psstatus.snooze_guard))
- atomic_set(&priv->psstatus.snooze_guard, 0);
- break;
- case DOT11_GMK2_TSC:
- if (atomic_read(&priv->psstatus.snooze_guard))
- atomic_set(&priv->psstatus.snooze_guard, 0);
- break;
- case DOT11_PMK_TSC:
- case LOCAL_PMK:
- case LOCAL_GAIN:
- case LOCAL_WPS_ENABLE:
- case LOCAL_WPS_PROBE_REQ:
- case LOCAL_REGION:
- default:
- break;
- }
-}
-
-static
-void hostif_power_mgmt_confirm(struct ks_wlan_private *priv)
-{
- if (priv->reg.power_mgmt > POWER_MGMT_ACTIVE &&
- priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- atomic_set(&priv->psstatus.confirm_wait, 0);
- priv->dev_state = DEVICE_STATE_SLEEP;
- ks_wlan_hw_power_save(priv);
- } else {
- priv->dev_state = DEVICE_STATE_READY;
- }
-}
-
-static
-void hostif_sleep_confirm(struct ks_wlan_private *priv)
-{
- atomic_set(&priv->sleepstatus.doze_request, 1);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
-}
-
-static
-void hostif_start_confirm(struct ks_wlan_private *priv)
-{
- union iwreq_data wrqu;
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_connect_status(priv->connect_status)) {
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
- }
- netdev_dbg(priv->net_dev, " scan_ind_count=%d\n", priv->scan_ind_count);
- hostif_sme_enqueue(priv, SME_START_CONFIRM);
-}
-
-static
-void hostif_connect_indication(struct ks_wlan_private *priv)
-{
- u16 connect_code;
- unsigned int tmp = 0;
- unsigned int old_status = priv->connect_status;
- struct net_device *netdev = priv->net_dev;
- union iwreq_data wrqu0;
-
- connect_code = get_word(priv);
-
- switch (connect_code) {
- case RESULT_CONNECT:
- if (!(priv->connect_status & FORCE_DISCONNECT))
- netif_carrier_on(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
- priv->connect_status = tmp + CONNECT_STATUS;
- break;
- case RESULT_DISCONNECT:
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
- priv->connect_status = tmp + DISCONNECT_STATUS;
- break;
- default:
- netdev_dbg(priv->net_dev, "unknown connect_code=%d :: scan_ind_count=%d\n",
- connect_code, priv->scan_ind_count);
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
- priv->connect_status = tmp + DISCONNECT_STATUS;
- break;
- }
-
- get_current_ap(priv, (struct link_ap_info *)priv->rxp);
- if (is_connect_status(priv->connect_status) &&
- is_disconnect_status(old_status)) {
- /* for power save */
- atomic_set(&priv->psstatus.snooze_guard, 0);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- }
- ks_wlan_do_power_save(priv);
-
- wrqu0.data.length = 0;
- wrqu0.data.flags = 0;
- wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_disconnect_status(priv->connect_status) &&
- is_connect_status(old_status)) {
- eth_zero_addr(wrqu0.ap_addr.sa_data);
- netdev_dbg(priv->net_dev, "disconnect :: scan_ind_count=%d\n",
- priv->scan_ind_count);
- wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
- }
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_scan_indication(struct ks_wlan_private *priv)
-{
- int i;
- struct ap_info *ap_info;
-
- netdev_dbg(priv->net_dev,
- "scan_ind_count = %d\n", priv->scan_ind_count);
- ap_info = (struct ap_info *)(priv->rxp);
-
- if (priv->scan_ind_count) {
- /* bssid check */
- for (i = 0; i < priv->aplist.size; i++) {
- u8 *bssid = priv->aplist.ap[i].bssid;
-
- if (ether_addr_equal(ap_info->bssid, bssid))
- continue;
-
- if (ap_info->frame_type == IEEE80211_STYPE_PROBE_RESP)
- get_ap_information(priv, ap_info,
- &priv->aplist.ap[i]);
- return;
- }
- }
- priv->scan_ind_count++;
- if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
- netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n",
- priv->scan_ind_count, priv->aplist.size);
- get_ap_information(priv, (struct ap_info *)(priv->rxp),
- &priv->aplist.ap[priv->scan_ind_count - 1]);
- priv->aplist.size = priv->scan_ind_count;
- } else {
- netdev_dbg(priv->net_dev, " count over :: scan_ind_count=%d\n",
- priv->scan_ind_count);
- }
-}
-
-static
-void hostif_stop_confirm(struct ks_wlan_private *priv)
-{
- unsigned int tmp = 0;
- unsigned int old_status = priv->connect_status;
- struct net_device *netdev = priv->net_dev;
- union iwreq_data wrqu0;
-
- if (priv->dev_state == DEVICE_STATE_SLEEP)
- priv->dev_state = DEVICE_STATE_READY;
-
- /* disconnect indication */
- if (is_connect_status(priv->connect_status)) {
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
- priv->connect_status = tmp | DISCONNECT_STATUS;
- netdev_info(netdev, "IWEVENT: disconnect\n");
-
- wrqu0.data.length = 0;
- wrqu0.data.flags = 0;
- wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_disconnect_status(priv->connect_status) &&
- is_connect_status(old_status)) {
- eth_zero_addr(wrqu0.ap_addr.sa_data);
- netdev_info(netdev, "IWEVENT: disconnect\n");
- wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
- }
- priv->scan_ind_count = 0;
- }
-
- hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
-}
-
-static
-void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
-{
- priv->infra_status = 0; /* infrastructure mode cancel */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
-{
- get_word(priv); /* result_code */
- priv->infra_status = 1; /* infrastructure mode set */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
-{
- priv->infra_status = 1; /* infrastructure mode set */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_associate_indication(struct ks_wlan_private *priv)
-{
- struct association_request *assoc_req;
- struct association_response *assoc_resp;
- unsigned char *pb;
- union iwreq_data wrqu;
- char buf[IW_CUSTOM_MAX];
- char *pbuf = &buf[0];
- int i;
-
- static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
- static const char associnfo_leader1[] = " RespIEs=";
-
- assoc_req = (struct association_request *)(priv->rxp);
- assoc_resp = (struct association_response *)(assoc_req + 1);
- pb = (unsigned char *)(assoc_resp + 1);
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
- wrqu.data.length += sizeof(associnfo_leader0) - 1;
- pbuf += sizeof(associnfo_leader0) - 1;
-
- for (i = 0; i < le16_to_cpu(assoc_req->req_ies_size); i++)
- pbuf += sprintf(pbuf, "%02x", *(pb + i));
- wrqu.data.length += (le16_to_cpu(assoc_req->req_ies_size)) * 2;
-
- memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
- wrqu.data.length += sizeof(associnfo_leader1) - 1;
- pbuf += sizeof(associnfo_leader1) - 1;
-
- pb += le16_to_cpu(assoc_req->req_ies_size);
- for (i = 0; i < le16_to_cpu(assoc_resp->resp_ies_size); i++)
- pbuf += sprintf(pbuf, "%02x", *(pb + i));
- wrqu.data.length += (le16_to_cpu(assoc_resp->resp_ies_size)) * 2;
-
- pbuf += sprintf(pbuf, ")");
- wrqu.data.length += 1;
-
- wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
-}
-
-static
-void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
-{
- u32 result_code;
- struct net_device *dev = priv->net_dev;
- union iwreq_data wrqu;
-
- result_code = get_dword(priv);
- netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n",
- result_code, priv->scan_ind_count);
-
- priv->sme_i.sme_flag &= ~SME_AP_SCAN;
- hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_phy_information_confirm(struct ks_wlan_private *priv)
-{
- struct iw_statistics *wstats = &priv->wstats;
- u8 rssi, signal;
- u8 link_speed;
- u32 transmitted_frame_count, received_fragment_count;
- u32 failed_count, fcs_error_count;
-
- rssi = get_byte(priv);
- signal = get_byte(priv);
- get_byte(priv); /* noise */
- link_speed = get_byte(priv);
- transmitted_frame_count = get_dword(priv);
- received_fragment_count = get_dword(priv);
- failed_count = get_dword(priv);
- fcs_error_count = get_dword(priv);
-
- netdev_dbg(priv->net_dev, "phyinfo confirm rssi=%d signal=%d\n",
- rssi, signal);
- priv->current_rate = (link_speed & RATE_MASK);
- wstats->qual.qual = signal;
- wstats->qual.level = 256 - rssi;
- wstats->qual.noise = 0; /* invalid noise value */
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- netdev_dbg(priv->net_dev, "\n rssi=%u\n"
- " signal=%u\n"
- " link_speed=%ux500Kbps\n"
- " transmitted_frame_count=%u\n"
- " received_fragment_count=%u\n"
- " failed_count=%u\n"
- " fcs_error_count=%u\n",
- rssi, signal, link_speed, transmitted_frame_count,
- received_fragment_count, failed_count, fcs_error_count);
- /* wake_up_interruptible_all(&priv->confirm_wait); */
- complete(&priv->confirm_wait);
-}
-
-static
-void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
-{
- netdev_dbg(priv->net_dev, "mic_failure=%u\n",
- priv->wpa.mic_failure.failure);
- hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
-}
-
-static
-void hostif_event_check(struct ks_wlan_private *priv)
-{
- u16 event;
-
- event = get_word(priv);
- switch (event) {
- case HIF_DATA_IND:
- hostif_data_indication(priv);
- break;
- case HIF_MIB_GET_CONF:
- hostif_mib_get_confirm(priv);
- break;
- case HIF_MIB_SET_CONF:
- hostif_mib_set_confirm(priv);
- break;
- case HIF_POWER_MGMT_CONF:
- hostif_power_mgmt_confirm(priv);
- break;
- case HIF_SLEEP_CONF:
- hostif_sleep_confirm(priv);
- break;
- case HIF_START_CONF:
- hostif_start_confirm(priv);
- break;
- case HIF_CONNECT_IND:
- hostif_connect_indication(priv);
- break;
- case HIF_STOP_CONF:
- hostif_stop_confirm(priv);
- break;
- case HIF_PS_ADH_SET_CONF:
- hostif_ps_adhoc_set_confirm(priv);
- break;
- case HIF_INFRA_SET_CONF:
- case HIF_INFRA_SET2_CONF:
- hostif_infrastructure_set_confirm(priv);
- break;
- case HIF_ADH_SET_CONF:
- case HIF_ADH_SET2_CONF:
- hostif_adhoc_set_confirm(priv);
- break;
- case HIF_ASSOC_INFO_IND:
- hostif_associate_indication(priv);
- break;
- case HIF_MIC_FAILURE_CONF:
- hostif_mic_failure_confirm(priv);
- break;
- case HIF_SCAN_CONF:
- hostif_bss_scan_confirm(priv);
- break;
- case HIF_PHY_INFO_CONF:
- case HIF_PHY_INFO_IND:
- hostif_phy_information_confirm(priv);
- break;
- case HIF_SCAN_IND:
- hostif_scan_indication(priv);
- break;
- case HIF_AP_SET_CONF:
- default:
- netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
- /* wake_up_all(&priv->confirm_wait); */
- complete(&priv->confirm_wait);
- break;
- }
-
- /* add event to hostt buffer */
- priv->hostt.buff[priv->hostt.qtail] = event;
- priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-/* allocate size bytes, set header size and event */
-static void *hostif_generic_request(size_t size, int event)
-{
- struct hostif_hdr *p;
-
- p = kzalloc(hif_align_size(size), GFP_ATOMIC);
- if (!p)
- return NULL;
-
- p->size = cpu_to_le16(size - sizeof(p->size));
- p->event = cpu_to_le16(event);
-
- return p;
-}
-
-int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
-{
- unsigned int skb_len = 0;
- unsigned char *buffer = NULL;
- unsigned int length = 0;
- struct hostif_data_request *pp;
- unsigned char *p;
- unsigned short eth_proto;
- struct ether_hdr *eth_hdr;
- unsigned short keyinfo = 0;
- struct ieee802_1x_hdr *aa1x_hdr;
- struct wpa_eapol_key *eap_key;
- struct ethhdr *eth;
- size_t size;
- int ret;
-
- skb_len = skb->len;
- if (skb_len > ETH_FRAME_LEN) {
- netdev_err(priv->net_dev, "bad length skb_len=%d\n", skb_len);
- ret = -EOVERFLOW;
- goto err_kfree_skb;
- }
-
- if (is_disconnect_status(priv->connect_status) ||
- (priv->connect_status & FORCE_DISCONNECT) ||
- priv->wpa.mic_failure.stop) {
- if (netif_queue_stopped(priv->net_dev))
- netif_wake_queue(priv->net_dev);
-
- dev_kfree_skb(skb);
-
- return 0;
- }
-
- /* power save wakeup */
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (!netif_queue_stopped(priv->net_dev))
- netif_stop_queue(priv->net_dev);
- }
-
- size = sizeof(*pp) + 6 + skb_len + 8;
- pp = kmalloc(hif_align_size(size), GFP_ATOMIC);
- if (!pp) {
- ret = -ENOMEM;
- goto err_kfree_skb;
- }
-
- p = (unsigned char *)pp->data;
-
- buffer = skb->data;
- length = skb->len;
-
- /* skb check */
- eth = (struct ethhdr *)skb->data;
- if (!ether_addr_equal(&priv->eth_addr[0], eth->h_source)) {
- netdev_err(priv->net_dev,
- "Invalid mac address: ethernet->h_source=%pM\n",
- eth->h_source);
- ret = -ENXIO;
- goto err_kfree;
- }
-
- /* dest and src MAC address copy */
- size = ETH_ALEN * 2;
- memcpy(p, buffer, size);
- p += size;
- buffer += size;
- length -= size;
-
- /* EtherType/Length check */
- if (*(buffer + 1) + (*buffer << 8) > 1500) {
- /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
- /* SAP/CTL/OUI(6 byte) add */
- *p++ = 0xAA; /* DSAP */
- *p++ = 0xAA; /* SSAP */
- *p++ = 0x03; /* CTL */
- *p++ = 0x00; /* OUI ("000000") */
- *p++ = 0x00; /* OUI ("000000") */
- *p++ = 0x00; /* OUI ("000000") */
- skb_len += 6;
- } else {
- /* Length(2 byte) delete */
- buffer += 2;
- length -= 2;
- skb_len -= 2;
- }
-
- /* pp->data copy */
- memcpy(p, buffer, length);
-
- p += length;
-
- /* for WPA */
- eth_hdr = (struct ether_hdr *)&pp->data[0];
- eth_proto = ntohs(eth_hdr->h_proto);
-
- /* for MIC FAILURE REPORT check */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.mic_failure.failure > 0) {
- aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
- eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
- keyinfo = ntohs(eap_key->key_info);
- }
- }
-
- if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
- /* no encryption */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.key[1].key_len == 0 &&
- priv->wpa.key[2].key_len == 0 &&
- priv->wpa.key[3].key_len == 0) {
- pp->auth_type = cpu_to_le16(TYPE_AUTH);
- } else {
- if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- u8 mic[MICHAEL_MIC_LEN];
-
- ret = michael_mic(priv->wpa.key[0].tx_mic_key,
- &pp->data[0], skb_len,
- 0, mic);
- if (ret < 0)
- goto err_kfree;
-
- memcpy(p, mic, sizeof(mic));
- length += sizeof(mic);
- skb_len += sizeof(mic);
- p += sizeof(mic);
- pp->auth_type =
- cpu_to_le16(TYPE_DATA);
- } else if (priv->wpa.pairwise_suite ==
- IW_AUTH_CIPHER_CCMP) {
- pp->auth_type =
- cpu_to_le16(TYPE_DATA);
- }
- }
- } else {
- if (eth_proto == ETH_P_PAE)
- pp->auth_type = cpu_to_le16(TYPE_AUTH);
- else
- pp->auth_type = cpu_to_le16(TYPE_DATA);
- }
-
- /* header value set */
- pp->header.size =
- cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len));
- pp->header.event = cpu_to_le16(HIF_DATA_REQ);
-
- /* tx request */
- ret = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len),
- send_packet_complete, skb);
-
- /* MIC FAILURE REPORT check */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.mic_failure.failure > 0) {
- if (keyinfo & WPA_KEY_INFO_ERROR &&
- keyinfo & WPA_KEY_INFO_REQUEST) {
- netdev_err(priv->net_dev,
- "MIC ERROR Report SET : %04X\n", keyinfo);
- hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
- }
- if (priv->wpa.mic_failure.failure == 2)
- priv->wpa.mic_failure.stop = 1;
- }
-
- return ret;
-
-err_kfree:
- kfree(pp);
-err_kfree_skb:
- dev_kfree_skb(skb);
-
- return ret;
-}
-
-static inline void ps_confirm_wait_inc(struct ks_wlan_private *priv)
-{
- if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET)
- atomic_inc(&priv->psstatus.confirm_wait);
-}
-
-static inline void send_request_to_device(struct ks_wlan_private *priv,
- void *data, size_t size)
-{
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, data, size, NULL, NULL);
-}
-
-static void hostif_mib_get_request(struct ks_wlan_private *priv,
- u32 mib_attribute)
-{
- struct hostif_mib_get_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ);
- if (!pp)
- return;
-
- pp->mib_attribute = cpu_to_le32(mib_attribute);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static void hostif_mib_set_request(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- enum mib_data_type type,
- void *data, size_t size)
-{
- struct hostif_mib_set_request_t *pp;
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- return;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ);
- if (!pp)
- return;
-
- pp->mib_attribute = cpu_to_le32(attr);
- pp->mib_value.size = cpu_to_le16(size);
- pp->mib_value.type = cpu_to_le16(type);
- memcpy(&pp->mib_value.body, data, size);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size));
-}
-
-static inline void hostif_mib_set_request_int(struct ks_wlan_private *priv,
- enum mib_attribute attr, int val)
-{
- __le32 v = cpu_to_le32(val);
- size_t size = sizeof(v);
-
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_INT, &v, size);
-}
-
-static inline void hostif_mib_set_request_bool(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- bool val)
-{
- __le32 v = cpu_to_le32(val);
- size_t size = sizeof(v);
-
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_BOOL, &v, size);
-}
-
-static inline void hostif_mib_set_request_ostring(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- void *data, size_t size)
-{
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_OSTRING, data, size);
-}
-
-static
-void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
-{
- struct hostif_start_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ);
- if (!pp)
- return;
-
- pp->mode = cpu_to_le16(mode);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-
- priv->aplist.size = 0;
- priv->scan_ind_count = 0;
-}
-
-static __le16 ks_wlan_cap(struct ks_wlan_private *priv)
-{
- u16 capability = 0x0000;
-
- if (priv->reg.preamble == SHORT_PREAMBLE)
- capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
-
- capability &= ~(WLAN_CAPABILITY_PBCC); /* pbcc not supported */
-
- if (priv->reg.phy_type != D_11B_ONLY_MODE) {
- capability |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- capability &= ~(WLAN_CAPABILITY_DSSS_OFDM);
- }
-
- return cpu_to_le16(capability);
-}
-
-static void init_request(struct ks_wlan_private *priv,
- struct hostif_request *req)
-{
- req->phy_type = cpu_to_le16(priv->reg.phy_type);
- req->cts_mode = cpu_to_le16(priv->reg.cts_mode);
- req->scan_type = cpu_to_le16(priv->reg.scan_type);
- req->rate_set.size = priv->reg.rate_set.size;
- req->capability = ks_wlan_cap(priv);
- memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0],
- priv->reg.rate_set.size);
-}
-
-static
-void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
-{
- struct hostif_ps_adhoc_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->channel = cpu_to_le16(priv->reg.channel);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
-{
- struct hostif_infrastructure_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), event);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
- pp->beacon_lost_count =
- cpu_to_le16(priv->reg.beacon_lost_count);
- pp->auth_type = cpu_to_le16(priv->reg.authenticate_type);
-
- pp->channel_list.body[0] = 1;
- pp->channel_list.body[1] = 8;
- pp->channel_list.body[2] = 2;
- pp->channel_list.body[3] = 9;
- pp->channel_list.body[4] = 3;
- pp->channel_list.body[5] = 10;
- pp->channel_list.body[6] = 4;
- pp->channel_list.body[7] = 11;
- pp->channel_list.body[8] = 5;
- pp->channel_list.body[9] = 12;
- pp->channel_list.body[10] = 6;
- pp->channel_list.body[11] = 13;
- pp->channel_list.body[12] = 7;
- if (priv->reg.phy_type == D_11G_ONLY_MODE) {
- pp->channel_list.size = 13;
- } else {
- pp->channel_list.body[13] = 14;
- pp->channel_list.size = 14;
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_adhoc_set_request(struct ks_wlan_private *priv)
-{
- struct hostif_adhoc_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->channel = cpu_to_le16(priv->reg.channel);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
-{
- struct hostif_adhoc_set2_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
-
- pp->channel_list.body[0] = priv->reg.channel;
- pp->channel_list.size = 1;
- memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_stop_request(struct ks_wlan_private *priv)
-{
- struct hostif_stop_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ);
- if (!pp)
- return;
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_phy_information_request(struct ks_wlan_private *priv)
-{
- struct hostif_phy_information_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ);
- if (!pp)
- return;
-
- if (priv->reg.phy_info_timer) {
- pp->type = cpu_to_le16(TIME_TYPE);
- pp->time = cpu_to_le16(priv->reg.phy_info_timer);
- } else {
- pp->type = cpu_to_le16(NORMAL_TYPE);
- pp->time = cpu_to_le16(0);
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_power_mgmt_request(struct ks_wlan_private *priv,
- u32 mode, u32 wake_up, u32 receive_dtims)
-{
- struct hostif_power_mgmt_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
- if (!pp)
- return;
-
- pp->mode = cpu_to_le32(mode);
- pp->wake_up = cpu_to_le32(wake_up);
- pp->receive_dtims = cpu_to_le32(receive_dtims);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_sleep_request(struct ks_wlan_private *priv,
- enum sleep_mode_type mode)
-{
- struct hostif_sleep_request *pp;
-
- if (mode == SLP_SLEEP) {
- pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ);
- if (!pp)
- return;
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
- } else if (mode == SLP_ACTIVE) {
- atomic_set(&priv->sleepstatus.wakeup_request, 1);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- } else {
- netdev_err(priv->net_dev, "invalid mode %ld\n", (long)mode);
- return;
- }
-}
-
-static
-void hostif_bss_scan_request(struct ks_wlan_private *priv,
- unsigned long scan_type, u8 *scan_ssid,
- u8 scan_ssid_len)
-{
- struct hostif_bss_scan_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ);
- if (!pp)
- return;
-
- pp->scan_type = scan_type;
-
- pp->ch_time_min = cpu_to_le32(110); /* default value */
- pp->ch_time_max = cpu_to_le32(130); /* default value */
- pp->channel_list.body[0] = 1;
- pp->channel_list.body[1] = 8;
- pp->channel_list.body[2] = 2;
- pp->channel_list.body[3] = 9;
- pp->channel_list.body[4] = 3;
- pp->channel_list.body[5] = 10;
- pp->channel_list.body[6] = 4;
- pp->channel_list.body[7] = 11;
- pp->channel_list.body[8] = 5;
- pp->channel_list.body[9] = 12;
- pp->channel_list.body[10] = 6;
- pp->channel_list.body[11] = 13;
- pp->channel_list.body[12] = 7;
- if (priv->reg.phy_type == D_11G_ONLY_MODE) {
- pp->channel_list.size = 13;
- } else {
- pp->channel_list.body[13] = 14;
- pp->channel_list.size = 14;
- }
- pp->ssid.size = 0;
-
- /* specified SSID SCAN */
- if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
- pp->ssid.size = scan_ssid_len;
- memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-
- priv->aplist.size = 0;
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_mic_failure_request(struct ks_wlan_private *priv,
- u16 failure_count, u16 timer)
-{
- struct hostif_mic_failure_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ);
- if (!pp)
- return;
-
- pp->failure_count = cpu_to_le16(failure_count);
- pp->timer = cpu_to_le16(timer);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
- /* Device I/O receive indication */
-static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size)
-{
- if (!priv->is_device_open)
- return;
-
- spin_lock(&priv->dev_read_lock);
- priv->dev_data[atomic_read(&priv->rec_count)] = p;
- priv->dev_size[atomic_read(&priv->rec_count)] = size;
-
- if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
- /* rx event count inc */
- atomic_inc(&priv->event_count);
- }
- atomic_inc(&priv->rec_count);
- if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
- atomic_set(&priv->rec_count, 0);
-
- wake_up_interruptible_all(&priv->devread_wait);
-
- spin_unlock(&priv->dev_read_lock);
-}
-
-void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size)
-{
- devio_rec_ind(priv, p, size);
-
- priv->rxp = p;
- priv->rx_size = size;
-
- if (get_word(priv) == priv->rx_size)
- hostif_event_check(priv);
-}
-
-static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
-{
- switch (type) {
- case SME_WEP_INDEX_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
- priv->reg.wep_index);
- break;
- case SME_WEP_KEY1_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE1,
- &priv->reg.wep_key[0].val[0],
- priv->reg.wep_key[0].size);
- break;
- case SME_WEP_KEY2_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE2,
- &priv->reg.wep_key[1].val[0],
- priv->reg.wep_key[1].size);
- break;
- case SME_WEP_KEY3_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE3,
- &priv->reg.wep_key[2].val[0],
- priv->reg.wep_key[2].size);
- break;
- case SME_WEP_KEY4_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE4,
- &priv->reg.wep_key[3].val[0],
- priv->reg.wep_key[3].size);
- break;
- case SME_WEP_FLAG_REQUEST:
- hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
- priv->reg.privacy_invoked);
- break;
- }
-}
-
-struct wpa_suite {
- __le16 size;
- unsigned char suite[4][CIPHER_ID_LEN];
-} __packed;
-
-struct rsn_mode {
- __le32 rsn_mode;
- __le16 rsn_capability;
-} __packed;
-
-static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
-{
- struct wpa_suite wpa_suite;
- struct rsn_mode rsn_mode;
- size_t size;
- u32 mode;
- const u8 *buf = NULL;
-
- memset(&wpa_suite, 0, sizeof(wpa_suite));
-
- switch (type) {
- case SME_RSN_UCAST_REQUEST:
- wpa_suite.size = cpu_to_le16(1);
- switch (priv->wpa.pairwise_suite) {
- case IW_AUTH_CIPHER_NONE:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
- break;
- case IW_AUTH_CIPHER_WEP40:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
- break;
- case IW_AUTH_CIPHER_TKIP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
- break;
- case IW_AUTH_CIPHER_CCMP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
- break;
- case IW_AUTH_CIPHER_WEP104:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
- break;
- }
-
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
- size = sizeof(wpa_suite.size) +
- (CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size));
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_UNICAST_CIPHER,
- &wpa_suite, size);
- break;
- case SME_RSN_MCAST_REQUEST:
- switch (priv->wpa.group_suite) {
- case IW_AUTH_CIPHER_NONE:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
- break;
- case IW_AUTH_CIPHER_WEP40:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
- break;
- case IW_AUTH_CIPHER_TKIP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
- break;
- case IW_AUTH_CIPHER_CCMP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
- break;
- case IW_AUTH_CIPHER_WEP104:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
- break;
- }
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_MULTICAST_CIPHER,
- &wpa_suite.suite[0][0],
- CIPHER_ID_LEN);
- break;
- case SME_RSN_AUTH_REQUEST:
- wpa_suite.size = cpu_to_le16(1);
- switch (priv->wpa.key_mgmt_suite) {
- case IW_AUTH_KEY_MGMT_802_1X:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_1X : KEY_MGMT_ID_WPA_1X;
- break;
- case IW_AUTH_KEY_MGMT_PSK:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_PSK : KEY_MGMT_ID_WPA_PSK;
- break;
- case 0:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_NONE : KEY_MGMT_ID_WPA_NONE;
- break;
- case 4:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_WPANONE :
- KEY_MGMT_ID_WPA_WPANONE;
- break;
- }
-
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, KEY_MGMT_ID_LEN);
- size = sizeof(wpa_suite.size) +
- (KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size));
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_AUTH_SUITE,
- &wpa_suite, size);
- break;
- case SME_RSN_ENABLED_REQUEST:
- hostif_mib_set_request_bool(priv, DOT11_RSN_ENABLED,
- priv->wpa.rsn_enabled);
- break;
- case SME_RSN_MODE_REQUEST:
- mode = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- RSN_MODE_WPA2 :
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) ?
- RSN_MODE_WPA : RSN_MODE_NONE;
- rsn_mode.rsn_mode = cpu_to_le32(mode);
- rsn_mode.rsn_capability = cpu_to_le16(0);
- hostif_mib_set_request_ostring(priv, LOCAL_RSN_MODE,
- &rsn_mode, sizeof(rsn_mode));
- break;
- }
-}
-
-static
-void hostif_sme_mode_setup(struct ks_wlan_private *priv)
-{
- unsigned char rate_size;
- unsigned char rate_octet[RATE_SET_MAX_SIZE];
- int i = 0;
-
- /* rebuild the rate set when the tx rate is auto, since phy_type may have changed (#94) */
- if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- priv->reg.rate_set.body[3] = TX_RATE_11M;
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.rate_set.size = 4;
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.rate_set.size = 12;
- }
- }
-
- /* rate mask by phy setting */
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!is_11b_rate(priv->reg.rate_set.body[i]))
- break;
-
- if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) {
- rate_octet[i] = priv->reg.rate_set.body[i] &
- RATE_MASK;
- } else {
- rate_octet[i] = priv->reg.rate_set.body[i];
- }
- }
-
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!is_11bg_rate(priv->reg.rate_set.body[i]))
- break;
-
- if (is_ofdm_ext_rate(priv->reg.rate_set.body[i])) {
- rate_octet[i] = priv->reg.rate_set.body[i] &
- RATE_MASK;
- } else {
- rate_octet[i] = priv->reg.rate_set.body[i];
- }
- }
- }
- rate_size = i;
- if (rate_size == 0) {
- if (priv->reg.phy_type == D_11G_ONLY_MODE)
- rate_octet[0] = TX_RATE_6M | BASIC_RATE;
- else
- rate_octet[0] = TX_RATE_2M | BASIC_RATE;
- rate_size = 1;
- }
-
- /* rate set update */
- priv->reg.rate_set.size = rate_size;
- memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);
-
- switch (priv->reg.operation_mode) {
- case MODE_PSEUDO_ADHOC:
- hostif_ps_adhoc_set_request(priv);
- break;
- case MODE_INFRASTRUCTURE:
- if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
- hostif_infrastructure_set_request(priv,
- HIF_INFRA_SET_REQ);
- } else {
- hostif_infrastructure_set_request(priv,
- HIF_INFRA_SET2_REQ);
- netdev_dbg(priv->net_dev,
- "Infra bssid = %pM\n", priv->reg.bssid);
- }
- break;
- case MODE_ADHOC:
- if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
- hostif_adhoc_set_request(priv);
- } else {
- hostif_adhoc_set2_request(priv);
- netdev_dbg(priv->net_dev,
- "Adhoc bssid = %pM\n", priv->reg.bssid);
- }
- break;
- default:
- break;
- }
-}
-
-static
-void hostif_sme_multicast_set(struct ks_wlan_private *priv)
-{
- struct net_device *dev = priv->net_dev;
- int mc_count;
- struct netdev_hw_addr *ha;
- char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
- int i = 0;
-
- spin_lock(&priv->multicast_spin);
-
- memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
-
- if (dev->flags & IFF_PROMISC) {
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_PROMISC);
- goto spin_unlock;
- }
-
- if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
- (dev->flags & IFF_ALLMULTI)) {
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCASTALL);
- goto spin_unlock;
- }
-
- if (priv->sme_i.sme_flag & SME_MULTICAST) {
- mc_count = netdev_mc_count(dev);
- netdev_for_each_mc_addr(ha, dev) {
- ether_addr_copy(&set_address[i * ETH_ALEN], ha->addr);
- i++;
- }
- priv->sme_i.sme_flag &= ~SME_MULTICAST;
- hostif_mib_set_request_ostring(priv, LOCAL_MULTICAST_ADDRESS,
- &set_address[0],
- ETH_ALEN * mc_count);
- } else {
- priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCAST);
- }
-
-spin_unlock:
- spin_unlock(&priv->multicast_spin);
-}
-
-static void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
-{
- u32 mode, wake_up, receive_dtims;
-
- if (priv->reg.power_mgmt != POWER_MGMT_SAVE1 &&
- priv->reg.power_mgmt != POWER_MGMT_SAVE2) {
- mode = POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = 0;
- } else {
- mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
- POWER_SAVE : POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
- priv->reg.power_mgmt == POWER_MGMT_SAVE2);
- }
-
- hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims);
-}
-
-static void hostif_sme_sleep_set(struct ks_wlan_private *priv)
-{
- if (priv->sleep_mode != SLP_SLEEP &&
- priv->sleep_mode != SLP_ACTIVE)
- return;
-
- hostif_sleep_request(priv, priv->sleep_mode);
-}
-
-static
-void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
-{
- switch (type) {
- case SME_SET_FLAG:
- hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
- priv->reg.privacy_invoked);
- break;
- case SME_SET_TXKEY:
- hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
- priv->wpa.txkey);
- break;
- case SME_SET_KEY1:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE1,
- &priv->wpa.key[0].key_val[0],
- priv->wpa.key[0].key_len);
- break;
- case SME_SET_KEY2:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE2,
- &priv->wpa.key[1].key_val[0],
- priv->wpa.key[1].key_len);
- break;
- case SME_SET_KEY3:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE3,
- &priv->wpa.key[2].key_val[0],
- priv->wpa.key[2].key_len);
- break;
- case SME_SET_KEY4:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE4,
- &priv->wpa.key[3].key_val[0],
- priv->wpa.key[3].key_len);
- break;
- case SME_SET_PMK_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_PMK_TSC,
- &priv->wpa.key[0].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- case SME_SET_GMK1_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_GMK1_TSC,
- &priv->wpa.key[1].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- case SME_SET_GMK2_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_GMK2_TSC,
- &priv->wpa.key[2].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- }
-}
-
-static
-void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
-{
- struct pmk_cache {
- __le16 size;
- struct {
- u8 bssid[ETH_ALEN];
- u8 pmkid[IW_PMKID_LEN];
- } __packed list[PMK_LIST_MAX];
- } __packed pmkcache;
- struct pmk *pmk;
- size_t size;
- int i = 0;
-
- list_for_each_entry(pmk, &priv->pmklist.head, list) {
- if (i >= PMK_LIST_MAX)
- break;
- ether_addr_copy(pmkcache.list[i].bssid, pmk->bssid);
- memcpy(pmkcache.list[i].pmkid, pmk->pmkid, IW_PMKID_LEN);
- i++;
- }
- pmkcache.size = cpu_to_le16(priv->pmklist.size);
- size = sizeof(priv->pmklist.size) +
- ((ETH_ALEN + IW_PMKID_LEN) * priv->pmklist.size);
- hostif_mib_set_request_ostring(priv, LOCAL_PMK, &pmkcache, size);
-}
-
-/* execute sme */
-static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
-{
- u16 failure;
-
- switch (event) {
- case SME_START:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
- break;
- case SME_MULTICAST_REQUEST:
- hostif_sme_multicast_set(priv);
- break;
- case SME_MACADDRESS_SET_REQUEST:
- hostif_mib_set_request_ostring(priv, LOCAL_CURRENTADDRESS,
- &priv->eth_addr[0], ETH_ALEN);
- break;
- case SME_BSS_SCAN_REQUEST:
- hostif_bss_scan_request(priv, priv->reg.scan_type,
- priv->scan_ssid, priv->scan_ssid_len);
- break;
- case SME_POW_MNGMT_REQUEST:
- hostif_sme_power_mgmt_set(priv);
- break;
- case SME_PHY_INFO_REQUEST:
- hostif_phy_information_request(priv);
- break;
- case SME_MIC_FAILURE_REQUEST:
- failure = priv->wpa.mic_failure.failure;
- if (failure != 1 && failure != 2) {
- netdev_err(priv->net_dev,
- "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
- failure);
- return;
- }
- hostif_mic_failure_request(priv, failure - 1, (failure == 1) ?
- 0 : priv->wpa.mic_failure.counter);
- break;
- case SME_MIC_FAILURE_CONFIRM:
- if (priv->wpa.mic_failure.failure == 2) {
- if (priv->wpa.mic_failure.stop)
- priv->wpa.mic_failure.stop = 0;
- priv->wpa.mic_failure.failure = 0;
- hostif_start_request(priv, priv->reg.operation_mode);
- }
- break;
- case SME_GET_MAC_ADDRESS:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
- break;
- case SME_GET_PRODUCT_VERSION:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- priv->dev_state = DEVICE_STATE_PREINIT;
- break;
- case SME_STOP_REQUEST:
- hostif_stop_request(priv);
- break;
- case SME_RTS_THRESHOLD_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_RTS_THRESHOLD,
- priv->reg.rts);
- break;
- case SME_FRAGMENTATION_THRESHOLD_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_FRAGMENTATION_THRESHOLD,
- priv->reg.fragment);
- break;
- case SME_WEP_INDEX_REQUEST:
- case SME_WEP_KEY1_REQUEST:
- case SME_WEP_KEY2_REQUEST:
- case SME_WEP_KEY3_REQUEST:
- case SME_WEP_KEY4_REQUEST:
- case SME_WEP_FLAG_REQUEST:
- hostif_sme_set_wep(priv, event);
- break;
- case SME_RSN_UCAST_REQUEST:
- case SME_RSN_MCAST_REQUEST:
- case SME_RSN_AUTH_REQUEST:
- case SME_RSN_ENABLED_REQUEST:
- case SME_RSN_MODE_REQUEST:
- hostif_sme_set_rsn(priv, event);
- break;
- case SME_SET_FLAG:
- case SME_SET_TXKEY:
- case SME_SET_KEY1:
- case SME_SET_KEY2:
- case SME_SET_KEY3:
- case SME_SET_KEY4:
- case SME_SET_PMK_TSC:
- case SME_SET_GMK1_TSC:
- case SME_SET_GMK2_TSC:
- hostif_sme_set_key(priv, event);
- break;
- case SME_SET_PMKSA:
- hostif_sme_set_pmksa(priv);
- break;
- case SME_WPS_ENABLE_REQUEST:
- hostif_mib_set_request_int(priv, LOCAL_WPS_ENABLE,
- priv->wps.wps_enabled);
- break;
- case SME_WPS_PROBE_REQUEST:
- hostif_mib_set_request_ostring(priv, LOCAL_WPS_PROBE_REQ,
- priv->wps.ie, priv->wps.ielen);
- break;
- case SME_MODE_SET_REQUEST:
- hostif_sme_mode_setup(priv);
- break;
- case SME_SET_GAIN:
- hostif_mib_set_request_ostring(priv, LOCAL_GAIN,
- &priv->gain, sizeof(priv->gain));
- break;
- case SME_GET_GAIN:
- hostif_mib_get_request(priv, LOCAL_GAIN);
- break;
- case SME_GET_EEPROM_CKSUM:
- priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT; /* initialize */
- hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
- break;
- case SME_START_REQUEST:
- hostif_start_request(priv, priv->reg.operation_mode);
- break;
- case SME_START_CONFIRM:
- /* for power save */
- atomic_set(&priv->psstatus.snooze_guard, 0);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- if (priv->dev_state == DEVICE_STATE_PREINIT)
- priv->dev_state = DEVICE_STATE_INIT;
- /* wake_up_interruptible_all(&priv->confirm_wait); */
- complete(&priv->confirm_wait);
- break;
- case SME_SLEEP_REQUEST:
- hostif_sme_sleep_set(priv);
- break;
- case SME_SET_REGION:
- hostif_mib_set_request_int(priv, LOCAL_REGION, priv->region);
- break;
- case SME_MULTICAST_CONFIRM:
- case SME_BSS_SCAN_CONFIRM:
- case SME_POW_MNGMT_CONFIRM:
- case SME_PHY_INFO_CONFIRM:
- case SME_STOP_CONFIRM:
- case SME_RTS_THRESHOLD_CONFIRM:
- case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
- case SME_WEP_INDEX_CONFIRM:
- case SME_WEP_KEY1_CONFIRM:
- case SME_WEP_KEY2_CONFIRM:
- case SME_WEP_KEY3_CONFIRM:
- case SME_WEP_KEY4_CONFIRM:
- case SME_WEP_FLAG_CONFIRM:
- case SME_RSN_UCAST_CONFIRM:
- case SME_RSN_MCAST_CONFIRM:
- case SME_RSN_AUTH_CONFIRM:
- case SME_RSN_ENABLED_CONFIRM:
- case SME_RSN_MODE_CONFIRM:
- case SME_MODE_SET_CONFIRM:
- case SME_TERMINATE:
- default:
- break;
- }
-}
-
-static void hostif_sme_work(struct work_struct *work)
-{
- struct ks_wlan_private *priv;
-
- priv = container_of(work, struct ks_wlan_private, sme_work);
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- return;
-
- if (cnt_smeqbody(priv) <= 0)
- return;
-
- hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
- inc_smeqhead(priv);
- if (cnt_smeqbody(priv) > 0)
- schedule_work(&priv->sme_work);
-}
-
-/* send to Station Management Entity module */
-void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
-{
- /* enqueue sme event */
- if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
- priv->sme_i.event_buff[priv->sme_i.qtail] = event;
- inc_smeqtail(priv);
- } else {
- /* in case of buffer overflow */
- netdev_err(priv->net_dev, "sme queue buffer overflow\n");
- }
-
- schedule_work(&priv->sme_work);
-}
-
-static inline void hostif_aplist_init(struct ks_wlan_private *priv)
-{
- size_t size = LOCAL_APLIST_MAX * sizeof(struct local_ap);
-
- priv->aplist.size = 0;
- memset(&priv->aplist.ap[0], 0, size);
-}
-
-static inline void hostif_status_init(struct ks_wlan_private *priv)
-{
- priv->infra_status = 0;
- priv->current_rate = 4;
- priv->connect_status = DISCONNECT_STATUS;
-}
-
-static inline void hostif_sme_init(struct ks_wlan_private *priv)
-{
- priv->sme_i.sme_status = SME_IDLE;
- priv->sme_i.qhead = 0;
- priv->sme_i.qtail = 0;
- spin_lock_init(&priv->sme_i.sme_spin);
- priv->sme_i.sme_flag = 0;
- INIT_WORK(&priv->sme_work, hostif_sme_work);
-}
-
-static inline void hostif_wpa_init(struct ks_wlan_private *priv)
-{
- memset(&priv->wpa, 0, sizeof(priv->wpa));
- priv->wpa.rsn_enabled = false;
- priv->wpa.mic_failure.failure = 0;
- priv->wpa.mic_failure.last_failure_time = 0;
- priv->wpa.mic_failure.stop = 0;
-}
-
-static inline void hostif_power_save_init(struct ks_wlan_private *priv)
-{
- atomic_set(&priv->psstatus.status, PS_NONE);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- atomic_set(&priv->psstatus.snooze_guard, 0);
- init_completion(&priv->psstatus.wakeup_wait);
- INIT_WORK(&priv->wakeup_work, ks_wlan_hw_wakeup_task);
-}
-
-static inline void hostif_pmklist_init(struct ks_wlan_private *priv)
-{
- int i;
-
- memset(&priv->pmklist, 0, sizeof(priv->pmklist));
- INIT_LIST_HEAD(&priv->pmklist.head);
- for (i = 0; i < PMK_LIST_MAX; i++)
- INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
-}
-
-static inline void hostif_counters_init(struct ks_wlan_private *priv)
-{
- priv->dev_count = 0;
- atomic_set(&priv->event_count, 0);
- atomic_set(&priv->rec_count, 0);
-}
-
-int hostif_init(struct ks_wlan_private *priv)
-{
- hostif_aplist_init(priv);
- hostif_status_init(priv);
-
- spin_lock_init(&priv->multicast_spin);
- spin_lock_init(&priv->dev_read_lock);
- init_waitqueue_head(&priv->devread_wait);
-
- hostif_counters_init(priv);
- hostif_power_save_init(priv);
- hostif_wpa_init(priv);
- hostif_pmklist_init(priv);
- hostif_sme_init(priv);
-
- return 0;
-}
-
-void hostif_exit(struct ks_wlan_private *priv)
-{
- cancel_work_sync(&priv->sme_work);
-}
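
For reference, a minimal sketch of how the host-interface entry points removed above are intended to be called. The function names below are hypothetical placeholders; the real callers live elsewhere in the ks7010 driver and are not part of this file.

/* Illustrative sketch only, assuming the declarations from ks_hostif.h. */
static int example_attach(struct ks_wlan_private *priv)
{
	int ret;

	/* One-time state setup: AP list, counters, power-save, WPA, PMK list, SME queue. */
	ret = hostif_init(priv);
	if (ret)
		return ret;

	/* Kick the state machine; queued events are drained by hostif_sme_work(). */
	hostif_sme_enqueue(priv, SME_START);
	return 0;
}

static void example_detach(struct ks_wlan_private *priv)
{
	/* Flush any pending SME work before the private data goes away. */
	hostif_exit(priv);
}
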
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
deleted file mode 100644
index c62a494ed6bb..000000000000
--- a/drivers/staging/ks7010/ks_hostif.h
+++ /dev/null
@@ -1,617 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream wireless LAN
- *
- * Copyright (c) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_HOSTIF_H_
-#define _KS_HOSTIF_H_
-
-#include <linux/compiler.h>
-#include <linux/ieee80211.h>
-
-/*
- * HOST-MAC I/F events
- */
-#define HIF_DATA_REQ 0xE001
-#define HIF_DATA_IND 0xE801
-#define HIF_MIB_GET_REQ 0xE002
-#define HIF_MIB_GET_CONF 0xE802
-#define HIF_MIB_SET_REQ 0xE003
-#define HIF_MIB_SET_CONF 0xE803
-#define HIF_POWER_MGMT_REQ 0xE004
-#define HIF_POWER_MGMT_CONF 0xE804
-#define HIF_START_REQ 0xE005
-#define HIF_START_CONF 0xE805
-#define HIF_CONNECT_IND 0xE806
-#define HIF_STOP_REQ 0xE006
-#define HIF_STOP_CONF 0xE807
-#define HIF_PS_ADH_SET_REQ 0xE007
-#define HIF_PS_ADH_SET_CONF 0xE808
-#define HIF_INFRA_SET_REQ 0xE008
-#define HIF_INFRA_SET_CONF 0xE809
-#define HIF_ADH_SET_REQ 0xE009
-#define HIF_ADH_SET_CONF 0xE80A
-#define HIF_AP_SET_REQ 0xE00A
-#define HIF_AP_SET_CONF 0xE80B
-#define HIF_ASSOC_INFO_IND 0xE80C
-#define HIF_MIC_FAILURE_REQ 0xE00B
-#define HIF_MIC_FAILURE_CONF 0xE80D
-#define HIF_SCAN_REQ 0xE00C
-#define HIF_SCAN_CONF 0xE80E
-#define HIF_PHY_INFO_REQ 0xE00D
-#define HIF_PHY_INFO_CONF 0xE80F
-#define HIF_SLEEP_REQ 0xE00E
-#define HIF_SLEEP_CONF 0xE810
-#define HIF_PHY_INFO_IND 0xE811
-#define HIF_SCAN_IND 0xE812
-#define HIF_INFRA_SET2_REQ 0xE00F
-#define HIF_INFRA_SET2_CONF 0xE813
-#define HIF_ADH_SET2_REQ 0xE010
-#define HIF_ADH_SET2_CONF 0xE814
-
-#define HIF_REQ_MAX 0xE010
-
-/*
- * HOST-MAC I/F data structure
- * Byte alignment Little Endian
- */
-
-struct hostif_hdr {
- __le16 size;
- __le16 event;
-} __packed;
-
-struct hostif_data_request {
- struct hostif_hdr header;
- __le16 auth_type;
-#define TYPE_DATA 0x0000
-#define TYPE_AUTH 0x0001
- __le16 reserved;
- u8 data[];
-} __packed;
-
-#define TYPE_PMK1 0x0001
-#define TYPE_GMK1 0x0002
-#define TYPE_GMK2 0x0003
-
-#define CHANNEL_LIST_MAX_SIZE 14
-struct channel_list {
- u8 size;
- u8 body[CHANNEL_LIST_MAX_SIZE];
- u8 pad;
-} __packed;
-
-/**
- * enum mib_attribute - Management Information Base attribute
- * Attribute value used for accessing and updating MIB
- *
- * @DOT11_MAC_ADDRESS: MAC Address (R)
- * @DOT11_PRODUCT_VERSION: Firmware Version (R)
- * @DOT11_RTS_THRESHOLD: RTS Threshold (R/W)
- * @DOT11_FRAGMENTATION_THRESHOLD: Fragment Threshold (R/W)
- * @DOT11_PRIVACY_INVOKED: WEP ON/OFF (W)
- * @DOT11_WEP_DEFAULT_KEY_ID: WEP Index (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE1: WEP Key#1(TKIP AES: PairwiseTemporalKey) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE2: WEP Key#2(TKIP AES: GroupKey1) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE3: WEP Key#3(TKIP AES: GroupKey2) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE4: WEP Key#4 (W)
- * @DOT11_WEP_LIST: WEP LIST
- * @DOT11_DESIRED_SSID: SSID
- * @DOT11_CURRENT_CHANNEL: channel set
- * @DOT11_OPERATION_RATE_SET: rate set
- * @LOCAL_AP_SEARCH_INTERVAL: AP search interval (R/W)
- * @LOCAL_CURRENTADDRESS: MAC Address change (W)
- * @LOCAL_MULTICAST_ADDRESS: Multicast Address (W)
- * @LOCAL_MULTICAST_FILTER: Multicast Address Filter enable/disable (W)
- * @LOCAL_SEARCHED_AP_LIST: AP list (R)
- * @LOCAL_LINK_AP_STATUS: Link AP status (R)
- * @LOCAL_PACKET_STATISTICS: tx,rx packets statistics
- * @LOCAL_AP_SCAN_LIST_TYPE_SET: AP_SCAN_LIST_TYPE
- * @DOT11_RSN_ENABLED: WPA enable/disable (W)
- * @LOCAL_RSN_MODE: RSN mode WPA/WPA2 (W)
- * @DOT11_RSN_CONFIG_MULTICAST_CIPHER: GroupKeyCipherSuite (W)
- * @DOT11_RSN_CONFIG_UNICAST_CIPHER: PairwiseKeyCipherSuite (W)
- * @DOT11_RSN_CONFIG_AUTH_SUITE: AuthenticationKeyManagementSuite (W)
- * @DOT11_RSN_CONFIG_VERSION: RSN version (W)
- * @LOCAL_RSN_CONFIG_ALL: RSN CONFIG ALL (W)
- * @DOT11_PMK_TSC: PMK_TSC (W)
- * @DOT11_GMK1_TSC: GMK1_TSC (W)
- * @DOT11_GMK2_TSC: GMK2_TSC (W)
- * @DOT11_GMK3_TSC: GMK3_TSC
- * @LOCAL_PMK: Pairwise Master Key cache (W)
- * @LOCAL_REGION: Region setting
- * @LOCAL_WPS_ENABLE: WiFi Protected Setup
- * @LOCAL_WPS_PROBE_REQ: WPS Probe Request
- * @LOCAL_GAIN: Carrier sense threshold (for demo use)
- * @LOCAL_EEPROM_SUM: EEPROM checksum information
- */
-enum mib_attribute {
- DOT11_MAC_ADDRESS = 0x21010100,
- DOT11_PRODUCT_VERSION = 0x31024100,
- DOT11_RTS_THRESHOLD = 0x21020100,
- DOT11_FRAGMENTATION_THRESHOLD = 0x21050100,
- DOT11_PRIVACY_INVOKED = 0x15010100,
- DOT11_WEP_DEFAULT_KEY_ID = 0x15020100,
- DOT11_WEP_DEFAULT_KEY_VALUE1 = 0x13020101,
- DOT11_WEP_DEFAULT_KEY_VALUE2 = 0x13020102,
- DOT11_WEP_DEFAULT_KEY_VALUE3 = 0x13020103,
- DOT11_WEP_DEFAULT_KEY_VALUE4 = 0x13020104,
- DOT11_WEP_LIST = 0x13020100,
- DOT11_DESIRED_SSID = 0x11090100,
- DOT11_CURRENT_CHANNEL = 0x45010100,
- DOT11_OPERATION_RATE_SET = 0x11110100,
- LOCAL_AP_SEARCH_INTERVAL = 0xF1010100,
- LOCAL_CURRENTADDRESS = 0xF1050100,
- LOCAL_MULTICAST_ADDRESS = 0xF1060100,
- LOCAL_MULTICAST_FILTER = 0xF1060200,
- LOCAL_SEARCHED_AP_LIST = 0xF1030100,
- LOCAL_LINK_AP_STATUS = 0xF1040100,
- LOCAL_PACKET_STATISTICS = 0xF1020100,
- LOCAL_AP_SCAN_LIST_TYPE_SET = 0xF1030200,
- DOT11_RSN_ENABLED = 0x15070100,
- LOCAL_RSN_MODE = 0x56010100,
- DOT11_RSN_CONFIG_MULTICAST_CIPHER = 0x51040100,
- DOT11_RSN_CONFIG_UNICAST_CIPHER = 0x52020100,
- DOT11_RSN_CONFIG_AUTH_SUITE = 0x53020100,
- DOT11_RSN_CONFIG_VERSION = 0x51020100,
- LOCAL_RSN_CONFIG_ALL = 0x5F010100,
- DOT11_PMK_TSC = 0x55010100,
- DOT11_GMK1_TSC = 0x55010101,
- DOT11_GMK2_TSC = 0x55010102,
- DOT11_GMK3_TSC = 0x55010103,
- LOCAL_PMK = 0x58010100,
- LOCAL_REGION = 0xF10A0100,
- LOCAL_WPS_ENABLE = 0xF10B0100,
- LOCAL_WPS_PROBE_REQ = 0xF10C0100,
- LOCAL_GAIN = 0xF10D0100,
- LOCAL_EEPROM_SUM = 0xF10E0100
-};
-
-struct hostif_mib_get_request {
- struct hostif_hdr header;
- __le32 mib_attribute;
-} __packed;
-
-/**
- * enum mib_data_type - Management Information Base data type.
- * @MIB_VALUE_TYPE_NULL: NULL type
- * @MIB_VALUE_TYPE_INT: INTEGER type
- * @MIB_VALUE_TYPE_BOOL: BOOL type
- * @MIB_VALUE_TYPE_COUNT32: unused
- * @MIB_VALUE_TYPE_OSTRING: Chunk of memory
- */
-enum mib_data_type {
- MIB_VALUE_TYPE_NULL = 0,
- MIB_VALUE_TYPE_INT,
- MIB_VALUE_TYPE_BOOL,
- MIB_VALUE_TYPE_COUNT32,
- MIB_VALUE_TYPE_OSTRING
-};
-
-struct hostif_mib_value {
- __le16 size;
- __le16 type;
- u8 body[];
-} __packed;
-
-struct hostif_mib_get_confirm_t {
- struct hostif_hdr header;
- __le32 mib_status;
-#define MIB_SUCCESS 0
-#define MIB_INVALID 1
-#define MIB_READ_ONLY 2
-#define MIB_WRITE_ONLY 3
- __le32 mib_attribute;
- struct hostif_mib_value mib_value;
-} __packed;
-
-struct hostif_mib_set_request_t {
- struct hostif_hdr header;
- __le32 mib_attribute;
- struct hostif_mib_value mib_value;
-} __packed;
-
-struct hostif_power_mgmt_request {
- struct hostif_hdr header;
- __le32 mode;
-#define POWER_ACTIVE 1
-#define POWER_SAVE 2
- __le32 wake_up;
-#define SLEEP_FALSE 0
-#define SLEEP_TRUE 1 /* not used */
- __le32 receive_dtims;
-#define DTIM_FALSE 0
-#define DTIM_TRUE 1
-} __packed;
-
-enum power_mgmt_mode_type {
- POWER_MGMT_ACTIVE,
- POWER_MGMT_SAVE1,
- POWER_MGMT_SAVE2
-};
-
-#define RESULT_SUCCESS 0
-#define RESULT_INVALID_PARAMETERS 1
-#define RESULT_NOT_SUPPORTED 2
-/* #define RESULT_ALREADY_RUNNING 3 */
-#define RESULT_ALREADY_RUNNING 7
-
-struct hostif_start_request {
- struct hostif_hdr header;
- __le16 mode;
-#define MODE_PSEUDO_ADHOC 0
-#define MODE_INFRASTRUCTURE 1
-#define MODE_AP 2 /* not used */
-#define MODE_ADHOC 3
-} __packed;
-
-struct ssid {
- u8 size;
- u8 body[IEEE80211_MAX_SSID_LEN];
- u8 ssid_pad;
-} __packed;
-
-#define RATE_SET_MAX_SIZE 16
-struct rate_set8 {
- u8 size;
- u8 body[8];
- u8 rate_pad;
-} __packed;
-
-struct fh_parms {
- __le16 dwell_time;
- u8 hop_set;
- u8 hop_pattern;
- u8 hop_index;
-} __packed;
-
-struct ds_parms {
- u8 channel;
-} __packed;
-
-struct cf_parms {
- u8 count;
- u8 period;
- __le16 max_duration;
- __le16 dur_remaining;
-} __packed;
-
-struct ibss_parms {
- __le16 atim_window;
-} __packed;
-
-struct rsn_t {
- u8 size;
-#define RSN_BODY_SIZE 64
- u8 body[RSN_BODY_SIZE];
-} __packed;
-
-struct erp_params_t {
- u8 erp_info;
-} __packed;
-
-struct rate_set16 {
- u8 size;
- u8 body[16];
- u8 rate_pad;
-} __packed;
-
-struct ap_info {
- u8 bssid[6]; /* +00 */
- u8 rssi; /* +06 */
- u8 sq; /* +07 */
- u8 noise; /* +08 */
- u8 pad0; /* +09 */
- __le16 beacon_period; /* +10 */
- __le16 capability; /* +12 */
- u8 frame_type; /* +14 */
- u8 ch_info; /* +15 */
- __le16 body_size; /* +16 */
- u8 body[1024]; /* +18 */
- /* +1032 */
-} __packed;
-
-struct link_ap_info {
- u8 bssid[6]; /* +00 */
- u8 rssi; /* +06 */
- u8 sq; /* +07 */
- u8 noise; /* +08 */
- u8 pad0; /* +09 */
- __le16 beacon_period; /* +10 */
- __le16 capability; /* +12 */
- struct rate_set8 rate_set; /* +14 */
- struct fh_parms fh_parameter; /* +24 */
- struct ds_parms ds_parameter; /* +29 */
- struct cf_parms cf_parameter; /* +30 */
- struct ibss_parms ibss_parameter; /* +36 */
- struct erp_params_t erp_parameter; /* +38 */
- u8 pad1; /* +39 */
- struct rate_set8 ext_rate_set; /* +40 */
- u8 DTIM_period; /* +50 */
- u8 rsn_mode; /* +51 */
-#define RSN_MODE_NONE 0
-#define RSN_MODE_WPA 1
-#define RSN_MODE_WPA2 2
- struct {
- u8 size; /* +52 */
- u8 body[128]; /* +53 */
- } __packed rsn;
-} __packed;
-
-#define RESULT_CONNECT 0
-#define RESULT_DISCONNECT 1
-
-struct hostif_stop_request {
- struct hostif_hdr header;
-} __packed;
-
-#define D_11B_ONLY_MODE 0
-#define D_11G_ONLY_MODE 1
-#define D_11BG_COMPATIBLE_MODE 2
-#define D_11A_ONLY_MODE 3
-
-#define CTS_MODE_FALSE 0
-#define CTS_MODE_TRUE 1
-
-struct hostif_request {
- __le16 phy_type;
- __le16 cts_mode;
- __le16 scan_type;
- __le16 capability;
- struct rate_set16 rate_set;
-} __packed;
-
-/**
- * struct hostif_ps_adhoc_set_request - pseudo adhoc mode
- * @capability: bit5 : preamble
- * bit6 : pbcc - Not supported always 0
- * bit10 : ShortSlotTime
- * bit13 : DSSS-OFDM - Not supported always 0
- */
-struct hostif_ps_adhoc_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- __le16 channel;
-} __packed;
-
-#define AUTH_TYPE_OPEN_SYSTEM 0
-#define AUTH_TYPE_SHARED_KEY 1
-
-/**
- * struct hostif_infrastructure_set_request
- * @capability: bit5 : preamble
- * bit6 : pbcc - Not supported always 0
- * bit10 : ShortSlotTime
- * bit13 : DSSS-OFDM - Not supported always 0
- */
-struct hostif_infrastructure_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- struct ssid ssid;
- __le16 beacon_lost_count;
- __le16 auth_type;
- struct channel_list channel_list;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-/**
- * struct hostif_adhoc_set_request
- * @capability: bit5 : preamble
- * bit6 : pbcc - Not supported always 0
- * bit10 : ShortSlotTime
- * bit13 : DSSS-OFDM - Not supported always 0
- */
-struct hostif_adhoc_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- struct ssid ssid;
- __le16 channel;
-} __packed;
-
-/**
- * struct hostif_adhoc_set2_request
- * @capability: bit5 : preamble
- * bit6 : pbcc - Not supported always 0
- * bit10 : ShortSlotTime
- * bit13 : DSSS-OFDM - Not supported always 0
- */
-struct hostif_adhoc_set2_request {
- struct hostif_hdr header;
- struct hostif_request request;
- __le16 reserved;
- struct ssid ssid;
- struct channel_list channel_list;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-struct association_request {
- u8 type;
- u8 pad;
- __le16 capability;
- __le16 listen_interval;
- u8 ap_address[6];
- __le16 req_ies_size;
-} __packed;
-
-struct association_response {
- u8 type;
- u8 pad;
- __le16 capability;
- __le16 status;
- __le16 association_id;
- __le16 resp_ies_size;
-} __packed;
-
-struct hostif_bss_scan_request {
- struct hostif_hdr header;
- u8 scan_type;
-#define ACTIVE_SCAN 0
-#define PASSIVE_SCAN 1
- u8 pad[3];
- __le32 ch_time_min;
- __le32 ch_time_max;
- struct channel_list channel_list;
- struct ssid ssid;
-} __packed;
-
-struct hostif_phy_information_request {
- struct hostif_hdr header;
- __le16 type;
-#define NORMAL_TYPE 0
-#define TIME_TYPE 1
- __le16 time; /* unit 100ms */
-} __packed;
-
-enum sleep_mode_type {
- SLP_ACTIVE,
- SLP_SLEEP
-};
-
-struct hostif_sleep_request {
- struct hostif_hdr header;
-} __packed;
-
-struct hostif_mic_failure_request {
- struct hostif_hdr header;
- __le16 failure_count;
- __le16 timer;
-} __packed;
-
-#define BASIC_RATE 0x80
-#define RATE_MASK 0x7F
-
-#define TX_RATE_AUTO 0xff
-#define TX_RATE_1M_FIXED 0
-#define TX_RATE_2M_FIXED 1
-#define TX_RATE_1_2M_AUTO 2
-#define TX_RATE_5M_FIXED 3
-#define TX_RATE_11M_FIXED 4
-
-#define TX_RATE_FULL_AUTO 0
-#define TX_RATE_11_AUTO 1
-#define TX_RATE_11B_AUTO 2
-#define TX_RATE_11BG_AUTO 3
-#define TX_RATE_MANUAL_AUTO 4
-#define TX_RATE_FIXED 5
-
-/* 11b rate */
-#define TX_RATE_1M ((u8)(10 / 5)) /* 11b 11g basic rate */
-#define TX_RATE_2M ((u8)(20 / 5)) /* 11b 11g basic rate */
-#define TX_RATE_5M ((u8)(55 / 5)) /* 11g basic rate */
-#define TX_RATE_11M ((u8)(110 / 5)) /* 11g basic rate */
-
-/* 11g rate */
-#define TX_RATE_6M ((u8)(60 / 5)) /* 11g basic rate */
-#define TX_RATE_12M ((u8)(120 / 5)) /* 11g basic rate */
-#define TX_RATE_24M ((u8)(240 / 5)) /* 11g basic rate */
-#define TX_RATE_9M ((u8)(90 / 5))
-#define TX_RATE_18M ((u8)(180 / 5))
-#define TX_RATE_36M ((u8)(360 / 5))
-#define TX_RATE_48M ((u8)(480 / 5))
-#define TX_RATE_54M ((u8)(540 / 5))
-
-static inline bool is_11b_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_1M) ||
- ((rate & RATE_MASK) == TX_RATE_2M) ||
- ((rate & RATE_MASK) == TX_RATE_5M) ||
- ((rate & RATE_MASK) == TX_RATE_11M));
-}
-
-static inline bool is_ofdm_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_6M) ||
- ((rate & RATE_MASK) == TX_RATE_12M) ||
- ((rate & RATE_MASK) == TX_RATE_24M) ||
- ((rate & RATE_MASK) == TX_RATE_9M) ||
- ((rate & RATE_MASK) == TX_RATE_18M) ||
- ((rate & RATE_MASK) == TX_RATE_36M) ||
- ((rate & RATE_MASK) == TX_RATE_48M) ||
- ((rate & RATE_MASK) == TX_RATE_54M));
-}
-
-static inline bool is_11bg_rate(u8 rate)
-{
- return (is_11b_rate(rate) || is_ofdm_rate(rate));
-}
-
-static inline bool is_ofdm_ext_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_9M) ||
- ((rate & RATE_MASK) == TX_RATE_18M) ||
- ((rate & RATE_MASK) == TX_RATE_36M) ||
- ((rate & RATE_MASK) == TX_RATE_48M) ||
- ((rate & RATE_MASK) == TX_RATE_54M));
-}
-
-enum connect_status_type {
- CONNECT_STATUS,
- DISCONNECT_STATUS
-};
-
-enum preamble_type {
- LONG_PREAMBLE,
- SHORT_PREAMBLE
-};
-
-enum multicast_filter_type {
- MCAST_FILTER_MCAST,
- MCAST_FILTER_MCASTALL,
- MCAST_FILTER_PROMISC,
-};
-
-#define NIC_MAX_MCAST_LIST 32
-
-#define HIF_EVENT_MASK 0xE800
-
-static inline bool is_hif_ind(unsigned short event)
-{
- return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
- (((event & ~HIF_EVENT_MASK) == 0x0001) ||
- ((event & ~HIF_EVENT_MASK) == 0x0006) ||
- ((event & ~HIF_EVENT_MASK) == 0x000C) ||
- ((event & ~HIF_EVENT_MASK) == 0x0011) ||
- ((event & ~HIF_EVENT_MASK) == 0x0012)));
-}
-
-static inline bool is_hif_conf(unsigned short event)
-{
- return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
- ((event & ~HIF_EVENT_MASK) > 0x0000) &&
- ((event & ~HIF_EVENT_MASK) < 0x0012) &&
- !is_hif_ind(event));
-}
-
-#ifdef __KERNEL__
-
-#include "ks_wlan.h"
-
-/* function prototype */
-int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb);
-void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size);
-void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event);
-int hostif_init(struct ks_wlan_private *priv);
-void hostif_exit(struct ks_wlan_private *priv);
-int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb);
-void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb);
-
-void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
-int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
-
-#define KS7010_SIZE_ALIGNMENT 32
-
-static inline size_t hif_align_size(size_t size)
-{
- return ALIGN(size, KS7010_SIZE_ALIGNMENT);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _KS_HOSTIF_H_ */
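
A small, self-contained sketch (not part of the original header) of the two sizing conventions used throughout the removed code, assuming the kernel types and the definitions above: buffers handed to the hardware are rounded up to the 32-byte alignment by hif_align_size(), while the on-wire header.size field excludes the size field itself, as set up in hostif_generic_request().

/* Illustrative only, assuming the removed ks_hostif.h definitions. */
static void example_sizing(void)
{
	struct hostif_stop_request req;	/* header only: two __le16 fields, 4 bytes */

	/* Allocation length passed to the hardware: ALIGN(4, 32) == 32 bytes. */
	size_t alloc_len = hif_align_size(sizeof(req));

	/* On-wire length: excludes the 2-byte size field itself, i.e. 2. */
	u16 wire_len = sizeof(req) - sizeof(req.header.size);

	(void)alloc_len;
	(void)wire_len;
}
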
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
deleted file mode 100644
index 3e9a91b5131c..000000000000
--- a/drivers/staging/ks7010/ks_wlan.h
+++ /dev/null
@@ -1,567 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
- *
- * Copyright (C) 2006-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_WLAN_H
-#define _KS_WLAN_H
-
-#include <linux/atomic.h>
-#include <linux/circ_buf.h>
-#include <linux/completion.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/wireless.h>
-
-struct ks_wlan_parameter {
- u8 operation_mode;
- u8 channel;
- u8 tx_rate;
- struct {
- u8 size;
- u8 body[16];
- } rate_set;
- u8 bssid[ETH_ALEN];
- struct {
- u8 size;
- u8 body[32 + 1];
- } ssid;
- u8 preamble;
- u8 power_mgmt;
- u32 scan_type;
-#define BEACON_LOST_COUNT_MAX 65535
- u32 beacon_lost_count;
- u32 rts;
- u32 fragment;
- u32 privacy_invoked;
- u32 wep_index;
- struct {
- u8 size;
- u8 val[13 * 2 + 1];
- } wep_key[4];
- u16 authenticate_type;
- u16 phy_type;
- u16 cts_mode;
- u16 phy_info_timer;
-};
-
-enum {
- DEVICE_STATE_OFF = 0, /* this means hw_unavailable is != 0 */
- DEVICE_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */
- DEVICE_STATE_BOOT, /* boot state (fw upload, run fw) */
- DEVICE_STATE_PREINIT, /* pre-init state */
- DEVICE_STATE_INIT, /* init state (restore MIB backup to device) */
- DEVICE_STATE_READY, /* driver&device are in operational state */
- DEVICE_STATE_SLEEP /* device in sleep mode */
-};
-
-/* SME flag */
-#define SME_MODE_SET BIT(0)
-#define SME_RTS BIT(1)
-#define SME_FRAG BIT(2)
-#define SME_WEP_FLAG BIT(3)
-#define SME_WEP_INDEX BIT(4)
-#define SME_WEP_VAL1 BIT(5)
-#define SME_WEP_VAL2 BIT(6)
-#define SME_WEP_VAL3 BIT(7)
-#define SME_WEP_VAL4 BIT(8)
-#define SME_WEP_VAL_MASK GENMASK(8, 5)
-#define SME_RSN BIT(9)
-#define SME_RSN_MULTICAST BIT(10)
-#define SME_RSN_UNICAST BIT(11)
-#define SME_RSN_AUTH BIT(12)
-
-#define SME_AP_SCAN BIT(13)
-#define SME_MULTICAST BIT(14)
-
-/* SME Event */
-enum {
- SME_START,
-
- SME_MULTICAST_REQUEST,
- SME_MACADDRESS_SET_REQUEST,
- SME_BSS_SCAN_REQUEST,
- SME_SET_FLAG,
- SME_SET_TXKEY,
- SME_SET_KEY1,
- SME_SET_KEY2,
- SME_SET_KEY3,
- SME_SET_KEY4,
- SME_SET_PMK_TSC,
- SME_SET_GMK1_TSC,
- SME_SET_GMK2_TSC,
- SME_SET_GMK3_TSC,
- SME_SET_PMKSA,
- SME_POW_MNGMT_REQUEST,
- SME_PHY_INFO_REQUEST,
- SME_MIC_FAILURE_REQUEST,
- SME_GET_MAC_ADDRESS,
- SME_GET_PRODUCT_VERSION,
- SME_STOP_REQUEST,
- SME_RTS_THRESHOLD_REQUEST,
- SME_FRAGMENTATION_THRESHOLD_REQUEST,
- SME_WEP_INDEX_REQUEST,
- SME_WEP_KEY1_REQUEST,
- SME_WEP_KEY2_REQUEST,
- SME_WEP_KEY3_REQUEST,
- SME_WEP_KEY4_REQUEST,
- SME_WEP_FLAG_REQUEST,
- SME_RSN_UCAST_REQUEST,
- SME_RSN_MCAST_REQUEST,
- SME_RSN_AUTH_REQUEST,
- SME_RSN_ENABLED_REQUEST,
- SME_RSN_MODE_REQUEST,
- SME_WPS_ENABLE_REQUEST,
- SME_WPS_PROBE_REQUEST,
- SME_SET_GAIN,
- SME_GET_GAIN,
- SME_SLEEP_REQUEST,
- SME_SET_REGION,
- SME_MODE_SET_REQUEST,
- SME_START_REQUEST,
- SME_GET_EEPROM_CKSUM,
-
- SME_MIC_FAILURE_CONFIRM,
- SME_START_CONFIRM,
-
- SME_MULTICAST_CONFIRM,
- SME_BSS_SCAN_CONFIRM,
- SME_GET_CURRENT_AP,
- SME_POW_MNGMT_CONFIRM,
- SME_PHY_INFO_CONFIRM,
- SME_STOP_CONFIRM,
- SME_RTS_THRESHOLD_CONFIRM,
- SME_FRAGMENTATION_THRESHOLD_CONFIRM,
- SME_WEP_INDEX_CONFIRM,
- SME_WEP_KEY1_CONFIRM,
- SME_WEP_KEY2_CONFIRM,
- SME_WEP_KEY3_CONFIRM,
- SME_WEP_KEY4_CONFIRM,
- SME_WEP_FLAG_CONFIRM,
- SME_RSN_UCAST_CONFIRM,
- SME_RSN_MCAST_CONFIRM,
- SME_RSN_AUTH_CONFIRM,
- SME_RSN_ENABLED_CONFIRM,
- SME_RSN_MODE_CONFIRM,
- SME_MODE_SET_CONFIRM,
- SME_SLEEP_CONFIRM,
-
- SME_RSN_SET_CONFIRM,
- SME_WEP_SET_CONFIRM,
- SME_TERMINATE,
-
- SME_EVENT_SIZE
-};
-
-/* SME Status */
-enum {
- SME_IDLE,
- SME_SETUP,
- SME_DISCONNECT,
- SME_CONNECT
-};
-
-#define SME_EVENT_BUFF_SIZE 128
-
-struct sme_info {
- int sme_status;
- int event_buff[SME_EVENT_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t sme_spin;
- unsigned long sme_flag;
-};
-
-struct hostt {
- int buff[SME_EVENT_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
-};
-
-#define RSN_IE_BODY_MAX 64
-struct rsn_ie {
- u8 id; /* 0xdd = WPA or 0x30 = RSN */
- u8 size; /* max ? 255 ? */
- u8 body[RSN_IE_BODY_MAX];
-} __packed;
-
-#define WPA_INFO_ELEM_ID 0xdd
-#define RSN_INFO_ELEM_ID 0x30
-
-#define WPS_IE_BODY_MAX 255
-struct wps_ie {
- u8 id; /* 221 'dd <len> 00 50 F2 04' */
- u8 size; /* max ? 255 ? */
- u8 body[WPS_IE_BODY_MAX];
-} __packed;
-
-struct local_ap {
- u8 bssid[6];
- u8 rssi;
- u8 sq;
- struct {
- u8 size;
- u8 body[32];
- u8 ssid_pad;
- } ssid;
- struct {
- u8 size;
- u8 body[16];
- u8 rate_pad;
- } rate_set;
- u16 capability;
- u8 channel;
- u8 noise;
- struct rsn_ie wpa_ie;
- struct rsn_ie rsn_ie;
- struct wps_ie wps_ie;
-};
-
-#define LOCAL_APLIST_MAX 31
-#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
-struct local_aplist {
- int size;
- struct local_ap ap[LOCAL_APLIST_MAX + 1];
-};
-
-struct local_gain {
- u8 tx_mode;
- u8 rx_mode;
- u8 tx_gain;
- u8 rx_gain;
-};
-
-struct local_eeprom_sum {
- u8 type;
- u8 result;
-};
-
-enum {
- EEPROM_OK,
- EEPROM_CHECKSUM_NONE,
- EEPROM_FW_NOT_SUPPORT,
- EEPROM_NG,
-};
-
-/* Power Save Status */
-enum {
- PS_NONE,
- PS_ACTIVE_SET,
- PS_SAVE_SET,
- PS_CONF_WAIT,
- PS_SNOOZE,
- PS_WAKEUP
-};
-
-struct power_save_status {
- atomic_t status; /* initial value 0 */
- struct completion wakeup_wait;
- atomic_t confirm_wait;
- atomic_t snooze_guard;
-};
-
-struct sleep_status {
- atomic_t status; /* initial value 0 */
- atomic_t doze_request;
- atomic_t wakeup_request;
-};
-
-/* WPA */
-struct scan_ext {
- unsigned int flag;
- char ssid[IW_ESSID_MAX_SIZE + 1];
-};
-
-#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00"
-#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01"
-#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02"
-#define CIPHER_ID_WPA_CCMP "\x00\x50\xf2\x04"
-#define CIPHER_ID_WPA_WEP104 "\x00\x50\xf2\x05"
-
-#define CIPHER_ID_WPA2_NONE "\x00\x0f\xac\x00"
-#define CIPHER_ID_WPA2_WEP40 "\x00\x0f\xac\x01"
-#define CIPHER_ID_WPA2_TKIP "\x00\x0f\xac\x02"
-#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04"
-#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05"
-
-#define CIPHER_ID_LEN 4
-
-enum {
- KEY_MGMT_802_1X,
- KEY_MGMT_PSK,
- KEY_MGMT_WPANONE,
-};
-
-#define KEY_MGMT_ID_WPA_NONE "\x00\x50\xf2\x00"
-#define KEY_MGMT_ID_WPA_1X "\x00\x50\xf2\x01"
-#define KEY_MGMT_ID_WPA_PSK "\x00\x50\xf2\x02"
-#define KEY_MGMT_ID_WPA_WPANONE "\x00\x50\xf2\xff"
-
-#define KEY_MGMT_ID_WPA2_NONE "\x00\x0f\xac\x00"
-#define KEY_MGMT_ID_WPA2_1X "\x00\x0f\xac\x01"
-#define KEY_MGMT_ID_WPA2_PSK "\x00\x0f\xac\x02"
-#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff"
-
-#define KEY_MGMT_ID_LEN 4
-
-#define MIC_KEY_SIZE 8
-
-struct wpa_key {
- u32 ext_flags; /* IW_ENCODE_EXT_xxx */
- u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
- * (group) keys or unicast address for
- * individual keys
- */
- u16 alg;
- u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
- u8 key_val[IW_ENCODING_TOKEN_MAX];
- u8 tx_mic_key[MIC_KEY_SIZE];
- u8 rx_mic_key[MIC_KEY_SIZE];
-};
-
-#define WPA_KEY_INDEX_MAX 4
-#define WPA_RX_SEQ_LEN 6
-
-struct mic_failure {
- u16 failure; /* MIC Failure counter 0 or 1 or 2 */
- u16 counter; /* 1sec counter 0-60 */
- u32 last_failure_time;
- int stop;
-};
-
-struct wpa_status {
- int wpa_enabled;
- bool rsn_enabled;
- int version;
- int pairwise_suite; /* unicast cipher */
- int group_suite; /* multicast cipher */
- int key_mgmt_suite;
- int auth_alg;
- int txkey;
- struct wpa_key key[WPA_KEY_INDEX_MAX];
- struct scan_ext scan_ext;
- struct mic_failure mic_failure;
-};
-
-#include <linux/list.h>
-#define PMK_LIST_MAX 8
-struct pmk_list {
- u16 size;
- struct list_head head;
- struct pmk {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 pmkid[IW_PMKID_LEN];
- } pmk[PMK_LIST_MAX];
-};
-
-struct wps_status {
- int wps_enabled;
- int ielen;
- u8 ie[255];
-};
-
-/* Tx Device struct */
-#define TX_DEVICE_BUFF_SIZE 1024
-
-struct ks_wlan_private;
-
-/**
- * struct tx_device_buffer - Queue item for the tx queue.
- * @sendp: Pointer to the send request data.
- * @size: Size of @sendp data.
- * @complete_handler: Function called once data write to device is complete.
- * @skb: Socket buffer passed to @complete_handler once the write completes.
- */
-struct tx_device_buffer {
- unsigned char *sendp;
- unsigned int size;
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb);
- struct sk_buff *skb;
-};
-
-/**
- * struct tx_device - Tx buffer queue.
- * @tx_dev_buff: Queue buffer.
- * @qhead: Head of tx queue.
- * @qtail: Tail of tx queue.
- * @tx_dev_lock: Queue lock.
- */
-struct tx_device {
- struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t tx_dev_lock; /* protect access to the queue */
-};
-
-/* Rx Device struct */
-#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
-#define RX_DEVICE_BUFF_SIZE 32
-
-/**
- * struct rx_device_buffer - Queue item for the rx queue.
- * @data: rx data.
- * @size: Size of @data.
- */
-struct rx_device_buffer {
- unsigned char data[RX_DATA_SIZE];
- unsigned int size;
-};
-
-/**
- * struct rx_device - Rx buffer queue.
- * @rx_dev_buff: Queue buffer.
- * @qhead: Head of rx queue.
- * @qtail: Tail of rx queue.
- * @rx_dev_lock: Queue lock.
- */
-struct rx_device {
- struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t rx_dev_lock; /* protect access to the queue */
-};
-
-struct ks_wlan_private {
- /* hardware information */
- void *if_hw;
- struct workqueue_struct *wq;
- struct delayed_work rw_dwork;
- struct tasklet_struct rx_bh_task;
-
- struct net_device *net_dev;
- struct net_device_stats nstats;
- struct iw_statistics wstats;
-
- struct completion confirm_wait;
-
- /* trx device & sme */
- struct tx_device tx_dev;
- struct rx_device rx_dev;
- struct sme_info sme_i;
- u8 *rxp;
- unsigned int rx_size;
- struct work_struct sme_work;
- struct work_struct wakeup_work;
- int scan_ind_count;
-
- unsigned char eth_addr[ETH_ALEN];
-
- struct local_aplist aplist;
- struct local_ap current_ap;
- struct power_save_status psstatus;
- struct sleep_status sleepstatus;
- struct wpa_status wpa;
- struct pmk_list pmklist;
- /* wireless parameter */
- struct ks_wlan_parameter reg;
- u8 current_rate;
-
- char nick[IW_ESSID_MAX_SIZE + 1];
-
- spinlock_t multicast_spin;
-
- spinlock_t dev_read_lock;
- wait_queue_head_t devread_wait;
-
- unsigned int need_commit; /* for ioctl */
-
- /* DeviceIoControl */
- bool is_device_open;
- atomic_t event_count;
- atomic_t rec_count;
- int dev_count;
-#define DEVICE_STOCK_COUNT 20
- unsigned char *dev_data[DEVICE_STOCK_COUNT];
- int dev_size[DEVICE_STOCK_COUNT];
-
- /* ioctl : IOCTL_FIRMWARE_VERSION */
- unsigned char firmware_version[128 + 1];
- int version_size;
-
- bool mac_address_valid;
-
- int dev_state;
-
- struct sk_buff *skb;
- unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
-#define FORCE_DISCONNECT 0x80000000
-#define CONNECT_STATUS_MASK 0x7FFFFFFF
- u32 connect_status;
- int infra_status;
- u8 scan_ssid_len;
- u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
- struct local_gain gain;
- struct wps_status wps;
- u8 sleep_mode;
-
- u8 region;
- struct local_eeprom_sum eeprom_sum;
- u8 eeprom_checksum;
-
- struct hostt hostt;
-
- unsigned long last_doze;
- unsigned long last_wakeup;
-
- unsigned int wakeup_count; /* for detecting wakeup loops */
-};
-
-static inline void inc_txqhead(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE;
-}
-
-static inline void inc_txqtail(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE;
-}
-
-static inline bool txq_has_space(struct ks_wlan_private *priv)
-{
- return (CIRC_SPACE(priv->tx_dev.qhead, priv->tx_dev.qtail,
- TX_DEVICE_BUFF_SIZE) > 0);
-}
-
-static inline void inc_rxqhead(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE;
-}
-
-static inline void inc_rxqtail(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE;
-}
-
-static inline bool rxq_has_space(struct ks_wlan_private *priv)
-{
- return (CIRC_SPACE(priv->rx_dev.qhead, priv->rx_dev.qtail,
- RX_DEVICE_BUFF_SIZE) > 0);
-}
-
-static inline unsigned int txq_count(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->tx_dev.qhead, priv->tx_dev.qtail,
- TX_DEVICE_BUFF_SIZE);
-}
-
-static inline unsigned int rxq_count(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->rx_dev.qhead, priv->rx_dev.qtail,
- RX_DEVICE_BUFF_SIZE);
-}
-
-int ks_wlan_net_start(struct net_device *dev);
-int ks_wlan_net_stop(struct net_device *dev);
-bool is_connect_status(u32 status);
-bool is_disconnect_status(u32 status);
-
-#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
deleted file mode 100644
index 97c7d95de411..000000000000
--- a/drivers/staging/ks7010/ks_wlan_ioctl.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream 11b/g wireless LAN
- *
- * Copyright (c) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_WLAN_IOCTL_H
-#define _KS_WLAN_IOCTL_H
-
-#include <linux/wireless.h>
-/* The low order bit identifies a SET (0) or a GET (1) ioctl. */
-
-/* (SIOCIWFIRSTPRIV + 0) */
-/* former KS_WLAN_GET_DRIVER_VERSION (SIOCIWFIRSTPRIV + 1) */
-/* (SIOCIWFIRSTPRIV + 2) */
-#define KS_WLAN_GET_FIRM_VERSION (SIOCIWFIRSTPRIV + 3)
-#define KS_WLAN_SET_WPS_ENABLE (SIOCIWFIRSTPRIV + 4)
-#define KS_WLAN_GET_WPS_ENABLE (SIOCIWFIRSTPRIV + 5)
-#define KS_WLAN_SET_WPS_PROBE_REQ (SIOCIWFIRSTPRIV + 6)
-#define KS_WLAN_GET_EEPROM_CKSUM (SIOCIWFIRSTPRIV + 7)
-#define KS_WLAN_SET_PREAMBLE (SIOCIWFIRSTPRIV + 8)
-#define KS_WLAN_GET_PREAMBLE (SIOCIWFIRSTPRIV + 9)
-#define KS_WLAN_SET_POWER_SAVE (SIOCIWFIRSTPRIV + 10)
-#define KS_WLAN_GET_POWER_SAVE (SIOCIWFIRSTPRIV + 11)
-#define KS_WLAN_SET_SCAN_TYPE (SIOCIWFIRSTPRIV + 12)
-#define KS_WLAN_GET_SCAN_TYPE (SIOCIWFIRSTPRIV + 13)
-#define KS_WLAN_SET_RX_GAIN (SIOCIWFIRSTPRIV + 14)
-#define KS_WLAN_GET_RX_GAIN (SIOCIWFIRSTPRIV + 15)
-#define KS_WLAN_HOSTT (SIOCIWFIRSTPRIV + 16) /* unused */
-//#define KS_WLAN_SET_REGION (SIOCIWFIRSTPRIV + 17)
-#define KS_WLAN_SET_BEACON_LOST (SIOCIWFIRSTPRIV + 18)
-#define KS_WLAN_GET_BEACON_LOST (SIOCIWFIRSTPRIV + 19)
-
-#define KS_WLAN_SET_TX_GAIN (SIOCIWFIRSTPRIV + 20)
-#define KS_WLAN_GET_TX_GAIN (SIOCIWFIRSTPRIV + 21)
-
-/* for KS7010 */
-#define KS_WLAN_SET_PHY_TYPE (SIOCIWFIRSTPRIV + 22)
-#define KS_WLAN_GET_PHY_TYPE (SIOCIWFIRSTPRIV + 23)
-#define KS_WLAN_SET_CTS_MODE (SIOCIWFIRSTPRIV + 24)
-#define KS_WLAN_GET_CTS_MODE (SIOCIWFIRSTPRIV + 25)
-/* (SIOCIWFIRSTPRIV + 26) */
-/* (SIOCIWFIRSTPRIV + 27) */
-#define KS_WLAN_SET_SLEEP_MODE (SIOCIWFIRSTPRIV + 28) /* sleep mode */
-#define KS_WLAN_GET_SLEEP_MODE (SIOCIWFIRSTPRIV + 29) /* sleep mode */
-/* (SIOCIWFIRSTPRIV + 30) */
-/* (SIOCIWFIRSTPRIV + 31) */
-
-#ifdef __KERNEL__
-
-#include "ks_wlan.h"
-#include <linux/netdevice.h>
-
-int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag);
-
-#endif /* __KERNEL__ */
-
-#endif /* _KS_WLAN_IOCTL_H */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
deleted file mode 100644
index 0fb97a79ad0b..000000000000
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ /dev/null
@@ -1,2676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream 11b/g wireless LAN
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#include <linux/atomic.h>
-#include <linux/completion.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/uaccess.h>
-
-static int wep_on_off;
-#define WEP_OFF 0
-#define WEP_ON_64BIT 1
-#define WEP_ON_128BIT 2
-
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-#include "ks_wlan_ioctl.h"
-
-/* Include Wireless Extension definition and check version */
-#include <linux/wireless.h>
-#define WIRELESS_SPY /* enable iwspy support */
-#include <net/iw_handler.h> /* New driver API */
-
-/* Frequency list (map channels to frequencies) */
-static const long frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-
-/* A few details needed for WEP (Wired Equivalent Privacy) */
-#define MAX_KEY_SIZE 13 /* 104 bits (128-bit WEP incl. 24-bit IV) */
-#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
-struct wep_key {
- u16 len;
- u8 key[16]; /* 40-bit and 104-bit keys */
-};
-
-/*
- * function prototypes
- */
-static int ks_wlan_open(struct net_device *dev);
-static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int ks_wlan_close(struct net_device *dev);
-static void ks_wlan_set_rx_mode(struct net_device *dev);
-static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
-static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
-static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd);
-
-static atomic_t update_phyinfo;
-static struct timer_list update_phyinfo_timer;
-static
-int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
-{
- struct iw_statistics *wstats = &priv->wstats;
-
- netdev_dbg(priv->net_dev, "in_interrupt = %ld\n", in_interrupt());
-
- if (priv->dev_state < DEVICE_STATE_READY)
- return -EBUSY; /* initialization not finished */
-
- if (atomic_read(&update_phyinfo))
- return -EPERM;
-
- /* The status */
- wstats->status = priv->reg.operation_mode; /* Operation mode */
-
- /* Signal quality and co. But where is the noise level ??? */
- hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
-
- /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, HZ / 2)) {
- netdev_dbg(priv->net_dev, "wait time out!!\n");
- }
-
- atomic_inc(&update_phyinfo);
- update_phyinfo_timer.expires = jiffies + HZ; /* 1sec */
- add_timer(&update_phyinfo_timer);
-
- return 0;
-}
-
-static
-void ks_wlan_update_phyinfo_timeout(struct timer_list *unused)
-{
- pr_debug("in_interrupt = %ld\n", in_interrupt());
- atomic_set(&update_phyinfo, 0);
-}
-
-int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag)
-{
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
-
- if (commit_flag & SME_RTS)
- hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
- if (commit_flag & SME_FRAG)
- hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
-
- if (commit_flag & SME_WEP_INDEX)
- hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
- if (commit_flag & SME_WEP_VAL1)
- hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
- if (commit_flag & SME_WEP_VAL2)
- hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
- if (commit_flag & SME_WEP_VAL3)
- hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
- if (commit_flag & SME_WEP_VAL4)
- hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
- if (commit_flag & SME_WEP_FLAG)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
-
- if (commit_flag & SME_RSN) {
- hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
- hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
- }
- if (commit_flag & SME_RSN_MULTICAST)
- hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
- if (commit_flag & SME_RSN_UNICAST)
- hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
- if (commit_flag & SME_RSN_AUTH)
- hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
-
- hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
-
- hostif_sme_enqueue(priv, SME_START_REQUEST);
-
- return 0;
-}
-
-/*
- * Initial Wireless Extension code for Ks_Wlannet driver by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
- * Conversion to new driver API by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
- * Javier also did a good amount of work here, adding some new extensions
- * and fixing my code. Let's just say that without him this code just
- * would not work at all... - Jean II
- */
-
-static int ks_wlan_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *cwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->dev_state < DEVICE_STATE_READY)
- strscpy(cwrq->name, "NOT READY!", sizeof(cwrq->name));
- else if (priv->reg.phy_type == D_11B_ONLY_MODE)
- strscpy(cwrq->name, "IEEE 802.11b", sizeof(cwrq->name));
- else if (priv->reg.phy_type == D_11G_ONLY_MODE)
- strscpy(cwrq->name, "IEEE 802.11g", sizeof(cwrq->name));
- else
- strscpy(cwrq->name, "IEEE 802.11b/g", sizeof(cwrq->name));
-
- return 0;
-}
-
-static int ks_wlan_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *fwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int channel;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* If setting by frequency, convert to a channel */
- if ((fwrq->freq.e == 1) &&
- (fwrq->freq.m >= 241200000) && (fwrq->freq.m <= 248700000)) {
- int f = fwrq->freq.m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != frequency_list[c]))
- c++;
- /* Hack to fall through... */
- fwrq->freq.e = 0;
- fwrq->freq.m = c + 1;
- }
- /* Setting by channel number */
- if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0))
- return -EOPNOTSUPP;
-
- channel = fwrq->freq.m;
- /* We should do a better check than that,
- * based on the card capability !!!
- */
- if ((channel < 1) || (channel > 14)) {
- netdev_dbg(dev, "%s: New channel value of %d is invalid!\n",
- dev->name, fwrq->freq.m);
- return -EINVAL;
- }
-
- /* Yes ! We can set it !!! */
- priv->reg.channel = (u8)(channel);
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *fwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int f;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (is_connect_status(priv->connect_status))
- f = (int)priv->current_ap.channel;
- else
- f = (int)priv->reg.channel;
-
- fwrq->freq.m = frequency_list[f - 1] * 100000;
- fwrq->freq.e = 1;
-
- return 0;
-}
-
-static int ks_wlan_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- size_t len;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Check if we asked for `any' */
- if (!dwrq->essid.flags) {
- /* Just send an empty SSID list */
- memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
- priv->reg.ssid.size = 0;
- } else {
- len = dwrq->essid.length;
- /* iwconfig uses nul termination in SSID.. */
- if (len > 0 && extra[len - 1] == '\0')
- len--;
-
- /* Check the size of the string */
- if (len > IW_ESSID_MAX_SIZE)
- return -EINVAL;
-
- /* Set the SSID */
- memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
- memcpy(priv->reg.ssid.body, extra, len);
- priv->reg.ssid.size = len;
- }
- /* Write it to the card */
- priv->need_commit |= SME_MODE_SET;
-
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- return 0;
-}
-
-static int ks_wlan_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Note : if dwrq->flags != 0, we should
- * get the relevant SSID from the SSID list...
- */
- if (priv->reg.ssid.size != 0) {
- /* Get the current SSID */
- memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
-
- /* If none, we may want to get the one that was set */
-
- /* Push it out ! */
- dwrq->essid.length = priv->reg.ssid.size;
- dwrq->essid.flags = 1; /* active */
- } else {
- dwrq->essid.length = 0;
- dwrq->essid.flags = 0; /* ANY */
- }
-
- return 0;
-}
-
-static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *awrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->reg.operation_mode != MODE_ADHOC &&
- priv->reg.operation_mode != MODE_INFRASTRUCTURE) {
- eth_zero_addr(priv->reg.bssid);
- return -EOPNOTSUPP;
- }
-
- ether_addr_copy(priv->reg.bssid, awrq->ap_addr.sa_data);
- if (is_valid_ether_addr((u8 *)priv->reg.bssid))
- priv->need_commit |= SME_MODE_SET;
-
- netdev_dbg(dev, "bssid = %pM\n", priv->reg.bssid);
-
- /* Write it to the card */
- if (priv->need_commit) {
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
- }
- return 0;
-}
-
-static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *awrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (is_connect_status(priv->connect_status))
- ether_addr_copy(awrq->ap_addr.sa_data, priv->current_ap.bssid);
- else
- eth_zero_addr(awrq->ap_addr.sa_data);
-
- awrq->ap_addr.sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-static int ks_wlan_set_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Check the size of the string */
- if (dwrq->data.length > 16 + 1)
- return -E2BIG;
-
- memset(priv->nick, 0, sizeof(priv->nick));
- memcpy(priv->nick, extra, dwrq->data.length);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- strscpy(extra, priv->nick, 17);
- dwrq->data.length = strlen(extra) + 1;
-
- return 0;
-}
-
-static int ks_wlan_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int i = 0;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- if (vwrq->bitrate.fixed == 1) {
- switch (vwrq->bitrate.value) {
- case 11000000:
- case 5500000:
- priv->reg.rate_set.body[0] =
- (u8)(vwrq->bitrate.value / 500000);
- break;
- case 2000000:
- case 1000000:
- priv->reg.rate_set.body[0] =
- ((u8)(vwrq->bitrate.value / 500000)) |
- BASIC_RATE;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_FIXED;
- priv->reg.rate_set.size = 1;
- } else { /* vwrq->bitrate.fixed == 0 */
- if (vwrq->bitrate.value > 0) {
- switch (vwrq->bitrate.value) {
- case 11000000:
- priv->reg.rate_set.body[3] =
- TX_RATE_11M;
- i++;
- fallthrough;
- case 5500000:
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- i++;
- fallthrough;
- case 2000000:
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- i++;
- fallthrough;
- case 1000000:
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- i++;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
- priv->reg.rate_set.size = i;
- } else {
- priv->reg.rate_set.body[3] = TX_RATE_11M;
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 4;
- }
- }
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- if (vwrq->bitrate.fixed == 1) {
- switch (vwrq->bitrate.value) {
- case 54000000:
- case 48000000:
- case 36000000:
- case 18000000:
- case 9000000:
- priv->reg.rate_set.body[0] =
- (u8)(vwrq->bitrate.value / 500000);
- break;
- case 24000000:
- case 12000000:
- case 11000000:
- case 6000000:
- case 5500000:
- case 2000000:
- case 1000000:
- priv->reg.rate_set.body[0] =
- ((u8)(vwrq->bitrate.value / 500000)) |
- BASIC_RATE;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_FIXED;
- priv->reg.rate_set.size = 1;
- } else { /* vwrq->bitrate.fixed == 0 */
- if (vwrq->bitrate.value > 0) {
- switch (vwrq->bitrate.value) {
- case 54000000:
- priv->reg.rate_set.body[11] =
- TX_RATE_54M;
- i++;
- fallthrough;
- case 48000000:
- priv->reg.rate_set.body[10] =
- TX_RATE_48M;
- i++;
- fallthrough;
- case 36000000:
- priv->reg.rate_set.body[9] =
- TX_RATE_36M;
- i++;
- fallthrough;
- case 24000000:
- case 18000000:
- case 12000000:
- case 11000000:
- case 9000000:
- case 6000000:
- if (vwrq->bitrate.value == 24000000) {
- priv->reg.rate_set.body[8] =
- TX_RATE_18M;
- i++;
- priv->reg.rate_set.body[7] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[6] =
- TX_RATE_24M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 18000000) {
- priv->reg.rate_set.body[7] =
- TX_RATE_18M;
- i++;
- priv->reg.rate_set.body[6] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 12000000) {
- priv->reg.rate_set.body[6] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 11000000) {
- priv->reg.rate_set.body[5] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 9000000) {
- priv->reg.rate_set.body[4] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- } else { /* vwrq->bitrate.value == 6000000 */
- priv->reg.rate_set.body[3] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- }
- fallthrough;
- case 5500000:
- priv->reg.rate_set.body[2] =
- TX_RATE_5M | BASIC_RATE;
- i++;
- fallthrough;
- case 2000000:
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- i++;
- fallthrough;
- case 1000000:
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- i++;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
- priv->reg.rate_set.size = i;
- } else {
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] =
- TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] =
- TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 12;
- }
- }
- }
-
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- netdev_dbg(dev, "in_interrupt = %ld update_phyinfo = %d\n",
- in_interrupt(), atomic_read(&update_phyinfo));
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!atomic_read(&update_phyinfo))
- ks_wlan_update_phy_information(priv);
-
- vwrq->bitrate.value = ((priv->current_rate) & RATE_MASK) * 500000;
- vwrq->bitrate.fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 1 : 0;
-
- return 0;
-}
-
-static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int rthr = vwrq->rts.value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (vwrq->rts.disabled)
- rthr = 2347;
- if ((rthr < 0) || (rthr > 2347))
- return -EINVAL;
-
- priv->reg.rts = rthr;
- priv->need_commit |= SME_RTS;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- vwrq->rts.value = priv->reg.rts;
- vwrq->rts.disabled = (vwrq->rts.value >= 2347);
- vwrq->rts.fixed = 1;
-
- return 0;
-}
-
-static int ks_wlan_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int fthr = vwrq->frag.value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (vwrq->frag.disabled)
- fthr = 2346;
- if ((fthr < 256) || (fthr > 2346))
- return -EINVAL;
-
- fthr &= ~0x1; /* Get an even value - is it really needed ??? */
- priv->reg.fragment = fthr;
- priv->need_commit |= SME_FRAG;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- vwrq->frag.value = priv->reg.fragment;
- vwrq->frag.disabled = (vwrq->frag.value >= 2346);
- vwrq->frag.fixed = 1;
-
- return 0;
-}
-
-static int ks_wlan_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != IW_MODE_ADHOC &&
- uwrq->mode != IW_MODE_INFRA)
- return -EINVAL;
-
- priv->reg.operation_mode = (uwrq->mode == IW_MODE_ADHOC) ?
- MODE_ADHOC : MODE_INFRASTRUCTURE;
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* If not managed, assume it's ad-hoc */
- uwrq->mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
- IW_MODE_INFRA : IW_MODE_ADHOC;
-
- return 0;
-}
-
-static int ks_wlan_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_point *enc = &dwrq->encoding;
- struct wep_key key;
- int index = (enc->flags & IW_ENCODE_INDEX);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (enc->length > MAX_KEY_SIZE)
- return -EINVAL;
-
- /* for SLEEP MODE */
- if ((index < 0) || (index > 4))
- return -EINVAL;
-
- index = (index == 0) ? priv->reg.wep_index : (index - 1);
-
- /* Is WEP supported ? */
- /* Basic checking: do we have a key to set ? */
- if (enc->length > 0) {
- key.len = (enc->length > MIN_KEY_SIZE) ?
- MAX_KEY_SIZE : MIN_KEY_SIZE;
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- wep_on_off = (enc->length > MIN_KEY_SIZE) ?
- WEP_ON_128BIT : WEP_ON_64BIT;
- /* Check if the key is not marked as invalid */
- if (enc->flags & IW_ENCODE_NOKEY)
- return 0;
-
- /* Cleanup */
- memset(key.key, 0, MAX_KEY_SIZE);
- /* Copy the key in the driver */
- if (copy_from_user(key.key, enc->pointer, enc->length)) {
- key.len = 0;
- return -EFAULT;
- }
- /* Send the key to the card */
- priv->reg.wep_key[index].size = key.len;
- memcpy(&priv->reg.wep_key[index].val[0], &key.key[0],
- priv->reg.wep_key[index].size);
- priv->need_commit |= (SME_WEP_VAL1 << index);
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
- } else {
- if (enc->flags & IW_ENCODE_DISABLED) {
- priv->reg.wep_key[0].size = 0;
- priv->reg.wep_key[1].size = 0;
- priv->reg.wep_key[2].size = 0;
- priv->reg.wep_key[3].size = 0;
- priv->reg.privacy_invoked = 0x00;
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- wep_on_off = WEP_OFF;
- priv->need_commit |= SME_WEP_FLAG;
- } else {
- /* set_wep_key(priv, index, 0, 0, 1); xxx */
- if (priv->reg.wep_key[index].size == 0)
- return -EINVAL;
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
- }
- }
-
- /* Commit the changes if needed */
- if (enc->flags & IW_ENCODE_MODE)
- priv->need_commit |= SME_WEP_FLAG;
-
- if (enc->flags & IW_ENCODE_OPEN) {
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- } else if (enc->flags & IW_ENCODE_RESTRICTED) {
- if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
- }
- if (priv->need_commit) {
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- }
- return 0;
-}
-
-static int ks_wlan_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_point *enc = &dwrq->encoding;
- int index = (enc->flags & IW_ENCODE_INDEX) - 1;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- enc->flags = IW_ENCODE_DISABLED;
-
- /* Check encryption mode */
- switch (priv->reg.authenticate_type) {
- case AUTH_TYPE_OPEN_SYSTEM:
- enc->flags = IW_ENCODE_OPEN;
- break;
- case AUTH_TYPE_SHARED_KEY:
- enc->flags = IW_ENCODE_RESTRICTED;
- break;
- }
-
- /* Which key do we want ? -1 -> tx index */
- if ((index < 0) || (index >= 4))
- index = priv->reg.wep_index;
- if (priv->reg.privacy_invoked) {
- enc->flags &= ~IW_ENCODE_DISABLED;
- /* dwrq->flags |= IW_ENCODE_NOKEY; */
- }
- enc->flags |= index + 1;
- /* Copy the key to the user buffer */
- if (index >= 0 && index < 4) {
- enc->length = (priv->reg.wep_key[index].size <= 16) ?
- priv->reg.wep_key[index].size : 0;
- memcpy(extra, priv->reg.wep_key[index].val, enc->length);
- }
-
- return 0;
-}
-
-static int ks_wlan_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_range *range = (struct iw_range *)extra;
- int i, k;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- dwrq->data.length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
- /* Should be based on cap_rid.country to give only
- * what the current card supports
- */
- k = 0;
- for (i = 0; i < 13; i++) { /* channel 1 -- 13 */
- range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = frequency_list[i] * 100000;
- range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
- }
- range->num_frequency = k;
- if (priv->reg.phy_type == D_11B_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */
- range->freq[13].i = 14; /* List index */
- range->freq[13].m = frequency_list[13] * 100000;
- range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
- range->num_frequency = 14;
- }
-
- /* Hum... Should put the right values there */
- range->max_qual.qual = 100;
- range->max_qual.level = 256 - 128; /* 0 dBm? */
- range->max_qual.noise = 256 - 128;
- range->sensitivity = 1;
-
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- range->bitrate[0] = 1e6;
- range->bitrate[1] = 2e6;
- range->bitrate[2] = 5.5e6;
- range->bitrate[3] = 11e6;
- range->num_bitrates = 4;
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- range->bitrate[0] = 1e6;
- range->bitrate[1] = 2e6;
- range->bitrate[2] = 5.5e6;
- range->bitrate[3] = 11e6;
-
- range->bitrate[4] = 6e6;
- range->bitrate[5] = 9e6;
- range->bitrate[6] = 12e6;
- if (IW_MAX_BITRATES < 9) {
- range->bitrate[7] = 54e6;
- range->num_bitrates = 8;
- } else {
- range->bitrate[7] = 18e6;
- range->bitrate[8] = 24e6;
- range->bitrate[9] = 36e6;
- range->bitrate[10] = 48e6;
- range->bitrate[11] = 54e6;
-
- range->num_bitrates = 12;
- }
- }
-
- /* Set an indication of the max TCP throughput
- * in bit/s that we can expect using this interface.
- * May be used for QoS stuff... Jean II
- */
- if (i > 2)
- range->throughput = 5000 * 1000;
- else
- range->throughput = 1500 * 1000;
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- range->encoding_size[0] = 5; /* WEP: RC4 40 bits */
- range->encoding_size[1] = 13; /* WEP: RC4 ~128 bits */
- range->num_encoding_sizes = 2;
- range->max_encoding_tokens = 4;
-
- /* power management not supported */
- range->pmp_flags = IW_POWER_ON;
- range->pmt_flags = IW_POWER_ON;
- range->pm_capa = 0;
-
- /* Transmit Power - values are in dBm( or mW) */
- range->txpower[0] = -256;
- range->num_txpower = 1;
- range->txpower_capa = IW_TXPOW_DBM;
- /* range->txpower_capa = IW_TXPOW_MWATT; */
-
- range->we_version_source = 21;
- range->we_version_compiled = WIRELESS_EXT;
-
- range->retry_capa = IW_RETRY_ON;
- range->retry_flags = IW_RETRY_ON;
- range->r_time_flags = IW_RETRY_ON;
-
- /* Experimental measurements - boundary 11/5.5 Mb/s
- *
- * Note : with or without the (local->rssi), results
- * are somewhat different. - Jean II
- */
- range->avg_qual.qual = 50;
- range->avg_qual.level = 186; /* -70 dBm */
- range->avg_qual.noise = 0;
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
- IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
-
- /* encode extension (WPA) capability */
- range->enc_capa = (IW_ENC_CAPA_WPA |
- IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
- return 0;
-}
-
-static int ks_wlan_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (vwrq->power.disabled) {
- priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
- } else {
- if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
- return -EINVAL;
- priv->reg.power_mgmt = POWER_MGMT_SAVE1;
- }
-
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- vwrq->power.disabled = (priv->reg.power_mgmt <= 0);
-
- return 0;
-}
-
-static int ks_wlan_get_iwstats(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- vwrq->qual.qual = 0; /* not supported */
- vwrq->qual.level = priv->wstats.qual.level;
- vwrq->qual.noise = 0; /* not supported */
- vwrq->qual.updated = 0;
-
- return 0;
-}
-
-/* Note : this is deprecated in favor of IWSCAN */
-static int ks_wlan_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct sockaddr *address = (struct sockaddr *)extra;
- struct iw_quality qual[LOCAL_APLIST_MAX];
- int i;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- for (i = 0; i < priv->aplist.size; i++) {
- ether_addr_copy(address[i].sa_data, priv->aplist.ap[i].bssid);
- address[i].sa_family = ARPHRD_ETHER;
- qual[i].level = 256 - priv->aplist.ap[i].rssi;
- qual[i].qual = priv->aplist.ap[i].sq;
- qual[i].noise = 0; /* invalid noise value */
- qual[i].updated = 7;
- }
- if (i) {
- dwrq->data.flags = 1; /* Should be define'd */
- memcpy(extra + sizeof(struct sockaddr) * i,
- &qual, sizeof(struct iw_quality) * i);
- }
- dwrq->data.length = i;
-
- return 0;
-}
-
-static int ks_wlan_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_scan_req *req = NULL;
- int len;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* specified SSID SCAN */
- if (wrqu->data.length == sizeof(struct iw_scan_req) &&
- wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- req = (struct iw_scan_req *)extra;
- len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
- priv->scan_ssid_len = len;
- memcpy(priv->scan_ssid, req->essid, len);
- } else {
- priv->scan_ssid_len = 0;
- }
-
- priv->sme_i.sme_flag |= SME_AP_SCAN;
- hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
-
- /* At this point, just return to the user. */
-
- return 0;
-}
-
-static char *ks_wlan_add_leader_event(const char *rsn_leader, char *end_buf,
- char *current_ev, struct rsn_ie *rsn,
- struct iw_event *iwe,
- struct iw_request_info *info)
-{
- char buffer[RSN_IE_BODY_MAX * 2 + 30];
- char *pbuf;
- int i;
-
- pbuf = &buffer[0];
- memset(iwe, 0, sizeof(*iwe));
- iwe->cmd = IWEVCUSTOM;
- memcpy(buffer, rsn_leader, sizeof(rsn_leader) - 1);
- iwe->u.data.length += sizeof(rsn_leader) - 1;
- pbuf += sizeof(rsn_leader) - 1;
- pbuf += sprintf(pbuf, "%02x", rsn->id);
- pbuf += sprintf(pbuf, "%02x", rsn->size);
- iwe->u.data.length += 4;
-
- for (i = 0; i < rsn->size; i++)
- pbuf += sprintf(pbuf, "%02x", rsn->body[i]);
-
- iwe->u.data.length += rsn->size * 2;
-
- return iwe_stream_add_point(info, current_ev, end_buf, iwe, &buffer[0]);
-}
-
-/*
- * Translate scan data returned from the card to a card independent
- * format that the Wireless Tools will understand - Jean II
- */
-static inline char *ks_wlan_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev, char *end_buf,
- struct local_ap *ap)
-{
- /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
- static const char rsn_leader[] = "rsn_ie=";
- static const char wpa_leader[] = "wpa_ie=";
- struct iw_event iwe; /* Temporary buffer */
- u16 capabilities;
- char *current_val; /* For rates */
- int i;
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- ether_addr_copy(iwe.u.ap_addr.sa_data, ap->bssid);
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_ADDR_LEN);
-
- /* Other entries will be displayed in the order we give them */
-
- /* Add the ESSID */
- iwe.u.data.length = ap->ssid.size;
- if (iwe.u.data.length > 32)
- iwe.u.data.length = 32;
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe, ap->ssid.body);
-
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- capabilities = ap->capability;
- if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
- iwe.u.mode = (capabilities & WLAN_CAPABILITY_ESS) ?
- IW_MODE_INFRA : IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = ap->channel;
- iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_FREQ_LEN);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.level = 256 - ap->rssi;
- iwe.u.qual.qual = ap->sq;
- iwe.u.qual.noise = 0; /* invalid noise value */
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.flags = (capabilities & WLAN_CAPABILITY_PRIVACY) ?
- (IW_ENCODE_ENABLED | IW_ENCODE_NOKEY) :
- IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, ap->ssid.body);
-
- /*
- * Rate : stuffing multiple values in a single event
- * requires a bit more magic - Jean II
- */
- current_val = current_ev + IW_EV_LCP_LEN;
-
- iwe.cmd = SIOCGIWRATE;
-
- /* These two flags are ignored... */
- iwe.u.bitrate.fixed = 0;
- iwe.u.bitrate.disabled = 0;
-
- /* Max 16 values */
- for (i = 0; i < 16; i++) {
- /* NULL terminated */
- if (i >= ap->rate_set.size)
- break;
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
- /* Add new value to event */
- current_val = iwe_stream_add_value(info, current_ev,
- current_val, end_buf, &iwe,
- IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - current_ev) > IW_EV_LCP_LEN)
- current_ev = current_val;
-
- if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0)
- current_ev = ks_wlan_add_leader_event(rsn_leader, end_buf,
- current_ev, &ap->rsn_ie,
- &iwe, info);
-
- if (ap->wpa_ie.id == WPA_INFO_ELEM_ID && ap->wpa_ie.size != 0)
- current_ev = ks_wlan_add_leader_event(wpa_leader, end_buf,
- current_ev, &ap->wpa_ie,
- &iwe, info);
-
- /*
- * The other data in the scan result are not really
- * interesting, so for now drop it - Jean II
- */
- return current_ev;
-}
-
-static int ks_wlan_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int i;
- char *current_ev = extra;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (priv->sme_i.sme_flag & SME_AP_SCAN)
- return -EAGAIN;
-
- if (priv->aplist.size == 0) {
- /* Client error, no scan results...
- * The caller needs to restart the scan.
- */
- return -ENODATA;
- }
-
- /* Read and parse all entries */
- for (i = 0; i < priv->aplist.size; i++) {
- if ((extra + dwrq->data.length) - current_ev <= IW_EV_ADDR_LEN) {
- dwrq->data.length = 0;
- return -E2BIG;
- }
- /* Translate to WE format this entry */
- current_ev = ks_wlan_translate_scan(dev, info, current_ev,
- extra + dwrq->data.length,
- &priv->aplist.ap[i]);
- }
- /* Length of data */
- dwrq->data.length = (current_ev - extra);
- dwrq->data.flags = 0;
-
- return 0;
-}
-
-/* called after a bunch of SET operations */
-static int ks_wlan_config_commit(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *zwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (!priv->need_commit)
- return 0;
-
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- return 0;
-}
-
-/* set association ie params */
-static int ks_wlan_set_genie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- return 0;
-// return -EOPNOTSUPP;
-}
-
-static int ks_wlan_set_auth_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_param *param = &vwrq->param;
- int index = (param->flags & IW_AUTH_INDEX);
- int value = param->value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- switch (index) {
- case IW_AUTH_WPA_VERSION: /* 0 */
- switch (value) {
- case IW_AUTH_WPA_VERSION_DISABLED:
- priv->wpa.version = value;
- if (priv->wpa.rsn_enabled)
- priv->wpa.rsn_enabled = false;
- priv->need_commit |= SME_RSN;
- break;
- case IW_AUTH_WPA_VERSION_WPA:
- case IW_AUTH_WPA_VERSION_WPA2:
- priv->wpa.version = value;
- if (!(priv->wpa.rsn_enabled))
- priv->wpa.rsn_enabled = true;
- priv->need_commit |= SME_RSN;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_CIPHER_PAIRWISE: /* 1 */
- switch (value) {
- case IW_AUTH_CIPHER_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_CIPHER_WEP40:
- case IW_AUTH_CIPHER_TKIP:
- case IW_AUTH_CIPHER_CCMP:
- case IW_AUTH_CIPHER_WEP104:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- }
- priv->wpa.pairwise_suite = value;
- priv->need_commit |= SME_RSN_UNICAST;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_CIPHER_GROUP: /* 2 */
- switch (value) {
- case IW_AUTH_CIPHER_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_CIPHER_WEP40:
- case IW_AUTH_CIPHER_TKIP:
- case IW_AUTH_CIPHER_CCMP:
- case IW_AUTH_CIPHER_WEP104:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- }
- priv->wpa.group_suite = value;
- priv->need_commit |= SME_RSN_MULTICAST;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_KEY_MGMT: /* 3 */
- switch (value) {
- case IW_AUTH_KEY_MGMT_802_1X:
- case IW_AUTH_KEY_MGMT_PSK:
- case 0: /* NONE or 802_1X_NO_WPA */
- case 4: /* WPA_NONE */
- priv->wpa.key_mgmt_suite = value;
- priv->need_commit |= SME_RSN_AUTH;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_80211_AUTH_ALG: /* 6 */
- switch (value) {
- case IW_AUTH_ALG_OPEN_SYSTEM:
- priv->wpa.auth_alg = value;
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- break;
- case IW_AUTH_ALG_SHARED_KEY:
- priv->wpa.auth_alg = value;
- priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
- break;
- case IW_AUTH_ALG_LEAP:
- default:
- return -EOPNOTSUPP;
- }
- priv->need_commit |= SME_MODE_SET;
- break;
- case IW_AUTH_WPA_ENABLED: /* 7 */
- priv->wpa.wpa_enabled = value;
- break;
- case IW_AUTH_PRIVACY_INVOKED: /* 10 */
- if ((value && !priv->reg.privacy_invoked) ||
- (!value && priv->reg.privacy_invoked)) {
- priv->reg.privacy_invoked = value ? 0x01 : 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */
- case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */
- case IW_AUTH_DROP_UNENCRYPTED: /* 8 */
- case IW_AUTH_ROAMING_CONTROL: /* 9 */
- default:
- break;
- }
-
- /* return -EINPROGRESS; */
- if (priv->need_commit) {
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- }
- return 0;
-}
-
-static int ks_wlan_get_auth_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_param *param = &vwrq->param;
- int index = (param->flags & IW_AUTH_INDEX);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* WPA (not used ?? wpa_supplicant) */
- switch (index) {
- case IW_AUTH_WPA_VERSION:
- param->value = priv->wpa.version;
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- param->value = priv->wpa.pairwise_suite;
- break;
- case IW_AUTH_CIPHER_GROUP:
- param->value = priv->wpa.group_suite;
- break;
- case IW_AUTH_KEY_MGMT:
- param->value = priv->wpa.key_mgmt_suite;
- break;
- case IW_AUTH_80211_AUTH_ALG:
- param->value = priv->wpa.auth_alg;
- break;
- case IW_AUTH_WPA_ENABLED:
- param->value = priv->wpa.rsn_enabled;
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? */
- case IW_AUTH_TKIP_COUNTERMEASURES:
- case IW_AUTH_DROP_UNENCRYPTED:
- default:
- /* return -EOPNOTSUPP; */
- break;
- }
- return 0;
-}
-
-/* set encoding token & mode (WPA)*/
-static int ks_wlan_set_encode_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_encode_ext *enc;
- int index = dwrq->encoding.flags & IW_ENCODE_INDEX;
- unsigned int commit = 0;
- struct wpa_key *key;
-
- enc = (struct iw_encode_ext *)extra;
- if (!enc)
- return -EINVAL;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (index < 1 || index > 4)
- return -EINVAL;
- index--;
- key = &priv->wpa.key[index];
-
- if (dwrq->encoding.flags & IW_ENCODE_DISABLED)
- key->key_len = 0;
-
- key->ext_flags = enc->ext_flags;
- if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- priv->wpa.txkey = index;
- commit |= SME_WEP_INDEX;
- } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE);
- }
-
- ether_addr_copy(&key->addr.sa_data[0], &enc->addr.sa_data[0]);
-
- switch (enc->alg) {
- case IW_ENCODE_ALG_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- commit |= SME_WEP_FLAG;
- }
- key->key_len = 0;
-
- break;
- case IW_ENCODE_ALG_WEP:
- case IW_ENCODE_ALG_CCMP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
- if (enc->key_len) {
- int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
-
- memcpy(&key->key_val[0], &enc->key[0], key_len);
- key->key_len = key_len;
- commit |= (SME_WEP_VAL1 << index);
- }
- break;
- case IW_ENCODE_ALG_TKIP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
- if (enc->key_len == 32) {
- memcpy(&key->key_val[0], &enc->key[0], enc->key_len - 16);
- key->key_len = enc->key_len - 16;
- if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
- memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
- memcpy(&key->rx_mic_key[0], &enc->key[16], 8);
- } else {
- memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
- memcpy(&key->rx_mic_key[0], &enc->key[24], 8);
- }
- commit |= (SME_WEP_VAL1 << index);
- }
- break;
- default:
- return -EINVAL;
- }
- key->alg = enc->alg;
-
- if (commit) {
- if (commit & SME_WEP_INDEX)
- hostif_sme_enqueue(priv, SME_SET_TXKEY);
- if (commit & SME_WEP_VAL_MASK)
- hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
- if (commit & SME_WEP_FLAG)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
- }
-
- return 0;
-}
-
-/* get encoding token & mode (WPA)*/
-static int ks_wlan_get_encode_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* WPA (not used ?? wpa_supplicant)
- * struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- * struct iw_encode_ext *enc;
- * enc = (struct iw_encode_ext *)extra;
- * int index = dwrq->flags & IW_ENCODE_INDEX;
- * WPA (not used ?? wpa_supplicant)
- */
- return 0;
-}
-
-static int ks_wlan_set_pmksa(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_pmksa *pmksa;
- int i;
- struct pmk *pmk;
- struct list_head *ptr;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!extra)
- return -EINVAL;
-
- pmksa = (struct iw_pmksa *)extra;
-
- switch (pmksa->cmd) {
- case IW_PMKSA_ADD:
- if (list_empty(&priv->pmklist.head)) {
- for (i = 0; i < PMK_LIST_MAX; i++) {
- pmk = &priv->pmklist.pmk[i];
- if (is_zero_ether_addr(pmk->bssid))
- break;
- }
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_add(&pmk->list, &priv->pmklist.head);
- priv->pmklist.size++;
- break;
- }
- /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk, list);
- if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_move(&pmk->list, &priv->pmklist.head);
- break;
- }
- }
- /* address not found */
- if (ptr != &priv->pmklist.head)
- break;
- /* new cache data */
- if (priv->pmklist.size < PMK_LIST_MAX) {
- for (i = 0; i < PMK_LIST_MAX; i++) {
- pmk = &priv->pmklist.pmk[i];
- if (is_zero_ether_addr(pmk->bssid))
- break;
- }
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_add(&pmk->list, &priv->pmklist.head);
- priv->pmklist.size++;
- } else { /* overwrite old cache data */
- pmk = list_entry(priv->pmklist.head.prev, struct pmk,
- list);
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_move(&pmk->list, &priv->pmklist.head);
- }
- break;
- case IW_PMKSA_REMOVE:
- if (list_empty(&priv->pmklist.head))
- return -EINVAL;
- /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk, list);
- if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
- eth_zero_addr(pmk->bssid);
- memset(pmk->pmkid, 0, IW_PMKID_LEN);
- list_del_init(&pmk->list);
- break;
- }
- }
- /* not find address. */
- if (ptr == &priv->pmklist.head)
- return 0;
- break;
- case IW_PMKSA_FLUSH:
- memset(&priv->pmklist, 0, sizeof(priv->pmklist));
- INIT_LIST_HEAD(&priv->pmklist.head);
- for (i = 0; i < PMK_LIST_MAX; i++)
- INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
- break;
- default:
- return -EINVAL;
- }
-
- hostif_sme_enqueue(priv, SME_SET_PMKSA);
- return 0;
-}
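The ks_wlan_set_pmksa() handler removed above keeps the PMKSA cache as a fixed pool of struct pmk entries threaded onto a list in most-recently-used order: a matching BSSID is moved to the head, a new entry takes a free slot while one remains, and a full cache recycles the entry at the tail. A minimal sketch of that pattern follows; the names (pmk_cache, cache_entry, MAX_ENTRIES, cache_add) are illustrative, not the driver's own, and the list head is assumed to have been set up with INIT_LIST_HEAD().

#include <linux/list.h>
#include <linux/etherdevice.h>

#define MAX_ENTRIES 8				/* stand-in for PMK_LIST_MAX */

struct cache_entry {
        struct list_head list;
        u8 bssid[ETH_ALEN];
};

struct pmk_cache {
        struct list_head head;			/* most recently used first */
        struct cache_entry pool[MAX_ENTRIES];
        unsigned int size;
};

static void cache_add(struct pmk_cache *c, const u8 *bssid)
{
        struct cache_entry *e;

        list_for_each_entry(e, &c->head, list) {
                if (ether_addr_equal(e->bssid, bssid)) {
                        list_move(&e->list, &c->head);	/* hit: refresh position */
                        return;
                }
        }

        if (c->size < MAX_ENTRIES) {
                e = &c->pool[c->size++];	/* an unused pool slot remains */
                list_add(&e->list, &c->head);
        } else {
                e = list_last_entry(&c->head, struct cache_entry, list);
                list_move(&e->list, &c->head);	/* recycle the oldest entry */
        }
        ether_addr_copy(e->bssid, bssid);
}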
-
-static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_statistics *wstats = &priv->wstats;
-
- if (!atomic_read(&update_phyinfo))
- return (priv->dev_state < DEVICE_STATE_READY) ? NULL : wstats;
-
- /*
- * Packets discarded in the wireless adapter due to wireless
- * specific problems
- */
- wstats->discard.nwid = 0; /* Rx invalid nwid */
- wstats->discard.code = 0; /* Rx invalid crypt */
- wstats->discard.fragment = 0; /* Rx invalid frag */
- wstats->discard.retries = 0; /* Tx excessive retries */
- wstats->discard.misc = 0; /* Invalid misc */
- wstats->miss.beacon = 0; /* Missed beacon */
-
- return wstats;
-}
-
-static int ks_wlan_set_stop_request(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!(uwrq->mode))
- return -EINVAL;
-
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
- return 0;
-}
-
-#include <linux/ieee80211.h>
-static int ks_wlan_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
- union iwreq_data uwrq;
-
- uwrq.mode = 1;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (mlme->cmd != IW_MLME_DEAUTH &&
- mlme->cmd != IW_MLME_DISASSOC)
- return -EOPNOTSUPP;
-
- if (mlme->cmd == IW_MLME_DEAUTH &&
- mlme->reason_code == WLAN_REASON_MIC_FAILURE)
- return 0;
-
- return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL);
-}
-
-static int ks_wlan_get_firmware_version(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct iw_point *dwrq = &uwrq->data;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- dwrq->length = priv->version_size + 1;
- strscpy(extra, priv->firmware_version, dwrq->length);
- return 0;
-}
-
-static int ks_wlan_set_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (uwrq->mode != LONG_PREAMBLE && uwrq->mode != SHORT_PREAMBLE)
- return -EINVAL;
-
- priv->reg.preamble = uwrq->mode;
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
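Several of the SET handlers removed here (preamble, beacon-lost count, PHY type, CTS mode) return -EINPROGRESS instead of 0 once they have recorded the new value. In Wireless Extensions, EIWCOMMIT is defined as EINPROGRESS, and the WEXT core responds to that return code by calling the driver's SIOCSIWCOMMIT handler, so everything flagged in priv->need_commit is pushed to the firmware in a single pass. A minimal sketch of the convention, using a hypothetical example_priv structure rather than the ks7010 one:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

struct example_priv {				/* hypothetical private data */
        int pending_param;
        bool need_commit;
};

static int example_set_param(struct net_device *dev,
                             struct iw_request_info *info,
                             union iwreq_data *uwrq, char *extra)
{
        struct example_priv *priv = netdev_priv(dev);

        priv->pending_param = uwrq->mode;	/* only record the new value */
        priv->need_commit = true;
        return -EINPROGRESS;			/* -EIWCOMMIT: core calls the commit handler */
}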
-
-static int ks_wlan_get_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.preamble;
- return 0;
-}
-
-static int ks_wlan_set_power_mgmt(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != POWER_MGMT_ACTIVE &&
- uwrq->mode != POWER_MGMT_SAVE1 &&
- uwrq->mode != POWER_MGMT_SAVE2)
- return -EINVAL;
-
- if ((uwrq->mode == POWER_MGMT_SAVE1 || uwrq->mode == POWER_MGMT_SAVE2) &&
- (priv->reg.operation_mode != MODE_INFRASTRUCTURE))
- return -EINVAL;
-
- priv->reg.power_mgmt = uwrq->mode;
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_power_mgmt(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.power_mgmt;
- return 0;
-}
-
-static int ks_wlan_set_scan_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
-
- if (uwrq->mode != ACTIVE_SCAN && uwrq->mode != PASSIVE_SCAN)
- return -EINVAL;
-
- priv->reg.scan_type = uwrq->mode;
- return 0;
-}
-
-static int ks_wlan_get_scan_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.scan_type;
- return 0;
-}
-
-static int ks_wlan_set_beacon_lost(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode > BEACON_LOST_COUNT_MAX)
- return -EINVAL;
-
- priv->reg.beacon_lost_count = uwrq->mode;
-
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
- }
-
- return 0;
-}
-
-static int ks_wlan_get_beacon_lost(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.beacon_lost_count;
- return 0;
-}
-
-static int ks_wlan_set_phy_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != D_11B_ONLY_MODE &&
- uwrq->mode != D_11G_ONLY_MODE &&
- uwrq->mode != D_11BG_COMPATIBLE_MODE)
- return -EINVAL;
-
- /* for SLEEP MODE */
- priv->reg.phy_type = uwrq->mode;
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_phy_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.phy_type;
- return 0;
-}
-
-static int ks_wlan_set_cts_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode != CTS_MODE_FALSE && uwrq->mode != CTS_MODE_TRUE)
- return -EINVAL;
-
- priv->reg.cts_mode = (uwrq->mode == CTS_MODE_FALSE) ? uwrq->mode :
- (priv->reg.phy_type == D_11G_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) ?
- uwrq->mode : !uwrq->mode;
-
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_cts_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.cts_mode;
- return 0;
-}
-
-static int ks_wlan_set_sleep_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (uwrq->mode != SLP_SLEEP &&
- uwrq->mode != SLP_ACTIVE) {
- netdev_err(dev, "SET_SLEEP_MODE %d error\n", uwrq->mode);
- return -EINVAL;
- }
-
- priv->sleep_mode = uwrq->mode;
- netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
-
- if (uwrq->mode == SLP_SLEEP)
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
-
- hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_sleep_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- uwrq->mode = priv->sleep_mode;
-
- return 0;
-}
-
-static int ks_wlan_set_wps_enable(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode != 0 && uwrq->mode != 1)
- return -EINVAL;
-
- priv->wps.wps_enabled = uwrq->mode;
- hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_wps_enable(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->wps.wps_enabled;
- netdev_info(dev, "return=%d\n", uwrq->mode);
-
- return 0;
-}
-
-static int ks_wlan_set_wps_probe_req(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct iw_point *dwrq = &uwrq->data;
- u8 *p = extra;
- unsigned char len;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* length check */
- if (p[1] + 2 != dwrq->length || dwrq->length > 256)
- return -EINVAL;
-
- priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
- len = p[1] + 2; /* IE header + IE */
-
- memcpy(priv->wps.ie, &len, sizeof(len));
- p = memcpy(priv->wps.ie + 1, p, len);
-
- netdev_dbg(dev, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
- priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
- p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
- p[priv->wps.ielen - 1]);
-
- hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_set_tx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode > 0xFF)
- return -EINVAL;
-
- priv->gain.tx_gain = (u8)uwrq->mode;
- priv->gain.tx_mode = (priv->gain.tx_gain < 0xFF) ? 1 : 0;
- hostif_sme_enqueue(priv, SME_SET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_tx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->gain.tx_gain;
- hostif_sme_enqueue(priv, SME_GET_GAIN);
- return 0;
-}
-
-static int ks_wlan_set_rx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode > 0xFF)
- return -EINVAL;
-
- priv->gain.rx_gain = (u8)uwrq->mode;
- priv->gain.rx_mode = (priv->gain.rx_gain < 0xFF) ? 1 : 0;
- hostif_sme_enqueue(priv, SME_SET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_rx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->gain.rx_gain;
- hostif_sme_enqueue(priv, SME_GET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- uwrq->mode = priv->eeprom_checksum;
- return 0;
-}
-
-static void print_hif_event(struct net_device *dev, int event)
-{
- switch (event) {
- case HIF_DATA_REQ:
- netdev_info(dev, "HIF_DATA_REQ\n");
- break;
- case HIF_DATA_IND:
- netdev_info(dev, "HIF_DATA_IND\n");
- break;
- case HIF_MIB_GET_REQ:
- netdev_info(dev, "HIF_MIB_GET_REQ\n");
- break;
- case HIF_MIB_GET_CONF:
- netdev_info(dev, "HIF_MIB_GET_CONF\n");
- break;
- case HIF_MIB_SET_REQ:
- netdev_info(dev, "HIF_MIB_SET_REQ\n");
- break;
- case HIF_MIB_SET_CONF:
- netdev_info(dev, "HIF_MIB_SET_CONF\n");
- break;
- case HIF_POWER_MGMT_REQ:
- netdev_info(dev, "HIF_POWER_MGMT_REQ\n");
- break;
- case HIF_POWER_MGMT_CONF:
- netdev_info(dev, "HIF_POWER_MGMT_CONF\n");
- break;
- case HIF_START_REQ:
- netdev_info(dev, "HIF_START_REQ\n");
- break;
- case HIF_START_CONF:
- netdev_info(dev, "HIF_START_CONF\n");
- break;
- case HIF_CONNECT_IND:
- netdev_info(dev, "HIF_CONNECT_IND\n");
- break;
- case HIF_STOP_REQ:
- netdev_info(dev, "HIF_STOP_REQ\n");
- break;
- case HIF_STOP_CONF:
- netdev_info(dev, "HIF_STOP_CONF\n");
- break;
- case HIF_PS_ADH_SET_REQ:
- netdev_info(dev, "HIF_PS_ADH_SET_REQ\n");
- break;
- case HIF_PS_ADH_SET_CONF:
- netdev_info(dev, "HIF_PS_ADH_SET_CONF\n");
- break;
- case HIF_INFRA_SET_REQ:
- netdev_info(dev, "HIF_INFRA_SET_REQ\n");
- break;
- case HIF_INFRA_SET_CONF:
- netdev_info(dev, "HIF_INFRA_SET_CONF\n");
- break;
- case HIF_ADH_SET_REQ:
- netdev_info(dev, "HIF_ADH_SET_REQ\n");
- break;
- case HIF_ADH_SET_CONF:
- netdev_info(dev, "HIF_ADH_SET_CONF\n");
- break;
- case HIF_AP_SET_REQ:
- netdev_info(dev, "HIF_AP_SET_REQ\n");
- break;
- case HIF_AP_SET_CONF:
- netdev_info(dev, "HIF_AP_SET_CONF\n");
- break;
- case HIF_ASSOC_INFO_IND:
- netdev_info(dev, "HIF_ASSOC_INFO_IND\n");
- break;
- case HIF_MIC_FAILURE_REQ:
- netdev_info(dev, "HIF_MIC_FAILURE_REQ\n");
- break;
- case HIF_MIC_FAILURE_CONF:
- netdev_info(dev, "HIF_MIC_FAILURE_CONF\n");
- break;
- case HIF_SCAN_REQ:
- netdev_info(dev, "HIF_SCAN_REQ\n");
- break;
- case HIF_SCAN_CONF:
- netdev_info(dev, "HIF_SCAN_CONF\n");
- break;
- case HIF_PHY_INFO_REQ:
- netdev_info(dev, "HIF_PHY_INFO_REQ\n");
- break;
- case HIF_PHY_INFO_CONF:
- netdev_info(dev, "HIF_PHY_INFO_CONF\n");
- break;
- case HIF_SLEEP_REQ:
- netdev_info(dev, "HIF_SLEEP_REQ\n");
- break;
- case HIF_SLEEP_CONF:
- netdev_info(dev, "HIF_SLEEP_CONF\n");
- break;
- case HIF_PHY_INFO_IND:
- netdev_info(dev, "HIF_PHY_INFO_IND\n");
- break;
- case HIF_SCAN_IND:
- netdev_info(dev, "HIF_SCAN_IND\n");
- break;
- case HIF_INFRA_SET2_REQ:
- netdev_info(dev, "HIF_INFRA_SET2_REQ\n");
- break;
- case HIF_INFRA_SET2_CONF:
- netdev_info(dev, "HIF_INFRA_SET2_CONF\n");
- break;
- case HIF_ADH_SET2_REQ:
- netdev_info(dev, "HIF_ADH_SET2_REQ\n");
- break;
- case HIF_ADH_SET2_CONF:
- netdev_info(dev, "HIF_ADH_SET2_CONF\n");
- }
-}
-
-/* get host command history */
-static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- int i, event;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- for (i = 63; i >= 0; i--) {
- event =
- priv->hostt.buff[(priv->hostt.qtail - 1 - i) %
- SME_EVENT_BUFF_SIZE];
- print_hif_event(dev, event);
- }
- return 0;
-}
-
-/* Structures to export the Wireless Handlers */
-
-static const struct iw_priv_args ks_wlan_private_args[] = {
-/*{ cmd, set_args, get_args, name[16] } */
- {KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
- {KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetWPSEnable"},
- {KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
- {KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
- "SetWPSProbeReq"},
- {KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPreamble"},
- {KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
- {KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPowerSave"},
- {KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
- {KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetScanType"},
- {KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
- {KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetRxGain"},
- {KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
- {KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
- "hostt"},
- {KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetBeaconLost"},
- {KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
- {KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetSleepMode"},
- {KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
- {KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetTxGain"},
- {KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
- {KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPhyType"},
- {KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
- {KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetCtsMode"},
- {KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
- {KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
-};
-
-static const iw_handler ks_wlan_handler[] = {
- IW_HANDLER(SIOCSIWCOMMIT, ks_wlan_config_commit),
- IW_HANDLER(SIOCGIWNAME, ks_wlan_get_name),
- IW_HANDLER(SIOCSIWFREQ, ks_wlan_set_freq),
- IW_HANDLER(SIOCGIWFREQ, ks_wlan_get_freq),
- IW_HANDLER(SIOCSIWMODE, ks_wlan_set_mode),
- IW_HANDLER(SIOCGIWMODE, ks_wlan_get_mode),
- IW_HANDLER(SIOCGIWRANGE, ks_wlan_get_range),
- IW_HANDLER(SIOCGIWSTATS, ks_wlan_get_iwstats),
- IW_HANDLER(SIOCSIWAP, ks_wlan_set_wap),
- IW_HANDLER(SIOCGIWAP, ks_wlan_get_wap),
- IW_HANDLER(SIOCSIWMLME, ks_wlan_set_mlme),
- IW_HANDLER(SIOCGIWAPLIST, ks_wlan_get_aplist),
- IW_HANDLER(SIOCSIWSCAN, ks_wlan_set_scan),
- IW_HANDLER(SIOCGIWSCAN, ks_wlan_get_scan),
- IW_HANDLER(SIOCSIWESSID, ks_wlan_set_essid),
- IW_HANDLER(SIOCGIWESSID, ks_wlan_get_essid),
- IW_HANDLER(SIOCSIWNICKN, ks_wlan_set_nick),
- IW_HANDLER(SIOCGIWNICKN, ks_wlan_get_nick),
- IW_HANDLER(SIOCSIWRATE, ks_wlan_set_rate),
- IW_HANDLER(SIOCGIWRATE, ks_wlan_get_rate),
- IW_HANDLER(SIOCSIWRTS, ks_wlan_set_rts),
- IW_HANDLER(SIOCGIWRTS, ks_wlan_get_rts),
- IW_HANDLER(SIOCSIWFRAG, ks_wlan_set_frag),
- IW_HANDLER(SIOCGIWFRAG, ks_wlan_get_frag),
- IW_HANDLER(SIOCSIWENCODE, ks_wlan_set_encode),
- IW_HANDLER(SIOCGIWENCODE, ks_wlan_get_encode),
- IW_HANDLER(SIOCSIWPOWER, ks_wlan_set_power),
- IW_HANDLER(SIOCGIWPOWER, ks_wlan_get_power),
- IW_HANDLER(SIOCSIWGENIE, ks_wlan_set_genie),
- IW_HANDLER(SIOCSIWAUTH, ks_wlan_set_auth_mode),
- IW_HANDLER(SIOCGIWAUTH, ks_wlan_get_auth_mode),
- IW_HANDLER(SIOCSIWENCODEEXT, ks_wlan_set_encode_ext),
- IW_HANDLER(SIOCGIWENCODEEXT, ks_wlan_get_encode_ext),
- IW_HANDLER(SIOCSIWPMKSA, ks_wlan_set_pmksa),
-};
-
-/* private_handler */
-static const iw_handler ks_wlan_private_handler[] = {
- NULL, /* 0 */
- NULL, /* 1, KS_WLAN_GET_DRIVER_VERSION */
- NULL, /* 2 */
- ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
- ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
- ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
- ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
- ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
- ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
- ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
- ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */
- ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */
- ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
- ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
- ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
- ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
- ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
- NULL, /* 17 */
- ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */
- ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */
- ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
- ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
- ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
- ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
- ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
- ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
- NULL, /* 26 */
- NULL, /* 27 */
- ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
- ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
- NULL, /* 30 */
- NULL, /* 31 */
-};
-
-static const struct iw_handler_def ks_wlan_handler_def = {
- .num_standard = ARRAY_SIZE(ks_wlan_handler),
- .num_private = ARRAY_SIZE(ks_wlan_private_handler),
- .num_private_args = ARRAY_SIZE(ks_wlan_private_args),
- .standard = ks_wlan_handler,
- .private = ks_wlan_private_handler,
- .private_args = ks_wlan_private_args,
- .get_wireless_stats = ks_get_wireless_stats,
-};
-
-static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
-{
- int ret;
- struct iwreq *wrq = (struct iwreq *)rq;
-
- switch (cmd) {
- case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
- ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u, NULL);
- break;
- // All other calls are currently unsupported
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static
-struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->dev_state < DEVICE_STATE_READY)
- return NULL; /* not finished initialize */
-
- return &priv->nstats;
-}
-
-static
-int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct sockaddr *mac_addr = (struct sockaddr *)addr;
-
- if (netif_running(dev))
- return -EBUSY;
- eth_hw_addr_set(dev, mac_addr->sa_data);
- ether_addr_copy(priv->eth_addr, mac_addr->sa_data);
-
- priv->mac_address_valid = false;
- hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
- netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
- return 0;
-}
-
-static
-void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- netdev_dbg(dev, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
- priv->tx_dev.qtail);
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
- priv->nstats.tx_errors++;
- netif_wake_queue(dev);
-}
-
-static
-netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int ret;
-
- netdev_dbg(dev, "in_interrupt()=%ld\n", in_interrupt());
-
- if (!skb) {
- netdev_err(dev, "ks_wlan: skb == NULL!!!\n");
- return 0;
- }
- if (priv->dev_state < DEVICE_STATE_READY) {
- dev_kfree_skb(skb);
- return 0; /* not finished initialize */
- }
-
- if (netif_running(dev))
- netif_stop_queue(dev);
-
- ret = hostif_data_request(priv, skb);
- netif_trans_update(dev);
-
- if (ret)
- netdev_err(dev, "hostif_data_request error: =%d\n", ret);
-
- return 0;
-}
-
-void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
-{
- priv->nstats.tx_packets++;
-
- if (netif_queue_stopped(priv->net_dev))
- netif_wake_queue(priv->net_dev);
-
- if (skb) {
- priv->nstats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
- }
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- * This routine is not state sensitive and need not be SMP locked.
- */
-static
-void ks_wlan_set_rx_mode(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->dev_state < DEVICE_STATE_READY)
- return; /* not finished initialize */
- hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
-}
-
-static
-int ks_wlan_open(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- priv->cur_rx = 0;
-
- if (!priv->mac_address_valid) {
- netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
- return -EBUSY;
- }
- netif_start_queue(dev);
-
- return 0;
-}
-
-static
-int ks_wlan_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
-
- return 0;
-}
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (3 * HZ)
-static const unsigned char dummy_addr[] = {
- 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00
-};
-
-static const struct net_device_ops ks_wlan_netdev_ops = {
- .ndo_start_xmit = ks_wlan_start_xmit,
- .ndo_open = ks_wlan_open,
- .ndo_stop = ks_wlan_close,
- .ndo_do_ioctl = ks_wlan_netdev_ioctl,
- .ndo_set_mac_address = ks_wlan_set_mac_address,
- .ndo_get_stats = ks_wlan_get_stats,
- .ndo_tx_timeout = ks_wlan_tx_timeout,
- .ndo_set_rx_mode = ks_wlan_set_rx_mode,
-};
-
-int ks_wlan_net_start(struct net_device *dev)
-{
- struct ks_wlan_private *priv;
- /* int rc; */
-
- priv = netdev_priv(dev);
- priv->mac_address_valid = false;
- priv->is_device_open = true;
- priv->need_commit = 0;
- /* phy information update timer */
- atomic_set(&update_phyinfo, 0);
- timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);
-
- /* dummy address set */
- ether_addr_copy(priv->eth_addr, dummy_addr);
- eth_hw_addr_set(dev, priv->eth_addr);
-
- /* The ks_wlan-specific entries in the device structure. */
- dev->netdev_ops = &ks_wlan_netdev_ops;
- dev->wireless_handlers = &ks_wlan_handler_def;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- netif_carrier_off(dev);
-
- return 0;
-}
-
-int ks_wlan_net_stop(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- priv->is_device_open = false;
- del_timer_sync(&update_phyinfo_timer);
-
- if (netif_running(dev))
- netif_stop_queue(dev);
-
- return 0;
-}
-
-/**
- * is_connect_status() - return true if status is 'connected'
- * @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * connect status.
- */
-bool is_connect_status(u32 status)
-{
- return (status & CONNECT_STATUS_MASK) == CONNECT_STATUS;
-}
-
-/**
- * is_disconnect_status() - return true if status is 'disconnected'
- * @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * disconnect status.
- */
-bool is_disconnect_status(u32 status)
-{
- return (status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS;
-}
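Both helpers above compare only the low bits of the status word: the top bit marks a forced disconnect and CONNECT_STATUS_MASK strips it before the comparison, so a forced disconnect still classifies as disconnected. Illustrative definitions that match that description (assumed values, not quoted from ks_wlan.h):

#define FORCE_DISCONNECT	0x80000000	/* high bit */
#define CONNECT_STATUS_MASK	0x7fffffff	/* everything below it */
#define CONNECT_STATUS		0x00000000
#define DISCONNECT_STATUS	0x00000001

/* With these assumed values, is_disconnect_status(FORCE_DISCONNECT | DISCONNECT_STATUS)
 * is true, while is_connect_status() of the same word is false.
 */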
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
index 5f186fb03642..15386a773dc5 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -65,8 +65,7 @@ int ia_css_iterator_configure(const struct ia_css_binary *binary,
* the original out res. for video pipe, it has two output pins --- out and
* vf_out, so it can keep these two resolutions already. */
if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
- binary->vf_downscale_log2 > 0)
- {
+ binary->vf_downscale_log2 > 0) {
/* TODO: Remove this after preview output decimation is fixed
* by configuring out&vf info files properly */
my_info.padded_width <<= binary->vf_downscale_log2;
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 6254a5df2502..2b3cdb1ce140 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -454,18 +454,18 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
if (mdev) {
- pr_err("channel already linked\n");
+ pr_err("Channel already linked\n");
return -EEXIST;
}
if (ccfg->direction != MOST_CH_RX) {
- pr_err("wrong direction, expect rx\n");
+ pr_err("Wrong direction, expected rx\n");
return -EINVAL;
}
if (ccfg->data_type != MOST_CH_SYNC &&
ccfg->data_type != MOST_CH_ISOC) {
- pr_err("wrong channel type, expect sync or isoc\n");
+ pr_err("Wrong channel type, expected sync or isoc\n");
return -EINVAL;
}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index d09211589d1c..977f8fc29e63 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -175,7 +175,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
}
}
- dev_err(nvec->dev, "could not allocate %s buffer\n",
+ dev_err(nvec->dev, "Could not allocate %s buffer\n",
(category == NVEC_MSG_TX) ? "TX" : "RX");
return NULL;
@@ -315,7 +315,7 @@ int nvec_write_sync(struct nvec_chip *nvec,
if (!(wait_for_completion_timeout(&nvec->sync_write,
msecs_to_jiffies(2000)))) {
dev_warn(nvec->dev,
- "timeout waiting for sync write to complete\n");
+ "Timeout waiting for sync write to complete\n");
mutex_unlock(&nvec->sync_write_mutex);
return -ETIMEDOUT;
}
@@ -392,7 +392,7 @@ static void nvec_request_master(struct work_struct *work)
msecs_to_jiffies(5000));
if (err == 0) {
- dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
+ dev_warn(nvec->dev, "Timeout waiting for ec transfer\n");
nvec_gpio_set_value(nvec, 1);
msg->pos = 0;
}
@@ -454,7 +454,7 @@ static void nvec_dispatch(struct work_struct *work)
if (nvec->sync_write_pending ==
(msg->data[2] << 8) + msg->data[0]) {
- dev_dbg(nvec->dev, "sync write completed!\n");
+ dev_dbg(nvec->dev, "Sync write completed!\n");
nvec->sync_write_pending = 0;
nvec->last_sync_msg = msg;
complete(&nvec->sync_write);
@@ -477,7 +477,7 @@ static void nvec_tx_completed(struct nvec_chip *nvec)
{
/* We got an END_TRANS, let's skip this, maybe there's an event */
if (nvec->tx->pos != nvec->tx->size) {
- dev_err(nvec->dev, "premature END_TRANS, resending\n");
+ dev_err(nvec->dev, "Premature END_TRANS, resending\n");
nvec->tx->pos = 0;
nvec_gpio_set_value(nvec, 0);
} else {
@@ -608,7 +608,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
/* Filter out some errors */
if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
- dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
+ dev_err(nvec->dev, "Unexpected irq mask %lx\n", status);
return IRQ_HANDLED;
}
if ((status & I2C_SL_IRQ) == 0) {
@@ -631,7 +631,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if (status != (I2C_SL_IRQ | RCVD))
nvec_invalid_flags(nvec, status, false);
break;
- case 1: /* command byte */
+ case 1: /* Command byte */
if (status != I2C_SL_IRQ) {
nvec_invalid_flags(nvec, status, true);
} else {
@@ -845,13 +845,12 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return PTR_ERR(nvec->gpiod);
}
- err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
+ err = devm_request_irq(dev, nvec->irq, nvec_interrupt, IRQF_NO_AUTOEN,
"nvec", nvec);
if (err) {
dev_err(dev, "couldn't request irq\n");
return -ENODEV;
}
- disable_irq(nvec->irq);
tegra_init_i2c_slave(nvec);
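The nvec change above requests the interrupt with IRQF_NO_AUTOEN instead of requesting it enabled and immediately calling disable_irq(): the line now stays masked from the moment it is requested, which closes the short window in which nvec_interrupt() could fire before tegra_init_i2c_slave() has configured the controller, and the explicit disable becomes unnecessary. A minimal sketch of the same pattern in a hypothetical probe path (all foo_* names are placeholders):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);
        int err;

        if (irq < 0)
                return irq;

        /* IRQF_NO_AUTOEN: the line stays disabled after a successful request */
        err = devm_request_irq(&pdev->dev, irq, foo_isr, IRQF_NO_AUTOEN,
                               "foo", pdev);
        if (err)
                return err;

        /* ... bring the hardware to a state where interrupts are safe ... */

        enable_irq(irq);	/* unmask only once initialisation is done */
        return 0;
}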
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 08ec3aae90ea..4cb904a5f8f4 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -544,7 +544,7 @@ static const struct backlight_ops dcon_bl_ops = {
static struct backlight_properties dcon_bl_props = {
.max_brightness = 15,
.type = BACKLIGHT_RAW,
- .power = FB_BLANK_UNBLANK,
+ .power = BACKLIGHT_POWER_ON,
};
static int dcon_reboot_notify(struct notifier_block *nb,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index d87bace0a19b..552fd9b6e3e5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -8,6 +8,7 @@
#define R8190P_DEF_H
#include <linux/types.h>
+#include "r8192E_phy.h"
#define MAX_SILENT_RESET_RX_SLOT_NUM 10
@@ -137,7 +138,7 @@ struct tx_fwinfo_8190pci {
};
struct phy_sts_ofdm_819xpci {
- u8 trsw_gain_X[4];
+ u8 trsw_gain_X[RF90_PATH_MAX];
u8 pwdb_all;
u8 cfosho_X[4];
u8 cfotail_X[4];
@@ -226,7 +227,7 @@ struct rx_desc {
u16 Length:14;
u16 CRC32:1;
u16 ICV:1;
- u8 RxDrvInfoSize;
+ u8 rx_drv_info_size;
u8 Shift:2;
u8 PHYStatus:1;
u8 SWDec:1;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index e470b49b0ff7..533cc4e723f6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -16,18 +16,18 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
struct sk_buff *skb;
unsigned char *seg_ptr;
struct cb_desc *tcb_desc;
- u8 bLastIniPkt;
+ u8 last_ini_pkt;
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
do {
if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) {
frag_length = CMDPACKET_FRAG_SIZE;
- bLastIniPkt = 0;
+ last_ini_pkt = 0;
} else {
frag_length = (u16)(len - frag_offset);
- bLastIniPkt = 1;
+ last_ini_pkt = 1;
}
if (type == DESC_PACKET_TYPE_NORMAL)
@@ -42,8 +42,8 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = type;
- tcb_desc->bLastIniPkt = bLastIniPkt;
+ tcb_desc->cmd_or_init = type;
+ tcb_desc->last_ini_pkt = last_ini_pkt;
if (type == DESC_PACKET_TYPE_NORMAL) {
tcb_desc->pkt_size = frag_length;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index b3d4b3394284..2672b1ddf88e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -289,7 +289,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 6; i += 2) {
usValue = rtl92e_eeprom_read(dev,
- (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
+ (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
*(u16 *)(&addr[i]) = usValue;
}
eth_hw_addr_set(dev, addr);
@@ -987,10 +987,10 @@ void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
if (dma_mapping_error(&priv->pdev->dev, mapping))
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
memset(entry, 0, 12);
- entry->LINIP = cb_desc->bLastIniPkt;
+ entry->LINIP = cb_desc->last_ini_pkt;
entry->FirstSeg = 1;
entry->LastSeg = 1;
- if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
+ if (cb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
entry->CmdInit = DESC_PACKET_TYPE_INIT;
} else {
struct tx_desc *entry_tmp = (struct tx_desc *)entry;
@@ -1145,23 +1145,21 @@ static long _rtl92e_signal_scale_mapping(struct r8192_priv *priv, long currsig)
_pdrvinfo->RxRate == DESC90_RATE11M) &&\
!_pdrvinfo->RxHT)
-static void _rtl92e_query_rxphystatus(
- struct r8192_priv *priv,
- struct rtllib_rx_stats *pstats,
- struct rx_desc *pdesc,
- struct rx_fwinfo *pdrvinfo,
- struct rtllib_rx_stats *precord_stats,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool bPacketBeacon,
- bool bToSelfBA
- )
+static void _rtl92e_query_rxphystatus(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pstats,
+ struct rx_desc *pdesc,
+ struct rx_fwinfo *pdrvinfo,
+ struct rtllib_rx_stats *precord_stats,
+ bool bpacket_match_bssid,
+ bool bpacket_toself,
+ bool bPacketBeacon,
+ bool bToSelfBA)
{
struct phy_sts_ofdm_819xpci *pofdm_buf;
struct phy_sts_cck_819xpci *pcck_buf;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxevm;
- s8 rx_pwr[4], rx_pwr_all = 0;
+ s8 rx_pwr[RF90_PATH_MAX], rx_pwr_all = 0;
s8 rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi = 0;
@@ -1174,7 +1172,7 @@ static void _rtl92e_query_rxphystatus(
memset(precord_stats, 0, sizeof(struct rtllib_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID =
bpacket_match_bssid;
- pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
+ pstats->packet_to_self = precord_stats->packet_to_self = bpacket_toself;
pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
@@ -1266,8 +1264,8 @@ static void _rtl92e_query_rxphystatus(
else
sq = ((64 - sq) * 100) / 44;
}
- pstats->SignalQuality = sq;
- precord_stats->SignalQuality = sq;
+ pstats->signal_quality = sq;
+ precord_stats->signal_quality = sq;
pstats->RxMIMOSignalQuality[0] = sq;
precord_stats->RxMIMOSignalQuality[0] = sq;
pstats->RxMIMOSignalQuality[1] = -1;
@@ -1311,8 +1309,8 @@ static void _rtl92e_query_rxphystatus(
evm = rtl92e_evm_db_to_percent(rx_evmX);
if (bpacket_match_bssid) {
if (i == 0) {
- pstats->SignalQuality = evm & 0xff;
- precord_stats->SignalQuality = evm & 0xff;
+ pstats->signal_quality = evm & 0xff;
+ precord_stats->signal_quality = evm & 0xff;
}
pstats->RxMIMOSignalQuality[i] = evm & 0xff;
precord_stats->RxMIMOSignalQuality[i] = evm & 0xff;
@@ -1321,13 +1319,12 @@ static void _rtl92e_query_rxphystatus(
}
if (is_cck_rate) {
- pstats->SignalStrength = precord_stats->SignalStrength =
- _rtl92e_signal_scale_mapping(priv,
- (long)pwdb_all);
+ pstats->signal_strength = precord_stats->signal_strength =
+ _rtl92e_signal_scale_mapping(priv, (long)pwdb_all);
} else {
if (rf_rx_num != 0)
- pstats->SignalStrength = precord_stats->SignalStrength =
+ pstats->signal_strength = precord_stats->signal_strength =
_rtl92e_signal_scale_mapping(priv,
(long)(total_rssi /= rf_rx_num));
}
@@ -1355,10 +1352,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
priv->stats.slide_rssi_total -= last_rssi;
}
- priv->stats.slide_rssi_total += prev_st->SignalStrength;
+ priv->stats.slide_rssi_total += prev_st->signal_strength;
priv->stats.slide_signal_strength[slide_rssi_index++] =
- prev_st->SignalStrength;
+ prev_st->signal_strength;
if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
@@ -1373,7 +1370,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
- if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
+ if (!prev_st->bIsCCK && prev_st->packet_to_self) {
for (rfpath = RF90_PATH_A; rfpath < priv->num_total_rf_path; rfpath++) {
if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
priv->stats.rx_rssi_percentage[rfpath] =
@@ -1419,7 +1416,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (prev_st->RxPWDBAll >= 3)
prev_st->RxPWDBAll -= 3;
}
- if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ if (prev_st->packet_to_self || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (priv->undecorated_smoothed_pwdb < 0)
priv->undecorated_smoothed_pwdb = prev_st->RxPWDBAll;
@@ -1439,8 +1436,8 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
rtl92e_update_rx_statistics(priv, prev_st);
}
- if (prev_st->SignalQuality != 0) {
- if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ if (prev_st->signal_quality != 0) {
+ if (prev_st->packet_to_self || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
@@ -1449,10 +1446,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
priv->stats.slide_evm_total -= last_evm;
}
- priv->stats.slide_evm_total += prev_st->SignalQuality;
+ priv->stats.slide_evm_total += prev_st->signal_quality;
priv->stats.slide_evm[slide_evm_index++] =
- prev_st->SignalQuality;
+ prev_st->signal_quality;
if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
slide_evm_index = 0;
@@ -1461,7 +1458,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
priv->stats.last_signal_strength_inpercent = tmp_val;
}
- if (prev_st->bPacketToSelf ||
+ if (prev_st->packet_to_self ||
prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
for (ij = 0; ij < 2; ij++) {
@@ -1496,7 +1493,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
u8 *tmp_buf;
u8 *praddr;
- tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
+ tmp_buf = skb->data + pstats->rx_drv_info_size + pstats->rx_buf_shift;
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
fc = le16_to_cpu(hdr->frame_control);
@@ -1509,7 +1506,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
(fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
(fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
hdr->addr3) &&
- (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
+ (!pstats->hw_error) && (!pstats->bCRC) && (!pstats->bICV));
bpacket_toself = bpacket_match_bssid && /* check this */
ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
if (ieee80211_is_beacon(hdr->frame_control))
@@ -1521,9 +1518,8 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
rtl92e_copy_mpdu_stats(pstats, &previous_stats);
}
-static void _rtl92e_update_received_rate_histogram_stats(
- struct net_device *dev,
- struct rtllib_rx_stats *pstats)
+static void _rtl92e_update_received_rate_histogram_stats(struct net_device *dev,
+ struct rtllib_rx_stats *pstats)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
@@ -1634,20 +1630,20 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->bICV = pdesc->ICV;
stats->bCRC = pdesc->CRC32;
- stats->bHwError = pdesc->CRC32 | pdesc->ICV;
+ stats->hw_error = pdesc->CRC32 | pdesc->ICV;
stats->Length = pdesc->Length;
if (stats->Length < 24)
- stats->bHwError |= 1;
+ stats->hw_error |= 1;
- if (stats->bHwError)
+ if (stats->hw_error)
return false;
- stats->RxDrvInfoSize = pdesc->RxDrvInfoSize;
- stats->RxBufShift = (pdesc->Shift) & 0x03;
+ stats->rx_drv_info_size = pdesc->rx_drv_info_size;
+ stats->rx_buf_shift = (pdesc->Shift) & 0x03;
stats->decrypted = !pdesc->SWDec;
- pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
+ pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->rx_buf_shift);
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
pDrvInfo->RxRate);
@@ -1837,8 +1833,8 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
rx_chk_cnt++;
if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
rx_chk_cnt = 0;
- } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5))
- && (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
+ } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5)) &&
+ (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
(priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M))
|| ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
(priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M)))) {
@@ -1859,7 +1855,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
rx_chk_cnt = 0;
}
-
slot_index = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 1b444529b59c..e507593b939c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -229,7 +229,7 @@ enum _RTL8192PCI_HW {
RATR_MCS6 | RATR_MCS7)
#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
- RATR_MCS14|RATR_MCS15)
+ RATR_MCS14 | RATR_MCS15)
DRIVER_RSSI = 0x32c,
MCS_TXAGC = 0x340,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 18b948d4d86d..fbe624e235df 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -416,6 +416,7 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
return rtStatus;
}
+
bool rtl92e_config_bb(struct net_device *dev)
{
_rtl92e_init_bb_rf_reg_def(dev);
@@ -508,8 +509,8 @@ static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel)
static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
struct sw_chnl_cmd *CmdTable,
u32 CmdTableIdx, u32 CmdTableSz,
- enum sw_chnl_cmd_id CmdID,
- u32 Para1, u32 Para2, u32 msDelay)
+ enum sw_chnl_cmd_id cmd_id,
+ u32 para1, u32 para2, u32 ms_delay)
{
struct sw_chnl_cmd *pCmd;
@@ -523,10 +524,10 @@ static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
}
pCmd = CmdTable + CmdTableIdx;
- pCmd->CmdID = CmdID;
- pCmd->Para1 = Para1;
- pCmd->Para2 = Para2;
- pCmd->msDelay = msDelay;
+ pCmd->cmd_id = cmd_id;
+ pCmd->para1 = para1;
+ pCmd->para2 = para2;
+ pCmd->ms_delay = ms_delay;
return true;
}
@@ -552,18 +553,18 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
PreCommonCmdCnt++,
MAX_PRECMD_CNT,
- CmdID_SetTxPowerLevel,
+ cmd_id_set_tx_power_level,
0, 0, 0);
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
PreCommonCmdCnt++,
- MAX_PRECMD_CNT, CmdID_End,
+ MAX_PRECMD_CNT, cmd_id_end,
0, 0, 0);
PostCommonCmdCnt = 0;
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PostCommonCmd,
PostCommonCmdCnt++,
- MAX_POSTCMD_CNT, CmdID_End,
+ MAX_POSTCMD_CNT, cmd_id_end,
0, 0, 0);
RfDependCmdCnt = 0;
@@ -578,14 +579,14 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
ieee->RfDependCmd,
RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg,
+ cmd_id_rf_write_reg,
rZebra1_Channel,
channel, 10);
_rtl92e_phy_set_sw_chnl_cmd_array(dev,
ieee->RfDependCmd,
RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
+ cmd_id_end, 0, 0, 0);
do {
switch (*stage) {
@@ -600,7 +601,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
break;
}
- if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) {
+ if (CurrentCmd && CurrentCmd->cmd_id == cmd_id_end) {
if ((*stage) == 2)
return true;
(*stage)++;
@@ -610,31 +611,31 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
if (!CurrentCmd)
continue;
- switch (CurrentCmd->CmdID) {
- case CmdID_SetTxPowerLevel:
+ switch (CurrentCmd->cmd_id) {
+ case cmd_id_set_tx_power_level:
if (priv->ic_cut > VERSION_8190_BD)
_rtl92e_set_tx_power_level(dev,
channel);
break;
- case CmdID_WritePortUlong:
- rtl92e_writel(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_ulong:
+ rtl92e_writel(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_WritePortUshort:
- rtl92e_writew(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_ushort:
+ rtl92e_writew(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_WritePortUchar:
- rtl92e_writeb(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_uchar:
+ rtl92e_writeb(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_RF_WriteReg:
+ case cmd_id_rf_write_reg:
for (eRFPath = 0; eRFPath <
priv->num_total_rf_path; eRFPath++)
rtl92e_set_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
- CurrentCmd->Para1, bMask12Bits,
- CurrentCmd->Para2 << 7);
+ CurrentCmd->para1, bMask12Bits,
+ CurrentCmd->para2 << 7);
break;
default:
break;
@@ -644,7 +645,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
} while (true);
} /*for (Number of RF paths)*/
- (*delay) = CurrentCmd->msDelay;
+ (*delay) = CurrentCmd->ms_delay;
(*step)++;
return false;
}
@@ -944,19 +945,19 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
case IG_Restore:
BitMask = 0x7f;
rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
- (u32)priv->initgain_backup.xaagccore1);
+ (u32)priv->initgain_backup.xaagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
- (u32)priv->initgain_backup.xbagccore1);
+ (u32)priv->initgain_backup.xbagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask,
- (u32)priv->initgain_backup.xcagccore1);
+ (u32)priv->initgain_backup.xcagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask,
- (u32)priv->initgain_backup.xdagccore1);
+ (u32)priv->initgain_backup.xdagccore1);
BitMask = bMaskByte2;
rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
- (u32)priv->initgain_backup.cca);
+ (u32)priv->initgain_backup.cca);
rtl92e_set_tx_power(dev,
- priv->rtllib->current_network.channel);
+ priv->rtllib->current_network.channel);
break;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index ff4b4004b0d0..956dfbdd5b68 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -20,8 +20,6 @@ enum hw90_block {
enum rf90_radio_path {
RF90_PATH_A = 0,
RF90_PATH_B = 1,
- RF90_PATH_C = 2,
- RF90_PATH_D = 3,
RF90_PATH_MAX
};
@@ -45,13 +43,13 @@ void rtl92e_set_channel(struct net_device *dev, u8 channel);
void rtl92e_set_bw_mode(struct net_device *dev,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
-void rtl92e_init_gain(struct net_device *dev, u8 Operation);
+void rtl92e_init_gain(struct net_device *dev, u8 operation);
void rtl92e_set_rf_off(struct net_device *dev);
bool rtl92e_set_rf_power_state(struct net_device *dev,
enum rt_rf_power_state rf_power_state);
-void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
+void rtl92e_scan_op_backup(struct net_device *dev, u8 operation);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 9eeae01dc98d..dc1301f1a0c1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -25,7 +25,7 @@
int hwwep = 1;
static char *ifname = "wlan%d";
-static struct pci_device_id rtl8192_pci_id_tbl[] = {
+static const struct pci_device_id rtl8192_pci_id_tbl[] = {
{PCI_DEVICE(0x10ec, 0x8192)},
{PCI_DEVICE(0x07aa, 0x0044)},
{PCI_DEVICE(0x07aa, 0x0047)},
@@ -173,7 +173,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
else
priv->blinked_ingpio = false;
rtllib_mgnt_disconnect(priv->rtllib,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT);
+ WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off)
@@ -322,7 +322,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
if (network->flags & NETWORK_HAS_QOS_MASK) {
if (active_network &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS))
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS))
network->qos_data.active = network->qos_data.supported;
if ((network->qos_data.active == 1) && (active_network == 1) &&
@@ -665,7 +665,7 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->init_gain_handler = rtl92e_init_gain;
priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
- priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
+ priv->rtllib->scan_operation_backup_handler = rtl92e_scan_op_backup;
}
static void _rtl92e_init_priv_variable(struct net_device *dev)
@@ -860,13 +860,13 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
skb = __skb_peek(&ring->queue);
tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- tcb_desc->nStuckCount++;
+ tcb_desc->stuck_count++;
bCheckFwTxCnt = true;
- if (tcb_desc->nStuckCount > 1)
+ if (tcb_desc->stuck_count > 1)
netdev_info(dev,
- "%s: QueueID=%d tcb_desc->nStuckCount=%d\n",
+ "%s: QueueID=%d tcb_desc->stuck_count=%d\n",
__func__, QueueID,
- tcb_desc->nStuckCount);
+ tcb_desc->stuck_count);
}
}
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -1522,8 +1522,8 @@ static void _rtl92e_rx_normal(struct net_device *dev)
priv->rxbuffersize, DMA_FROM_DEVICE);
skb_put(skb, pdesc->Length);
- skb_reserve(skb, stats.RxDrvInfoSize +
- stats.RxBufShift);
+ skb_reserve(skb, stats.rx_drv_info_size +
+ stats.rx_buf_shift);
skb_trim(skb, skb->len - S_CRC_LEN);
rtllib_hdr = (struct ieee80211_hdr *)skb->data;
if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 1d6d31292f41..8297d7e59415 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -300,7 +300,7 @@ struct r8192_priv {
u32 rf_reg_0value[4];
u8 num_total_rf_path;
- bool brfpath_rxenable[4];
+ bool brfpath_rxenable[RF90_PATH_MAX];
bool tx_pwr_data_read_from_eeprom;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 0c7f38a4a7db..e9ca5a8768ad 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1484,8 +1484,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
rtl92e_set_bb_reg(dev,
rOFDM1_TRxPathEnable,
0x1 << i, 0x1);
- dm_rx_path_sel_table.rf_enable_rssi_th[i]
- = 100;
+ dm_rx_path_sel_table.rf_enable_rssi_th[i] = 100;
disabled_rf_cnt--;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 5aac9110bff6..7b6247acf6f4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -204,12 +204,11 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
&priv->rtllib->pwr_save_ctrl;
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
- (priv->rtllib->link_state == MAC80211_LINKED)))
+ (priv->rtllib->link_state == MAC80211_LINKED)))
return;
if (psc->bLeisurePs) {
if (psc->lps_idle_count >= RT_CHECK_FOR_HANG_PERIOD) {
-
if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
_rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
} else {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index c21a0560410a..fe3a42a4fcd5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -288,11 +288,11 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
if (priv->rtllib->rf_power_state != rf_off) {
priv->rtllib->actscanning = true;
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP);
rtllib_start_scan_syncro(priv->rtllib);
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE);
}
ret = 0;
} else {
@@ -526,7 +526,8 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
mutex_unlock(&priv->wx_mutex);
if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
- ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+ ieee->group_key_type = KEY_TYPE_NA;
rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
@@ -675,9 +676,9 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
u8 idx = 0, alg = 0, group = 0;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- ieee->pairwise_key_type = ieee->group_key_type
- = KEY_TYPE_NA;
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+ ieee->group_key_type = KEY_TYPE_NA;
rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
@@ -710,7 +711,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key);
} else {
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
- ieee->ht_info->current_ht_support)
+ ieee->ht_info->current_ht_support)
rtl92e_writeb(dev, 0x173, 1);
rtl92e_set_key(dev, 4, idx, alg,
(u8 *)ieee->ap_mac_addr, 0, key);
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index e38cd0c9c013..9c9c0bc0cfde 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -188,7 +188,7 @@ static void ht_iot_peer_determine(struct rtllib_device *ieee)
}
static u8 ht_iot_act_is_mgnt_use_cck_6m(struct rtllib_device *ieee,
- struct rtllib_network *network)
+ struct rtllib_network *network)
{
u8 retValue = 0;
@@ -559,7 +559,7 @@ void ht_initialize_bss_desc(struct bss_ht *bss_ht)
}
void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+ struct rtllib_network *network)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
u8 bIOTAction = 0;
@@ -567,32 +567,32 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
/* unmark enable_ht flag here is the same reason why unmarked in
* function rtllib_softmac_new_net. WB 2008.09.10
*/
- if (pNetwork->bssht.bd_support_ht) {
+ if (network->bssht.bd_support_ht) {
ht_info->current_ht_support = true;
- ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
+ ht_info->peer_ht_spec_ver = network->bssht.bd_ht_spec_ver;
- if (pNetwork->bssht.bd_ht_cap_len > 0 &&
- pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
+ if (network->bssht.bd_ht_cap_len > 0 &&
+ network->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
memcpy(ht_info->peer_ht_cap_buf,
- pNetwork->bssht.bd_ht_cap_buf,
- pNetwork->bssht.bd_ht_cap_len);
+ network->bssht.bd_ht_cap_buf,
+ network->bssht.bd_ht_cap_len);
- if (pNetwork->bssht.bd_ht_info_len > 0 &&
- pNetwork->bssht.bd_ht_info_len <=
+ if (network->bssht.bd_ht_info_len > 0 &&
+ network->bssht.bd_ht_info_len <=
sizeof(ht_info->peer_ht_info_buf))
memcpy(ht_info->peer_ht_info_buf,
- pNetwork->bssht.bd_ht_info_buf,
- pNetwork->bssht.bd_ht_info_len);
+ network->bssht.bd_ht_info_buf,
+ network->bssht.bd_ht_info_len);
ht_info->current_rt2rt_aggregation =
- pNetwork->bssht.bd_rt2rt_aggregation;
+ network->bssht.bd_rt2rt_aggregation;
ht_info->current_rt2rt_long_slot_time =
- pNetwork->bssht.bd_rt2rt_long_slot_time;
+ network->bssht.bd_rt2rt_long_slot_time;
ht_iot_peer_determine(ieee);
ht_info->iot_action = 0;
- bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, pNetwork);
+ bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, network);
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = ht_iot_act_is_ccd_fsync(ieee);
@@ -609,23 +609,23 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
}
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+ struct rtllib_network *network)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct ht_info_ele *pPeerHTInfo =
- (struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf;
+ (struct ht_info_ele *)network->bssht.bd_ht_info_buf;
if (ht_info->current_ht_support) {
- if (pNetwork->bssht.bd_ht_info_len != 0)
+ if (network->bssht.bd_ht_info_len != 0)
ht_info->current_op_mode = pPeerHTInfo->opt_mode;
}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
-u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame)
+u8 ht_c_check(struct rtllib_device *ieee, u8 *frame)
{
if (ieee->ht_info->current_ht_support) {
- if ((is_qos_data_frame(pFrame) && frame_order(pFrame)) == 1) {
+ if ((is_qos_data_frame(frame) && frame_order(frame)) == 1) {
netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n");
return true;
}
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index ed6a488bc7ac..89092cd434de 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -14,7 +14,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
rx_ts_records[ts->num]);
- struct rx_reorder_entry *pReorderEntry = NULL;
+ struct rx_reorder_entry *reorder_entry = NULL;
unsigned long flags = 0;
u8 index = 0;
@@ -23,31 +23,31 @@ static void RxPktPendingTimeout(struct timer_list *t)
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
if (ts->rx_timeout_indicate_seq != 0xffff) {
while (!list_empty(&ts->rx_pending_pkt_list)) {
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
if (index == 0)
- ts->rx_indicate_seq = pReorderEntry->SeqNum;
+ ts->rx_indicate_seq = reorder_entry->seq_num;
- if (SN_LESS(pReorderEntry->SeqNum,
+ if (SN_LESS(reorder_entry->seq_num,
ts->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum,
+ SN_EQUAL(reorder_entry->seq_num,
ts->rx_indicate_seq)) {
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
- if (SN_EQUAL(pReorderEntry->SeqNum,
+ if (SN_EQUAL(reorder_entry->seq_num,
ts->rx_indicate_seq))
ts->rx_indicate_seq =
(ts->rx_indicate_seq + 1) % 4096;
netdev_dbg(ieee->dev,
- "%s(): Indicate SeqNum: %d\n",
- __func__, pReorderEntry->SeqNum);
+ "%s(): Indicate seq_num: %d\n",
+ __func__, reorder_entry->seq_num);
ieee->stats_IndicateArray[index] =
- pReorderEntry->prxb;
+ reorder_entry->prxb;
index++;
- list_add_tail(&pReorderEntry->list,
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
} else {
pkt_in_buf = true;
@@ -225,7 +225,7 @@ static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
}
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
- u8 *addr, u8 TID, enum tr_select tx_rx_select, bool bAddNewTs)
+ u8 *addr, u8 TID, enum tr_select tx_rx_select, bool add_new_ts)
{
u8 UP = 0;
struct qos_tsinfo tspec;
@@ -269,7 +269,7 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
if (*ppTS)
return true;
- if (!bAddNewTs) {
+ if (!add_new_ts) {
netdev_dbg(ieee->dev, "add new TS failed(tid:%d)\n", UP);
return false;
}
@@ -336,8 +336,8 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
- netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
- __func__, pRxReorderEntry->SeqNum);
+ netdev_dbg(ieee->dev, "%s(): Delete seq_num %d!\n",
+ __func__, pRxReorderEntry->seq_num);
list_del_init(&pRxReorderEntry->list);
{
int i = 0;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 022851b7f1a9..d6615f787d53 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -93,21 +93,21 @@ static inline void *netdev_priv_rsl(struct net_device *dev)
#define SUPPORT_CKIP_PK 0x10
#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
#define RT_IN_PS_LEVEL(psc, _PS_FLAG) \
- ((psc->CurPsLevel & _PS_FLAG) ? true : false)
+ ((psc->cur_ps_level & _PS_FLAG) ? true : false)
#define RT_CLEAR_PS_LEVEL(psc, _PS_FLAG) \
- (psc->CurPsLevel &= (~(_PS_FLAG)))
+ (psc->cur_ps_level &= (~(_PS_FLAG)))
/* defined for skb cb field */
/* At most 28 byte */
struct cb_desc {
/* Tx Desc Related flags (8-9) */
- u8 bLastIniPkt:1;
- u8 bCmdOrInit:1;
+ u8 last_ini_pkt:1;
+ u8 cmd_or_init:1;
u8 tx_dis_rate_fallback:1;
u8 tx_use_drv_assinged_rate:1;
u8 hw_sec:1;
- u8 nStuckCount;
+ u8 stuck_count;
/* Tx Firmware Related flags (10-11)*/
u8 cts_enable:1;
@@ -153,20 +153,20 @@ struct cb_desc {
};
enum sw_chnl_cmd_id {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
+ cmd_id_end,
+ cmd_id_set_tx_power_level,
+ cmd_id_bbreg_write10,
+ cmd_id_write_port_ulong,
+ cmd_id_write_port_ushort,
+ cmd_id_write_port_uchar,
+ cmd_id_rf_write_reg,
};
struct sw_chnl_cmd {
- enum sw_chnl_cmd_id CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
+ enum sw_chnl_cmd_id cmd_id;
+ u32 para1;
+ u32 para2;
+ u32 ms_delay;
};
/*--------------------------Define -------------------------------------------*/
@@ -339,12 +339,12 @@ enum rt_op_mode {
#define FC_QOS_BIT BIT(7)
#define is_data_frame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
-#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
+#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0] & FC_QOS_BIT)))
#define is_qos_data_frame(pframe) \
- ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \
- (IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA))
-#define frame_order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800) != 0)
+ ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA)) == \
+ (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA))
+#define frame_order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER)
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
@@ -414,24 +414,13 @@ enum _REG_PREAMBLE_MODE {
#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTLLIB_SCTL_FRAG)
#define WLAN_GET_SEQ_SEQ(seq) (((seq) & RTLLIB_SCTL_SEQ) >> 4)
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-#define WLAN_AUTH_LEAP 128
-
-#define WLAN_CAPABILITY_ESS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
-
-#define RTLLIB_STATMASK_SIGNAL (1<<0)
-#define RTLLIB_STATMASK_RSSI (1<<1)
-#define RTLLIB_STATMASK_NOISE (1<<2)
+#define RTLLIB_STATMASK_SIGNAL (1 << 0)
+#define RTLLIB_STATMASK_RSSI (1 << 1)
+#define RTLLIB_STATMASK_NOISE (1 << 2)
#define RTLLIB_STATMASK_WEMASK 0x7
-#define RTLLIB_CCK_MODULATION (1<<0)
-#define RTLLIB_OFDM_MODULATION (1<<1)
+#define RTLLIB_CCK_MODULATION (1 << 0)
+#define RTLLIB_OFDM_MODULATION (1 << 1)
#define RTLLIB_CCK_RATE_LEN 4
#define RTLLIB_CCK_RATE_1MB 0x02
@@ -475,27 +464,27 @@ struct rtllib_rx_stats {
u8 mask;
u16 len;
u16 Length;
- u8 SignalQuality;
+ u8 signal_quality;
s32 RecvSignalPower;
- u8 SignalStrength;
- u16 bHwError:1;
+ u8 signal_strength;
+ u16 hw_error:1;
u16 bCRC:1;
u16 bICV:1;
u16 decrypted:1;
u32 time_stamp_low;
u32 time_stamp_high;
- u8 RxDrvInfoSize;
- u8 RxBufShift;
+ u8 rx_drv_info_size;
+ u8 rx_buf_shift;
bool bIsAMPDU;
bool bFirstMPDU;
bool contain_htc;
u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4];
+ u8 RxMIMOSignalStrength[2];
s8 RxMIMOSignalQuality[2];
bool bPacketMatchBSSID;
bool bIsCCK;
- bool bPacketToSelf;
+ bool packet_to_self;
bool bPacketBeacon;
bool bToSelfBA;
};
@@ -518,11 +507,11 @@ struct rtllib_frag_entry {
struct rtllib_device;
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
+#define SEC_ACTIVE_KEY (1 << 4)
+#define SEC_AUTH_MODE (1 << 5)
+#define SEC_UNICAST_GROUP (1 << 6)
+#define SEC_LEVEL (1 << 7)
+#define SEC_ENABLED (1 << 8)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -707,17 +696,17 @@ union frameqos {
#define MAX_WPA_IE_LEN 64
#define MAX_WZC_IE_LEN 256
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID (1 << 0)
+#define NETWORK_HAS_OFDM (1 << 1)
+#define NETWORK_HAS_CCK (1 << 2)
/* QoS structure */
-#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
-#define NETWORK_HAS_QOS_INFORMATION (1<<4)
+#define NETWORK_HAS_QOS_PARAMETERS (1 << 3)
+#define NETWORK_HAS_QOS_INFORMATION (1 << 4)
#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
NETWORK_HAS_QOS_INFORMATION)
/* 802.11h */
-#define NETWORK_HAS_ERP_VALUE (1<<10)
+#define NETWORK_HAS_ERP_VALUE (1 << 10)
#define QOS_QUEUE_NUM 4
#define QOS_OUI_LEN 3
@@ -962,7 +951,7 @@ struct rtllib_network {
bool unknown_cap_exist;
bool berp_info_valid;
bool buseprotection;
- u8 SignalStrength;
+ u8 signal_strength;
u8 RSSI;
struct list_head list;
};
@@ -1007,8 +996,8 @@ enum rtl_link_state {
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
#define DEFAULT_FTS 2346
-#define CFG_RTLLIB_RESERVE_FCS (1<<0)
-#define CFG_RTLLIB_COMPUTE_FCS (1<<1)
+#define CFG_RTLLIB_RESERVE_FCS (1 << 0)
+#define CFG_RTLLIB_COMPUTE_FCS (1 << 1)
struct tx_pending {
int frag;
@@ -1026,7 +1015,7 @@ struct bandwidth_autoswitch {
#define REORDER_ENTRY_NUM 128
struct rx_reorder_entry {
struct list_head list;
- u16 SeqNum;
+ u16 seq_num;
struct rtllib_rxb *prxb;
};
@@ -1057,7 +1046,7 @@ struct rt_pwr_save_ctrl {
u8 lps_idle_count;
u8 lps_awake_intvl;
- u32 CurPsLevel;
+ u32 cur_ps_level;
};
#define RT_RF_CHANGE_SOURCE u32
@@ -1299,7 +1288,7 @@ struct rtllib_device {
u16 scan_watch_dog;
/* map of allowed channels. 0 is dummy */
- u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
+ u8 active_channel_map[MAX_CHANNEL_NUMBER + 1];
int rate; /* current rate */
int basic_rate;
@@ -1471,9 +1460,9 @@ struct rtllib_device {
void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode);
bool (*get_half_nmode_support_by_aps_handler)(struct net_device *dev);
u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
- void (*init_gain_handler)(struct net_device *dev, u8 Operation);
- void (*ScanOperationBackupHandler)(struct net_device *dev,
- u8 Operation);
+ void (*init_gain_handler)(struct net_device *dev, u8 operation);
+ void (*scan_operation_backup_handler)(struct net_device *dev,
+ u8 operation);
void (*set_hw_reg_handler)(struct net_device *dev, u8 variable, u8 *val);
void (*allow_all_dest_addr_handler)(struct net_device *dev,
@@ -1497,32 +1486,32 @@ struct rtllib_device {
/* Uses the channel change callback directly
* instead of [start/stop] scan callbacks
*/
-#define IEEE_SOFTMAC_SCAN (1<<2)
+#define IEEE_SOFTMAC_SCAN (1 << 2)
/* Perform authentication and association handshake */
-#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
+#define IEEE_SOFTMAC_ASSOCIATE (1 << 3)
/* Generate probe requests */
-#define IEEE_SOFTMAC_PROBERQ (1<<4)
+#define IEEE_SOFTMAC_PROBERQ (1 << 4)
/* Generate response to probe requests */
-#define IEEE_SOFTMAC_PROBERS (1<<5)
+#define IEEE_SOFTMAC_PROBERS (1 << 5)
/* The ieee802.11 stack will manage the netif queue
* wake/stop for the driver, taking care of 802.11
* fragmentation. See softmac.c for details.
*/
-#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
+#define IEEE_SOFTMAC_TX_QUEUE (1 << 7)
/* Uses only the softmac_data_hard_start_xmit
* even for TX management frames.
*/
-#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
+#define IEEE_SOFTMAC_SINGLE_QUEUE (1 << 8)
/* Generate beacons. The stack will enqueue beacons
* to the card
*/
-#define IEEE_SOFTMAC_BEACONS (1<<6)
+#define IEEE_SOFTMAC_BEACONS (1 << 6)
static inline void *rtllib_priv(struct net_device *dev)
{
@@ -1737,21 +1726,21 @@ void ht_set_connect_bw_mode(struct rtllib_device *ieee,
void ht_update_default_setting(struct rtllib_device *ieee);
void ht_construct_capability_element(struct rtllib_device *ieee,
u8 *pos_ht_cap, u8 *len,
- u8 isEncrypt, bool bAssoc);
+ u8 is_encrypt, bool assoc);
void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee,
u8 *posRT2RTAgg, u8 *len);
void ht_on_assoc_rsp(struct rtllib_device *ieee);
void ht_initialize_ht_info(struct rtllib_device *ieee);
void ht_initialize_bss_desc(struct bss_ht *bss_ht);
void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+ struct rtllib_network *network);
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+ struct rtllib_network *network);
u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77];
-u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
+u8 ht_c_check(struct rtllib_device *ieee, u8 *frame);
void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
@@ -1768,7 +1757,7 @@ void rtllib_tx_ba_inact_timeout(struct timer_list *t);
void rtllib_rx_ba_inact_timeout(struct timer_list *t);
void rtllib_reset_ba_entry(struct ba_record *ba);
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr,
- u8 TID, enum tr_select tx_rx_select, bool bAddNewTs);
+ u8 TID, enum tr_select tx_rx_select, bool add_new_ts);
void rtllib_ts_init(struct rtllib_device *ieee);
void rtllib_ts_start_add_ba_process(struct rtllib_device *ieee,
struct tx_ts_record *pTxTS);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 74dc8326c886..e544379bfa91 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -637,12 +637,6 @@ static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv)
if (seq) {
/* Return the sequence number of the last transmitted frame. */
- u16 iv16 = tkey->tx_iv16;
- u32 iv32 = tkey->tx_iv32;
-
- if (iv16 == 0)
- iv32--;
- iv16--;
seq[0] = tkey->tx_iv16;
seq[1] = tkey->tx_iv16 >> 8;
seq[2] = tkey->tx_iv32;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 84ca5d769b7e..8fe224a83dd6 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -403,26 +403,26 @@ drop:
}
static bool add_reorder_entry(struct rx_ts_record *ts,
- struct rx_reorder_entry *pReorderEntry)
+ struct rx_reorder_entry *reorder_entry)
{
struct list_head *list = &ts->rx_pending_pkt_list;
while (list->next != &ts->rx_pending_pkt_list) {
- if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
+ if (SN_LESS(reorder_entry->seq_num, ((struct rx_reorder_entry *)
list_entry(list->next, struct rx_reorder_entry,
- list))->SeqNum))
+ list))->seq_num))
list = list->next;
- else if (SN_EQUAL(pReorderEntry->SeqNum,
+ else if (SN_EQUAL(reorder_entry->seq_num,
((struct rx_reorder_entry *)list_entry(list->next,
- struct rx_reorder_entry, list))->SeqNum))
+ struct rx_reorder_entry, list))->seq_num))
return false;
else
break;
}
- pReorderEntry->list.next = list->next;
- pReorderEntry->list.next->prev = &pReorderEntry->list;
- pReorderEntry->list.prev = list;
- list->next = &pReorderEntry->list;
+ reorder_entry->list.next = list->next;
+ reorder_entry->list.next->prev = &reorder_entry->list;
+ reorder_entry->list.prev = list;
+ list->next = &reorder_entry->list;
return true;
}
@@ -504,8 +504,8 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
- netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
- pRxReorderEntry->SeqNum);
+ netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n", __func__,
+ pRxReorderEntry->seq_num);
list_del_init(&pRxReorderEntry->list);
ieee->rfd_array[rfd_cnt] = pRxReorderEntry->prxb;
@@ -521,10 +521,10 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
struct rtllib_rxb *prxb,
- struct rx_ts_record *ts, u16 SeqNum)
+ struct rx_ts_record *ts, u16 seq_num)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- struct rx_reorder_entry *pReorderEntry = NULL;
+ struct rx_reorder_entry *reorder_entry = NULL;
u8 win_size = ht_info->rx_reorder_win_size;
u16 win_end = 0;
u8 index = 0;
@@ -533,20 +533,20 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
netdev_dbg(ieee->dev,
"%s(): Seq is %d, ts->rx_indicate_seq is %d, win_size is %d\n",
- __func__, SeqNum, ts->rx_indicate_seq, win_size);
+ __func__, seq_num, ts->rx_indicate_seq, win_size);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
win_end = (ts->rx_indicate_seq + win_size - 1) % 4096;
/* Rx Reorder initialize condition.*/
if (ts->rx_indicate_seq == 0xffff)
- ts->rx_indicate_seq = SeqNum;
+ ts->rx_indicate_seq = seq_num;
- /* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(SeqNum, ts->rx_indicate_seq)) {
+ /* Drop out the packet which seq_num is smaller than WinStart */
+ if (SN_LESS(seq_num, ts->rx_indicate_seq)) {
netdev_dbg(ieee->dev,
"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
ht_info->rx_reorder_drop_counter++;
{
int i;
@@ -561,62 +561,62 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
}
/* Sliding window manipulation. Conditions includes:
- * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
- * 2. Incoming SeqNum is larger than the win_end => Window shift N
+ * 1. Incoming seq_num is equal to WinStart =>Window shift 1
+ * 2. Incoming seq_num is larger than the win_end => Window shift N
*/
- if (SN_EQUAL(SeqNum, ts->rx_indicate_seq)) {
+ if (SN_EQUAL(seq_num, ts->rx_indicate_seq)) {
ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096;
match_win_start = true;
- } else if (SN_LESS(win_end, SeqNum)) {
- if (SeqNum >= (win_size - 1))
- ts->rx_indicate_seq = SeqNum + 1 - win_size;
+ } else if (SN_LESS(win_end, seq_num)) {
+ if (seq_num >= (win_size - 1))
+ ts->rx_indicate_seq = seq_num + 1 - win_size;
else
ts->rx_indicate_seq = 4095 -
- (win_size - (SeqNum + 1)) + 1;
+ (win_size - (seq_num + 1)) + 1;
netdev_dbg(ieee->dev,
"Window Shift! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
}
/* Indication process.
* After Packet dropping and Sliding Window shifting as above, we can
- * now just indicate the packets with the SeqNum smaller than latest
+ * now just indicate the packets with the seq_num smaller than latest
* WinStart and struct buffer other packets.
*
* For Rx Reorder condition:
- * 1. All packets with SeqNum smaller than WinStart => Indicate
- * 2. All packets with SeqNum larger than or equal to
+ * 1. All packets with seq_num smaller than WinStart => Indicate
+ * 2. All packets with seq_num larger than or equal to
* WinStart => Buffer it.
*/
if (match_win_start) {
/* Current packet is going to be indicated.*/
netdev_dbg(ieee->dev,
"Packets indication! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
ieee->prxb_indicate_array[0] = prxb;
index = 1;
} else {
/* Current packet is going to be inserted into pending list.*/
if (!list_empty(&ieee->RxReorder_Unused_List)) {
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ieee->RxReorder_Unused_List.next,
struct rx_reorder_entry, list);
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
/* Make a reorder entry and insert
* into a the packet list.
*/
- pReorderEntry->SeqNum = SeqNum;
- pReorderEntry->prxb = prxb;
+ reorder_entry->seq_num = seq_num;
+ reorder_entry->prxb = prxb;
- if (!add_reorder_entry(ts, pReorderEntry)) {
+ if (!add_reorder_entry(ts, reorder_entry)) {
int i;
netdev_dbg(ieee->dev,
"%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
__func__, ts->rx_indicate_seq,
- SeqNum);
- list_add_tail(&pReorderEntry->list,
+ seq_num);
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
for (i = 0; i < prxb->nr_subframes; i++)
@@ -626,7 +626,7 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
} else {
netdev_dbg(ieee->dev,
"Pkt insert into struct buffer. IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
}
} else {
/* Packets are dropped if there are not enough reorder
@@ -653,12 +653,12 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s(): start RREORDER indicate\n",
__func__);
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry,
list);
- if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) {
+ if (SN_LESS(reorder_entry->seq_num, ts->rx_indicate_seq) ||
+ SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) {
/* This protect struct buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
netdev_err(ieee->dev,
@@ -668,18 +668,18 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
break;
}
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
- if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq))
+ if (SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq))
ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) %
4096;
- ieee->prxb_indicate_array[index] = pReorderEntry->prxb;
- netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n",
- __func__, pReorderEntry->SeqNum);
+ ieee->prxb_indicate_array[index] = reorder_entry->prxb;
+ netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n",
+ __func__, reorder_entry->seq_num);
index++;
- list_add_tail(&pReorderEntry->list,
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
} else {
pkt_in_buf = true;
@@ -729,12 +729,12 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
u16 llc_offset = sizeof(struct ieee80211_hdr_3addr);
bool is_aggregate_frame = false;
- u16 nSubframe_Length;
+ u16 subframe_len;
u8 pad_len = 0;
- u16 SeqNum = 0;
+ u16 seq_num = 0;
struct sk_buff *sub_skb;
/* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
+ seq_num = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
(((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
is_aggregate_frame = true;
@@ -781,23 +781,23 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
memcpy(rxb->dst, dst, ETH_ALEN);
while (skb->len > ETHERNET_HEADER_SIZE) {
/* Offset 12 denote 2 mac address */
- nSubframe_Length = *((u16 *)(skb->data + 12));
- nSubframe_Length = (nSubframe_Length >> 8) +
- (nSubframe_Length << 8);
+ subframe_len = *((u16 *)(skb->data + 12));
+ subframe_len = (subframe_len >> 8) +
+ (subframe_len << 8);
- if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ if (skb->len < (ETHERNET_HEADER_SIZE + subframe_len)) {
netdev_info(ieee->dev,
"%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
__func__, rxb->nr_subframes);
netdev_info(ieee->dev,
"%s: A-MSDU parse error!! Subframe Length: %d\n",
- __func__, nSubframe_Length);
+ __func__, subframe_len);
netdev_info(ieee->dev,
- "nRemain_Length is %d and nSubframe_Length is : %d\n",
- skb->len, nSubframe_Length);
+ "nRemain_Length is %d and subframe_len is : %d\n",
+ skb->len, subframe_len);
netdev_info(ieee->dev,
- "The Packet SeqNum is %d\n",
- SeqNum);
+ "The Packet seq_num is %d\n",
+ seq_num);
return 0;
}
@@ -813,11 +813,11 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
*/
/* Allocate new skb for releasing to upper layer */
- sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ sub_skb = dev_alloc_skb(subframe_len + 12);
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, skb->data, nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, subframe_len);
sub_skb->dev = ieee->dev;
rxb->subframes[rxb->nr_subframes++] = sub_skb;
@@ -826,10 +826,10 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
"ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
- skb_pull(skb, nSubframe_Length);
+ skb_pull(skb, subframe_len);
if (skb->len != 0) {
- pad_len = 4 - ((nSubframe_Length +
+ pad_len = 4 - ((subframe_len +
ETHERNET_HEADER_SIZE) % 4);
if (pad_len == 4)
pad_len = 0;
@@ -1227,7 +1227,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
struct rx_ts_record *ts = NULL;
- u16 fc, sc, SeqNum = 0;
+ u16 fc, sc, seq_num = 0;
u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@@ -1321,7 +1321,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
if (ieee->current_network.qos_data.active && is_qos_data_frame(skb->data)
&& !is_multicast_ether_addr(hdr->addr1)) {
TID = frame_qos_tid(skb->data);
- SeqNum = WLAN_GET_SEQ_SEQ(sc);
+ seq_num = WLAN_GET_SEQ_SEQ(sc);
rtllib_get_ts(ieee, (struct ts_common_info **)&ts, hdr->addr2, TID,
RX_DIR, true);
if (TID != 0 && TID != 3)
@@ -1362,7 +1362,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
if (!ieee->ht_info->cur_rx_reorder_enable || !ts)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
- rx_reorder_indicate_packet(ieee, rxb, ts, SeqNum);
+ rx_reorder_indicate_packet(ieee, rxb, ts, seq_num);
dev_kfree_skb(skb);
@@ -2177,8 +2177,8 @@ static inline int rtllib_network_init(
network->marvell_cap_exist = false;
network->airgo_cap_exist = false;
network->turbo_enable = 0;
- network->SignalStrength = stats->SignalStrength;
- network->RSSI = stats->SignalStrength;
+ network->signal_strength = stats->signal_strength;
+ network->RSSI = stats->signal_strength;
network->country_ie_len = 0;
memset(network->country_ie_buf, 0, MAX_IE_LEN);
ht_initialize_bss_desc(&network->bssht);
@@ -2215,7 +2215,7 @@ static inline int rtllib_network_init(
}
if (rtllib_is_empty_essid(network->ssid, network->ssid_len))
network->flags |= NETWORK_EMPTY_ESSID;
- stats->signal = 30 + (stats->SignalStrength * 70) / 100;
+ stats->signal = 30 + (stats->signal_strength * 70) / 100;
stats->noise = rtllib_translate_todbm((u8)(100 - stats->signal)) - 25;
memcpy(&network->stats, stats, sizeof(network->stats));
@@ -2334,7 +2334,7 @@ static inline void update_network(struct rtllib_device *ieee,
src->wmm_param[3].ac_aci_acm_aifsn)
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
- dst->SignalStrength = src->SignalStrength;
+ dst->signal_strength = src->signal_strength;
dst->RSSI = src->RSSI;
dst->turbo_enable = src->turbo_enable;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 11542aea4a20..c59686d68a33 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -314,7 +314,7 @@ void rtllib_wx_sync_scan_wq(void *data)
/* wait for ps packet to be kicked out successfully */
msleep(50);
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP);
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht &&
ieee->ht_info->cur_bw_40mhz) {
@@ -339,7 +339,7 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->set_chan(ieee->dev, chan);
}
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE);
ieee->link_state = MAC80211_LINKED;
ieee->link_change(ieee->dev);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 1fabc5137a4c..ab344d676bb9 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -136,10 +136,6 @@ void r8712_free_recvframe(union recv_frame *precvframe,
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
struct recv_stat *prxstat)
{
- u16 drvinfo_sz;
-
- drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
- drvinfo_sz <<= 3;
/*TODO:
* Offset 0
*/
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index bbd4a13c7bb9..ffeb91dd28c4 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -528,8 +528,9 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
if (unicast_key)
memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16);
else
- memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].
- skey, 16);
+ memcpy(&psetstakey_para->key,
+ &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].skey,
+ 16);
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 2613b3c2acfc..268844af57f0 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -736,7 +736,7 @@ void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj
void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
u32 tryPktInterval, u32 firstStageTO);
struct _cmd_callback {
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index 6789a4c98564..20e080e284dd 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -48,8 +48,8 @@ static uint _init_intf_hdl(struct _adapter *padapter,
set_intf_funs = &(r8712_usb_set_intf_funs);
set_intf_ops = &r8712_usb_set_intf_ops;
init_intf_priv = &r8712_usb_init_intf_priv;
- pintf_priv = pintf_hdl->pintfpriv = kmalloc(sizeof(struct intf_priv),
- GFP_ATOMIC);
+ pintf_priv = kmalloc(sizeof(*pintf_priv), GFP_ATOMIC);
+ pintf_hdl->pintfpriv = pintf_priv;
if (!pintf_priv)
goto _init_intf_hdl_fail;
pintf_hdl->adapter = (u8 *)padapter;
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 0a3451cdc8a1..4a34824830e3 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -221,7 +221,7 @@ static void r8712_usb_read_port_complete(struct urb *purb)
fallthrough;
case -EPROTO:
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
- (unsigned char *)precvbuf);
+ (unsigned char *)precvbuf);
break;
case -EINPROGRESS:
netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n");
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index f23e29b679fb..8d48c61961a6 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -3,7 +3,6 @@ config RTL8723BS
tristate "Realtek RTL8723BS SDIO Wireless LAN NIC driver"
depends on WLAN && MMC && CFG80211
depends on m
- select CFG80211_WEXT
select CRYPTO
select CRYPTO_LIB_ARC4
help
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index 7f5067e89295..ba200ee669f3 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -3,7 +3,6 @@ r8723bs-y = \
core/rtw_ap.o \
core/rtw_btcoex.o \
core/rtw_cmd.o \
- core/rtw_debug.o \
core/rtw_efuse.o \
core/rtw_io.o \
core/rtw_ioctl_set.o \
@@ -12,7 +11,6 @@ r8723bs-y = \
core/rtw_mlme_ext.o \
core/rtw_pwrctrl.o \
core/rtw_recv.o \
- core/rtw_rf.o \
core/rtw_security.o \
core/rtw_sta_mgt.o \
core/rtw_wlan_util.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index e4063713fecc..e55b4f7e0aef 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <asm/unaligned.h>
void init_mlme_ap_info(struct adapter *padapter)
@@ -277,7 +276,7 @@ void expire_timeout_chk(struct adapter *padapter)
/* switch to correct channel of current network before issue keep-alive frames */
if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
backup_oper_channel = rtw_get_oper_ch(padapter);
- SelectChannel(padapter, pmlmeext->cur_channel);
+ r8723bs_select_channel(padapter, pmlmeext->cur_channel);
}
/* issue null data to check sta alive*/
@@ -315,7 +314,7 @@ void expire_timeout_chk(struct adapter *padapter)
}
if (backup_oper_channel > 0) /* back to the original operation channel */
- SelectChannel(padapter, backup_oper_channel);
+ r8723bs_select_channel(padapter, backup_oper_channel);
}
associated_clients_update(padapter, updated);
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 62cbf84b079a..d54095f50113 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_btcoex.h>
#include <hal_btcoex.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index d3f10a3cf972..84ce7307d8f3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
@@ -1884,9 +1883,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
/* copy pdev_network information to pmlmepriv->cur_network */
memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
- /* reset ds_config */
- /* tgt_network->network.configuration.ds_config = (u32)rtw_ch2freq(pnetwork->configuration.ds_config); */
-
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
deleted file mode 100644
index 5354fdd11c9b..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtw_debug.h>
-#include <hal_btcoex.h>
-
-#include <rtw_version.h>
-
-static void dump_4_regs(struct adapter *adapter, int offset)
-{
- u32 reg[4];
- int i;
-
- for (i = 0; i < 4; i++)
- reg[i] = rtw_read32(adapter, offset + i);
-
- netdev_dbg(adapter->pnetdev, "0x%03x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, reg[0], reg[1], reg[2], reg[3]);
-}
-
-void mac_reg_dump(struct adapter *adapter)
-{
- int i;
-
- netdev_dbg(adapter->pnetdev, "======= MAC REG =======\n");
-
- for (i = 0x0; i < 0x800; i += 4)
- dump_4_regs(adapter, i);
-}
-
-void bb_reg_dump(struct adapter *adapter)
-{
- int i;
-
- netdev_dbg(adapter->pnetdev, "======= BB REG =======\n");
-
- for (i = 0x800; i < 0x1000 ; i += 4)
- dump_4_regs(adapter, i);
-}
-
-static void dump_4_rf_regs(struct adapter *adapter, int path, int offset)
-{
- u8 reg[4];
- int i;
-
- for (i = 0; i < 4; i++)
- reg[i] = rtw_hal_read_rfreg(adapter, path, offset + i,
- 0xffffffff);
-
- netdev_dbg(adapter->pnetdev, "0x%02x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, reg[0], reg[1], reg[2], reg[3]);
-}
-
-void rf_reg_dump(struct adapter *adapter)
-{
- int i, path = 0;
-
- netdev_dbg(adapter->pnetdev, "======= RF REG =======\n");
-
- netdev_dbg(adapter->pnetdev, "RF_Path(%x)\n", path);
- for (i = 0; i < 0x100; i++)
- dump_4_rf_regs(adapter, path, i);
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index eb848f9bbf2c..8b671f8a7965 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/jiffies.h>
@@ -38,7 +37,7 @@ Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
if (fakeEfuseBank == 0)
*Value = fakeEfuseContent[Offset];
else
- *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset];
+ *Value = fakeBTEfuseContent[fakeEfuseBank - 1][Offset];
return true;
}
@@ -50,7 +49,7 @@ Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value)
if (fakeEfuseBank == 0)
fakeEfuseContent[Offset] = Value;
else
- fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
+ fakeBTEfuseContent[fakeEfuseBank - 1][Offset] = Value;
return true;
}
@@ -206,21 +205,21 @@ u16 Address)
if (Address < contentLen) {/* E-fuse 512Byte */
/* Write E-fuse Register address bit0~7 */
temp = Address & 0xFF;
- rtw_write8(Adapter, EFUSE_CTRL+1, temp);
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ rtw_write8(Adapter, EFUSE_CTRL + 1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 2);
/* Write E-fuse Register address bit8~9 */
temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
- rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+ rtw_write8(Adapter, EFUSE_CTRL + 2, temp);
/* Write 0x30[31]= 0 */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
temp = Bytetemp & 0x7F;
- rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+ rtw_write8(Adapter, EFUSE_CTRL + 3, temp);
/* Wait Write-ready (0x30[31]= 1) */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
while (!(Bytetemp & 0x80)) {
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
k++;
if (k == 1000)
break;
@@ -253,16 +252,16 @@ bool bPseudoTest)
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
- rtw_write8(padapter, EFUSE_CTRL+1, (u8)(addr&0xff));
- rtw_write8(padapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
- (rtw_read8(padapter, EFUSE_CTRL+2)&0xFC));
+ rtw_write8(padapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
+ rtw_write8(padapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) |
+ (rtw_read8(padapter, EFUSE_CTRL + 2) & 0xFC));
/* rtw_write8(padapter, EFUSE_CTRL+3, 0x72); read cmd */
/* Write bit 32 0 */
- readbyte = rtw_read8(padapter, EFUSE_CTRL+3);
- rtw_write8(padapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+ readbyte = rtw_read8(padapter, EFUSE_CTRL + 3);
+ rtw_write8(padapter, EFUSE_CTRL + 3, (readbyte & 0x7f));
- while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 1000)) {
+ while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 1000)) {
mdelay(1);
tmpidx++;
}
@@ -282,31 +281,22 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
{
u8 tmpidx = 0;
u8 bResult = false;
- u32 efuseValue;
if (bPseudoTest)
return Efuse_Write1ByteToFakeContent(addr, data);
-
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
-
- efuseValue = rtw_read32(padapter, EFUSE_CTRL);
- efuseValue |= (BIT21|BIT31);
- efuseValue &= ~(0x3FFFF);
- efuseValue |= ((addr<<8 | data) & 0x3FFFF);
-
-
/* <20130227, Kordan> 8192E MP chip A-cut had better not set 0x34[11] until B-Cut. */
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
/* PHY_SetMacReg(padapter, 0x34, BIT11, 1); */
rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) | (BIT11));
- rtw_write32(padapter, EFUSE_CTRL, 0x90600000|((addr<<8 | data)));
+ rtw_write32(padapter, EFUSE_CTRL, 0x90600000 | ((addr << 8 | data)));
- while ((0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 100)) {
+ while ((0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 100)) {
mdelay(1);
tmpidx++;
}
@@ -365,19 +355,19 @@ efuse_WordEnableDataRead(u8 word_en,
u8 *sourdata,
u8 *targetdata)
{
- if (!(word_en&BIT(0))) {
+ if (!(word_en & BIT(0))) {
targetdata[0] = sourdata[0];
targetdata[1] = sourdata[1];
}
- if (!(word_en&BIT(1))) {
+ if (!(word_en & BIT(1))) {
targetdata[2] = sourdata[2];
targetdata[3] = sourdata[3];
}
- if (!(word_en&BIT(2))) {
+ if (!(word_en & BIT(2))) {
targetdata[4] = sourdata[4];
targetdata[5] = sourdata[5];
}
- if (!(word_en&BIT(3))) {
+ if (!(word_en & BIT(3))) {
targetdata[6] = sourdata[6];
targetdata[7] = sourdata[7];
}
@@ -463,7 +453,7 @@ static void efuse_ShadowRead2Byte(struct adapter *padapter, u16 Offset, u16 *Val
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
} /* EFUSE_ShadowRead2Byte */
@@ -473,9 +463,9 @@ static void efuse_ShadowRead4Byte(struct adapter *padapter, u16 Offset, u32 *Val
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 2] << 16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 3] << 24;
} /* efuse_ShadowRead4Byte */
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index b89e88d6a82d..5a76069a8222 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/of.h>
#include <asm/unaligned.h>
@@ -55,7 +54,9 @@ static u8 WIFI_OFDMRATES[] = {
int rtw_get_bit_value_from_ieee_value(u8 val)
{
- unsigned char dot11_rate_table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */
+ static const unsigned char dot11_rate_table[] = {
+ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0
+ }; /* last element must be zero!! */
int i = 0;
while (dot11_rate_table[i] != 0) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index 4d3c30ec93b5..fcda9db6ebb5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -26,7 +26,6 @@ jackson@realtek.com.tw
*/
#include <drv_types.h>
-#include <rtw_debug.h>
u8 rtw_read8(struct adapter *adapter, u32 addr)
{
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index 3b44f0dd5b0a..587a87fbffeb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
u8 rtw_validate_bssid(u8 *bssid)
{
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 8c487b7b7a40..cbdb134278d3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 9ebf25a0ef9b..bbdd5fce28a1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
#include <hal_btcoex.h>
#include <linux/kernel.h>
@@ -628,7 +627,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
netdev_dbg(padapter->pnetdev,
- "ap has changed, disconnect now\n ");
+ "ap has changed, disconnect now\n");
receive_disconnect(padapter,
pmlmeinfo->network.mac_address, 0);
return _SUCCESS;
@@ -3831,10 +3830,10 @@ void site_survey(struct adapter *padapter)
} else {
#ifdef DBG_FIXED_CHAN
if (pmlmeext->fixed_chan != 0xff)
- SelectChannel(padapter, pmlmeext->fixed_chan);
+ r8723bs_select_channel(padapter, pmlmeext->fixed_chan);
else
#endif
- SelectChannel(padapter, survey_channel);
+ r8723bs_select_channel(padapter, survey_channel);
}
if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index e9763eab16f6..dbfcbac3d855 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/jiffies.h>
@@ -285,14 +284,12 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
if (rpwm & PS_ACK) {
unsigned long start_time;
u8 cpwm_now;
- u8 poll_cnt = 0;
start_time = jiffies;
/* polling cpwm */
do {
mdelay(1);
- poll_cnt++;
rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_now);
if ((cpwm_orig ^ cpwm_now) & 0x80) {
pwrpriv->cpwm = PS_STATE_S4;
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 0eadc23a7d54..b30f026789b6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_recv.h>
#include <net/cfg80211.h>
@@ -2027,12 +2026,9 @@ static int recv_func(struct adapter *padapter, union recv_frame *rframe)
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
union recv_frame *pending_frame;
- int cnt = 0;
- while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
- cnt++;
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue)))
recv_func_posthandle(padapter, pending_frame);
- }
}
ret = recv_func_prehandle(padapter, rframe);
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
deleted file mode 100644
index 4f120c894998..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <linux/kernel.h>
-
-static const u32 ch_freq_map[] = {
- 2412,
- 2417,
- 2422,
- 2427,
- 2432,
- 2437,
- 2442,
- 2447,
- 2452,
- 2457,
- 2462,
- 2467,
- 2472,
- 2484
-};
-
-u32 rtw_ch2freq(u32 channel)
-{
- if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
- return 2412;
-
- return ch_freq_map[channel - 1];
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 7ecdaa2eeaf3..1e9eff01b1aa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <linux/crc32.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <crypto/aes.h>
static const char * const _security_type_str[] = {
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 0145c4da5ac0..1b72f2196a1c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
void _rtw_init_stainfo(struct sta_info *psta);
void _rtw_init_stainfo(struct sta_info *psta)
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 7fac9ca3e9a0..f37fec1efaf9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_com_h2c.h>
static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
@@ -333,7 +332,7 @@ inline unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter)
return 0;
}
-void SelectChannel(struct adapter *padapter, unsigned char channel)
+void r8723bs_select_channel(struct adapter *padapter, unsigned char channel)
{
if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
return;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b1965ec0181f..3e88f14e3bf7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
@@ -45,7 +44,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
init_completion(&pxmitpriv->terminate_xmitthread_comp);
/*
- * Please insert all the queue initializaiton using _rtw_init_queue below
+ * Please insert all the queue initialization using _rtw_init_queue below
*/
pxmitpriv->adapter = padapter;
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 22e33b97800d..81149ab81904 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include "odm_precomp.h"
/* MACRO definition for pRFCalibrateInfo->TxIQC_8723B[0] */
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
index 5f9e94a7e6ad..86404b5e6c52 100644
--- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -21,7 +21,6 @@ Major Change History:
--*/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <HalPwrSeqCmd.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index e26b789b9cdd..b72cf520d576 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <hal_data.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <Mp_Precomp.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 852232102433..719dd116d807 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include "hal_com_h2c.h"
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 3e814a15e893..d5649e7d8f99 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 7e3db8d3c910..0a3900548fd2 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
void rtw_hal_chip_configure(struct adapter *padapter)
@@ -160,12 +159,6 @@ void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariab
padapter->HalFunc.SetHalODMVarHandler(padapter, eVariable, pValue1, bSet);
}
-void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2)
-{
- if (padapter->HalFunc.GetHalODMVarHandler)
- padapter->HalFunc.GetHalODMVarHandler(padapter, eVariable, pValue1, pValue2);
-}
-
void rtw_hal_enable_interrupt(struct adapter *padapter)
{
if (padapter->HalFunc.enable_interrupt)
diff --git a/drivers/staging/rtl8723bs/hal/hal_sdio.c b/drivers/staging/rtl8723bs/hal/hal_sdio.c
index 9de62a0f5d35..665c85eccbdf 100644
--- a/drivers/staging/rtl8723bs/hal/hal_sdio.c
+++ b/drivers/staging/rtl8723bs/hal/hal_sdio.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
u8 rtw_hal_sdio_max_txoqt_free_space(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index d1ac2f44939c..56526056dd1d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index 2028791988e7..d1c875cf8e6d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -8,7 +8,6 @@
/* This file is for 92CE/92CU dynamic mechanism only */
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/* Global var */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 7a5c3a98183b..37ebbbf408ec 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -8,7 +8,6 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 7764896a04ea..4ff092b7c9c9 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/**
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 74e75dc970f7..28c914ec2604 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
static void initrecvbuf(struct recv_buf *precvbuf, struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 15810438a472..78298e63edce 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index c9cd6578f7f8..d3aae413fc0f 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
@@ -380,8 +379,8 @@ static void _InitWMACSetting(struct adapter *padapter)
rtw_write32(padapter, REG_RCR, pHalData->ReceiveConfig);
/* Accept all multicast address */
- rtw_write32(padapter, REG_MAR, 0xFFFFFFFF);
- rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF);
+ rtw_write32(padapter, REG_MAR, 0xFFFFFFFF); /* Offset 0x0620-0x0623 */
+ rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF); /* Offset 0x0624-0x0627 */
/* Accept all data frames */
value16 = 0xFFFF;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 107f427ee4aa..21e9f1858745 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -5,7 +5,6 @@
*
*******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/* */
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 9e6ca1dec525..0b35c97843cc 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -452,14 +452,7 @@ struct adapter {
#define DF_RX_BIT BIT1
#define DF_IO_BIT BIT2
-/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */
/* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */
-static inline void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
-{
- int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
- df |= func_bit;
- atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
-}
static inline void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
{
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 5cffab2d06ff..efdd1f912b5d 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -301,7 +301,6 @@ u8 rtw_hal_set_def_var(struct adapter *padapter, enum hal_def_variable eVariable
u8 rtw_hal_get_def_var(struct adapter *padapter, enum hal_def_variable eVariable, void *pValue);
void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet);
-void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2);
void rtw_hal_enable_interrupt(struct adapter *padapter);
void rtw_hal_disable_interrupt(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
index 5e43cc89f535..b93d74a5b9a5 100644
--- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
+++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
@@ -101,7 +101,7 @@
{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
- {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index cf96b5f7a776..b21267d7ef72 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -81,9 +81,7 @@ static inline void thread_enter(char *name)
static inline void flush_signals_thread(void)
{
if (signal_pending(current))
- {
flush_signals(current);
- }
}
#define rtw_warn_on(condition) WARN_ON(condition)
@@ -102,7 +100,7 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par
#define MAC_ARG(x) (x)
#endif
-extern void rtw_free_netdev(struct net_device * netdev);
+extern void rtw_free_netdev(struct net_device *netdev);
/* Macros for handling unaligned memory accesses */
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 188ed7e26550..2ec54f9e180c 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -7,43 +7,41 @@
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_
- #include <linux/spinlock.h>
- #include <linux/compiler.h>
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/init.h>
- #include <linux/slab.h>
- #include <linux/module.h>
- #include <linux/kref.h>
- /* include <linux/smp_lock.h> */
- #include <linux/netdevice.h>
- #include <linux/skbuff.h>
- #include <linux/uaccess.h>
- #include <asm/byteorder.h>
- #include <linux/atomic.h>
- #include <linux/io.h>
- #include <linux/sem.h>
- #include <linux/sched.h>
- #include <linux/etherdevice.h>
- #include <linux/wireless.h>
- #include <net/iw_handler.h>
- #include <linux/if_arp.h>
- #include <linux/rtnetlink.h>
- #include <linux/delay.h>
- #include <linux/interrupt.h> /* for struct tasklet_struct */
- #include <linux/ip.h>
- #include <linux/kthread.h>
- #include <linux/list.h>
- #include <linux/vmalloc.h>
-
-/* #include <linux/ieee80211.h> */
- #include <net/ieee80211_radiotap.h>
- #include <net/cfg80211.h>
-
- struct __queue {
- struct list_head queue;
- spinlock_t lock;
- };
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
+#include <linux/sem.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h> /* for struct tasklet_struct */
+#include <linux/ip.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/cfg80211.h>
+
+struct __queue {
+ struct list_head queue;
+ spinlock_t lock;
+};
static inline struct list_head *get_next(struct list_head *list)
{
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index f9ecd9047d52..e6d6e9de5474 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -38,7 +38,7 @@ struct rt_firmware {
/* This structure must be carefully byte-ordered. */
struct rt_firmware_hdr {
- /* 8-byte alinment required */
+ /* 8-byte alignment required */
/* LONG WORD 0 ---- */
__le16 signature; /* 92C0: test chip; 92C, 88C0: test chip;
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index fe1b03101203..cb44119ce9a9 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -516,10 +516,6 @@ struct drvextra_cmd_parm {
/*------------------- Below are used for RF/BB tuning ---------------------*/
-struct getcountjudge_rsp {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
struct addBaReq_parm {
unsigned int tid;
u8 addr[ETH_ALEN];
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
deleted file mode 100644
index 7f96ff66915f..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-void mac_reg_dump(struct adapter *adapter);
-void bb_reg_dump(struct adapter *adapter);
-void rf_reg_dump(struct adapter *adapter);
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h
index d48bae5416fe..62e0dec249ad 100644
--- a/drivers/staging/rtl8723bs/include/rtw_event.h
+++ b/drivers/staging/rtl8723bs/include/rtw_event.h
@@ -28,7 +28,7 @@ struct surveydone_event {
};
/*
-Used to report the link result of joinning the given bss
+Used to report the link result of joining the given bss
join_res:
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index be9741a056e5..0ee87be6dc4f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -13,7 +13,7 @@
Otherwise, io_handler will free io_req
*/
-/* below is for the intf_option bit defition... */
+/* below is for the intf_option bit definition... */
struct intf_priv;
struct intf_hdl;
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index e103c4a15d1a..e665479babc2 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -131,7 +131,7 @@ struct mlme_priv {
u8 roam_rssi_diff_th; /* rssi difference threshold for active scan candidate selection */
u32 roam_scan_int_ms; /* scan interval for active roam */
u32 roam_scanr_exp_ms; /* scan result expire time in ms for roam */
- u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to speicific target without other consideration */
+ u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to specific target without other consideration */
u8 *nic_hdl;
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 5b8574f5a251..8315399b64fd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -384,8 +384,8 @@ struct mlme_ext_priv {
unsigned char default_supported_mcs_set[16];
struct ss_res sitesurvey_res;
- struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including current scanning/connecting/connected related info. */
- /* for ap mode, network includes ap's cap_info */
+ struct mlme_ext_info mlmext_info; /* for sta/adhoc mode, including current scanning/connecting/connected related info. */
+ /* for ap mode, network includes ap's cap_info */
struct timer_list survey_timer;
struct timer_list link_timer;
struct timer_list sa_query_timer;
@@ -455,7 +455,7 @@ u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset);
unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter);
void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode);
-void SelectChannel(struct adapter *padapter, unsigned char channel);
+void r8723bs_select_channel(struct adapter *padapter, unsigned char channel);
unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index c93594f75436..18dd1464e0c2 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -444,16 +444,6 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, signed int s
}
-static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
-{
- /* due to the design of 2048 bytes alignment of recv_frame, we can reference the union recv_frame */
- /* from any given member of recv_frame. */
- /* rxmem indicates the any member/address in recv_frame */
-
- return (union recv_frame *)(((SIZE_PTR)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
-
-}
-
static inline signed int get_recvframe_len(union recv_frame *precvframe)
{
return precvframe->u.hdr.len;
diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h
index 718275ee4500..9f98b3f5a2e3 100644
--- a/drivers/staging/rtl8723bs/include/rtw_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtw_rf.h
@@ -97,6 +97,4 @@ enum {
HT_DATA_SC_20_LOWER_OF_40MHZ = 2,
};
-u32 rtw_ch2freq(u32 ch);
-
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index 98afbd3054a4..32f6d3a5e309 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -50,43 +50,43 @@ union pn48 {
#ifdef __LITTLE_ENDIAN
struct {
- u8 TSC0;
- u8 TSC1;
- u8 TSC2;
- u8 TSC3;
- u8 TSC4;
- u8 TSC5;
- u8 TSC6;
- u8 TSC7;
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
} _byte_;
#else
struct {
- u8 TSC7;
- u8 TSC6;
- u8 TSC5;
- u8 TSC4;
- u8 TSC3;
- u8 TSC2;
- u8 TSC1;
- u8 TSC0;
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
} _byte_;
#endif
};
union Keytype {
- u8 skey[16];
- u32 lkey[4];
+ u8 skey[16];
+ u32 lkey[4];
};
struct rt_pmkid_list {
- u8 bUsed;
- u8 Bssid[6];
- u8 PMKID[16];
- u8 SsidBuf[33];
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
u8 *ssid_octet;
- u16 ssid_length;
+ u16 ssid_length;
};
@@ -162,7 +162,7 @@ struct security_priv {
/* For WPA2 Pre-Authentication. */
struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE]; /* Renamed from PreAuthKey[NUM_PRE_AUTH_KEY]. Annie, 2006-10-13. */
- u8 PMKIDIndex;
+ u8 PMKIDIndex;
u8 bWepDefaultKeyIdxSet;
@@ -170,50 +170,48 @@ struct security_priv {
#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
do {\
- switch (psecuritypriv->dot11AuthAlgrthm)\
- {\
- case dot11AuthAlgrthm_Open:\
- case dot11AuthAlgrthm_Shared:\
- case dot11AuthAlgrthm_Auto:\
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
- break;\
- case dot11AuthAlgrthm_8021X:\
- if (bmcst)\
- encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
- else\
- encry_algo = (u8)psta->dot118021XPrivacy;\
- break;\
- case dot11AuthAlgrthm_WAPI:\
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
- break;\
+ switch (psecuritypriv->dot11AuthAlgrthm) {\
+ case dot11AuthAlgrthm_Open:\
+ case dot11AuthAlgrthm_Shared:\
+ case dot11AuthAlgrthm_Auto:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
+ case dot11AuthAlgrthm_8021X:\
+ if (bmcst)\
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else\
+ encry_algo = (u8)psta->dot118021XPrivacy;\
+ break;\
+ case dot11AuthAlgrthm_WAPI:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
} \
} while (0)
#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\
do {\
- switch (encrypt)\
- {\
- case _WEP40_:\
- case _WEP104_:\
- iv_len = 4;\
- icv_len = 4;\
- break;\
- case _TKIP_:\
- iv_len = 8;\
- icv_len = 4;\
- break;\
- case _AES_:\
- iv_len = 8;\
- icv_len = 8;\
- break;\
- case _SMS4_:\
- iv_len = 18;\
- icv_len = 16;\
- break;\
- default:\
- iv_len = 0;\
- icv_len = 0;\
- break;\
+ switch (encrypt) {\
+ case _WEP40_:\
+ case _WEP104_:\
+ iv_len = 4;\
+ icv_len = 4;\
+ break;\
+ case _TKIP_:\
+ iv_len = 8;\
+ icv_len = 4;\
+ break;\
+ case _AES_:\
+ iv_len = 8;\
+ icv_len = 8;\
+ break;\
+ case _SMS4_:\
+ iv_len = 18;\
+ icv_len = 16;\
+ break;\
+ default:\
+ iv_len = 0;\
+ icv_len = 0;\
+ break;\
} \
} while (0)
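(Illustrative aside, not part of this patch: the re-indented macros above keep their do { ... } while (0) wrappers, so each still expands to a single statement and remains safe in an unbraced if/else. A minimal usage fragment, assuming only the _TKIP_ case visible in the hunk above:)

	u32 iv_len = 0, icv_len = 0;

	SET_ICE_IV_LEN(iv_len, icv_len, _TKIP_);	/* per the _TKIP_ case above: iv_len == 8, icv_len == 4 */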
@@ -242,7 +240,8 @@ struct mic_data {
/* ===== start - public domain SHA256 implementation ===== */
/* This is based on SHA256 implementation in LibTomCrypt that was released into
- * public domain by Tom St Denis. */
+ * public domain by Tom St Denis.
+ */
int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac);
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index a3b4310caddf..544468f57692 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -15,7 +15,7 @@
#define XMITBUF_ALIGN_SZ 512
-/* xmit extension buff defination */
+/* xmit extension buff definition */
#define MAX_XMIT_EXTBUF_SZ (1536)
#define NR_XMIT_EXTBUFF (32)
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index eb3c73cc2662..b63a74e669bc 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_wifi_regd.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index c81b30f1f1b0..a9e481e182ad 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_mp.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 1341801e5c21..1904e82a24b5 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static void _dynamic_check_timer_handler(struct timer_list *t)
{
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 55d0140cd543..fc9b9c5efb50 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index f09c1324c39c..a00f9f0c85c5 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
/*
* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 4d28b300b235..746f45cf9aac 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <net/cfg80211.h>
#include <asm/unaligned.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 490431484524..d18fde4e5d6c 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 0a0b04088e66..4a7c0c9cc7ef 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -6,7 +6,6 @@
*******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static bool rtw_sdio_claim_host_needed(struct sdio_func *func)
{
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 5eef1d68c6f0..dbd4bf531339 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -6,7 +6,6 @@
*****************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 1eeabfffd6d2..944b9c724b32 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
uint rtw_remainder_len(struct pkt_file *pfile)
@@ -144,9 +143,8 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
psta = list_entry(plist, struct sta_info, asoc_list);
stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
- if (stainfo_offset_valid(stainfo_offset)) {
+ if (stainfo_offset_valid(stainfo_offset))
chk_alive_list[chk_alive_num++] = stainfo_offset;
- }
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index c4d97dbf6ba8..3dbeffc650d3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -857,10 +857,10 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- ret = vchiq_bulk_transfer(instance, handle,
- (void *)data, NULL,
- size, userdata, mode,
- VCHIQ_BULK_TRANSMIT);
+ ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
+ (void *)data, NULL,
+ size, mode, userdata,
+ VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -895,9 +895,10 @@ int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- ret = vchiq_bulk_transfer(instance, handle, data, NULL,
- size, userdata,
- mode, VCHIQ_BULK_RECEIVE);
+ ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
+ (void *)data, NULL,
+ size, mode, userdata,
+ VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -968,9 +969,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
return -ENOMEM;
}
- ret = vchiq_bulk_transfer(instance, handle, data, NULL, size,
- &waiter->bulk_waiter,
- VCHIQ_BULK_MODE_BLOCKING, dir);
+ ret = vchiq_bulk_xfer_blocking_interruptible(instance, handle, data, NULL, size,
+ &waiter->bulk_waiter, dir);
if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 50af04b217f4..1f94db6e0cd9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -1139,7 +1139,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
- void *context, int size, int is_blocking)
+ void *context, int size)
{
struct vchiq_shared_state *local;
struct vchiq_header *header;
@@ -1517,7 +1517,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
/* Acknowledge the OPEN */
if (service->sync) {
if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
- &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
+ &ack_payload, sizeof(ack_payload)) == -EAGAIN)
goto bail_not_ready;
/* The service is now open */
@@ -2655,6 +2655,132 @@ close_service_complete(struct vchiq_service *service, int failstate)
return status;
}
+/*
+ * Prepares a bulk transfer to be queued. The function is interruptible and is
+ * intended to be called from user threads. It may return -EAGAIN to indicate
+ * that a signal has been received and the call should be retried after being
+ * returned to user context.
+ */
+static int
+vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
+ void *offset, void __user *uoffset,
+ int size, void *userdata,
+ enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
+{
+ struct vchiq_bulk_queue *queue;
+ struct bulk_waiter *bulk_waiter = NULL;
+ struct vchiq_bulk *bulk;
+ struct vchiq_state *state = service->state;
+ const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+ const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ int status = -EINVAL;
+ int payload[2];
+
+ if (mode == VCHIQ_BULK_MODE_BLOCKING) {
+ bulk_waiter = userdata;
+ init_completion(&bulk_waiter->event);
+ bulk_waiter->actual = 0;
+ bulk_waiter->bulk = NULL;
+ }
+
+ queue = (dir == VCHIQ_BULK_TRANSMIT) ?
+ &service->bulk_tx : &service->bulk_rx;
+
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EAGAIN;
+
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+ do {
+ mutex_unlock(&service->bulk_mutex);
+ if (wait_for_completion_interruptible(&service->bulk_remove_event))
+ return -EAGAIN;
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EAGAIN;
+ } while (queue->local_insert == queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS);
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+ bulk->mode = mode;
+ bulk->dir = dir;
+ bulk->userdata = userdata;
+ bulk->size = size;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+ if (vchiq_prepare_bulk_data(service->instance, bulk, offset, uoffset, size, dir))
+ goto unlock_error_exit;
+
+ /*
+ * Ensure that the bulk data record is visible to the peer
+ * before proceeding.
+ */
+ wmb();
+
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+ state->id, service->localport, service->remoteport,
+ dir_char, size, &bulk->data, userdata);
+
+ /*
+ * The slot mutex must be held when the service is being closed, so
+ * claim it here to ensure that isn't happening
+ */
+ if (mutex_lock_killable(&state->slot_mutex)) {
+ status = -EAGAIN;
+ goto cancel_bulk_error_exit;
+ }
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto unlock_both_error_exit;
+
+ payload[0] = lower_32_bits(bulk->data);
+ payload[1] = bulk->size;
+ status = queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport,
+ service->remoteport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
+ if (status)
+ goto unlock_both_error_exit;
+
+ queue->local_insert++;
+
+ mutex_unlock(&state->slot_mutex);
+ mutex_unlock(&service->bulk_mutex);
+
+ dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+ state->id, service->localport, dir_char, queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+ if (wait_for_completion_interruptible(&bulk_waiter->event))
+ status = -EAGAIN;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = -EINVAL;
+ }
+
+ return status;
+
+unlock_both_error_exit:
+ mutex_unlock(&state->slot_mutex);
+cancel_bulk_error_exit:
+ vchiq_complete_bulk(service->instance, bulk);
+unlock_error_exit:
+ mutex_unlock(&service->bulk_mutex);
+
+ return status;
+}
+
/* Called by the slot handler */
int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
@@ -2978,31 +3104,17 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
return status;
}
-/*
- * This function may be called by kernel threads or user threads.
- * User threads may receive -EAGAIN to indicate that a signal has been
- * received and the call should be retried after being returned to user
- * context.
- * When called in blocking mode, the userdata field points to a bulk_waiter
- * structure.
- */
-int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
- void *offset, void __user *uoffset, int size, void *userdata,
- enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
+int
+vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ void __user *userdata, enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(instance, handle);
- struct vchiq_bulk_queue *queue;
- struct vchiq_bulk *bulk;
- struct vchiq_state *state;
- struct bulk_waiter *bulk_waiter = NULL;
- const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
- const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
- VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ enum vchiq_bulk_mode mode = VCHIQ_BULK_MODE_BLOCKING;
int status = -EINVAL;
- int payload[2];
if (!service)
- goto error_exit;
+ return -EINVAL;
if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
goto error_exit;
@@ -3013,133 +3125,91 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
if (vchiq_check_service(service))
goto error_exit;
- switch (mode) {
- case VCHIQ_BULK_MODE_NOCALLBACK:
- case VCHIQ_BULK_MODE_CALLBACK:
- break;
- case VCHIQ_BULK_MODE_BLOCKING:
- bulk_waiter = userdata;
- init_completion(&bulk_waiter->event);
- bulk_waiter->actual = 0;
- bulk_waiter->bulk = NULL;
- break;
- case VCHIQ_BULK_MODE_WAITING:
- bulk_waiter = userdata;
- bulk = bulk_waiter->bulk;
- goto waiting;
- default:
- goto error_exit;
- }
- state = service->state;
+ status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, size,
+ userdata, mode, dir);
- queue = (dir == VCHIQ_BULK_TRANSMIT) ?
- &service->bulk_tx : &service->bulk_rx;
+error_exit:
+ vchiq_service_put(service);
- if (mutex_lock_killable(&service->bulk_mutex)) {
- status = -EAGAIN;
+ return status;
+}
+
+int
+vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ enum vchiq_bulk_mode mode, void *userdata,
+ enum vchiq_bulk_dir dir)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ if (mode != VCHIQ_BULK_MODE_CALLBACK &&
+ mode != VCHIQ_BULK_MODE_NOCALLBACK)
goto error_exit;
- }
- if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
- VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
- do {
- mutex_unlock(&service->bulk_mutex);
- if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
- status = -EAGAIN;
- goto error_exit;
- }
- if (mutex_lock_killable(&service->bulk_mutex)) {
- status = -EAGAIN;
- goto error_exit;
- }
- } while (queue->local_insert == queue->remove +
- VCHIQ_NUM_SERVICE_BULKS);
- }
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
- bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+ if (!offset && !uoffset)
+ goto error_exit;
- bulk->mode = mode;
- bulk->dir = dir;
- bulk->userdata = userdata;
- bulk->size = size;
- bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+ if (vchiq_check_service(service))
+ goto error_exit;
- if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
- goto unlock_error_exit;
+ status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
+ size, userdata, mode, dir);
- /*
- * Ensure that the bulk data record is visible to the peer
- * before proceeding.
- */
- wmb();
+error_exit:
+ vchiq_service_put(service);
- dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
- state->id, service->localport, service->remoteport,
- dir_char, size, &bulk->data, userdata);
+ return status;
+}
- /*
- * The slot mutex must be held when the service is being closed, so
- * claim it here to ensure that isn't happening
- */
- if (mutex_lock_killable(&state->slot_mutex)) {
- status = -EAGAIN;
- goto cancel_bulk_error_exit;
- }
+/*
+ * This function is called by VCHIQ ioctl interface and is interruptible.
+ * It may receive -EAGAIN to indicate that a signal has been received
+ * and the call should be retried after being returned to user context.
+ */
+int
+vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
+ unsigned int handle, struct bulk_waiter *userdata)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct bulk_waiter *bulk_waiter;
+ int status = -EINVAL;
- if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
- goto unlock_both_error_exit;
+ if (!service)
+ return -EINVAL;
- payload[0] = lower_32_bits(bulk->data);
- payload[1] = bulk->size;
- status = queue_message(state,
- NULL,
- VCHIQ_MAKE_MSG(dir_msgtype,
- service->localport,
- service->remoteport),
- memcpy_copy_callback,
- &payload,
- sizeof(payload),
- QMFLAGS_IS_BLOCKING |
- QMFLAGS_NO_MUTEX_LOCK |
- QMFLAGS_NO_MUTEX_UNLOCK);
- if (status)
- goto unlock_both_error_exit;
+ if (!userdata)
+ goto error_exit;
- queue->local_insert++;
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
- mutex_unlock(&state->slot_mutex);
- mutex_unlock(&service->bulk_mutex);
+ if (vchiq_check_service(service))
+ goto error_exit;
- dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
- state->id, service->localport, dir_char, queue->local_insert,
- queue->remote_insert, queue->process);
+ bulk_waiter = userdata;
-waiting:
vchiq_service_put(service);
status = 0;
- if (bulk_waiter) {
- bulk_waiter->bulk = bulk;
- if (wait_for_completion_interruptible(&bulk_waiter->event))
- status = -EAGAIN;
- else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
- status = -EINVAL;
- }
+ if (wait_for_completion_interruptible(&bulk_waiter->event))
+ return -EAGAIN;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return -EINVAL;
return status;
-unlock_both_error_exit:
- mutex_unlock(&state->slot_mutex);
-cancel_bulk_error_exit:
- vchiq_complete_bulk(service->instance, bulk);
-unlock_error_exit:
- mutex_unlock(&service->bulk_mutex);
-
error_exit:
- if (service)
- vchiq_service_put(service);
+ vchiq_service_put(service);
+
return status;
}
@@ -3175,11 +3245,12 @@ vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPEN:
status = queue_message(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size,
+ QMFLAGS_IS_BLOCKING);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
status = queue_message_sync(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size);
break;
default:
status = -EINVAL;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 77cc4d7ac077..468463f31801 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -471,9 +471,19 @@ extern void
remote_event_pollall(struct vchiq_state *state);
extern int
-vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset,
- void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
- enum vchiq_bulk_dir dir);
+vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
+ unsigned int handle, struct bulk_waiter *userdata);
+
+extern int
+vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ void __user *userdata, enum vchiq_bulk_dir dir);
+
+extern int
+vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ enum vchiq_bulk_mode mode, void *userdata,
+ enum vchiq_bulk_dir dir);
extern void
vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
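(Illustrative aside, not part of this patch: taken together, the three declarations above replace the single vchiq_bulk_transfer() prototype removed here. A minimal sketch of how the old mode argument maps onto the new interruptible helpers, assuming only the signatures shown in this header; the wrapper name example_bulk_xfer is hypothetical:)

static int example_bulk_xfer(struct vchiq_instance *instance, unsigned int handle,
			     void *offset, void __user *uoffset, int size,
			     void *userdata, enum vchiq_bulk_mode mode,
			     enum vchiq_bulk_dir dir)
{
	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		return vchiq_bulk_xfer_callback_interruptible(instance, handle,
							      offset, uoffset, size,
							      mode, userdata, dir);
	case VCHIQ_BULK_MODE_BLOCKING:
		/* userdata points to a struct bulk_waiter in this mode */
		return vchiq_bulk_xfer_blocking_interruptible(instance, handle,
							      offset, uoffset, size,
							      userdata, dir);
	case VCHIQ_BULK_MODE_WAITING:
		/* resumes waiting on a bulk already queued with a bulk_waiter */
		return vchiq_bulk_xfer_waiting_interruptible(instance, handle,
							     userdata);
	default:
		return -EINVAL;
	}
}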
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 9cd2a64dce5e..d41a4624cc92 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -304,6 +304,11 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
}
userdata = &waiter->bulk_waiter;
+
+ status = vchiq_bulk_xfer_blocking_interruptible(instance, args->handle,
+ NULL, args->data, args->size,
+ userdata, dir);
+
} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
mutex_lock(&instance->bulk_waiter_list_mutex);
list_for_each_entry(iter, &instance->bulk_waiter_list,
@@ -324,12 +329,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
waiter, current->pid);
userdata = &waiter->bulk_waiter;
+
+ status = vchiq_bulk_xfer_waiting_interruptible(instance, args->handle, userdata);
} else {
userdata = args->userdata;
- }
- status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
- userdata, args->mode, dir);
+ status = vchiq_bulk_xfer_callback_interruptible(instance, args->handle, NULL,
+ args->data, args->size,
+ args->mode, userdata, dir);
+
+ }
if (!waiter) {
ret = 0;
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index 9a091463656d..42304c9f83a2 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -416,10 +416,6 @@ void vme_slave_free(struct vme_resource *resource)
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
- if (!slave_image) {
- dev_err(bridge->parent, "Can't find slave resource\n");
- return;
- }
/* Unlock image */
mutex_lock(&slave_image->mtx);
@@ -794,10 +790,6 @@ void vme_master_free(struct vme_resource *resource)
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
- if (!master_image) {
- dev_err(bridge->parent, "Can't find master resource\n");
- return;
- }
/* Unlock image */
spin_lock(&master_image->lock);
@@ -1265,7 +1257,7 @@ EXPORT_SYMBOL(vme_unregister_error_handler);
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
- void (*call)(int, int, void *);
+ void (*call)(int level, int statid, void *priv_data);
void *priv_data;
call = bridge->irq[level - 1].callback[statid].func;
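(Illustrative aside, not part of this patch: the named parameters added above only document the callback prototype; behaviour is unchanged. A hedged sketch of a driver registering such a callback through vme_irq_request(), whose declaration appears in vme.h below; the function names and the level/statid values are arbitrary examples:)

static void example_vme_irq_callback(int level, int statid, void *priv_data)
{
	/* level and statid identify the interrupt; priv_data is the pointer
	 * that was handed to vme_irq_request() at registration time */
}

static int example_vme_irq_setup(struct vme_dev *vdev)
{
	/* level 3 and status/ID 0x20 are arbitrary example values */
	return vme_irq_request(vdev, 3, 0x20, example_vme_irq_callback, NULL);
}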
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index 26aa40f78a74..7753e736f9fd 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -129,8 +129,7 @@ struct vme_driver {
};
void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
-void vme_free_consistent(struct vme_resource *, size_t, void *,
- dma_addr_t);
+void vme_free_consistent(struct vme_resource *, size_t, void *, dma_addr_t);
size_t vme_get_size(struct vme_resource *);
int vme_check_window(struct vme_bridge *bridge, u32 aspace,
@@ -138,20 +137,20 @@ int vme_check_window(struct vme_bridge *bridge, u32 aspace,
struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
+ unsigned long long, dma_addr_t, u32, u32);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, u32 *, u32 *);
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
void vme_slave_free(struct vme_resource *);
struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, u32, u32, u32);
+ unsigned long long, u32, u32, u32);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, u32 *, u32 *, u32 *);
+ unsigned long long *, u32 *, u32 *, u32 *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
- unsigned int, loff_t);
+ unsigned int, loff_t);
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
void vme_master_free(struct vme_resource *);
@@ -162,13 +161,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
+ struct vme_dma_attr *, size_t);
int vme_dma_list_exec(struct vme_dma_list *);
int vme_dma_list_free(struct vme_dma_list *);
int vme_dma_free(struct vme_resource *);
int vme_irq_request(struct vme_dev *, int, int,
- void (*callback)(int, int, void *), void *);
+ void (*callback)(int, int, void *), void *);
void vme_irq_free(struct vme_dev *, int, int);
int vme_irq_generate(struct vme_dev *, int, int);
diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
index 7f84d1c86f29..4a59c9069605 100644
--- a/drivers/staging/vme_user/vme_fake.c
+++ b/drivers/staging/vme_user/vme_fake.c
@@ -79,7 +79,7 @@ struct fake_driver {
};
/* Module parameter */
-static int geoid;
+static u32 geoid;
static const char driver_name[] = "vme_fake";
@@ -1059,6 +1059,12 @@ static int __init fake_init(void)
struct vme_slave_resource *slave_image;
struct vme_lm_resource *lm;
+ if (geoid >= VME_MAX_SLOTS) {
+ pr_err("VME geographical address must be between 0 and %d (exclusive), but got %d\n",
+ VME_MAX_SLOTS, geoid);
+ return -EINVAL;
+ }
+
/* We need a fake parent device */
vme_root = root_device_register("vme");
if (IS_ERR(vme_root))
@@ -1283,7 +1289,7 @@ static void __exit fake_exit(void)
}
MODULE_PARM_DESC(geoid, "Set geographical addressing");
-module_param(geoid, int, 0);
+module_param(geoid, uint, 0);
MODULE_DESCRIPTION("Fake VME bridge driver");
MODULE_LICENSE("GPL");
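(Illustrative aside, not part of this patch: because geoid is now a u32 read with module_param(..., uint, ...), a negative value supplied on the module command line wraps to a large unsigned number, so the single upper-bound test added in fake_init() rejects both out-of-range and formerly-negative inputs. The same pattern is applied to vme_tsi148.c below.)

	/* hedged restatement of the new check: one unsigned comparison covers
	 * both "too large" and wrapped-around negative values */
	if (geoid >= VME_MAX_SLOTS)
		return -EINVAL;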
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 2ec9c2904404..31a44025e08f 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -36,7 +36,7 @@ static void tsi148_remove(struct pci_dev *);
/* Module parameter */
static bool err_chk;
-static int geoid;
+static u32 geoid;
static const char driver_name[] = "vme_tsi148";
@@ -55,14 +55,14 @@ static struct pci_driver tsi148_driver = {
};
static void reg_join(unsigned int high, unsigned int low,
- unsigned long long *variable)
+ unsigned long long *variable)
{
*variable = (unsigned long long)high << 32;
*variable |= (unsigned long long)low;
}
static void reg_split(unsigned long long variable, unsigned int *high,
- unsigned int *low)
+ unsigned int *low)
{
*low = (unsigned int)variable & 0xFFFFFFFF;
*high = (unsigned int)(variable >> 32);
@@ -72,7 +72,7 @@ static void reg_split(unsigned long long variable, unsigned int *high,
* Wakes up DMA queue.
*/
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
- int channel_mask)
+ int channel_mask)
{
u32 serviced = 0;
@@ -207,7 +207,7 @@ static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
* Calling VME bus interrupt callback if provided.
*/
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
- u32 stat)
+ u32 stat)
{
int vec, i, serviced = 0;
struct tsi148_driver *bridge;
@@ -358,7 +358,7 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
}
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
@@ -392,7 +392,7 @@ static int tsi148_iack_received(struct tsi148_driver *bridge)
* Configure VME interrupt
*/
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
- int state, int sync)
+ int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
@@ -430,7 +430,7 @@ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
* interrupt to be acked.
*/
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
- int statid)
+ int statid)
{
u32 tmp;
struct tsi148_driver *bridge;
@@ -453,7 +453,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
/* XXX Consider implementing a timeout? */
wait_event_interruptible(bridge->iack_queue,
- tsi148_iack_received(bridge));
+ tsi148_iack_received(bridge));
mutex_unlock(&bridge->vme_int);
@@ -464,8 +464,8 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
* Initialize a slave window with the requested attributes.
*/
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, u32 aspace, u32 cycle)
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
@@ -607,8 +607,8 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
* Get slave window configuration.
*/
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
@@ -706,7 +706,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
* Allocate and map PCI Resource
*/
static int tsi148_alloc_resource(struct vme_master_resource *image,
- unsigned long long size)
+ unsigned long long size)
{
unsigned long long existing_size;
int retval = 0;
@@ -751,9 +751,9 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->bus_resource.end = (unsigned long)size;
image->bus_resource.flags = IORESOURCE_MEM;
- retval = pci_bus_alloc_resource(pdev->bus,
- &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
- 0, NULL, NULL);
+ retval = pci_bus_alloc_resource(pdev->bus, &image->bus_resource,
+ size, 0x10000, PCIBIOS_MIN_MEM,
+ 0, NULL, NULL);
if (retval) {
dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
@@ -796,8 +796,8 @@ static void tsi148_free_resource(struct vme_master_resource *image)
* Set the attributes of an outbound window.
*/
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
+ unsigned long long vme_base, unsigned long long size,
+ u32 aspace, u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -1031,8 +1031,8 @@ err_window:
* XXX Not parsing prefetch information.
*/
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
@@ -1140,15 +1140,15 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
}
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
int retval;
spin_lock(&image->lock);
retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
+ cycle, dwidth);
spin_unlock(&image->lock);
@@ -1156,7 +1156,7 @@ static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
}
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval, enabled;
unsigned long long vme_base, size;
@@ -1241,7 +1241,7 @@ out:
}
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval = 0, enabled;
unsigned long long vme_base, size;
@@ -1342,9 +1342,8 @@ out:
*
* Requires a previously configured master window, returns final value.
*/
-static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image, unsigned int mask,
+ unsigned int compare, unsigned int swap, loff_t offset)
{
unsigned long long pci_addr;
unsigned int pci_addr_high, pci_addr_low;
@@ -1399,7 +1398,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
u32 val;
@@ -1497,7 +1496,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
u32 val;
@@ -1599,8 +1598,8 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
*
* Note: DMA engine expects the DMA descriptor to be big endian.
*/
-static int tsi148_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
+static int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
+ struct vme_dma_attr *dest, size_t count)
{
struct tsi148_dma_entry *entry, *prev;
u32 address_high, address_low, val;
@@ -1653,8 +1652,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
case VME_DMA_PCI:
pci_attr = src->private;
- reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
+ reg_split((unsigned long long)pci_attr->address, &address_high, &address_low);
entry->descriptor.dsau = cpu_to_be32(address_high);
entry->descriptor.dsal = cpu_to_be32(address_low);
entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
@@ -1662,15 +1660,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
case VME_DMA_VME:
vme_attr = src->private;
- reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
+ reg_split((unsigned long long)vme_attr->address, &address_high, &address_low);
entry->descriptor.dsau = cpu_to_be32(address_high);
entry->descriptor.dsal = cpu_to_be32(address_low);
entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
- retval = tsi148_dma_set_vme_src_attributes(
- tsi148_bridge->parent, &entry->descriptor.dsat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ retval = tsi148_dma_set_vme_src_attributes(tsi148_bridge->parent,
+ &entry->descriptor.dsat,
+ vme_attr->aspace,
+ vme_attr->cycle,
+ vme_attr->dwidth);
if (retval < 0)
goto err_source;
break;
@@ -1690,7 +1689,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
pci_attr = dest->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
+ &address_low);
entry->descriptor.ddau = cpu_to_be32(address_high);
entry->descriptor.ddal = cpu_to_be32(address_low);
entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
@@ -1699,14 +1698,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
vme_attr = dest->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
+ &address_low);
entry->descriptor.ddau = cpu_to_be32(address_high);
entry->descriptor.ddal = cpu_to_be32(address_low);
entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
- retval = tsi148_dma_set_vme_dest_attributes(
- tsi148_bridge->parent, &entry->descriptor.ddat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ retval = tsi148_dma_set_vme_dest_attributes(tsi148_bridge->parent,
+ &entry->descriptor.ddat,
+ vme_attr->aspace,
+ vme_attr->cycle,
+ vme_attr->dwidth);
if (retval < 0)
goto err_dest;
break;
@@ -1735,7 +1736,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Fill out previous descriptors "Next Address" */
if (entry->list.prev != &list->entries) {
reg_split((unsigned long long)entry->dma_handle, &address_high,
- &address_low);
+ &address_low);
prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
list);
prev->descriptor.dnlau = cpu_to_be32(address_high);
@@ -1813,7 +1814,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
/* Get first bus address and write into registers */
entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
- list);
+ list);
mutex_unlock(&ctrlr->mtx);
@@ -1832,7 +1833,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
retval = wait_event_interruptible(bridge->dma_queue[channel],
- tsi148_dma_busy(ctrlr->parent, channel));
+ tsi148_dma_busy(ctrlr->parent, channel));
if (retval) {
iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
@@ -1883,7 +1884,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
entry = list_entry(pos, struct tsi148_dma_entry, list);
dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
kfree(entry);
}
@@ -1898,7 +1899,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- u32 aspace, u32 cycle)
+ u32 aspace, u32 cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
@@ -1963,7 +1964,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
@@ -2013,7 +2014,7 @@ static int tsi148_lm_get(struct vme_lm_resource *lm,
* Callback will be passed the monitor triggered.
*/
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(void *), void *data)
+ void (*callback)(void *), void *data)
{
u32 lm_ctl, tmp;
struct vme_bridge *tsi148_bridge;
@@ -2086,7 +2087,7 @@ static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
- bridge->base + TSI148_LCSR_INTC);
+ bridge->base + TSI148_LCSR_INTC);
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
@@ -2126,7 +2127,7 @@ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
}
static void *tsi148_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
+ dma_addr_t *dma)
{
struct pci_dev *pdev;
@@ -2137,7 +2138,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size,
}
static void tsi148_free_consistent(struct device *parent, size_t size,
- void *vaddr, dma_addr_t dma)
+ void *vaddr, dma_addr_t dma)
{
struct pci_dev *pdev;
@@ -2160,7 +2161,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size,
* be mapped onto PCI memory.
*/
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u32 cbar, crat, vstat;
u32 crcsr_bus_high, crcsr_bus_low;
@@ -2201,8 +2202,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
} else {
dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
- iowrite32be(crat | TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
+ iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT);
}
/* If we want flushed, error-checked writes, set up a window
@@ -2210,9 +2210,8 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
* through VME writes.
*/
if (err_chk) {
- retval = tsi148_master_set(bridge->flush_image, 1,
- (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
- VME_D16);
+ retval = tsi148_master_set(bridge->flush_image, 1, (vstat * 0x80000),
+ 0x80000, VME_CRCSR, VME_SCT, VME_D16);
if (retval)
dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
}
@@ -2221,7 +2220,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
}
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u32 crat;
struct tsi148_driver *bridge;
@@ -2231,7 +2230,7 @@ static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
/* Turn off CR/CSR space */
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
+ bridge->base + TSI148_LCSR_CRAT);
/* Free image */
iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
@@ -2253,6 +2252,12 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
+ if (geoid >= VME_MAX_SLOTS) {
+ dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %d\n",
+ VME_MAX_SLOTS, geoid);
+ return -EINVAL;
+ }
+
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
@@ -2287,7 +2292,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* map registers in BAR 0 */
tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
- 4096);
+ 4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
@@ -2367,7 +2372,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
- &tsi148_bridge->master_resources);
+ &tsi148_bridge->master_resources);
}
/* Add slave windows to list */
@@ -2388,7 +2393,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
list_add_tail(&slave_image->list,
- &tsi148_bridge->slave_resources);
+ &tsi148_bridge->slave_resources);
}
/* Add dma engines to list */
@@ -2409,7 +2414,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dma_ctrlr->pending);
INIT_LIST_HEAD(&dma_ctrlr->running);
list_add_tail(&dma_ctrlr->list,
- &tsi148_bridge->dma_resources);
+ &tsi148_bridge->dma_resources);
}
/* Add location monitor to list */
@@ -2447,16 +2452,16 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
+ (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
if (!geoid)
dev_info(&pdev->dev, "VME geographical address is %d\n",
- data & TSI148_LCSR_VSTAT_GA_M);
+ data & TSI148_LCSR_VSTAT_GA_M);
else
dev_info(&pdev->dev, "VME geographical address is set to %d\n",
- geoid);
+ geoid);
dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
- err_chk ? "enabled" : "disabled");
+ err_chk ? "enabled" : "disabled");
retval = tsi148_crcsr_init(tsi148_bridge, pdev);
if (retval) {
@@ -2507,8 +2512,7 @@ err_slave:
err_master:
/* resources are stored in link list */
list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
+ master_image = list_entry(pos, struct vme_master_resource, list);
list_del(pos);
kfree(master_image);
}
@@ -2605,8 +2609,7 @@ static void tsi148_remove(struct pci_dev *pdev)
/* resources are stored in link list */
list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
+ master_image = list_entry(pos, struct vme_master_resource, list);
list_del(pos);
kfree(master_image);
}
@@ -2628,7 +2631,7 @@ MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);
MODULE_PARM_DESC(geoid, "Override geographical addressing");
-module_param(geoid, int, 0);
+module_param(geoid, uint, 0);
MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO
index 63607ef9c97e..529bc22cd608 100644
--- a/drivers/staging/vt6655/TODO
+++ b/drivers/staging/vt6655/TODO
@@ -18,4 +18,4 @@ TODO:
- integrate with drivers/net/wireless
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
-and Forest Bond <forest@alittletooquiet.net>.
+and Philipp Hortmann <philipp.g.hortmann@gmail.com>.
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 688c870d89bc..6a2e390e9493 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -388,22 +388,22 @@ void card_safe_reset_tx(struct vnt_private *priv)
struct vnt_tx_desc *curr_td;
/* initialize TD index */
- priv->tail_td[0] = &priv->apTD0Rings[0];
- priv->apCurrTD[0] = &priv->apTD0Rings[0];
+ priv->tail_td[0] = &priv->ap_td0_rings[0];
+ priv->apCurrTD[0] = &priv->ap_td0_rings[0];
- priv->tail_td[1] = &priv->apTD1Rings[0];
- priv->apCurrTD[1] = &priv->apTD1Rings[0];
+ priv->tail_td[1] = &priv->ap_td1_rings[0];
+ priv->apCurrTD[1] = &priv->ap_td1_rings[0];
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) {
- curr_td = &priv->apTD0Rings[uu];
+ curr_td = &priv->ap_td0_rings[uu];
curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) {
- curr_td = &priv->apTD1Rings[uu];
+ curr_td = &priv->ap_td1_rings[uu];
curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index f52e42564e81..f6b462ebca51 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -55,8 +55,8 @@ void CARDvSafeResetRx(struct vnt_private *priv);
void card_radio_power_off(struct vnt_private *priv);
bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type);
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 bss_timestamp);
+ u64 bss_timestamp);
bool card_set_beacon_period(struct vnt_private *priv,
- unsigned short beacon_interval);
+ unsigned short beacon_interval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 0212240ba23f..5eaab6b172d3 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -135,8 +135,8 @@ struct vnt_private {
struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
struct vnt_tx_desc *tail_td[TYPE_MAXTD];
- struct vnt_tx_desc *apTD0Rings;
- struct vnt_tx_desc *apTD1Rings;
+ struct vnt_tx_desc *ap_td0_rings;
+ struct vnt_tx_desc *ap_td1_rings;
struct vnt_rx_desc *aRD0Ring;
struct vnt_rx_desc *aRD1Ring;
@@ -189,10 +189,10 @@ struct vnt_private {
u8 byBBType; /* 0:11A, 1:11B, 2:11G */
u8 packet_type; /*
- * 0:11a,1:11b,2:11gb (only CCK
- * in BasicRate), 3:11ga (OFDM in
- * Basic Rate)
- */
+ * 0:11a,1:11b,2:11gb (only CCK
+ * in BasicRate), 3:11ga (OFDM in
+ * Basic Rate)
+ */
unsigned short wBasicRate;
unsigned char byACKRate;
unsigned char byTopOFDMBasicRate;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 3ff8103366c1..bf3ecf720206 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -550,11 +550,11 @@ static bool device_init_rings(struct vnt_private *priv)
priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
/* vir_pool: pvoid type */
- priv->apTD0Rings = vir_pool
+ priv->ap_td0_rings = vir_pool
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
- priv->apTD1Rings = vir_pool
+ priv->ap_td1_rings = vir_pool
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
@@ -720,7 +720,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
curr = priv->td0_pool_dma;
for (i = 0; i < priv->opts.tx_descs[0];
i++, curr += sizeof(struct vnt_tx_desc)) {
- desc = &priv->apTD0Rings[i];
+ desc = &priv->ap_td0_rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
if (!desc->td_info) {
ret = -ENOMEM;
@@ -730,20 +730,20 @@ static int device_init_td0_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
+ desc->next = &(priv->ap_td0_rings[(i + 1) % priv->opts.tx_descs[0]]);
desc->next_desc = cpu_to_le32(curr +
sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
- priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
+ priv->ap_td0_rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
+ priv->tail_td[0] = priv->apCurrTD[0] = &priv->ap_td0_rings[0];
return 0;
err_free_desc:
while (i--) {
- desc = &priv->apTD0Rings[i];
+ desc = &priv->ap_td0_rings[i];
kfree(desc->td_info);
}
@@ -761,7 +761,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
curr = priv->td1_pool_dma;
for (i = 0; i < priv->opts.tx_descs[1];
i++, curr += sizeof(struct vnt_tx_desc)) {
- desc = &priv->apTD1Rings[i];
+ desc = &priv->ap_td1_rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
if (!desc->td_info) {
ret = -ENOMEM;
@@ -771,19 +771,19 @@ static int device_init_td1_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
+ desc->next = &(priv->ap_td1_rings[(i + 1) % priv->opts.tx_descs[1]]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
- priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
+ priv->ap_td1_rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
+ priv->tail_td[1] = priv->apCurrTD[1] = &priv->ap_td1_rings[0];
return 0;
err_free_desc:
while (i--) {
- desc = &priv->apTD1Rings[i];
+ desc = &priv->ap_td1_rings[i];
kfree(desc->td_info);
}
@@ -795,7 +795,7 @@ static void device_free_td0_ring(struct vnt_private *priv)
int i;
for (i = 0; i < priv->opts.tx_descs[0]; i++) {
- struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
+ struct vnt_tx_desc *desc = &priv->ap_td0_rings[i];
struct vnt_td_info *td_info = desc->td_info;
dev_kfree_skb(td_info->skb);
@@ -808,7 +808,7 @@ static void device_free_td1_ring(struct vnt_private *priv)
int i;
for (i = 0; i < priv->opts.tx_descs[1]; i++) {
- struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
+ struct vnt_tx_desc *desc = &priv->ap_td1_rings[i];
struct vnt_td_info *td_info = desc->td_info;
dev_kfree_skb(td_info->skb);
@@ -1140,7 +1140,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
PSbIsNextTBTTWakeUp((void *)priv);
if ((priv->op_mode == NL80211_IFTYPE_AP ||
- priv->op_mode == NL80211_IFTYPE_ADHOC) &&
+ priv->op_mode == NL80211_IFTYPE_ADHOC) &&
priv->vif->bss_conf.enable_beacon)
MACvOneShotTimer1MicroSec(priv,
(priv->vif->bss_conf.beacon_int -
@@ -1535,7 +1535,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->op_mode != NL80211_IFTYPE_AP) {
if (vif->cfg.assoc && conf->beacon_rate) {
card_update_tsf(priv, conf->beacon_rate->hw_value,
- conf->sync_tsf);
+ conf->sync_tsf);
card_set_beacon_period(priv, conf->beacon_int);
@@ -1763,7 +1763,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->memaddr = pci_resource_start(pcid, 0);
priv->ioaddr = pci_resource_start(pcid, 1);
priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
- 256);
+ 256);
if (!priv->port_offset) {
dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
device_free_info(priv);
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index acf931c3f5fd..a33af2852227 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -537,9 +537,9 @@
/*--------------------- Export Macros ------------------------------*/
-#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, iobase + MAC_REG_PAGE1SEL)
+#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, (iobase) + MAC_REG_PAGE1SEL)
-#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, iobase + MAC_REG_PAGE1SEL)
+#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, (iobase) + MAC_REG_PAGE1SEL)
#define MAKEWORD(lb, hb) \
((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 5e5ed582c35e..3705cb1e87b6 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -493,9 +493,9 @@ s_uFillDataHead(
buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
- pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
@@ -520,7 +520,7 @@ s_uFillDataHead(
buf->duration_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
@@ -1375,8 +1375,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Get Duration and TimeStampOff */
short_head->duration =
cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
- frame_size, PK_TYPE_11A, current_rate,
- false, 0, 0, 1, AUTO_FB_NONE));
+ frame_size, PK_TYPE_11A, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
@@ -1391,8 +1391,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Get Duration and TimeStampOff */
short_head->duration =
cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
- frame_size, PK_TYPE_11B, current_rate,
- false, 0, 0, 1, AUTO_FB_NONE));
+ frame_size, PK_TYPE_11B, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 4b4a4d63e61f..cb149bcdd7d5 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -564,7 +564,6 @@ static const struct file_operations acpi_thermal_rel_fops = {
.open = acpi_thermal_rel_open,
.release = acpi_thermal_rel_release,
.unlocked_ioctl = acpi_thermal_rel_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice acpi_thermal_rel_misc_device = {
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index c9b6bb46111c..d2a0054217da 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -32,40 +32,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
goto out_put;
/*
- * Try to find physical device walking upwards to the hierarcy.
- * We need to do this because the xHCI driver might not yet be
- * bound so the USB3 SuperSpeed ports are not yet created.
+ * Ignore USB3 ports here as USB core will set up device links between
+ * tunneled USB3 devices and NHI host during USB device creation.
+ * USB3 ports might not even have a physical device yet if xHCI driver
+ * isn't bound yet.
*/
- do {
- dev = acpi_get_first_physical_node(adev);
- if (dev)
- break;
-
- adev = acpi_dev_parent(adev);
- } while (adev);
-
- /*
- * Check that the device is PCIe. This is because USB3
- * SuperSpeed ports have this property and they are not power
- * managed with the xHCI and the SuperSpeed hub so we create the
- * link from xHCI instead.
- */
- while (dev && !dev_is_pci(dev))
- dev = dev->parent;
-
- if (!dev)
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev || !dev_is_pci(dev))
goto out_put;
- /*
- * Check that this actually matches the type of device we
- * expect. It should either be xHCI or PCIe root/downstream
- * port.
- */
+ /* Check that this matches a PCIe root/downstream port. */
pdev = to_pci_dev(dev);
- if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
- (pci_is_pcie(pdev) &&
- (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ if (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) {
const struct device_link *link;
/*
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 9ed4bb2e8d05..350310bd0fee 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
@@ -34,6 +35,14 @@
#define COUNTER_SET_LEN 3
+/*
+ * The USB4 spec doesn't specify a dwell range; a range of 100 ms to 500 ms
+ * was found to give good results.
+ */
+#define MIN_DWELL_TIME 100 /* ms */
+#define MAX_DWELL_TIME 500 /* ms */
+#define DWELL_SAMPLE_INTERVAL 10
+
/* Sideband registers and their sizes as defined in the USB4 spec */
struct sb_reg {
unsigned int reg;
@@ -394,8 +403,15 @@ out:
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @voltage_steps_optional_range: Number of voltage steps for optional range
+ * @max_voltage_offset_optional_range: Maximum voltage offset for the optional
+ * range (in mV).
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @dwell_time: Dwell time for software margining (in ms)
+ * @error_counter: Error counter operation for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
@@ -414,13 +430,37 @@ struct tb_margining {
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
+ unsigned int voltage_steps_optional_range;
+ unsigned int max_voltage_offset_optional_range;
unsigned int time_steps;
unsigned int max_time_offset;
+ unsigned int voltage_time_offset;
+ unsigned int dwell_time;
+ enum usb4_margin_sw_error_counter error_counter;
+ bool optional_voltage_offset_range;
bool software;
bool time;
bool right_high;
};
+static int margining_modify_error_counter(struct tb_margining *margining,
+ u32 lanes, enum usb4_margin_sw_error_counter error_counter)
+{
+ struct usb4_port_margining_params params = { 0 };
+ struct tb_port *port = margining->port;
+ u32 result;
+
+ if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR &&
+ error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP)
+ return -EOPNOTSUPP;
+
+ params.error_counter = error_counter;
+ params.lanes = lanes;
+
+ return usb4_port_sw_margin(port, margining->target, margining->index,
+ &params, &result);
+}
+
static bool supports_software(const struct tb_margining *margining)
{
return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
@@ -454,6 +494,12 @@ independent_time_margins(const struct tb_margining *margining)
return FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1]);
}
+static bool
+supports_optional_voltage_offset_range(const struct tb_margining *margining)
+{
+ return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT;
+}
+
static ssize_t
margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -553,6 +599,14 @@ static int margining_caps_show(struct seq_file *s, void *not_used)
margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
margining->max_voltage_offset);
+ seq_printf(s, "# optional voltage offset range support: %s\n",
+ str_yes_no(supports_optional_voltage_offset_range(margining)));
+ if (supports_optional_voltage_offset_range(margining)) {
+ seq_printf(s, "# voltage margin steps, optional range: %u\n",
+ margining->voltage_steps_optional_range);
+ seq_printf(s, "# maximum voltage offset, optional range: %u mV\n",
+ margining->max_voltage_offset_optional_range);
+ }
switch (independent_voltage_margins(margining)) {
case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
@@ -667,6 +721,198 @@ static int margining_lanes_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_lanes);
+static ssize_t
+margining_voltage_time_offset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int max_margin;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ if (margining->time)
+ max_margin = margining->time_steps;
+ else
+ if (margining->optional_voltage_offset_range)
+ max_margin = margining->voltage_steps_optional_range;
+ else
+ max_margin = margining->voltage_steps;
+
+ margining->voltage_time_offset = clamp(val, 0, max_margin);
+ }
+
+ return count;
+}
+
+static int margining_voltage_time_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->voltage_time_offset);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_voltage_time_offset);
+
+static ssize_t
+margining_error_counter_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum usb4_margin_sw_error_counter error_counter;
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (!strcmp(buf, "nop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP;
+ else if (!strcmp(buf, "clear"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ else if (!strcmp(buf, "start"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START;
+ else if (!strcmp(buf, "stop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP;
+ else
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->error_counter = error_counter;
+ }
+
+ return count;
+}
+
+static int margining_error_counter_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ switch (margining->error_counter) {
+ case USB4_MARGIN_SW_ERROR_COUNTER_NOP:
+ seq_puts(s, "[nop] clear start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR:
+ seq_puts(s, "nop [clear] start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_START:
+ seq_puts(s, "nop clear [start] stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_STOP:
+ seq_puts(s, "nop clear start [stop]\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_error_counter);
+
+static ssize_t
+margining_dwell_time_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME);
+ }
+
+ return count;
+}
+
+static int margining_dwell_time_show(struct seq_file *s, void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->dwell_time);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_dwell_time);
+
+static ssize_t
+margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ margining->optional_voltage_offset_range = val;
+ }
+
+ return count;
+}
+
+static int margining_optional_voltage_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ seq_printf(s, "%u\n", margining->optional_voltage_offset_range);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_optional_voltage_offset);
+
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -739,6 +985,51 @@ static int margining_mode_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_mode);
+static int margining_run_sw(struct tb_margining *margining,
+ struct usb4_port_margining_params *params)
+{
+ u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL;
+ int ret, i;
+
+ ret = usb4_port_sw_margin(margining->port, margining->target, margining->index,
+ params, margining->results);
+ if (ret)
+ goto out_stop;
+
+ for (i = 0; i <= nsamples; i++) {
+ u32 errors = 0;
+
+ ret = usb4_port_sw_margin_errors(margining->port, margining->target,
+ margining->index, &margining->results[1]);
+ if (ret)
+ break;
+
+ if (margining->lanes == USB4_MARGIN_SW_LANE_0)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGIN_SW_LANE_1)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGIN_SW_ALL_LANES)
+ errors = margining->results[1];
+
+ /* Any errors stop the test */
+ if (errors)
+ break;
+
+ fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC);
+ }
+
+out_stop:
+ /*
+ * Stop the counters but don't clear them to allow the
+ * different error counter configurations.
+ */
+ margining_modify_error_counter(margining, margining->lanes,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP);
+ return ret;
+}
+
static int margining_run_write(void *data, u64 val)
{
struct tb_margining *margining = data;
@@ -779,36 +1070,43 @@ static int margining_run_write(void *data, u64 val)
clx = ret;
}
+ /* Clear the results */
+ memset(margining->results, 0, sizeof(margining->results));
+
if (margining->software) {
+ struct usb4_port_margining_params params = {
+ .error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .voltage_time_offset = margining->voltage_time_offset,
+ .right_high = margining->right_high,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
tb_port_dbg(port,
"running software %s lane margining for %s lanes %u\n",
margining->time ? "time" : "voltage", dev_name(dev),
margining->lanes);
- ret = usb4_port_sw_margin(port, margining->target, margining->index,
- margining->lanes, margining->time,
- margining->right_high,
- USB4_MARGIN_SW_COUNTER_CLEAR);
- if (ret)
- goto out_clx;
- ret = usb4_port_sw_margin_errors(port, margining->target,
- margining->index,
- &margining->results[0]);
+ ret = margining_run_sw(margining, &params);
} else {
+ struct usb4_port_margining_params params = {
+ .ber_level = margining->ber_level,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .right_high = margining->right_high,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
tb_port_dbg(port,
"running hardware %s lane margining for %s lanes %u\n",
margining->time ? "time" : "voltage", dev_name(dev),
margining->lanes);
- /* Clear the results */
- margining->results[0] = 0;
- margining->results[1] = 0;
- ret = usb4_port_hw_margin(port, margining->target, margining->index,
- margining->lanes, margining->ber_level,
- margining->time, margining->right_high,
+
+ ret = usb4_port_hw_margin(port, margining->target, margining->index, &params,
margining->results);
}
-out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
@@ -837,6 +1135,13 @@ static ssize_t margining_results_write(struct file *file,
margining->results[0] = 0;
margining->results[1] = 0;
+ if (margining->software) {
+ /* Clear the error counters */
+ margining_modify_error_counter(margining,
+ USB4_MARGIN_SW_ALL_LANES,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR);
+ }
+
mutex_unlock(&tb->lock);
return count;
}
@@ -852,6 +1157,8 @@ static void voltage_margin_show(struct seq_file *s,
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
+ if (margining->optional_voltage_offset_range)
+ seq_puts(s, " optional voltage offset range enabled\n");
}
static void time_margin_show(struct seq_file *s,
@@ -924,6 +1231,24 @@ static int margining_results_show(struct seq_file *s, void *not_used)
voltage_margin_show(s, margining, val);
}
}
+ } else {
+ u32 lane_errors, result;
+
+ seq_printf(s, "0x%08x\n", margining->results[1]);
+ result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]);
+
+ if (result == USB4_MARGIN_SW_LANE_0 ||
+ result == USB4_MARGIN_SW_ALL_LANES) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 0 errors: %u\n", lane_errors);
+ }
+ if (result == USB4_MARGIN_SW_LANE_1 ||
+ result == USB4_MARGIN_SW_ALL_LANES) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 1 errors: %u\n", lane_errors);
+ }
}
mutex_unlock(&tb->lock);
@@ -1091,6 +1416,15 @@ static struct tb_margining *margining_alloc(struct tb_port *port,
val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]);
margining->max_voltage_offset = 74 + val * 2;
+ if (supports_optional_voltage_offset_range(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK,
+ margining->caps[0]);
+ margining->voltage_steps_optional_range = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK,
+ margining->caps[1]);
+ margining->max_voltage_offset_optional_range = 74 + val * 2;
+ }
+
if (supports_time(margining)) {
val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]);
margining->time_steps = val;
@@ -1127,6 +1461,22 @@ static struct tb_margining *margining_alloc(struct tb_port *port,
independent_time_margins(margining) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, margining,
&margining_margin_fops);
+
+ margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ margining->dwell_time = MIN_DWELL_TIME;
+
+ if (supports_optional_voltage_offset_range(margining))
+ debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining,
+ &margining_optional_voltage_offset_fops);
+
+ if (supports_software(margining)) {
+ debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining,
+ &margining_voltage_time_offset_fops);
+ debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining,
+ &margining_error_counter_fops);
+ debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining,
+ &margining_dwell_time_fops);
+ }
return margining;
}
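
The new software-margining path above clamps the user-supplied dwell time to the 100-500 ms window and then samples the error counters every 10 ms, stopping early on the first error. A self-contained userspace sketch of that loop follows; read_lane_errors() is a made-up stand-in for usb4_port_sw_margin_errors(), not a kernel API.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MIN_DWELL_TIME 100		/* ms */
#define MAX_DWELL_TIME 500		/* ms */
#define DWELL_SAMPLE_INTERVAL 10	/* ms */

#define ERR_LANE_0_MASK 0x0f	/* bits 3:0 */
#define ERR_LANE_1_MASK 0xf0	/* bits 7:4 */

static uint32_t read_lane_errors(void)
{
	return 0;	/* stand-in: pretend the link is error free */
}

static unsigned int clamp_dwell(unsigned int val)
{
	if (val < MIN_DWELL_TIME)
		return MIN_DWELL_TIME;
	if (val > MAX_DWELL_TIME)
		return MAX_DWELL_TIME;
	return val;
}

int main(void)
{
	unsigned int dwell = clamp_dwell(250);
	unsigned int nsamples = dwell / DWELL_SAMPLE_INTERVAL;
	unsigned int i;

	for (i = 0; i <= nsamples; i++) {
		uint32_t errors = read_lane_errors();

		/* Any errors stop the test, as in margining_run_sw(). */
		if (errors & (ERR_LANE_0_MASK | ERR_LANE_1_MASK)) {
			printf("errors after %u ms, stopping\n", i * DWELL_SAMPLE_INTERVAL);
			return 1;
		}
		usleep(DWELL_SAMPLE_INTERVAL * 1000);
	}
	printf("margin point survived %u ms\n", dwell);
	return 0;
}
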
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index 2a88edfc97b2..dbcad25ead50 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -57,6 +57,9 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_CAP_0_TIME BIT(5)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
+#define USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT BIT(19)
+#define USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK GENMASK(26, 20)
+#define USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK GENMASK(7, 0)
#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
@@ -72,6 +75,7 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_HW_RH BIT(4)
#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
#define USB4_MARGIN_HW_BER_SHIFT 5
+#define USB4_MARGIN_HW_OPT_VOLTAGE BIT(10)
/* Applicable to all margin values */
#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
@@ -82,13 +86,17 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_LANES_MASK GENMASK(2, 0)
+#define USB4_MARGIN_SW_LANE_0 0x0
+#define USB4_MARGIN_SW_LANE_1 0x1
+#define USB4_MARGIN_SW_ALL_LANES 0x7
#define USB4_MARGIN_SW_TIME BIT(3)
#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_OPT_VOLTAGE BIT(5)
+#define USB4_MARGIN_SW_VT_MASK GENMASK(12, 6)
#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
-#define USB4_MARGIN_SW_COUNTER_SHIFT 13
-#define USB4_MARGIN_SW_COUNTER_NOP 0x0
-#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
-#define USB4_MARGIN_SW_COUNTER_START 0x2
-#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK GENMASK(3, 0)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK GENMASK(7, 4)
#endif
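
The sb_regs.h hunk above defines the new software-margining bit layout (lanes in bits 2:0, voltage/time offset in bits 12:6, error-counter operation in bits 14:13), which usb4.c now fills with FIELD_PREP(). A standalone sketch of that packing follows; pack_field() mimics what FIELD_PREP() does but is not the kernel macro.

#include <stdint.h>
#include <stdio.h>

#define SW_LANES_MASK   0x00000007u	/* GENMASK(2, 0)   */
#define SW_TIME         0x00000008u	/* BIT(3)          */
#define SW_RH           0x00000010u	/* BIT(4)          */
#define SW_OPT_VOLTAGE  0x00000020u	/* BIT(5)          */
#define SW_VT_MASK      0x00001fc0u	/* GENMASK(12, 6)  */
#define SW_COUNTER_MASK 0x00006000u	/* GENMASK(14, 13) */

/* Shift the value up to the lowest set bit of the mask, like FIELD_PREP(). */
static uint32_t pack_field(uint32_t mask, uint32_t val)
{
	unsigned int shift = 0;

	while (!((mask >> shift) & 1))
		shift++;
	return (val << shift) & mask;
}

int main(void)
{
	uint32_t val = 0;

	val |= pack_field(SW_LANES_MASK, 0x7);		/* all lanes */
	val |= SW_TIME;					/* time margining */
	val |= pack_field(SW_VT_MASK, 12);		/* 12-step offset */
	val |= pack_field(SW_COUNTER_MASK, 0x1);	/* clear + enable counter */

	printf("metadata word: 0x%08x\n", val);
	return 0;
}
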
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index b47f7873c847..6737188f2581 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1353,14 +1353,48 @@ int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
u8 index, u8 reg, const void *buf, u8 size);
+/**
+ * enum usb4_margin_sw_error_counter - Software margining error counter operation
+ * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup
+ * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter
+ * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value
+ * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value
+ */
+enum usb4_margin_sw_error_counter {
+ USB4_MARGIN_SW_ERROR_COUNTER_NOP,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ USB4_MARGIN_SW_ERROR_COUNTER_START,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP,
+};
+
+/**
+ * struct usb4_port_margining_params - USB4 margining parameters
+ * @error_counter: Error counter operation for software margining
+ * @ber_level: Current BER level contour value
+ * @lanes: %0, %1 or %7 (all)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
+ * @right_high: %false if left/low margin test is performed, %true if right/high
+ * @time: %true if time margining is used instead of voltage
+ */
+struct usb4_port_margining_params {
+ enum usb4_margin_sw_error_counter error_counter;
+ u32 ber_level;
+ u32 lanes;
+ u32 voltage_time_offset;
+ bool optional_voltage_offset_range;
+ bool right_high;
+ bool time;
+};
+
int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
u8 index, u32 *caps);
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, unsigned int ber_level,
- bool timing, bool right_high, u32 *results);
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results);
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, bool timing,
- bool right_high, u32 counter);
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results);
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
u8 index, u32 *errors);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 4d83b65afb5b..0a9b4aeb3fa1 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1653,31 +1653,31 @@ int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
* @port: USB4 port
* @target: Sideband target
* @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @ber_level: BER level contour value
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
+ * @params: Parameters for USB4 hardware margining
* @results: Array with at least two elements to hold the results
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
*/
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, unsigned int ber_level,
- bool timing, bool right_high, u32 *results)
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_HW_TIME;
- if (right_high)
+ if (params->right_high)
val |= USB4_MARGIN_HW_RH;
- if (ber_level)
- val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
- USB4_MARGIN_HW_BER_MASK;
+ if (params->ber_level)
+ val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_HW_OPT_VOLTAGE;
ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
sizeof(val));
@@ -1698,38 +1698,46 @@ int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
* @port: USB4 port
* @target: Sideband target
* @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @counter: What to do with the error counter
+ * @params: Parameters for USB4 software margining
+ * @results: Data word for the operation completion data
*
* Runs software lane margining on USB4 port. Read back the error
* counters by calling usb4_port_sw_margin_errors(). Returns %0 in
* success and negative errno otherwise.
*/
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, bool timing,
- bool right_high, u32 counter)
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_SW_TIME;
- if (right_high)
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_SW_OPT_VOLTAGE;
+ if (params->right_high)
val |= USB4_MARGIN_SW_RH;
- val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
- USB4_MARGIN_SW_COUNTER_MASK;
+ val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
+ val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);
ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
sizeof(val));
if (ret)
return ret;
- return usb4_port_sb_op(port, target, index,
- USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ ret = usb4_port_sb_op(port, target, index,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results));
+
}
/**
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 22e1bc4d8a66..b35c44caf3d7 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -303,7 +303,7 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
pr_devel("HVSI@%x: %s DTR...\n", pv->termno,
dtr ? "Setting" : "Clearing");
- ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
+ ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
ctrl.mask = cpu_to_be32(HVSI_TSDTR);
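
The hvsi_lib.c fix above, like the mxser.c one further down, replaces a trailing comma with a semicolon. A short standalone example of why the typo compiles and usually behaves identically: the comma operator simply folds the next assignment into the same statement.

#include <stdio.h>

int main(void)
{
	int a, b = 0;

	a = 1,		/* comma operator: the next line becomes part of this statement */
	b = 2;		/* still evaluated, so the bug is invisible at run time */

	printf("a=%d b=%d\n", a, b);	/* prints a=1 b=2 either way */
	return 0;
}
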
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 5b97e420a95f..4d45eca4929a 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -208,9 +208,6 @@ static const struct {
};
#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
-
-/* driver_data correspond to the lines in the structure above
- see also ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
{ PCI_DEVICE_DATA(MOXA, C168, 8) },
{ PCI_DEVICE_DATA(MOXA, C104, 4) },
@@ -986,7 +983,7 @@ static int mxser_get_serial_info(struct tty_struct *tty,
ss->baud_base = MXSER_BAUD_BASE;
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
- ss->custom_divisor = MXSER_CUSTOM_DIVISOR,
+ ss->custom_divisor = MXSER_CUSTOM_DIVISOR;
mutex_unlock(&port->mutex);
return 0;
}
@@ -1773,8 +1770,6 @@ static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
mxser_process_txrx_fifo(info);
- info->port.close_delay = 5 * HZ / 10;
- info->port.closing_wait = 30 * HZ;
spin_lock_init(&info->slock);
/* before set INT ISR, disable all int */
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 8913cdd675f6..ebf0bbc2cff2 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -529,7 +529,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
bool found = false;
for_each_available_child_of_node(ctrl->dev.of_node, node) {
- if (!of_get_property(node, "compatible", NULL))
+ if (!of_property_present(node, "compatible"))
continue;
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 53d8eee9b1c8..25c201cfb91e 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -561,6 +561,7 @@ static const struct of_device_id aspeed_vuart_table[] = {
{ .compatible = "aspeed,ast2500-vuart" },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_vuart_table);
static struct platform_driver aspeed_vuart_driver = {
.driver = {
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 121a5ce86050..d7a0f271263a 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk.h>
+#include <linux/console.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -213,11 +214,57 @@ static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match);
+static bool bcm2835aux_can_disable_clock(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(data->line);
+
+ if (device_may_wakeup(dev))
+ return false;
+
+ if (uart_console(&up->port) && !console_suspend_enabled)
+ return false;
+
+ return true;
+}
+
+static int bcm2835aux_suspend(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+
+ serial8250_suspend_port(data->line);
+
+ if (!bcm2835aux_can_disable_clock(dev))
+ return 0;
+
+ clk_disable_unprepare(data->clk);
+ return 0;
+}
+
+static int bcm2835aux_resume(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (bcm2835aux_can_disable_clock(dev)) {
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
+ serial8250_resume_port(data->line);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835aux_dev_pm_ops, bcm2835aux_suspend, bcm2835aux_resume);
+
static struct platform_driver bcm2835aux_serial_driver = {
.driver = {
.name = "bcm2835-aux-uart",
.of_match_table = bcm2835aux_serial_match,
.acpi_match_table = bcm2835aux_serial_acpi_match,
+ .pm = pm_ptr(&bcm2835aux_dev_pm_ops),
},
.probe = bcm2835aux_serial_probe,
.remove_new = bcm2835aux_serial_remove,
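
The new bcm2835aux suspend/resume hooks above only gate the aux clock when the port is neither a wakeup source nor an active console with console suspend disabled. A toy model of that predicate follows; the struct below is an illustrative stand-in, not the kernel's device model.

#include <stdbool.h>
#include <stdio.h>

struct port_state {
	bool may_wakeup;
	bool is_console;
	bool console_suspend_enabled;
};

static bool can_disable_clock(const struct port_state *p)
{
	if (p->may_wakeup)
		return false;	/* clock must keep ticking to wake the system */
	if (p->is_console && !p->console_suspend_enabled)
		return false;	/* console stays usable across suspend */
	return true;
}

int main(void)
{
	struct port_state console = { false, true, false };
	struct port_state plain = { false, false, true };

	printf("console: %d, plain uart: %d\n",
	       can_disable_clock(&console), can_disable_clock(&plain));
	return 0;
}
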
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 8a353e3cc3dd..d215c494ee24 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -89,7 +89,9 @@ int serial8250_tx_dma(struct uart_8250_port *p)
struct tty_port *tport = &p->port.state->port;
struct dma_async_tx_descriptor *desc;
struct uart_port *up = &p->port;
- struct scatterlist sg;
+ struct scatterlist *sg;
+ struct scatterlist sgl[2];
+ int i;
int ret;
if (dma->tx_running) {
@@ -110,18 +112,17 @@ int serial8250_tx_dma(struct uart_8250_port *p)
serial8250_do_prepare_tx_dma(p);
- sg_init_table(&sg, 1);
- /* kfifo can do more than one sg, we don't (quite yet) */
- ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
+ sg_init_table(sgl, ARRAY_SIZE(sgl));
+
+ ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, sgl, ARRAY_SIZE(sgl),
UART_XMIT_SIZE, dma->tx_addr);
- /* we already checked empty fifo above, so there should be something */
- if (WARN_ON_ONCE(ret != 1))
- return 0;
+ dma->tx_size = 0;
- dma->tx_size = sg_dma_len(&sg);
+ for_each_sg(sgl, sg, ret, i)
+ dma->tx_size += sg_dma_len(sg);
- desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1,
+ desc = dmaengine_prep_slave_sg(dma->txchan, sgl, ret,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
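
The 8250_dma.c change above moves from a single scatterlist entry to two, because a wrapped circular TX kfifo maps to at most two linear chunks whose lengths are summed into dma->tx_size. A userspace sketch of that split follows; the types here are stand-ins, not the kernel scatterlist or kfifo API.

#include <stddef.h>
#include <stdio.h>

#define XMIT_SIZE 4096

struct chunk {
	size_t offset;
	size_t len;
};

/* Split [tail, tail + pending) of a circular buffer into up to two chunks. */
static int fifo_chunks(size_t tail, size_t pending, struct chunk sgl[2])
{
	size_t first = pending;

	if (tail + pending > XMIT_SIZE)
		first = XMIT_SIZE - tail;	/* up to the end of the buffer */

	sgl[0].offset = tail;
	sgl[0].len = first;
	if (first == pending)
		return 1;

	sgl[1].offset = 0;			/* wrapped part, from the start */
	sgl[1].len = pending - first;
	return 2;
}

int main(void)
{
	struct chunk sgl[2];
	size_t tx_size = 0;
	int i, n;

	n = fifo_chunks(3900, 500, sgl);	/* wraps: 196 + 304 bytes */
	for (i = 0; i < n; i++)
		tx_size += sgl[i].len;

	printf("%d segment(s), %zu bytes total\n", n, tx_size);
	return 0;
}
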
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 5a2520943dfd..b055d89cfb39 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -89,7 +89,7 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
unsigned int quot, unsigned int quot_frac)
{
dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
- serial8250_do_set_divisor(p, baud, quot, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot);
}
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios,
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e3f482fd3de4..6176083d0341 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -171,6 +171,17 @@ OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
+static int __init early_serial8250_rs2_setup(struct earlycon_device *device,
+ const char *options)
+{
+ device->port.regshift = 2;
+
+ return early_serial8250_setup(device, options);
+}
+OF_EARLYCON_DECLARE(uart, "intel,xscale-uart", early_serial8250_rs2_setup);
+OF_EARLYCON_DECLARE(uart, "mrvl,mmp-uart", early_serial8250_rs2_setup);
+OF_EARLYCON_DECLARE(uart, "mrvl,pxa-uart", early_serial8250_rs2_setup);
+
#ifdef CONFIG_SERIAL_8250_OMAP
static int __init early_omap8250_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 616128254bbd..b7a75db15249 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -500,7 +500,7 @@ static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud,
static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
unsigned int quot, unsigned int quot_frac)
{
- serial8250_do_set_divisor(p, baud, quot, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot);
/* Preserve bits not related to baudrate; DLD[7:4]. */
quot_frac |= serial_port_in(p, 0x2) & 0xf0;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index afef1dd4ddf4..88b58f44e4e9 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -137,7 +137,6 @@ struct omap8250_priv {
atomic_t active;
bool is_suspending;
int wakeirq;
- int wakeups_enabled;
u32 latency;
u32 calc_latency;
struct pm_qos_request pm_qos_request;
@@ -1523,7 +1522,10 @@ static int omap8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- device_init_wakeup(&pdev->dev, true);
+ device_set_wakeup_capable(&pdev->dev, true);
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1581,7 +1583,7 @@ static int omap8250_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0,
dev_name(&pdev->dev), priv);
if (ret < 0)
- return ret;
+ goto err;
priv->wakeirq = irq_of_parse_and_map(np, 1);
@@ -1622,7 +1624,7 @@ static void omap8250_remove(struct platform_device *pdev)
flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
- device_init_wakeup(&pdev->dev, false);
+ device_set_wakeup_capable(&pdev->dev, false);
}
static int omap8250_prepare(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e1d7aa2fa347..6709b6a5f301 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1277,7 +1277,7 @@ static void pci_oxsemi_tornado_set_divisor(struct uart_port *port,
serial_icr_write(up, UART_TCR, tcr);
serial_icr_write(up, UART_CPR, cpr);
serial_icr_write(up, UART_CKS, cpr2);
- serial8250_do_set_divisor(port, baud, quot, 0);
+ serial8250_do_set_divisor(port, baud, quot);
}
/*
diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c
index d5c8d851348d..be7ff07cbdd0 100644
--- a/drivers/tty/serial/8250/8250_platform.c
+++ b/drivers/tty/serial/8250/8250_platform.c
@@ -2,11 +2,15 @@
/*
* Universal/legacy platform driver for 8250/16550-type serial ports
*
- * Supports: ISA-compatible 8250/16550 ports
+ * Supports:
+ * ISA-compatible 8250/16550 ports
+ * ACPI 8250/16550 ports
* PNP 8250/16550 ports
* "serial8250" platform devices
*/
+#include <linux/acpi.h>
#include <linux/array_size.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/once.h>
@@ -22,9 +26,9 @@
/*
* Configuration:
- * share_irqs Whether we pass IRQF_SHARED to request_irq().
+ * share_irqs: Whether we pass IRQF_SHARED to request_irq().
* This option is unsafe when used on edge-triggered interrupts.
- * skip_txen_test Force skip of txen test at init time.
+ * skip_txen_test: Force skip of txen test at init time.
*/
unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
unsigned int skip_txen_test;
@@ -61,9 +65,9 @@ static void __init __serial8250_isa_init_ports(void)
nr_uarts = UART_NR;
/*
- * Set up initial isa ports based on nr_uart module param, or else
+ * Set up initial ISA ports based on nr_uart module param, or else
* default to CONFIG_SERIAL_8250_RUNTIME_UARTS. Note that we do not
- * need to increase nr_uarts when setting up the initial isa ports.
+ * need to increase nr_uarts when setting up the initial ISA ports.
*/
for (i = 0; i < nr_uarts; i++)
serial8250_setup_port(i);
@@ -101,13 +105,63 @@ void __init serial8250_isa_init_ports(void)
}
/*
- * Register a set of serial devices attached to a platform device. The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set.
+ * Generic 16550A platform devices
*/
-static int serial8250_probe(struct platform_device *dev)
+static int serial8250_probe_acpi(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uart_8250_port uart = { };
+ struct resource *regs;
+ unsigned char iotype;
+ int ret, line;
+
+ regs = platform_get_mem_or_io(pdev, 0);
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL, "no registers defined\n");
+
+ switch (resource_type(regs)) {
+ case IORESOURCE_IO:
+ uart.port.iobase = regs->start;
+ iotype = UPIO_PORT;
+ break;
+ case IORESOURCE_MEM:
+ uart.port.mapbase = regs->start;
+ uart.port.mapsize = resource_size(regs);
+ uart.port.flags = UPF_IOREMAP;
+ iotype = UPIO_MEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* default clock frequency */
+ uart.port.uartclk = 1843200;
+ uart.port.type = PORT_16550A;
+ uart.port.dev = &pdev->dev;
+ uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+
+ ret = uart_read_and_validate_port_properties(&uart.port);
+ /* no interrupt -> fall back to polling */
+ if (ret == -ENXIO)
+ ret = 0;
+ if (ret)
+ return ret;
+
+ /*
+ * The previous call may not set iotype correctly when reg-io-width
+ * property is absent and it doesn't support IO port resource.
+ */
+ uart.port.iotype = iotype;
+
+ line = serial8250_register_8250_port(&uart);
+ if (line < 0)
+ return line;
+
+ return 0;
+}
+
+static int serial8250_probe_platform(struct platform_device *dev, struct plat_serial8250_port *p)
{
- struct plat_serial8250_port *p = dev_get_platdata(&dev->dev);
struct uart_8250_port uart;
int ret, i, irqflag = 0;
@@ -156,6 +210,31 @@ static int serial8250_probe(struct platform_device *dev)
}
/*
+ * Register a set of serial devices attached to a platform device.
+ * The list is terminated with a zero flags entry, which means we expect
+ * all entries to have at least UPF_BOOT_AUTOCONF set.
+ */
+static int serial8250_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct plat_serial8250_port *p;
+
+ p = dev_get_platdata(dev);
+ if (p)
+ return serial8250_probe_platform(pdev, p);
+
+ /*
+ * Probe platform UART devices defined via standard hardware
+ * discovery mechanisms such as ACPI or DT. Only ACPI-based serial
+ * devices are supported for now.
+ */
+ if (has_acpi_companion(dev))
+ return serial8250_probe_acpi(pdev);
+
+ return 0;
+}
+
+/*
* Remove serial ports registered against a platform device.
*/
static void serial8250_remove(struct platform_device *dev)
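For context on the platform-data path kept above: legacy board code registers a "serial8250" platform device whose platform data is an array of struct plat_serial8250_port ending in a zero-flags entry, as the comment before serial8250_probe() notes. A minimal sketch, assuming hypothetical board code (addresses, IRQs and the fixture names are illustrative, not taken from this patch):

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

/* Hypothetical board fixture: one legacy port plus the zero-flags terminator. */
static struct plat_serial8250_port board_uart_data[] = {
	{
		.iobase		= 0x3f8,
		.irq		= 4,
		.uartclk	= 1843200,
		.iotype		= UPIO_PORT,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
	},
	{ },	/* zero flags terminate the list */
};

static struct platform_device board_uart_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = board_uart_data,
	},
};

Board init code would then call platform_device_register(&board_uart_device), after which the platform-data branch of serial8250_probe() walks the array.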
@@ -198,6 +277,12 @@ static int serial8250_resume(struct platform_device *dev)
return 0;
}
+static const struct acpi_device_id acpi_platform_serial_table[] = {
+ { "RSCV0003" }, /* RISC-V Generic 16550A UART */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_platform_serial_table);
+
static struct platform_driver serial8250_isa_driver = {
.probe = serial8250_probe,
.remove_new = serial8250_remove,
@@ -205,12 +290,13 @@ static struct platform_driver serial8250_isa_driver = {
.resume = serial8250_resume,
.driver = {
.name = "serial8250",
+ .acpi_match_table = acpi_platform_serial_table,
},
};
/*
* This "device" covers _all_ ISA 8250-compatible serial devices listed
- * in the table in include/asm/serial.h
+ * in the table in include/asm/serial.h.
*/
struct platform_device *serial8250_isa_devs;
@@ -239,8 +325,7 @@ static int __init serial8250_init(void)
if (ret)
goto unreg_uart_drv;
- serial8250_isa_devs = platform_device_alloc("serial8250",
- PLAT8250_DEV_LEGACY);
+ serial8250_isa_devs = platform_device_alloc("serial8250", PLAT8250_DEV_LEGACY);
if (!serial8250_isa_devs) {
ret = -ENOMEM;
goto unreg_pnp;
@@ -279,7 +364,7 @@ static void __exit serial8250_exit(void)
/*
* This tells serial8250_unregister_port() not to re-register
* the ports (thereby making serial8250_isa_driver permanently
- * in use.)
+ * in use).
*/
serial8250_isa_devs = NULL;
@@ -312,12 +397,13 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
#ifndef MODULE
-/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name
- * working as well for the module options so we don't break people. We
+/*
+ * This module was renamed to 8250_core in 3.7. Keep the old "8250" name
+ * working as well for the module options so we don't break people. We
* need to keep the names identical and the convenient macros will happily
* refuse to let us do that by failing the build with redefinition errors
- * of global variables. So we stick them inside a dummy function to avoid
- * those conflicts. The options still get parsed, and the redefined
+ * of global variables. So we stick them inside a dummy function to avoid
+ * those conflicts. The options still get parsed, and the redefined
* MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive.
*
* This is hacky. I'm sorry.
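The dummy-function trick described in the comment above can be sketched roughly as follows; this is an illustration from memory of how the 8250 code keeps the old prefix alive, so the helper name and the exact set of parameters are assumptions rather than a copy of the file:

#include <linux/moduleparam.h>

static void __used s8250_options(void)
{
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "8250."

	/* Reuse the existing storage; only the visible option prefix differs. */
	module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
	module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
	module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644);
}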
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 2786918aea98..3509af7dc52b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2609,7 +2609,7 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
}
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
+ unsigned int quot)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -2641,7 +2641,7 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
if (port->set_divisor)
port->set_divisor(port, baud, quot, quot_frac);
else
- serial8250_do_set_divisor(port, baud, quot, quot_frac);
+ serial8250_do_set_divisor(port, baud, quot);
}
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 1ac86b565374..96dd6126296c 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -165,22 +165,6 @@ static struct platform_driver serial_pxa_driver = {
module_platform_driver(serial_pxa_driver);
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-static int __init early_serial_pxa_setup(struct earlycon_device *device,
- const char *options)
-{
- struct uart_port *port = &device->port;
-
- if (!(device->port.membase || device->port.iobase))
- return -ENODEV;
-
- port->regshift = 2;
- return early_serial8250_setup(device, NULL);
-}
-OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
-OF_EARLYCON_DECLARE(mmp, "mrvl,mmp-uart", early_serial_pxa_setup);
-#endif
-
MODULE_AUTHOR("Sergei Ianovich");
MODULE_DESCRIPTION("driver for PXA on-board UARTS");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 69a632fefc41..6f0db310cf69 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -124,13 +124,14 @@ struct qcom_geni_serial_port {
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
bool setup;
- unsigned int baud;
+ unsigned long poll_timeout_us;
unsigned long clk_rate;
void *rx_buf;
u32 loopback;
bool brk;
unsigned int tx_remaining;
+ unsigned int tx_queued;
int wakeup_irq;
bool rx_tx_swap;
bool cts_rts_swap;
@@ -144,6 +145,9 @@ static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
static struct uart_driver qcom_geni_uart_driver;
+static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport);
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport);
+
static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
{
return container_of(uport, struct qcom_geni_serial_port, uport);
@@ -265,27 +269,18 @@ static bool qcom_geni_serial_secondary_active(struct uart_port *uport)
return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE;
}
-static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
- int offset, int field, bool set)
+static bool qcom_geni_serial_poll_bitfield(struct uart_port *uport,
+ unsigned int offset, u32 field, u32 val)
{
u32 reg;
struct qcom_geni_serial_port *port;
- unsigned int baud;
- unsigned int fifo_bits;
unsigned long timeout_us = 20000;
struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) {
port = to_dev_port(uport);
- baud = port->baud;
- if (!baud)
- baud = 115200;
- fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
- /*
- * Total polling iterations based on FIFO worth of bytes to be
- * sent at current baud. Add a little fluff to the wait.
- */
- timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
+ if (port->poll_timeout_us)
+ timeout_us = port->poll_timeout_us;
}
/*
@@ -295,7 +290,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
while (timeout_us) {
reg = readl(uport->membase + offset);
- if ((bool)(reg & field) == set)
+ if ((reg & field) == val)
return true;
udelay(10);
timeout_us -= 10;
@@ -303,6 +298,12 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
return false;
}
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ unsigned int offset, u32 field, bool set)
+{
+ return qcom_geni_serial_poll_bitfield(uport, offset, field, set ? field : 0);
+}
+
static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
{
u32 m_cmd;
@@ -315,18 +316,16 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
{
int done;
- u32 irq_clear = M_CMD_DONE_EN;
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
writel(M_GENI_CMD_ABORT, uport->membase +
SE_GENI_M_CMD_CTRL_REG);
- irq_clear |= M_CMD_ABORT_EN;
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
+ writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
- writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_abort_rx(struct uart_port *uport)
@@ -386,17 +385,27 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
unsigned char c)
{
- writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ if (qcom_geni_serial_main_active(uport)) {
+ qcom_geni_serial_poll_tx_done(uport);
+ __qcom_geni_serial_cancel_tx_cmd(uport);
+ }
+
+ writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_setup_tx(uport, 1);
- WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_WATERMARK_EN, true));
writel(c, uport->membase + SE_GENI_TX_FIFOn);
- writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
#endif
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static void qcom_geni_serial_drain_fifo(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
+ qcom_geni_serial_poll_bitfield(uport, SE_GENI_M_GP_LENGTH, GP_LENGTH,
+ port->tx_queued);
+}
+
static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch)
{
struct qcom_geni_private_data *private_data = uport->private_data;
@@ -431,6 +440,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
}
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_setup_tx(uport, bytes_to_send);
for (i = 0; i < count; ) {
size_t chars_to_write = 0;
@@ -469,10 +479,9 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
+ u32 m_irq_en, s_irq_en;
bool locked = true;
unsigned long flags;
- u32 geni_status;
- u32 irq_en;
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
@@ -486,40 +495,28 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
else
uart_port_lock_irqsave(uport, &flags);
- geni_status = readl(uport->membase + SE_GENI_STATUS);
-
- if (!locked) {
- /*
- * We can only get here if an oops is in progress then we were
- * unable to get the lock. This means we can't safely access
- * our state variables like tx_remaining. About the best we
- * can do is wait for the FIFO to be empty before we start our
- * transfer, so we'll do that.
- */
- qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_NOT_EMPTY_EN, false);
- } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
- /*
- * It seems we can't interrupt existing transfers if all data
- * has been sent, in which case we need to look for done first.
- */
- qcom_geni_serial_poll_tx_done(uport);
-
- if (!kfifo_is_empty(&uport->state->port.xmit_fifo)) {
- irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
- writel(irq_en | M_TX_FIFO_WATERMARK_EN,
- uport->membase + SE_GENI_M_IRQ_EN);
- }
+ m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ s_irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ writel(0, uport->membase + SE_GENI_M_IRQ_EN);
+ writel(0, uport->membase + SE_GENI_S_IRQ_EN);
+
+ if (qcom_geni_serial_main_active(uport)) {
+ /* Wait for completion or drain FIFO */
+ if (!locked || port->tx_remaining == 0)
+ qcom_geni_serial_poll_tx_done(uport);
+ else
+ qcom_geni_serial_drain_fifo(uport);
+
+ qcom_geni_serial_cancel_tx_cmd(uport);
}
__qcom_geni_serial_console_write(uport, s, count);
+ writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ writel(s_irq_en, uport->membase + SE_GENI_S_IRQ_EN);
- if (locked) {
- if (port->tx_remaining)
- qcom_geni_serial_setup_tx(uport, port->tx_remaining);
+ if (locked)
uart_port_unlock_irqrestore(uport, flags);
- }
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
@@ -682,13 +679,10 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
-static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
+static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
- if (!qcom_geni_serial_main_active(uport))
- return;
-
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
@@ -698,8 +692,19 @@ static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
+ if (!qcom_geni_serial_main_active(uport))
+ return;
+
+ __qcom_geni_serial_cancel_tx_cmd(uport);
port->tx_remaining = 0;
+ port->tx_queued = 0;
}
static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop)
@@ -923,9 +928,10 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
if (!chunk)
goto out_write_wakeup;
- if (!port->tx_remaining) {
+ if (!active) {
qcom_geni_serial_setup_tx(uport, pending);
port->tx_remaining = pending;
+ port->tx_queued = 0;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
@@ -934,6 +940,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
}
qcom_geni_serial_send_chunk_fifo(uport, chunk);
+ port->tx_queued += chunk;
/*
* The tx fifo watermark is level triggered and latched. Though we had
@@ -1244,11 +1251,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
unsigned long clk_rate;
u32 ver, sampling_rate;
unsigned int avg_bw_core;
+ unsigned long timeout;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
- port->baud = baud;
sampling_rate = UART_OVERSAMPLING;
/* Sampling rate is halved for IP versions >= 2.5 */
@@ -1326,9 +1333,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
else
tx_trans_cfg |= UART_CTS_MASK;
- if (baud)
+ if (baud) {
uart_update_timeout(uport, termios->c_cflag, baud);
+ /*
+ * Make sure that qcom_geni_serial_poll_bitfield() waits for
+ * the FIFO, two-word intermediate transfer register and shift
+ * register to clear.
+ *
+ * Note that uart_fifo_timeout() also adds a 20 ms margin.
+ */
+ timeout = jiffies_to_usecs(uart_fifo_timeout(uport));
+ timeout += 3 * timeout / port->tx_fifo_depth;
+ WRITE_ONCE(port->poll_timeout_us, timeout);
+ }
+
if (!uart_console(uport))
writel(port->loopback,
uport->membase + SE_UART_LOOPBACK_CFG);
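To put rough numbers on the timeout computed above (assuming 115200 baud, 8N1 framing and a 16-word TX FIFO; none of these values come from the patch itself): one frame takes about 10/115200 s, roughly 87 us, so uart_fifo_timeout() returns approximately 16 * 87 us + 20 ms of margin, about 21.4 ms, and the extra 3/tx_fifo_depth share adds another ~4 ms to cover the two-word intermediate transfer register and the shift register.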
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 4132fcff7d4e..8bab2aedc499 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -577,8 +577,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id)
u32 clk_cfg;
writew(1, base + RP2_GLOBAL_CMD);
- readw(base + RP2_GLOBAL_CMD);
msleep(100);
+ readw(base + RP2_GLOBAL_CMD);
writel(0, base + RP2_CLK_PRESCALER);
/* TDM clock configuration */
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index dc35eb77d2ef..0d184ee2f9ce 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -550,6 +550,7 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
case TYPE_APPLE_S5L:
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
+ s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON);
break;
default:
disable_irq_nosync(ourport->rx_irq);
@@ -707,9 +708,8 @@ static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
-static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_chars_dma(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
@@ -843,9 +843,8 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
tty_flip_buffer_push(&port->state->port);
}
-static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_chars_pio(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
uart_port_lock(port);
@@ -855,13 +854,11 @@ static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_irq(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
-
if (ourport->dma && ourport->dma->rx_chan)
- return s3c24xx_serial_rx_chars_dma(dev_id);
- return s3c24xx_serial_rx_chars_pio(dev_id);
+ return s3c24xx_serial_rx_chars_dma(ourport);
+ return s3c24xx_serial_rx_chars_pio(ourport);
}
static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
@@ -928,9 +925,8 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
s3c24xx_serial_stop_tx(port);
}
-static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
+static irqreturn_t s3c24xx_serial_tx_irq(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
uart_port_lock(port);
@@ -944,17 +940,17 @@ static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
/* interrupt handler for s3c64xx and later SoC's.*/
static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
{
- const struct s3c24xx_uart_port *ourport = id;
+ struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
u32 pend = rd_regl(port, S3C64XX_UINTP);
irqreturn_t ret = IRQ_HANDLED;
if (pend & S3C64XX_UINTM_RXD_MSK) {
- ret = s3c24xx_serial_rx_irq(irq, id);
+ ret = s3c24xx_serial_rx_irq(ourport);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK);
}
if (pend & S3C64XX_UINTM_TXD_MSK) {
- ret = s3c24xx_serial_tx_irq(irq, id);
+ ret = s3c24xx_serial_tx_irq(ourport);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK);
}
return ret;
@@ -963,19 +959,21 @@ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
/* interrupt handler for Apple SoC's.*/
static irqreturn_t apple_serial_handle_irq(int irq, void *id)
{
- const struct s3c24xx_uart_port *ourport = id;
+ struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
u32 pend = rd_regl(port, S3C2410_UTRSTAT);
irqreturn_t ret = IRQ_NONE;
- if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) {
+ if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO |
+ APPLE_S5L_UTRSTAT_RXTO_LEGACY)) {
wr_regl(port, S3C2410_UTRSTAT,
- APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO);
- ret = s3c24xx_serial_rx_irq(irq, id);
+ APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO |
+ APPLE_S5L_UTRSTAT_RXTO_LEGACY);
+ ret = s3c24xx_serial_rx_irq(ourport);
}
if (pend & APPLE_S5L_UTRSTAT_TXTHRESH) {
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_TXTHRESH);
- ret = s3c24xx_serial_tx_irq(irq, id);
+ ret = s3c24xx_serial_tx_irq(ourport);
}
return ret;
@@ -1195,7 +1193,8 @@ static void apple_s5l_serial_shutdown(struct uart_port *port)
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK);
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK);
wr_regl(port, S3C2410_UCON, ucon);
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
@@ -1292,6 +1291,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
/* Enable Rx Interrupt */
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
+ s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON);
return ret;
}
@@ -2148,13 +2148,15 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK);
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK);
if (ourport->tx_enabled)
ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK;
if (ourport->rx_enabled)
ucon |= APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK;
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK;
wr_regl(port, S3C2410_UCON, ucon);
@@ -2541,7 +2543,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.name = "Apple S5L UART",
.type = TYPE_APPLE_S5L,
.port_type = PORT_8250,
- .iotype = UPIO_MEM,
+ .iotype = UPIO_MEM32,
.fifosize = 16,
.rx_fifomask = S3C2410_UFSTAT_RXMASK,
.rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
@@ -2827,6 +2829,9 @@ OF_EARLYCON_DECLARE(gs101, "google,gs101-uart", gs101_early_console_setup);
static int __init apple_s5l_early_console_setup(struct earlycon_device *device,
const char *opt)
{
+ /* Apple A7-A11 requires MMIO32 register accesses. */
+ device->port.iotype = UPIO_MEM32;
+
/* Close enough to S3C2410 for earlycon... */
device->port.private_data = &s3c2410_early_console_data;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index b4c1798a1df2..ad88a33a504f 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -10,6 +10,7 @@
#undef DEFAULT_SYMBOL_NAMESPACE
#define DEFAULT_SYMBOL_NAMESPACE SERIAL_NXP_SC16IS7XX
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -78,52 +79,52 @@
#define SC16IS7XX_XOFF2_REG (0x07) /* Xoff2 word */
/* IER register bits */
-#define SC16IS7XX_IER_RDI_BIT (1 << 0) /* Enable RX data interrupt */
-#define SC16IS7XX_IER_THRI_BIT (1 << 1) /* Enable TX holding register
+#define SC16IS7XX_IER_RDI_BIT BIT(0) /* Enable RX data interrupt */
+#define SC16IS7XX_IER_THRI_BIT BIT(1) /* Enable TX holding register
* interrupt */
-#define SC16IS7XX_IER_RLSI_BIT (1 << 2) /* Enable RX line status
+#define SC16IS7XX_IER_RLSI_BIT BIT(2) /* Enable RX line status
* interrupt */
-#define SC16IS7XX_IER_MSI_BIT (1 << 3) /* Enable Modem status
+#define SC16IS7XX_IER_MSI_BIT BIT(3) /* Enable Modem status
* interrupt */
/* IER register bits - write only if (EFR[4] == 1) */
-#define SC16IS7XX_IER_SLEEP_BIT (1 << 4) /* Enable Sleep mode */
-#define SC16IS7XX_IER_XOFFI_BIT (1 << 5) /* Enable Xoff interrupt */
-#define SC16IS7XX_IER_RTSI_BIT (1 << 6) /* Enable nRTS interrupt */
-#define SC16IS7XX_IER_CTSI_BIT (1 << 7) /* Enable nCTS interrupt */
+#define SC16IS7XX_IER_SLEEP_BIT BIT(4) /* Enable Sleep mode */
+#define SC16IS7XX_IER_XOFFI_BIT BIT(5) /* Enable Xoff interrupt */
+#define SC16IS7XX_IER_RTSI_BIT BIT(6) /* Enable nRTS interrupt */
+#define SC16IS7XX_IER_CTSI_BIT BIT(7) /* Enable nCTS interrupt */
/* FCR register bits */
-#define SC16IS7XX_FCR_FIFO_BIT (1 << 0) /* Enable FIFO */
-#define SC16IS7XX_FCR_RXRESET_BIT (1 << 1) /* Reset RX FIFO */
-#define SC16IS7XX_FCR_TXRESET_BIT (1 << 2) /* Reset TX FIFO */
-#define SC16IS7XX_FCR_RXLVLL_BIT (1 << 6) /* RX Trigger level LSB */
-#define SC16IS7XX_FCR_RXLVLH_BIT (1 << 7) /* RX Trigger level MSB */
+#define SC16IS7XX_FCR_FIFO_BIT BIT(0) /* Enable FIFO */
+#define SC16IS7XX_FCR_RXRESET_BIT BIT(1) /* Reset RX FIFO */
+#define SC16IS7XX_FCR_TXRESET_BIT BIT(2) /* Reset TX FIFO */
+#define SC16IS7XX_FCR_RXLVLL_BIT BIT(6) /* RX Trigger level LSB */
+#define SC16IS7XX_FCR_RXLVLH_BIT BIT(7) /* RX Trigger level MSB */
/* FCR register bits - write only if (EFR[4] == 1) */
-#define SC16IS7XX_FCR_TXLVLL_BIT (1 << 4) /* TX Trigger level LSB */
-#define SC16IS7XX_FCR_TXLVLH_BIT (1 << 5) /* TX Trigger level MSB */
+#define SC16IS7XX_FCR_TXLVLL_BIT BIT(4) /* TX Trigger level LSB */
+#define SC16IS7XX_FCR_TXLVLH_BIT BIT(5) /* TX Trigger level MSB */
/* IIR register bits */
-#define SC16IS7XX_IIR_NO_INT_BIT (1 << 0) /* No interrupts pending */
-#define SC16IS7XX_IIR_ID_MASK 0x3e /* Mask for the interrupt ID */
-#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */
-#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */
-#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */
-#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */
-#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt
- * - only on 75x/76x
- */
-#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state
- * - only on 75x/76x
- */
-#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */
-#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state
- * from active (LOW)
- * to inactive (HIGH)
- */
+#define SC16IS7XX_IIR_NO_INT_BIT 0x01 /* No interrupts pending */
+#define SC16IS7XX_IIR_ID_MASK GENMASK(5, 1) /* Mask for the interrupt ID */
+#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */
+#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */
+#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */
+#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */
+#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt
+ * - only on 75x/76x
+ */
+#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state
+ * - only on 75x/76x
+ */
+#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */
+#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state
+ * from active (LOW)
+ * to inactive (HIGH)
+ */
/* LCR register bits */
-#define SC16IS7XX_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
-#define SC16IS7XX_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
+#define SC16IS7XX_LCR_LENGTH0_BIT BIT(0) /* Word length bit 0 */
+#define SC16IS7XX_LCR_LENGTH1_BIT BIT(1) /* Word length bit 1
*
* Word length bits table:
* 00 -> 5 bit words
@@ -131,7 +132,7 @@
* 10 -> 7 bit words
* 11 -> 8 bit words
*/
-#define SC16IS7XX_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
+#define SC16IS7XX_LCR_STOPLEN_BIT BIT(2) /* STOP length bit
*
* STOP length bit table:
* 0 -> 1 stop bit
@@ -139,11 +140,11 @@
* word length is 5,
* 2 stop bits otherwise
*/
-#define SC16IS7XX_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
-#define SC16IS7XX_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
-#define SC16IS7XX_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
-#define SC16IS7XX_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
-#define SC16IS7XX_LCR_DLAB_BIT (1 << 7) /* Divisor Latch enable */
+#define SC16IS7XX_LCR_PARITY_BIT BIT(3) /* Parity bit enable */
+#define SC16IS7XX_LCR_EVENPARITY_BIT BIT(4) /* Even parity bit enable */
+#define SC16IS7XX_LCR_FORCEPARITY_BIT BIT(5) /* 9-bit multidrop parity */
+#define SC16IS7XX_LCR_TXBREAK_BIT BIT(6) /* TX break enable */
+#define SC16IS7XX_LCR_DLAB_BIT BIT(7) /* Divisor Latch enable */
#define SC16IS7XX_LCR_WORD_LEN_5 (0x00)
#define SC16IS7XX_LCR_WORD_LEN_6 (0x01)
#define SC16IS7XX_LCR_WORD_LEN_7 (0x02)
@@ -154,61 +155,65 @@
* reg set */
/* MCR register bits */
-#define SC16IS7XX_MCR_DTR_BIT (1 << 0) /* DTR complement
+#define SC16IS7XX_MCR_DTR_BIT BIT(0) /* DTR complement
* - only on 75x/76x
*/
-#define SC16IS7XX_MCR_RTS_BIT (1 << 1) /* RTS complement */
-#define SC16IS7XX_MCR_TCRTLR_BIT (1 << 2) /* TCR/TLR register enable */
-#define SC16IS7XX_MCR_LOOP_BIT (1 << 4) /* Enable loopback test mode */
-#define SC16IS7XX_MCR_XONANY_BIT (1 << 5) /* Enable Xon Any
+#define SC16IS7XX_MCR_RTS_BIT BIT(1) /* RTS complement */
+#define SC16IS7XX_MCR_TCRTLR_BIT BIT(2) /* TCR/TLR register enable */
+#define SC16IS7XX_MCR_LOOP_BIT BIT(4) /* Enable loopback test mode */
+#define SC16IS7XX_MCR_XONANY_BIT BIT(5) /* Enable Xon Any
* - write enabled
* if (EFR[4] == 1)
*/
-#define SC16IS7XX_MCR_IRDA_BIT (1 << 6) /* Enable IrDA mode
+#define SC16IS7XX_MCR_IRDA_BIT BIT(6) /* Enable IrDA mode
* - write enabled
* if (EFR[4] == 1)
*/
-#define SC16IS7XX_MCR_CLKSEL_BIT (1 << 7) /* Divide clock by 4
+#define SC16IS7XX_MCR_CLKSEL_BIT BIT(7) /* Divide clock by 4
* - write enabled
* if (EFR[4] == 1)
*/
/* LSR register bits */
-#define SC16IS7XX_LSR_DR_BIT (1 << 0) /* Receiver data ready */
-#define SC16IS7XX_LSR_OE_BIT (1 << 1) /* Overrun Error */
-#define SC16IS7XX_LSR_PE_BIT (1 << 2) /* Parity Error */
-#define SC16IS7XX_LSR_FE_BIT (1 << 3) /* Frame Error */
-#define SC16IS7XX_LSR_BI_BIT (1 << 4) /* Break Interrupt */
-#define SC16IS7XX_LSR_BRK_ERROR_MASK 0x1E /* BI, FE, PE, OE bits */
-#define SC16IS7XX_LSR_THRE_BIT (1 << 5) /* TX holding register empty */
-#define SC16IS7XX_LSR_TEMT_BIT (1 << 6) /* Transmitter empty */
-#define SC16IS7XX_LSR_FIFOE_BIT (1 << 7) /* Fifo Error */
+#define SC16IS7XX_LSR_DR_BIT BIT(0) /* Receiver data ready */
+#define SC16IS7XX_LSR_OE_BIT BIT(1) /* Overrun Error */
+#define SC16IS7XX_LSR_PE_BIT BIT(2) /* Parity Error */
+#define SC16IS7XX_LSR_FE_BIT BIT(3) /* Frame Error */
+#define SC16IS7XX_LSR_BI_BIT BIT(4) /* Break Interrupt */
+#define SC16IS7XX_LSR_BRK_ERROR_MASK \
+ (SC16IS7XX_LSR_OE_BIT | \
+ SC16IS7XX_LSR_PE_BIT | \
+ SC16IS7XX_LSR_FE_BIT | \
+ SC16IS7XX_LSR_BI_BIT)
+
+#define SC16IS7XX_LSR_THRE_BIT BIT(5) /* TX holding register empty */
+#define SC16IS7XX_LSR_TEMT_BIT BIT(6) /* Transmitter empty */
+#define SC16IS7XX_LSR_FIFOE_BIT BIT(7) /* Fifo Error */
/* MSR register bits */
-#define SC16IS7XX_MSR_DCTS_BIT (1 << 0) /* Delta CTS Clear To Send */
-#define SC16IS7XX_MSR_DDSR_BIT (1 << 1) /* Delta DSR Data Set Ready
+#define SC16IS7XX_MSR_DCTS_BIT BIT(0) /* Delta CTS Clear To Send */
+#define SC16IS7XX_MSR_DDSR_BIT BIT(1) /* Delta DSR Data Set Ready
* or (IO4)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DRI_BIT (1 << 2) /* Delta RI Ring Indicator
+#define SC16IS7XX_MSR_DRI_BIT BIT(2) /* Delta RI Ring Indicator
* or (IO7)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DCD_BIT (1 << 3) /* Delta CD Carrier Detect
+#define SC16IS7XX_MSR_DCD_BIT BIT(3) /* Delta CD Carrier Detect
* or (IO6)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */
-#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4)
+#define SC16IS7XX_MSR_CTS_BIT BIT(4) /* CTS */
+#define SC16IS7XX_MSR_DSR_BIT BIT(5) /* DSR (IO4)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7)
+#define SC16IS7XX_MSR_RI_BIT BIT(6) /* RI (IO7)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6)
+#define SC16IS7XX_MSR_CD_BIT BIT(7) /* CD (IO6)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! */
/*
* TCR register bits
@@ -241,19 +246,19 @@
#define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4)
/* IOControl register bits (Only 75x/76x) */
-#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
-#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */
-#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */
-#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
+#define SC16IS7XX_IOCONTROL_LATCH_BIT BIT(0) /* Enable input latching */
+#define SC16IS7XX_IOCONTROL_MODEM_A_BIT BIT(1) /* Enable GPIO[7:4] as modem A pins */
+#define SC16IS7XX_IOCONTROL_MODEM_B_BIT BIT(2) /* Enable GPIO[3:0] as modem B pins */
+#define SC16IS7XX_IOCONTROL_SRESET_BIT BIT(3) /* Software Reset */
/* EFCR register bits */
-#define SC16IS7XX_EFCR_9BIT_MODE_BIT (1 << 0) /* Enable 9-bit or Multidrop
+#define SC16IS7XX_EFCR_9BIT_MODE_BIT BIT(0) /* Enable 9-bit or Multidrop
* mode (RS485) */
-#define SC16IS7XX_EFCR_RXDISABLE_BIT (1 << 1) /* Disable receiver */
-#define SC16IS7XX_EFCR_TXDISABLE_BIT (1 << 2) /* Disable transmitter */
-#define SC16IS7XX_EFCR_AUTO_RS485_BIT (1 << 4) /* Auto RS485 RTS direction */
-#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */
-#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode
+#define SC16IS7XX_EFCR_RXDISABLE_BIT BIT(1) /* Disable receiver */
+#define SC16IS7XX_EFCR_TXDISABLE_BIT BIT(2) /* Disable transmitter */
+#define SC16IS7XX_EFCR_AUTO_RS485_BIT BIT(4) /* Auto RS485 RTS direction */
+#define SC16IS7XX_EFCR_RTS_INVERT_BIT BIT(5) /* RTS output inversion */
+#define SC16IS7XX_EFCR_IRDA_MODE_BIT BIT(7) /* IrDA mode
* 0 = rate upto 115.2 kbit/s
* - Only 75x/76x
* 1 = rate upto 1.152 Mbit/s
@@ -261,16 +266,16 @@
*/
/* EFR register bits */
-#define SC16IS7XX_EFR_AUTORTS_BIT (1 << 6) /* Auto RTS flow ctrl enable */
-#define SC16IS7XX_EFR_AUTOCTS_BIT (1 << 7) /* Auto CTS flow ctrl enable */
-#define SC16IS7XX_EFR_XOFF2_DETECT_BIT (1 << 5) /* Enable Xoff2 detection */
-#define SC16IS7XX_EFR_ENABLE_BIT (1 << 4) /* Enable enhanced functions
+#define SC16IS7XX_EFR_AUTORTS_BIT BIT(6) /* Auto RTS flow ctrl enable */
+#define SC16IS7XX_EFR_AUTOCTS_BIT BIT(7) /* Auto CTS flow ctrl enable */
+#define SC16IS7XX_EFR_XOFF2_DETECT_BIT BIT(5) /* Enable Xoff2 detection */
+#define SC16IS7XX_EFR_ENABLE_BIT BIT(4) /* Enable enhanced functions
* and writing to IER[7:4],
* FCR[5:4], MCR[7:5]
*/
-#define SC16IS7XX_EFR_SWFLOW3_BIT (1 << 3) /* SWFLOW bit 3 */
-#define SC16IS7XX_EFR_SWFLOW2_BIT (1 << 2) /* SWFLOW bit 2
- *
+#define SC16IS7XX_EFR_SWFLOW3_BIT BIT(3)
+#define SC16IS7XX_EFR_SWFLOW2_BIT BIT(2)
+ /*
* SWFLOW bits 3 & 2 table:
* 00 -> no transmitter flow
* control
@@ -282,10 +287,10 @@
* XON1, XON2, XOFF1 and
* XOFF2
*/
-#define SC16IS7XX_EFR_SWFLOW1_BIT (1 << 1) /* SWFLOW bit 2 */
-#define SC16IS7XX_EFR_SWFLOW0_BIT (1 << 0) /* SWFLOW bit 3
- *
- * SWFLOW bits 3 & 2 table:
+#define SC16IS7XX_EFR_SWFLOW1_BIT BIT(1)
+#define SC16IS7XX_EFR_SWFLOW0_BIT BIT(0)
+ /*
+ * SWFLOW bits 1 & 0 table:
* 00 -> no received flow
* control
* 01 -> receiver compares
@@ -309,9 +314,9 @@
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_GPIOS_PER_BANK 4
-#define SC16IS7XX_RECONF_MD (1 << 0)
-#define SC16IS7XX_RECONF_IER (1 << 1)
-#define SC16IS7XX_RECONF_RS485 (1 << 2)
+#define SC16IS7XX_RECONF_MD BIT(0)
+#define SC16IS7XX_RECONF_IER BIT(1)
+#define SC16IS7XX_RECONF_RS485 BIT(2)
struct sc16is7xx_one_config {
unsigned int flags;
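The register-bit cleanup above leans on the helpers from <linux/bits.h>; a small illustration of what they expand to and how a masked field is typically compared (demo macro names only, not the driver's definitions):

#include <linux/bits.h>

#define DEMO_IER_RDI_BIT	BIT(0)		/* 1U << 0, i.e. 0x01 */
#define DEMO_IIR_ID_MASK	GENMASK(5, 1)	/* bits 5..1 set, i.e. 0x3e */
#define DEMO_IIR_THRI_SRC	0x02		/* TX holding register empty */

/* Mask out the interrupt ID and compare it against a source value. */
static inline bool demo_iir_is_thri(unsigned int iir)
{
	return (iir & DEMO_IIR_ID_MASK) == DEMO_IIR_THRI_SRC;
}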
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 1e3e28e364df..d94d73e45fb6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -407,14 +407,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
/*
* Turn off DTR and RTS early.
*/
- if (uport && uart_console(uport) && tty) {
- uport->cons->cflag = tty->termios.c_cflag;
- uport->cons->ispeed = tty->termios.c_ispeed;
- uport->cons->ospeed = tty->termios.c_ospeed;
- }
+ if (uport) {
+ if (uart_console(uport) && tty) {
+ uport->cons->cflag = tty->termios.c_cflag;
+ uport->cons->ispeed = tty->termios.c_ispeed;
+ uport->cons->ospeed = tty->termios.c_ospeed;
+ }
- if (!tty || C_HUPCL(tty))
- uart_port_dtr_rts(uport, false);
+ if (!tty || C_HUPCL(tty))
+ uart_port_dtr_rts(uport, false);
+ }
uart_port_shutdown(port);
}
@@ -1102,21 +1104,19 @@ static int uart_tiocmget(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int result = -EIO;
+ int result;
+
+ guard(mutex)(&port->mutex);
- mutex_lock(&port->mutex);
uport = uart_port_check(state);
- if (!uport)
- goto out;
+ if (!uport || tty_io_error(tty))
+ return -EIO;
+
+ uart_port_lock_irq(uport);
+ result = uport->mctrl;
+ result |= uport->ops->get_mctrl(uport);
+ uart_port_unlock_irq(uport);
- if (!tty_io_error(tty)) {
- uart_port_lock_irq(uport);
- result = uport->mctrl;
- result |= uport->ops->get_mctrl(uport);
- uart_port_unlock_irq(uport);
- }
-out:
- mutex_unlock(&port->mutex);
return result;
}
@@ -1126,20 +1126,16 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int ret = -EIO;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
- if (!uport)
- goto out;
+ if (!uport || tty_io_error(tty))
+ return -EIO;
- if (!tty_io_error(tty)) {
- uart_update_mctrl(uport, set, clear);
- ret = 0;
- }
-out:
- mutex_unlock(&port->mutex);
- return ret;
+ uart_update_mctrl(uport, set, clear);
+
+ return 0;
}
static int uart_break_ctl(struct tty_struct *tty, int break_state)
@@ -1147,19 +1143,17 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int ret = -EIO;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return -EIO;
if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
- ret = 0;
-out:
- mutex_unlock(&port->mutex);
- return ret;
+
+ return 0;
}
static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
@@ -1176,17 +1170,14 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
* changing, and hence any extra opens of the port while
* we're auto-configuring.
*/
- if (mutex_lock_interruptible(&port->mutex))
- return -ERESTARTSYS;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &port->mutex) {
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
- uport = uart_port_check(state);
- if (!uport) {
- ret = -EIO;
- goto out;
- }
+ if (tty_port_users(port) != 1)
+ return -EBUSY;
- ret = -EBUSY;
- if (tty_port_users(port) == 1) {
uart_shutdown(tty, state);
/*
@@ -1207,14 +1198,15 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, true);
- if (ret == 0)
- tty_port_set_initialized(port, true);
+ if (ret < 0)
+ return ret;
if (ret > 0)
- ret = 0;
+ return 0;
+
+ tty_port_set_initialized(port, true);
}
-out:
- mutex_unlock(&port->mutex);
- return ret;
+
+ return 0;
}
static void uart_enable_ms(struct uart_port *uport)
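The conversions in this file rely on the scope-based lock guards from <linux/cleanup.h>. A minimal sketch of the two forms used here, with hypothetical helpers standing in for the real work (not code from serial_core.c):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static bool demo_resource_present(void) { return true; }	/* hypothetical */
static void demo_do_work(void) { }				/* hypothetical */

static int demo_guard(void)
{
	/* The mutex is released automatically on every return path. */
	guard(mutex)(&demo_lock);

	if (!demo_resource_present())
		return -EIO;

	return 0;
}

static int demo_cond_guard(void)
{
	/* Interruptible acquisition: fail the call if the wait is interrupted. */
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &demo_lock) {
		demo_do_work();
	}

	return 0;
}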
@@ -1709,10 +1701,11 @@ static void uart_set_termios(struct tty_struct *tty,
unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK;
bool sw_changed = false;
- mutex_lock(&state->port.mutex);
+ guard(mutex)(&state->port.mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return;
/*
* Drivers doing software flow control also need to know
@@ -1735,9 +1728,8 @@ static void uart_set_termios(struct tty_struct *tty,
tty->termios.c_ospeed == old_termios->c_ospeed &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 &&
- !sw_changed) {
- goto out;
- }
+ !sw_changed)
+ return;
uart_change_line_settings(tty, state, old_termios);
/* reload cflag from termios; port driver may have overridden flags */
@@ -1754,8 +1746,6 @@ static void uart_set_termios(struct tty_struct *tty,
mask |= TIOCM_RTS;
uart_set_mctrl(uport, mask);
}
-out:
- mutex_unlock(&state->port.mutex);
}
/*
@@ -2049,10 +2039,11 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
unsigned int status;
int mmio;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return;
mmio = uport->iotype >= UPIO_MEM;
seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
@@ -2064,7 +2055,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
if (uport->type == PORT_UNKNOWN) {
seq_putc(m, '\n');
- goto out;
+ return;
}
if (capable(CAP_SYS_ADMIN)) {
@@ -2115,8 +2106,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
-out:
- mutex_unlock(&port->mutex);
}
static int uart_proc_show(struct seq_file *m, void *v)
@@ -2393,13 +2382,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
struct device *tty_dev;
struct uart_match match = {uport, drv};
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (tty_dev && device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
put_device(tty_dev);
- mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
@@ -2417,7 +2405,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
uart_port_unlock_irq(uport);
}
device_set_awake_path(uport->dev);
- goto unlock;
+ return 0;
}
uport->suspended = 1;
@@ -2460,8 +2448,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
console_stop(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
-unlock:
- mutex_unlock(&port->mutex);
return 0;
}
@@ -2475,14 +2461,13 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
struct uart_match match = {uport, drv};
struct ktermios termios;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq))))
disable_irq_wake(uport->irq);
put_device(tty_dev);
- mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
@@ -2555,8 +2540,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
tty_port_set_suspended(port, false);
}
- mutex_unlock(&port->mutex);
-
return 0;
}
EXPORT_SYMBOL(uart_resume_port);
@@ -2696,14 +2679,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
int ret = 0;
tport = &state->port;
- mutex_lock(&tport->mutex);
+
+ guard(mutex)(&tport->mutex);
port = uart_port_check(state);
if (!port || port->type == PORT_UNKNOWN ||
- !(port->ops->poll_get_char && port->ops->poll_put_char)) {
- ret = -1;
- goto out;
- }
+ !(port->ops->poll_get_char && port->ops->poll_put_char))
+ return -1;
pm_state = state->pm_state;
uart_change_pm(state, UART_PM_STATE_ON);
@@ -2723,10 +2705,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
ret = uart_set_options(port, NULL, baud, parity, bits, flow);
console_list_unlock();
}
-out:
+
if (ret)
uart_change_pm(state, pm_state);
- mutex_unlock(&tport->mutex);
+
return ret;
}
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f91753a40a69..8aea59f8ca13 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -808,7 +808,6 @@ static void asc_serial_remove(struct platform_device *pdev)
uart_remove_one_port(&asc_uart_driver, port);
}
-#ifdef CONFIG_PM_SLEEP
static int asc_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
@@ -823,8 +822,6 @@ static int asc_serial_resume(struct device *dev)
return uart_resume_port(&asc_uart_driver, port);
}
-#endif /* CONFIG_PM_SLEEP */
-
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SERIAL_ST_ASC_CONSOLE
@@ -932,16 +929,15 @@ static struct uart_driver asc_uart_driver = {
.cons = ASC_SERIAL_CONSOLE,
};
-static const struct dev_pm_ops asc_serial_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(asc_serial_pm_ops, asc_serial_suspend,
+ asc_serial_resume);
static struct platform_driver asc_serial_driver = {
.probe = asc_serial_probe,
.remove_new = asc_serial_remove,
.driver = {
.name = DRIVER_NAME,
- .pm = &asc_serial_pm_ops,
+ .pm = pm_sleep_ptr(&asc_serial_pm_ops),
.of_match_table = of_match_ptr(asc_match),
},
};
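The PM-ops change above follows the DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() pattern, which is what lets the CONFIG_PM_SLEEP #ifdef go away. A hedged sketch with a hypothetical driver name "foo" (not st-asc code):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	/* .probe / .remove_new omitted for brevity */
	.driver = {
		.name	= "foo",
		/*
		 * pm_sleep_ptr() resolves to NULL when CONFIG_PM_SLEEP is
		 * disabled, while keeping a syntactic reference so the
		 * compiler can quietly drop the now-dead callbacks.
		 */
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};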
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 2acfcea403ce..777392914819 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -219,7 +219,7 @@ struct cdns_platform_data {
u32 quirks;
};
-struct serial_rs485 cdns_rs485_supported = {
+static struct serial_rs485 cdns_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND,
.delay_rts_before_send = 1,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index abc2708d4ac5..9771072da177 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -350,22 +350,19 @@ int tty_dev_name_to_number(const char *name, dev_t *number)
return ret;
prefix_length = str - name;
- mutex_lock(&tty_mutex);
+
+ guard(mutex)(&tty_mutex);
list_for_each_entry(p, &tty_drivers, tty_drivers)
if (prefix_length == strlen(p->name) && strncmp(name,
p->name, prefix_length) == 0) {
if (index < p->num) {
*number = MKDEV(p->major, p->minor_start + index);
- goto out;
+ return 0;
}
}
- /* if here then driver wasn't found */
- ret = -ENODEV;
-out:
- mutex_unlock(&tty_mutex);
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
@@ -462,7 +459,6 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations tty_fops = {
- .llseek = no_llseek,
.read_iter = tty_read,
.write_iter = tty_write,
.splice_read = copy_splice_read,
@@ -477,7 +473,6 @@ static const struct file_operations tty_fops = {
};
static const struct file_operations console_fops = {
- .llseek = no_llseek,
.read_iter = tty_read,
.write_iter = redirected_tty_write,
.splice_read = copy_splice_read,
@@ -491,7 +486,6 @@ static const struct file_operations console_fops = {
};
static const struct file_operations hung_up_tty_fops = {
- .llseek = no_llseek,
.read_iter = hung_up_tty_read,
.write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c87fdc849c62..ecdfff2456e3 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table {
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
[MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
- [MODE_MAX][0][0] = { 7643136, 307200 },
+ [MODE_MAX][0][0] = { 7643136, 819200 },
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 20d2a55cb40b..004a549c6c7d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -118,7 +118,7 @@ static const struct sysfs_ops map_sysfs_ops = {
.show = map_type_show,
};
-static struct kobj_type map_attr_type = {
+static const struct kobj_type map_attr_type = {
.release = map_release,
.sysfs_ops = &map_sysfs_ops,
.default_groups = map_groups,
@@ -207,7 +207,7 @@ static const struct sysfs_ops portio_sysfs_ops = {
.show = portio_type_show,
};
-static struct kobj_type portio_attr_type = {
+static const struct kobj_type portio_attr_type = {
.release = portio_release,
.sysfs_ops = &portio_sysfs_ops,
.default_groups = portio_groups,
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index 1f6320d98a76..591d149de8f3 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -37,8 +37,7 @@ struct cdns3_wrap {
#define PCI_DRIVER_NAME "cdns3-pci-usbss"
#define PLAT_DRIVER_NAME "cdns-usb3"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0100
+#define PCI_DEVICE_ID_CDNS_USB3 0x0100
static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
{
@@ -190,7 +189,7 @@ static void cdns3_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id cdns3_pci_ids[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_USB3) },
{ 0, }
};
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index 225540fc81ba..2d05368a6745 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -28,10 +28,11 @@
#define PCI_DRIVER_NAME "cdns-pci-usbssp"
#define PLAT_DRIVER_NAME "cdns-usbssp"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0200
-#define CDNS_DRD_ID 0x0100
-#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+#define PCI_DEVICE_ID_CDNS_USB3 0x0100
+#define PCI_DEVICE_ID_CDNS_UDC 0x0200
+
+#define PCI_CLASS_SERIAL_USB_CDNS_USB3 (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+#define PCI_CLASS_SERIAL_USB_CDNS_UDC PCI_CLASS_SERIAL_USB_DEVICE
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
@@ -40,10 +41,10 @@ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
* Platform has two functions. The first keeps resources for
* Host/Device while the second keeps resources for DRD/OTG.
*/
- if (pdev->device == CDNS_DEVICE_ID)
- return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
- else if (pdev->device == CDNS_DRD_ID)
- return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+ if (pdev->device == PCI_DEVICE_ID_CDNS_UDC)
+ return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_USB3, NULL);
+ if (pdev->device == PCI_DEVICE_ID_CDNS_USB3)
+ return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_UDC, NULL);
return NULL;
}
@@ -220,12 +221,12 @@ static const struct dev_pm_ops cdnsp_pci_pm_ops = {
};
static const struct pci_device_id cdnsp_pci_ids[] = {
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- CDNS_DRD_IF, PCI_ANY_ID },
- { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
- CDNS_DRD_IF, PCI_ANY_ID },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_UDC },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB3),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 },
{ 0, }
};
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index dbd83d321bca..46852529499d 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
cur_td->last_trb, hw_deq);
- if (seg && (pep->ep_state & EP_ENABLED))
+ if (seg && (pep->ep_state & EP_ENABLED) &&
+ !(pep->ep_state & EP_DIS_IN_RROGRESS))
cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
cur_td, &deq_state);
else
@@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
* During disconnecting all endpoint will be disabled so we don't
* have to worry about updating dequeue pointer.
*/
- if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
+ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING ||
+ pep->ep_state & EP_DIS_IN_RROGRESS) {
status = -ESHUTDOWN;
ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
}
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index ceca4d839dfd..7ba760ee62e3 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
-static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = {
+ .quirks = XHCI_CDNS_SCTX_QUIRK,
+};
static int __cdns_host_init(struct cdns *cdns)
{
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index bdc04ce919f7..c64ab0e07ea0 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -128,7 +128,7 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
* In case the fsl,usbmisc property is not present this device doesn't
* need usbmisc. Return NULL (which is no error here)
*/
- if (!of_get_property(np, "fsl,usbmisc", NULL))
+ if (!of_property_present(np, "fsl,usbmisc"))
return NULL;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/ci_hdrc_npcm.c b/drivers/usb/chipidea/ci_hdrc_npcm.c
index b14127873c55..3e5e05dbda89 100644
--- a/drivers/usb/chipidea/ci_hdrc_npcm.c
+++ b/drivers/usb/chipidea/ci_hdrc_npcm.c
@@ -18,7 +18,7 @@ struct npcm_udc_data {
struct ci_hdrc_platform_data pdata;
};
-static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event)
+static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned int event)
{
struct device *dev = ci->dev->parent;
@@ -28,7 +28,7 @@ static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event)
hw_write(ci, OP_USBMODE, 0xffffffff, 0x0);
break;
default:
- dev_dbg(dev, "unknown ci_hdrc event (%d)\n",event);
+ dev_dbg(dev, "unknown ci_hdrc event (%d)\n", event);
break;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 2d7f616270c1..69ef3cd8d4f8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -86,7 +86,7 @@ static int hw_device_state(struct ci_hdrc *ci, u32 dma)
hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
/* interrupt, error, port change, reset, sleep/suspend */
hw_write(ci, OP_USBINTR, ~0,
- USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+ USBi_UI|USBi_UEI|USBi_PCI|USBi_URI);
} else {
hw_write(ci, OP_USBINTR, ~0, 0);
}
@@ -877,6 +877,7 @@ __releases(ci->lock)
__acquires(ci->lock)
{
int retval;
+ u32 intr;
spin_unlock(&ci->lock);
if (ci->gadget.speed != USB_SPEED_UNKNOWN)
@@ -890,6 +891,11 @@ __acquires(ci->lock)
if (retval)
goto done;
+ /* clear SLI */
+ hw_write(ci, OP_USBSTS, USBi_SLI, USBi_SLI);
+ intr = hw_read(ci, OP_USBINTR, ~0);
+ hw_write(ci, OP_USBINTR, ~0, intr | USBi_SLI);
+
ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
if (ci->status == NULL)
retval = -ENOMEM;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0c1b69d944ca..605fea461102 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
struct acm *acm = tty->driver_data;
ss->line = acm->minor;
+ mutex_lock(&acm->port.mutex);
ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
jiffies_to_msecs(acm->port.closing_wait) / 10;
+ mutex_unlock(&acm->port.mutex);
return 0;
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 6bd9fe565385..34e46ef308ab 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void)
if (!urb)
return NULL;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ dmabuf = kzalloc(bufsize, GFP_KERNEL);
if (!dmabuf) {
usb_free_urb(urb);
return NULL;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 59b55d6cf490..b7bea1015d7c 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -107,19 +107,18 @@ EXPORT_SYMBOL_GPL(usb_speed_string);
*/
enum usb_device_speed usb_get_maximum_speed(struct device *dev)
{
- const char *maximum_speed;
+ const char *p = "maximum-speed";
int ret;
- ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
- if (ret < 0)
- return USB_SPEED_UNKNOWN;
-
- ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ ret = device_property_match_property_string(dev, p, ssp_rate, ARRAY_SIZE(ssp_rate));
if (ret > 0)
return USB_SPEED_SUPER_PLUS;
- ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
- return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
+ ret = device_property_match_property_string(dev, p, speed_names, ARRAY_SIZE(speed_names));
+ if (ret > 0)
+ return ret;
+
+ return USB_SPEED_UNKNOWN;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
@@ -276,14 +275,13 @@ EXPORT_SYMBOL_GPL(usb_decode_interval);
*/
enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
- struct device_node *controller = NULL;
+ struct device_node *controller;
struct of_phandle_args args;
const char *dr_mode;
int index;
int err;
- do {
- controller = of_find_node_with_property(controller, "phys");
+ for_each_node_with_property(controller, "phys") {
if (!of_device_is_available(controller))
continue;
index = 0;
@@ -306,7 +304,7 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
goto finish;
index++;
} while (args.np);
- } while (controller);
+ }
finish:
err = of_property_read_string(controller, "dr_mode", &dr_mode);
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 7f8a912d4fe2..21585ed89ef8 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -142,6 +142,53 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
+/**
+ * usb_acpi_add_usb4_devlink - add device link to USB4 Host Interface for tunneled USB3 devices
+ *
+ * @udev: Tunneled USB3 device connected to a roothub.
+ *
+ * Adds a device link between a tunneled USB3 device and the USB4 Host Interface
+ * device to ensure correct runtime PM suspend and resume order. This function
+ * should only be called for tunneled USB3 devices.
+ * The USB4 Host Interface this tunneled device depends on is found from the roothub
+ * port ACPI device specific data _DSD entry.
+ *
+ * Return: negative error code on failure, 0 otherwise
+ */
+static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
+{
+ const struct device_link *link;
+ struct usb_port *port_dev;
+ struct usb_hub *hub;
+
+ if (!udev->parent || udev->parent->parent)
+ return 0;
+
+ hub = usb_hub_to_struct_hub(udev->parent);
+ port_dev = hub->ports[udev->portnum - 1];
+
+ struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
+ fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0);
+
+ if (IS_ERR(nhi_fwnode))
+ return 0;
+
+ link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_RPM_ACTIVE |
+ DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ dev_err(&port_dev->dev, "Failed to created device link from %s to %s\n",
+ dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
+ return -EINVAL;
+ }
+
+ dev_dbg(&port_dev->dev, "Created device link from %s to %s\n",
+ dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
+
+ return 0;
+}
+
/*
* Private to usb-acpi, all the core needs to know is that
* port_dev->location is non-zero when it has been set by the firmware.
@@ -262,6 +309,12 @@ usb_acpi_find_companion_for_device(struct usb_device *udev)
if (!hub)
return NULL;
+
+ /* Tunneled USB3 devices depend on USB4 Host Interface, set device link to it */
+ if (udev->speed >= USB_SPEED_SUPER &&
+ udev->tunnel_mode != USB_LINK_NATIVE)
+ usb_acpi_add_usb4_devlink(udev);
+
/*
* This is an embedded USB device connected to a port and such
* devices share port's ACPI companion.
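The __free(fwnode_handle) annotation above (and the __free(device_node) annotations in later hunks) comes from <linux/cleanup.h>: the pointer's cleanup handler (fwnode_handle_put() or of_node_put()) runs automatically when the variable goes out of scope, which is why the explicit put calls and err_node_put labels disappear. A minimal sketch of the pattern, assuming a made-up child node name "foo":
#include <linux/cleanup.h>
#include <linux/of.h>

static int example_check_child(struct device_node *parent)
{
	struct device_node *child __free(device_node) =
			of_get_child_by_name(parent, "foo");

	if (!child)
		return -ENODEV;

	/* use child; of_node_put(child) runs automatically on every return path */
	return 0;
}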
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7c82ab590401..3116ac72747f 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -702,6 +702,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, uframe_sched);
print_param(seq, p, external_id_pin_ctl);
print_param(seq, p, power_down);
+ print_param(seq, p, no_clock_gating);
print_param(seq, p, lpm);
print_param(seq, p, lpm_clock_gating);
print_param(seq, p, besl);
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index a8605b02115b..1ad8fa3f862a 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
role = USB_ROLE_DEVICE;
}
+ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) ||
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) &&
+ dwc2_is_device_mode(hsotg) &&
+ hsotg->lx_state == DWC2_L2 &&
+ hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+ hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+
if (role == USB_ROLE_HOST) {
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a937eadbc9b3..68226defdc60 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -23,6 +23,7 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = 0x10;
+ p->no_clock_gating = true;
}
static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
@@ -352,6 +353,7 @@ const struct of_device_id dwc2_of_match_table[] = {
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
const struct acpi_device_id dwc2_acpi_match[] = {
+ /* This ID refers to the same USB IP as of_device_id brcm,bcm2835-usb */
{ "BCM2848", (kernel_ulong_t)dwc2_set_bcm_params },
{ },
};
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 7b84416dfc2b..c1b7209b9483 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -469,18 +469,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
- hsotg->irq = platform_get_irq(dev, 0);
- if (hsotg->irq < 0)
- return hsotg->irq;
-
- dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
- hsotg->irq);
- retval = devm_request_irq(hsotg->dev, hsotg->irq,
- dwc2_handle_common_intr, IRQF_SHARED,
- dev_name(hsotg->dev), hsotg);
- if (retval)
- return retval;
-
hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
if (IS_ERR(hsotg->vbus_supply)) {
retval = PTR_ERR(hsotg->vbus_supply);
@@ -524,6 +512,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
+ hsotg->irq = platform_get_irq(dev, 0);
+ if (hsotg->irq < 0) {
+ retval = hsotg->irq;
+ goto error;
+ }
+
+ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+ hsotg->irq);
+ retval = devm_request_irq(hsotg->dev, hsotg->irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ goto error;
+
/*
* For OTG cores, set the force mode bits to reflect the value
* of dr_mode. Force mode bits should not be touched at any
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index 8ee448068503..64c0cd1995aa 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 NXP.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -96,7 +97,8 @@ static void imx8mp_configure_glue(struct dwc3_imx8mp *dwc3_imx)
writel(value, dwc3_imx->glue_base + USB_CTRL1);
}
-static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
+static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
{
struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
u32 val;
@@ -106,12 +108,14 @@ static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
- if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
- val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
- USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
- else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci) {
+ val |= USB_WAKEUP_EN | USB_WAKEUP_DPDM_EN;
+ if (PMSG_IS_AUTO(msg))
+ val |= USB_WAKEUP_SS_CONN | USB_WAKEUP_U3_EN;
+ } else {
val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
USB_WAKEUP_VBUS_SRC_SESS_VAL;
+ }
writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
}
@@ -144,10 +148,21 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
return IRQ_HANDLED;
}
+static int dwc3_imx8mp_set_software_node(struct device *dev)
+{
+ struct property_entry props[3] = { 0 };
+ int prop_idx = 0;
+
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk");
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk");
+
+ return device_create_managed_software_node(dev, props, NULL);
+}
+
static int dwc3_imx8mp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *dwc3_np, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
struct dwc3_imx8mp *dwc3_imx;
struct resource *res;
int err, irq;
@@ -178,39 +193,26 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
return PTR_ERR(dwc3_imx->glue_base);
}
- dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
- if (IS_ERR(dwc3_imx->hsio_clk)) {
- err = PTR_ERR(dwc3_imx->hsio_clk);
- dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(dwc3_imx->hsio_clk);
- if (err) {
- dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
- return err;
- }
-
- dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
- if (IS_ERR(dwc3_imx->suspend_clk)) {
- err = PTR_ERR(dwc3_imx->suspend_clk);
- dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
- goto disable_hsio_clk;
- }
+ dwc3_imx->hsio_clk = devm_clk_get_enabled(dev, "hsio");
+ if (IS_ERR(dwc3_imx->hsio_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc3_imx->hsio_clk),
+ "Failed to get hsio clk\n");
- err = clk_prepare_enable(dwc3_imx->suspend_clk);
- if (err) {
- dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
- goto disable_hsio_clk;
- }
+ dwc3_imx->suspend_clk = devm_clk_get_enabled(dev, "suspend");
+ if (IS_ERR(dwc3_imx->suspend_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc3_imx->suspend_clk),
+ "Failed to get suspend clk\n");
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto disable_clks;
- }
+ if (irq < 0)
+ return irq;
dwc3_imx->irq = irq;
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
+ if (!dwc3_np)
+ return dev_err_probe(dev, -ENODEV, "failed to find dwc3 core child\n");
+
imx8mp_configure_glue(dwc3_imx);
pm_runtime_set_active(dev);
@@ -219,17 +221,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
if (err < 0)
goto disable_rpm;
- dwc3_np = of_get_compatible_child(node, "snps,dwc3");
- if (!dwc3_np) {
+ err = dwc3_imx8mp_set_software_node(dev);
+ if (err) {
err = -ENODEV;
- dev_err(dev, "failed to find dwc3 core child\n");
+ dev_err(dev, "failed to create software node\n");
goto disable_rpm;
}
err = of_platform_populate(node, NULL, NULL, dev);
if (err) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
- goto err_node_put;
+ goto disable_rpm;
}
dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
@@ -238,7 +240,6 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
err = -ENODEV;
goto depopulate;
}
- of_node_put(dwc3_np);
err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
IRQF_ONESHOT, dev_name(dev), dwc3_imx);
@@ -254,51 +255,39 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
depopulate:
of_platform_depopulate(dev);
-err_node_put:
- of_node_put(dwc3_np);
disable_rpm:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-disable_clks:
- clk_disable_unprepare(dwc3_imx->suspend_clk);
-disable_hsio_clk:
- clk_disable_unprepare(dwc3_imx->hsio_clk);
return err;
}
static void dwc3_imx8mp_remove(struct platform_device *pdev)
{
- struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
pm_runtime_get_sync(dev);
of_platform_depopulate(dev);
- clk_disable_unprepare(dwc3_imx->suspend_clk);
- clk_disable_unprepare(dwc3_imx->hsio_clk);
-
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
}
-static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
- pm_message_t msg)
+static int dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg)
{
if (dwc3_imx->pm_suspended)
return 0;
/* Wakeup enable */
if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev))
- dwc3_imx8mp_wakeup_enable(dwc3_imx);
+ dwc3_imx8mp_wakeup_enable(dwc3_imx, msg);
dwc3_imx->pm_suspended = true;
return 0;
}
-static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
- pm_message_t msg)
+static int dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg)
{
struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
int ret = 0;
@@ -331,7 +320,7 @@ static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
return ret;
}
-static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
+static int dwc3_imx8mp_pm_suspend(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
int ret;
@@ -349,7 +338,7 @@ static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
+static int dwc3_imx8mp_pm_resume(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
int ret;
@@ -379,7 +368,7 @@ static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
return ret;
}
-static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
+static int dwc3_imx8mp_runtime_suspend(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
@@ -388,7 +377,7 @@ static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND);
}
-static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
+static int dwc3_imx8mp_runtime_resume(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
@@ -398,9 +387,9 @@ static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
- SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
- dwc3_imx8mp_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
+ RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend, dwc3_imx8mp_runtime_resume,
+ NULL)
};
static const struct of_device_id dwc3_imx8mp_of_match[] = {
@@ -414,7 +403,7 @@ static struct platform_driver dwc3_imx8mp_driver = {
.remove_new = dwc3_imx8mp_remove,
.driver = {
.name = "imx8mp-dwc3",
- .pm = &dwc3_imx8mp_dev_pm_ops,
+ .pm = pm_ptr(&dwc3_imx8mp_dev_pm_ops),
.of_match_table = dwc3_imx8mp_of_match,
},
};
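The dev_pm_ops hunk above moves from SET_SYSTEM_SLEEP_PM_OPS/SET_RUNTIME_PM_OPS to the newer SYSTEM_SLEEP_PM_OPS/RUNTIME_PM_OPS macros plus pm_ptr(): with CONFIG_PM disabled, pm_ptr() evaluates to NULL and the callbacks can be discarded without __maybe_unused annotations. A minimal sketch of the pattern with placeholder names (probe/remove omitted):
#include <linux/pm.h>
#include <linux/platform_device.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	RUNTIME_PM_OPS(example_suspend, example_resume, NULL)
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		/* NULL when CONFIG_PM=n, so the ops above can be dropped */
		.pm	= pm_ptr(&example_pm_ops),
	},
};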
diff --git a/drivers/usb/dwc3/dwc3-octeon.c b/drivers/usb/dwc3/dwc3-octeon.c
index 6010135e1acc..1a3b205367fd 100644
--- a/drivers/usb/dwc3/dwc3-octeon.c
+++ b/drivers/usb/dwc3/dwc3-octeon.c
@@ -419,7 +419,7 @@ static int dwc3_octeon_probe(struct platform_device *pdev)
int ref_clk_sel, ref_clk_fsel, mpll_mul;
int power_active_low, power_gpio;
int err, len;
- u32 clock_rate;
+ u32 clock_rate, gpio_pwr[3];
if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) {
dev_err(dev, "No UCTL \"refclk-frequency\"\n");
@@ -476,21 +476,10 @@ static int dwc3_octeon_probe(struct platform_device *pdev)
power_gpio = DWC3_GPIO_POWER_NONE;
power_active_low = 0;
- if (of_find_property(node, "power", &len)) {
- u32 gpio_pwr[3];
-
- switch (len) {
- case 8:
- of_property_read_u32_array(node, "power", gpio_pwr, 2);
- break;
- case 12:
- of_property_read_u32_array(node, "power", gpio_pwr, 3);
+ len = of_property_read_variable_u32_array(node, "power", gpio_pwr, 2, 3);
+ if (len > 0) {
+ if (len == 3)
power_active_low = gpio_pwr[2] & 0x01;
- break;
- default:
- dev_err(dev, "invalid power configuration\n");
- return -EINVAL;
- }
power_gpio = gpio_pwr[1];
}
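of_property_read_variable_u32_array(), used above, reads between sz_min and sz_max elements and returns the number actually read (or a negative errno), which is what lets the separate 8-byte and 12-byte "power" cases collapse into one call. A minimal sketch with an invented property name:
#include <linux/of.h>

static int example_parse_power(struct device_node *np, u32 *gpio, bool *active_low)
{
	u32 vals[3];
	int n;

	/* accept either two or three cells, as the hunk above does for "power" */
	n = of_property_read_variable_u32_array(np, "example-power", vals, 2, 3);
	if (n < 0)
		return n;			/* e.g. -EINVAL or -ENODATA */

	*gpio = vals[1];
	*active_low = (n == 3) && (vals[2] & 0x1);
	return 0;
}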
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 88fb6706a18d..c1d4b52f25b0 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,6 +4,7 @@
* Inspired by dwc3-of-simple.c
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -702,11 +703,12 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node, *dwc3_np;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int ret;
- dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
+ "snps,dwc3");
if (!dwc3_np) {
dev_err(dev, "failed to find dwc3 core child\n");
return -ENODEV;
@@ -715,7 +717,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);
- goto node_put;
+ return ret;
}
qcom->dwc3 = of_find_device_by_node(dwc3_np);
@@ -725,9 +727,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
of_platform_depopulate(dev);
}
-node_put:
- of_node_put(dwc3_np);
-
return ret;
}
@@ -736,7 +735,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- struct resource *res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -774,9 +772,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
goto reset_assert;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- qcom->qscratch_base = devm_ioremap_resource(dev, res);
+ qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
goto clk_disable;
diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c
index 3cd6b184551c..e9c8b032c72c 100644
--- a/drivers/usb/dwc3/dwc3-rtk.c
+++ b/drivers/usb/dwc3/dwc3-rtk.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -173,23 +174,20 @@ static const char *const speed_names[] = {
static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np)
{
- struct device_node *dwc3_np;
const char *maximum_speed;
int ret;
- dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
+ "snps,dwc3");
if (!dwc3_np)
return USB_SPEED_UNKNOWN;
ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed);
if (ret < 0)
- goto out;
+ return USB_SPEED_UNKNOWN;
ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
-out:
- of_node_put(dwc3_np);
-
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
@@ -276,7 +274,6 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
struct device_node *node = dev->of_node;
struct platform_device *dwc3_pdev;
struct device *dwc3_dev;
- struct device_node *dwc3_node;
enum usb_dr_mode dr_mode;
int ret = 0;
@@ -290,7 +287,8 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
return ret;
}
- dwc3_node = of_get_compatible_child(node, "snps,dwc3");
+ struct device_node *dwc3_node __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
if (!dwc3_node) {
dev_err(dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
@@ -301,7 +299,7 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
if (!dwc3_pdev) {
dev_err(dev, "failed to find dwc3 core platform_device\n");
ret = -ENODEV;
- goto err_node_put;
+ goto depopulate;
}
dwc3_dev = &dwc3_pdev->dev;
@@ -343,14 +341,11 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
switch_usb2_role(rtk, rtk->cur_role);
platform_device_put(dwc3_pdev);
- of_node_put(dwc3_node);
return 0;
err_pdev_put:
platform_device_put(dwc3_pdev);
-err_node_put:
- of_node_put(dwc3_node);
depopulate:
of_platform_depopulate(dev);
@@ -363,30 +358,18 @@ static int dwc3_rtk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *regs;
- int ret = 0;
rtk = devm_kzalloc(dev, sizeof(*rtk), GFP_KERNEL);
- if (!rtk) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rtk)
+ return -ENOMEM;
platform_set_drvdata(pdev, rtk);
rtk->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- ret = -ENODEV;
- goto out;
- }
-
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto out;
- }
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
rtk->regs = regs;
rtk->regs_size = resource_size(res);
@@ -394,16 +377,11 @@ static int dwc3_rtk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
rtk->pm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(rtk->pm_base)) {
- ret = PTR_ERR(rtk->pm_base);
- goto out;
- }
+ if (IS_ERR(rtk->pm_base))
+ return PTR_ERR(rtk->pm_base);
}
- ret = dwc3_rtk_probe_dwc3_core(rtk);
-
-out:
- return ret;
+ return dwc3_rtk_probe_dwc3_core(rtk);
}
static void dwc3_rtk_remove(struct platform_device *pdev)
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index c8c7cd0c1796..2841021f3557 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -14,6 +14,7 @@
* Inspired by dwc3-omap.c and dwc3-exynos.c.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -197,7 +198,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
struct st_dwc3 *dwc3_data;
struct resource *res;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct platform_device *child_pdev;
struct regmap *regmap;
int ret;
@@ -224,15 +225,21 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ dev_vdbg(dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
+ struct device_node *child __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
+ if (!child) {
+ dev_err(dev, "failed to find dwc3 core node\n");
+ return -ENODEV;
+ }
+
dwc3_data->rstc_pwrdn =
devm_reset_control_get_exclusive(dev, "powerdown");
- if (IS_ERR(dwc3_data->rstc_pwrdn)) {
- dev_err(&pdev->dev, "could not get power controller\n");
- return PTR_ERR(dwc3_data->rstc_pwrdn);
- }
+ if (IS_ERR(dwc3_data->rstc_pwrdn))
+ return dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_pwrdn),
+ "could not get power controller\n");
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
@@ -240,26 +247,19 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->rstc_rst =
devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
- dev_err(&pdev->dev, "could not get reset controller\n");
- ret = PTR_ERR(dwc3_data->rstc_rst);
+ ret = dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_rst),
+ "could not get reset controller\n");
goto undo_powerdown;
}
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_compatible_child(node, "snps,dwc3");
- if (!child) {
- dev_err(&pdev->dev, "failed to find dwc3 core node\n");
- ret = -ENODEV;
- goto err_node_put;
- }
-
/* Allocate and initialize the core */
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err_node_put;
+ goto undo_softreset;
}
child_pdev = of_find_device_by_node(child);
@@ -270,7 +270,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
}
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
- of_node_put(child);
platform_device_put(child_pdev);
/*
@@ -282,8 +281,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
ret = st_dwc3_drd_init(dwc3_data);
if (ret) {
dev_err(dev, "drd initialisation failed\n");
- of_platform_depopulate(dev);
- goto undo_softreset;
+ goto depopulate;
}
/* ST glue logic init */
@@ -294,8 +292,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
depopulate:
of_platform_depopulate(dev);
-err_node_put:
- of_node_put(child);
undo_softreset:
reset_control_assert(dwc3_data->rstc_rst);
undo_powerdown:
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index f1298b1b4f84..b5e5be424ce9 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -285,11 +285,8 @@ static int dwc3_xlnx_probe(struct platform_device *pdev)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- dev_err_probe(dev, ret, "failed to map registers\n");
- return ret;
- }
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs), "failed to map registers\n");
match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0e7c1e947c0a..c82a6a0fba93 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -6,13 +6,13 @@
#include <linux/kstrtox.h>
#include <linux/nls.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget_configfs.h>
#include <linux/usb/webusb.h>
#include "configfs.h"
-#include "u_f.h"
#include "u_os_desc.h"
-int check_user_usb_string(const char *name,
+static int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
{
u16 num;
@@ -902,7 +902,7 @@ static struct configfs_group_operations gadget_language_langid_group_ops = {
.drop_item = gadget_language_string_drop,
};
-static struct config_item_type gadget_language_type = {
+static const struct config_item_type gadget_language_type = {
.ct_item_ops = &gadget_language_langid_item_ops,
.ct_group_ops = &gadget_language_langid_group_ops,
.ct_attrs = gadget_language_langid_attrs,
@@ -961,7 +961,7 @@ static struct configfs_group_operations gadget_language_group_ops = {
.drop_item = &gadget_language_drop,
};
-static struct config_item_type gadget_language_strings_type = {
+static const struct config_item_type gadget_language_strings_type = {
.ct_group_ops = &gadget_language_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -1106,7 +1106,7 @@ static struct configfs_attribute *webusb_attrs[] = {
NULL,
};
-static struct config_item_type webusb_type = {
+static const struct config_item_type webusb_type = {
.ct_attrs = webusb_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1263,7 +1263,7 @@ static struct configfs_item_operations os_desc_ops = {
.drop_link = os_desc_unlink,
};
-static struct config_item_type os_desc_type = {
+static const struct config_item_type os_desc_type = {
.ct_item_ops = &os_desc_ops,
.ct_attrs = os_desc_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 724b2631f249..7061720b9732 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -41,6 +41,7 @@ struct f_acm {
struct gserial port;
u8 ctrl_id, data_id;
u8 port_num;
+ u8 bInterfaceProtocol;
u8 pending;
@@ -89,7 +90,7 @@ acm_iad_descriptor = {
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
- .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
+ /* .bFunctionProtocol = DYNAMIC */
/* .iFunction = DYNAMIC */
};
@@ -101,7 +102,7 @@ static struct usb_interface_descriptor acm_control_interface_desc = {
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
- .bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
+ /* .bInterfaceProtocol = DYNAMIC */
/* .iInterface = DYNAMIC */
};
@@ -663,6 +664,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
acm->notify = ep;
+ acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol;
+ acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol;
+
/* allocate notification */
acm->notify_req = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
@@ -719,8 +723,14 @@ static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
static void acm_free_func(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
+ struct f_serial_opts *opts;
+
+ opts = container_of(f->fi, struct f_serial_opts, func_inst);
kfree(acm);
+ mutex_lock(&opts->lock);
+ opts->instances--;
+ mutex_unlock(&opts->lock);
}
static void acm_resume(struct usb_function *f)
@@ -761,7 +771,11 @@ static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
acm->port.func.disable = acm_disable;
opts = container_of(fi, struct f_serial_opts, func_inst);
+ mutex_lock(&opts->lock);
acm->port_num = opts->port_num;
+ acm->bInterfaceProtocol = opts->protocol;
+ opts->instances++;
+ mutex_unlock(&opts->lock);
acm->port.func.unbind = acm_unbind;
acm->port.func.free_func = acm_free_func;
acm->port.func.resume = acm_resume;
@@ -812,11 +826,42 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
+static ssize_t f_acm_protocol_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%u\n", to_f_serial_opts(item)->protocol);
+}
+
+static ssize_t f_acm_protocol_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct f_serial_opts *opts = to_f_serial_opts(item);
+ int ret;
+
+ mutex_lock(&opts->lock);
+
+ if (opts->instances) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = kstrtou8(page, 0, &opts->protocol);
+ if (ret)
+ goto out;
+ ret = count;
+
+out:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(f_acm_, protocol);
+
static struct configfs_attribute *acm_attrs[] = {
#ifdef CONFIG_U_SERIAL_CONSOLE
&f_acm_attr_console,
#endif
&f_acm_attr_port_num,
+ &f_acm_attr_protocol,
NULL,
};
@@ -832,6 +877,7 @@ static void acm_free_instance(struct usb_function_instance *fi)
opts = container_of(fi, struct f_serial_opts, func_inst);
gserial_free_line(opts->port_num);
+ mutex_destroy(&opts->lock);
kfree(opts);
}
@@ -843,7 +889,9 @@ static struct usb_function_instance *acm_alloc_instance(void)
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
+ opts->protocol = USB_CDC_ACM_PROTO_AT_V25TER;
opts->func_inst.free_func_inst = acm_free_instance;
+ mutex_init(&opts->lock);
ret = gserial_alloc_line(&opts->port_num);
if (ret) {
kfree(opts);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e0ceaa721949..c626bb73ea59 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -33,6 +33,7 @@
#include <linux/usb/ccid.h>
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
+#include <linux/usb/func_utils.h>
#include <linux/aio.h>
#include <linux/kthread.h>
@@ -40,7 +41,6 @@
#include <linux/eventfd.h>
#include "u_fs.h"
-#include "u_f.h"
#include "u_os_desc.h"
#include "configfs.h"
@@ -722,7 +722,6 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
}
static const struct file_operations ffs_ep0_operations = {
- .llseek = no_llseek,
.open = ffs_ep0_open,
.write = ffs_ep0_write,
@@ -1830,7 +1829,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
static const struct file_operations ffs_epfile_operations = {
- .llseek = no_llseek,
.open = ffs_epfile_open,
.write_iter = ffs_epfile_write_iter,
@@ -2478,7 +2476,7 @@ typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
static int __must_check ffs_do_single_desc(char *data, unsigned len,
ffs_entity_callback entity,
- void *priv, int *current_class)
+ void *priv, int *current_class, int *current_subclass)
{
struct usb_descriptor_header *_ds = (void *)data;
u8 length;
@@ -2535,6 +2533,7 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
if (ds->iInterface)
__entity(STRING, ds->iInterface);
*current_class = ds->bInterfaceClass;
+ *current_subclass = ds->bInterfaceSubClass;
}
break;
@@ -2559,6 +2558,12 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
if (length != sizeof(struct ccid_descriptor))
goto inv_length;
break;
+ } else if (*current_class == USB_CLASS_APP_SPEC &&
+ *current_subclass == USB_SUBCLASS_DFU) {
+ pr_vdebug("dfu functional descriptor\n");
+ if (length != sizeof(struct usb_dfu_functional_descriptor))
+ goto inv_length;
+ break;
} else {
pr_vdebug("unknown descriptor: %d for class %d\n",
_ds->bDescriptorType, *current_class);
@@ -2621,6 +2626,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
const unsigned _len = len;
unsigned long num = 0;
int current_class = -1;
+ int current_subclass = -1;
for (;;) {
int ret;
@@ -2640,7 +2646,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
return _len - len;
ret = ffs_do_single_desc(data, len, entity, priv,
- &current_class);
+ &current_class, &current_subclass);
if (ret < 0) {
pr_debug("%s returns %d\n", __func__, ret);
return ret;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 93dae017ae45..740311c4fa24 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -15,13 +15,21 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/g_hid.h>
+#include <uapi/linux/usb/g_hid.h>
-#include "u_f.h"
#include "u_hid.h"
#define HIDG_MINORS 4
+/*
+ * Most operating systems seem to allow a 5000 ms timeout; we give userspace
+ * half that time to respond before returning an empty report.
+ */
+#define GET_REPORT_TIMEOUT_MS 2500
+
static int major, minors;
static const struct class hidg_class = {
@@ -31,6 +39,11 @@ static const struct class hidg_class = {
static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
+struct report_entry {
+ struct usb_hidg_report report_data;
+ struct list_head node;
+};
+
/*-------------------------------------------------------------------------*/
/* HID gadget struct */
@@ -75,6 +88,19 @@ struct f_hidg {
wait_queue_head_t write_queue;
struct usb_request *req;
+ /* get report */
+ struct usb_request *get_req;
+ struct usb_hidg_report get_report;
+ bool get_report_returned;
+ int get_report_req_report_id;
+ int get_report_req_report_length;
+ spinlock_t get_report_spinlock;
+ wait_queue_head_t get_queue; /* Waiting for userspace response */
+ wait_queue_head_t get_id_queue; /* Get ID came in */
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ struct list_head report_list;
+
struct device dev;
struct cdev cdev;
struct usb_function func;
@@ -524,6 +550,174 @@ release_write_pending:
return status;
}
+static struct report_entry *f_hidg_search_for_report(struct f_hidg *hidg, u8 report_id)
+{
+ struct list_head *ptr;
+ struct report_entry *entry;
+
+ list_for_each(ptr, &hidg->report_list) {
+ entry = list_entry(ptr, struct report_entry, node);
+ if (entry->report_data.report_id == report_id)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void get_report_workqueue_handler(struct work_struct *work)
+{
+ struct f_hidg *hidg = container_of(work, struct f_hidg, work);
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+ struct usb_request *req;
+ struct report_entry *ptr;
+ unsigned long flags;
+
+ int status = 0;
+
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ req = hidg->get_req;
+ if (!req) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return;
+ }
+
+ req->zero = 0;
+ req->length = min_t(unsigned int, min_t(unsigned int, hidg->get_report_req_report_length,
+ hidg->report_length),
+ MAX_REPORT_LENGTH);
+
+ /* Check if there is a response available for immediate response */
+ ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
+ if (ptr && !ptr->report_data.userspace_req) {
+ /* Report exists in list and it is to be used for immediate response */
+ req->buf = ptr->report_data.data;
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ hidg->get_report_returned = true;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ } else {
+ /*
+ * Report does not exist in list or should not be immediately sent
+ * i.e. give userspace time to respond
+ */
+ hidg->get_report_returned = false;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ wake_up(&hidg->get_id_queue);
+#define GET_REPORT_COND (!hidg->get_report_returned)
+ /* Wait until userspace has responded or timeout */
+ status = wait_event_interruptible_timeout(hidg->get_queue, !GET_REPORT_COND,
+ msecs_to_jiffies(GET_REPORT_TIMEOUT_MS));
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ req = hidg->get_req;
+ if (!req) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return;
+ }
+ if (status == 0 && !hidg->get_report_returned) {
+ /* GET_REPORT request was not serviced by userspace within timeout period */
+ VDBG(cdev, "get_report : userspace timeout.\n");
+ hidg->get_report_returned = true;
+ }
+
+ /* Search again for report ID in list and respond to GET_REPORT request */
+ ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
+ if (ptr) {
+ /*
+ * Either get an updated response just serviced by userspace
+ * or send the latest response in the list
+ */
+ req->buf = ptr->report_data.data;
+ } else {
+ /* If there are no previously sent reports, send an empty report */
+ req->buf = hidg->get_report.data;
+ memset(req->buf, 0x0, req->length);
+ }
+
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ }
+
+ if (status < 0)
+ VDBG(cdev, "usb_ep_queue error on ep0 responding to GET_REPORT\n");
+}
+
+static int f_hidg_get_report_id(struct file *file, __u8 __user *buffer)
+{
+ struct f_hidg *hidg = file->private_data;
+ int ret = 0;
+
+ ret = put_user(hidg->get_report_req_report_id, buffer);
+
+ return ret;
+}
+
+static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer)
+{
+ struct f_hidg *hidg = file->private_data;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+ unsigned long flags;
+ struct report_entry *entry;
+ struct report_entry *ptr;
+ __u8 report_id;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ if (copy_from_user(&entry->report_data, buffer,
+ sizeof(struct usb_hidg_report))) {
+ ERROR(cdev, "copy_from_user error\n");
+ kfree(entry);
+ return -EINVAL;
+ }
+
+ report_id = entry->report_data.report_id;
+
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ ptr = f_hidg_search_for_report(hidg, report_id);
+
+ if (ptr) {
+ /* Report already exists in list - update it */
+ if (copy_from_user(&ptr->report_data, buffer,
+ sizeof(struct usb_hidg_report))) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ ERROR(cdev, "copy_from_user error\n");
+ kfree(entry);
+ return -EINVAL;
+ }
+ kfree(entry);
+ } else {
+ /* Report does not exist in list - add it */
+ list_add_tail(&entry->node, &hidg->report_list);
+ }
+
+ /* If there is no response pending then do nothing further */
+ if (hidg->get_report_returned) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return 0;
+ }
+
+ /* If this userspace response serves the current pending report */
+ if (hidg->get_report_req_report_id == report_id) {
+ hidg->get_report_returned = true;
+ wake_up(&hidg->get_queue);
+ }
+
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return 0;
+}
+
+static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg)
+{
+ switch (code) {
+ case GADGET_HID_READ_GET_REPORT_ID:
+ return f_hidg_get_report_id(file, (__u8 __user *)arg);
+ case GADGET_HID_WRITE_GET_REPORT:
+ return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
@@ -531,6 +725,8 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
+ poll_wait(file, &hidg->get_queue, wait);
+ poll_wait(file, &hidg->get_id_queue, wait);
if (WRITE_COND)
ret |= EPOLLOUT | EPOLLWRNORM;
@@ -543,12 +739,16 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
ret |= EPOLLIN | EPOLLRDNORM;
}
+ if (GET_REPORT_COND)
+ ret |= EPOLLPRI;
+
return ret;
}
#undef WRITE_COND
#undef READ_COND_SSREPORT
#undef READ_COND_INTOUT
+#undef GET_REPORT_COND
static int f_hidg_release(struct inode *inode, struct file *fd)
{
@@ -641,6 +841,10 @@ static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
wake_up(&hidg->read_queue);
}
+static void hidg_get_report_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
static int hidg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -649,6 +853,7 @@ static int hidg_setup(struct usb_function *f,
struct usb_request *req = cdev->req;
int status = 0;
__u16 value, length;
+ unsigned long flags;
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
@@ -660,14 +865,20 @@ static int hidg_setup(struct usb_function *f,
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_REPORT):
- VDBG(cdev, "get_report\n");
+ VDBG(cdev, "get_report | wLength=%d\n", ctrl->wLength);
- /* send an empty report */
- length = min_t(unsigned, length, hidg->report_length);
- memset(req->buf, 0x0, length);
+ /*
+ * Record the requested report ID so that an ioctl can be used to
+ * determine which report this GET_REPORT request was actually for.
+ */
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ hidg->get_report_req_report_id = value & 0xff;
+ hidg->get_report_req_report_length = length;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
- goto respond;
- break;
+ queue_work(hidg->workqueue, &hidg->work);
+
+ return status;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
@@ -793,6 +1004,14 @@ static void hidg_disable(struct usb_function *f)
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
}
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ if (!hidg->get_report_returned) {
+ usb_ep_free_request(f->config->cdev->gadget->ep0, hidg->get_req);
+ hidg->get_req = NULL;
+ hidg->get_report_returned = true;
+ }
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
free_ep_req(hidg->in_ep, hidg->req);
@@ -902,6 +1121,14 @@ fail:
return status;
}
+#ifdef CONFIG_COMPAT
+static long f_hidg_compat_ioctl(struct file *file, unsigned int code,
+ unsigned long value)
+{
+ return f_hidg_ioctl(file, code, value);
+}
+#endif
+
static const struct file_operations f_hidg_fops = {
.owner = THIS_MODULE,
.open = f_hidg_open,
@@ -909,6 +1136,10 @@ static const struct file_operations f_hidg_fops = {
.write = f_hidg_write,
.read = f_hidg_read,
.poll = f_hidg_poll,
+ .unlocked_ioctl = f_hidg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = f_hidg_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
@@ -919,6 +1150,15 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_string *us;
int status;
+ hidg->get_req = usb_ep_alloc_request(c->cdev->gadget->ep0, GFP_ATOMIC);
+ if (!hidg->get_req)
+ return -ENOMEM;
+
+ hidg->get_req->zero = 0;
+ hidg->get_req->complete = hidg_get_report_complete;
+ hidg->get_req->context = hidg;
+ hidg->get_report_returned = true;
+
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(c->cdev, ct_func_strings,
ARRAY_SIZE(ct_func_string_defs));
@@ -1004,9 +1244,24 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg->write_pending = 1;
hidg->req = NULL;
spin_lock_init(&hidg->read_spinlock);
+ spin_lock_init(&hidg->get_report_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
+ init_waitqueue_head(&hidg->get_queue);
+ init_waitqueue_head(&hidg->get_id_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
+ INIT_LIST_HEAD(&hidg->report_list);
+
+ INIT_WORK(&hidg->work, get_report_workqueue_handler);
+ hidg->workqueue = alloc_workqueue("report_work",
+ WQ_FREEZABLE |
+ WQ_MEM_RECLAIM,
+ 1);
+
+ if (!hidg->workqueue) {
+ status = -ENOMEM;
+ goto fail;
+ }
/* create char device */
cdev_init(&hidg->cdev, &f_hidg_fops);
@@ -1016,12 +1271,16 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail_free_descs:
+ destroy_workqueue(hidg->workqueue);
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
if (hidg->req != NULL)
free_ep_req(hidg->in_ep, hidg->req);
+ usb_ep_free_request(c->cdev->gadget->ep0, hidg->get_req);
+ hidg->get_req = NULL;
+
return status;
}
@@ -1256,7 +1515,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
struct f_hidg *hidg = func_to_hidg(f);
cdev_device_del(&hidg->cdev, &hidg->dev);
-
+ destroy_workqueue(hidg->workqueue);
usb_free_all_descriptors(f);
}
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 979b028edb99..49b009a7d5d7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -14,9 +14,9 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include "g_zero.h"
-#include "u_f.h"
/*
* LOOPBACK FUNCTION ... a testing vehicle for USB peripherals,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index cfd712fd7452..e11d8c0edf06 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -3050,7 +3050,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
if (!common->thread_task) {
common->state = FSG_STATE_NORMAL;
common->thread_task =
- kthread_create(fsg_main_thread, common, "file-storage");
+ kthread_run(fsg_main_thread, common, "file-storage");
if (IS_ERR(common->thread_task)) {
ret = PTR_ERR(common->thread_task);
common->thread_task = NULL;
@@ -3059,7 +3059,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
}
DBG(common, "I/O thread pid: %d\n",
task_pid_nr(common->thread_task));
- wake_up_process(common->thread_task);
}
fsg->gadget = gadget;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 67052a664e74..1067847cc079 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -30,11 +30,11 @@
#include <sound/rawmidi.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
-#include "u_f.h"
#include "u_midi.h"
MODULE_AUTHOR("Ben Williamson");
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
index 3f63253ad3e0..8285df9ed6fd 100644
--- a/drivers/usb/gadget/function/f_midi2.c
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -15,11 +15,11 @@
#include <sound/ump_convert.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi-v2.h>
-#include "u_f.h"
#include "u_midi2.h"
struct f_midi2;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 6f3702210450..ec5fd25020fd 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -13,10 +13,10 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include <linux/err.h>
#include "g_zero.h"
-#include "u_f.h"
/*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 2b9fb4daa806..c87e74afc881 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -377,24 +377,10 @@ enum {
STR_AS_OUT_IF_ALT1,
STR_AS_IN_IF_ALT0,
STR_AS_IN_IF_ALT1,
+ NUM_STR_DESCRIPTORS,
};
-static struct usb_string strings_uac1[] = {
- /* [STR_AC_IF].s = DYNAMIC, */
- [STR_USB_OUT_IT].s = "Playback Input terminal",
- [STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
- [STR_IO_OUT_OT].s = "Playback Output terminal",
- [STR_IO_IN_IT].s = "Capture Input terminal",
- [STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
- [STR_USB_IN_OT].s = "Capture Output terminal",
- [STR_FU_IN].s = "Capture Volume",
- [STR_FU_OUT].s = "Playback Volume",
- [STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
- [STR_AS_OUT_IF_ALT1].s = "Playback Active",
- [STR_AS_IN_IF_ALT0].s = "Capture Inactive",
- [STR_AS_IN_IF_ALT1].s = "Capture Active",
- { },
-};
+static struct usb_string strings_uac1[NUM_STR_DESCRIPTORS + 1] = {};
static struct usb_gadget_strings str_uac1 = {
.language = 0x0409, /* en-us */
@@ -1265,6 +1251,20 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
strings_uac1[STR_AC_IF].s = audio_opts->function_name;
+ strings_uac1[STR_USB_OUT_IT].s = audio_opts->c_it_name;
+ strings_uac1[STR_USB_OUT_IT_CH_NAMES].s = audio_opts->c_it_ch_name;
+ strings_uac1[STR_IO_OUT_OT].s = audio_opts->c_ot_name;
+ strings_uac1[STR_FU_OUT].s = audio_opts->c_fu_vol_name;
+ strings_uac1[STR_AS_OUT_IF_ALT0].s = "Playback Inactive";
+ strings_uac1[STR_AS_OUT_IF_ALT1].s = "Playback Active";
+
+ strings_uac1[STR_IO_IN_IT].s = audio_opts->p_it_name;
+ strings_uac1[STR_IO_IN_IT_CH_NAMES].s = audio_opts->p_it_ch_name;
+ strings_uac1[STR_USB_IN_OT].s = audio_opts->p_ot_name;
+ strings_uac1[STR_FU_IN].s = audio_opts->p_fu_vol_name;
+ strings_uac1[STR_AS_IN_IF_ALT0].s = "Capture Inactive";
+ strings_uac1[STR_AS_IN_IF_ALT1].s = "Capture Active";
+
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
if (IS_ERR(us))
return PTR_ERR(us);
@@ -1681,8 +1681,19 @@ UAC1_ATTRIBUTE(bool, c_volume_present);
UAC1_ATTRIBUTE(s16, c_volume_min);
UAC1_ATTRIBUTE(s16, c_volume_max);
UAC1_ATTRIBUTE(s16, c_volume_res);
+
UAC1_ATTRIBUTE_STRING(function_name);
+UAC1_ATTRIBUTE_STRING(p_it_name);
+UAC1_ATTRIBUTE_STRING(p_it_ch_name);
+UAC1_ATTRIBUTE_STRING(p_ot_name);
+UAC1_ATTRIBUTE_STRING(p_fu_vol_name);
+
+UAC1_ATTRIBUTE_STRING(c_it_name);
+UAC1_ATTRIBUTE_STRING(c_it_ch_name);
+UAC1_ATTRIBUTE_STRING(c_ot_name);
+UAC1_ATTRIBUTE_STRING(c_fu_vol_name);
+
static struct configfs_attribute *f_uac1_attrs[] = {
&f_uac1_opts_attr_c_chmask,
&f_uac1_opts_attr_c_srate,
@@ -1706,6 +1717,16 @@ static struct configfs_attribute *f_uac1_attrs[] = {
&f_uac1_opts_attr_function_name,
+ &f_uac1_opts_attr_p_it_name,
+ &f_uac1_opts_attr_p_it_ch_name,
+ &f_uac1_opts_attr_p_ot_name,
+ &f_uac1_opts_attr_p_fu_vol_name,
+
+ &f_uac1_opts_attr_c_it_name,
+ &f_uac1_opts_attr_c_it_ch_name,
+ &f_uac1_opts_attr_c_ot_name,
+ &f_uac1_opts_attr_c_fu_vol_name,
+
NULL,
};
@@ -1760,6 +1781,16 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
scnprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
+ scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "Capture Input terminal");
+ scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels");
+ scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "Capture Output terminal");
+ scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume");
+
+ scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "Playback Input terminal");
+ scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels");
+ scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "Playback Output terminal");
+ scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume");
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 2d6d3286ffde..1cdda44455b3 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -95,7 +95,9 @@ enum {
STR_CLKSRC_IN,
STR_CLKSRC_OUT,
STR_USB_IT,
+ STR_USB_IT_CH,
STR_IO_IT,
+ STR_IO_IT_CH,
STR_USB_OT,
STR_IO_OT,
STR_FU_IN,
@@ -104,25 +106,10 @@ enum {
STR_AS_OUT_ALT1,
STR_AS_IN_ALT0,
STR_AS_IN_ALT1,
+ NUM_STR_DESCRIPTORS,
};
-static struct usb_string strings_fn[] = {
- /* [STR_ASSOC].s = DYNAMIC, */
- [STR_IF_CTRL].s = "Topology Control",
- [STR_CLKSRC_IN].s = "Input Clock",
- [STR_CLKSRC_OUT].s = "Output Clock",
- [STR_USB_IT].s = "USBH Out",
- [STR_IO_IT].s = "USBD Out",
- [STR_USB_OT].s = "USBH In",
- [STR_IO_OT].s = "USBD In",
- [STR_FU_IN].s = "Capture Volume",
- [STR_FU_OUT].s = "Playback Volume",
- [STR_AS_OUT_ALT0].s = "Playback Inactive",
- [STR_AS_OUT_ALT1].s = "Playback Active",
- [STR_AS_IN_ALT0].s = "Capture Inactive",
- [STR_AS_IN_ALT1].s = "Capture Active",
- { },
-};
+static struct usb_string strings_fn[NUM_STR_DESCRIPTORS + 1] = {};
static const char *const speed_names[] = {
[USB_SPEED_UNKNOWN] = "UNKNOWN",
@@ -1049,6 +1036,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
strings_fn[STR_ASSOC].s = uac2_opts->function_name;
+ strings_fn[STR_IF_CTRL].s = uac2_opts->if_ctrl_name;
+ strings_fn[STR_CLKSRC_IN].s = uac2_opts->clksrc_in_name;
+ strings_fn[STR_CLKSRC_OUT].s = uac2_opts->clksrc_out_name;
+
+ strings_fn[STR_USB_IT].s = uac2_opts->c_it_name;
+ strings_fn[STR_USB_IT_CH].s = uac2_opts->c_it_ch_name;
+ strings_fn[STR_IO_OT].s = uac2_opts->c_ot_name;
+ strings_fn[STR_FU_OUT].s = uac2_opts->c_fu_vol_name;
+ strings_fn[STR_AS_OUT_ALT0].s = "Playback Inactive";
+ strings_fn[STR_AS_OUT_ALT1].s = "Playback Active";
+
+ strings_fn[STR_IO_IT].s = uac2_opts->p_it_name;
+ strings_fn[STR_IO_IT_CH].s = uac2_opts->p_it_ch_name;
+ strings_fn[STR_USB_OT].s = uac2_opts->p_ot_name;
+ strings_fn[STR_FU_IN].s = uac2_opts->p_fu_vol_name;
+ strings_fn[STR_AS_IN_ALT0].s = "Capture Inactive";
+ strings_fn[STR_AS_IN_ALT1].s = "Capture Active";
us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
if (IS_ERR(us))
@@ -1072,7 +1076,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
in_clk_src_desc.iClockSource = us[STR_CLKSRC_IN].id;
out_clk_src_desc.iClockSource = us[STR_CLKSRC_OUT].id;
usb_out_it_desc.iTerminal = us[STR_USB_IT].id;
+ usb_out_it_desc.iChannelNames = us[STR_USB_IT_CH].id;
io_in_it_desc.iTerminal = us[STR_IO_IT].id;
+ io_in_it_desc.iChannelNames = us[STR_IO_IT_CH].id;
usb_in_ot_desc.iTerminal = us[STR_USB_OT].id;
io_out_ot_desc.iTerminal = us[STR_IO_OT].id;
std_as_out_if0_desc.iInterface = us[STR_AS_OUT_ALT0].id;
@@ -2100,10 +2106,24 @@ UAC2_ATTRIBUTE(s16, c_volume_max);
UAC2_ATTRIBUTE(s16, c_volume_res);
UAC2_ATTRIBUTE(u32, fb_max);
UAC2_ATTRIBUTE_STRING(function_name);
+UAC2_ATTRIBUTE_STRING(if_ctrl_name);
+UAC2_ATTRIBUTE_STRING(clksrc_in_name);
+UAC2_ATTRIBUTE_STRING(clksrc_out_name);
+
+UAC2_ATTRIBUTE_STRING(p_it_name);
+UAC2_ATTRIBUTE_STRING(p_it_ch_name);
+UAC2_ATTRIBUTE_STRING(p_ot_name);
+UAC2_ATTRIBUTE_STRING(p_fu_vol_name);
+
+UAC2_ATTRIBUTE_STRING(c_it_name);
+UAC2_ATTRIBUTE_STRING(c_it_ch_name);
+UAC2_ATTRIBUTE_STRING(c_ot_name);
+UAC2_ATTRIBUTE_STRING(c_fu_vol_name);
UAC2_ATTRIBUTE(s16, p_terminal_type);
UAC2_ATTRIBUTE(s16, c_terminal_type);
+
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
&f_uac2_opts_attr_p_srate,
@@ -2130,6 +2150,19 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_c_volume_res,
&f_uac2_opts_attr_function_name,
+ &f_uac2_opts_attr_if_ctrl_name,
+ &f_uac2_opts_attr_clksrc_in_name,
+ &f_uac2_opts_attr_clksrc_out_name,
+
+ &f_uac2_opts_attr_p_it_name,
+ &f_uac2_opts_attr_p_it_ch_name,
+ &f_uac2_opts_attr_p_ot_name,
+ &f_uac2_opts_attr_p_fu_vol_name,
+
+ &f_uac2_opts_attr_c_it_name,
+ &f_uac2_opts_attr_c_it_ch_name,
+ &f_uac2_opts_attr_c_ot_name,
+ &f_uac2_opts_attr_c_fu_vol_name,
&f_uac2_opts_attr_p_terminal_type,
&f_uac2_opts_attr_c_terminal_type,
@@ -2191,6 +2224,19 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->fb_max = FBACK_FAST_MAX;
scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+ scnprintf(opts->if_ctrl_name, sizeof(opts->if_ctrl_name), "Topology Control");
+ scnprintf(opts->clksrc_in_name, sizeof(opts->clksrc_in_name), "Input Clock");
+ scnprintf(opts->clksrc_out_name, sizeof(opts->clksrc_out_name), "Output Clock");
+
+ scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "USBD Out");
+ scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels");
+ scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "USBH In");
+ scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume");
+
+ scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "USBH Out");
+ scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels");
+ scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "USBD In");
+ scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume");
opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE;
opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 24299576972f..ca8dbec65f73 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1140,35 +1140,35 @@ static int u_audio_rate_get(struct snd_kcontrol *kcontrol,
}
static struct snd_kcontrol_new u_audio_controls[] = {
- [UAC_FBACK_CTRL] {
+ [UAC_FBACK_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Capture Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
- [UAC_P_PITCH_CTRL] {
+ [UAC_P_PITCH_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Playback Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
- [UAC_MUTE_CTRL] {
+ [UAC_MUTE_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_mute_info,
.get = u_audio_mute_get,
.put = u_audio_mute_put,
},
- [UAC_VOLUME_CTRL] {
+ [UAC_VOLUME_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_volume_info,
.get = u_audio_volume_get,
.put = u_audio_volume_put,
},
- [UAC_RATE_CTRL] {
+ [UAC_RATE_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "", /* will be filled later */
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index b394105e55d6..0a8c05b2746b 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
+#include <linux/serial.h>
#include "u_serial.h"
@@ -126,6 +127,7 @@ struct gs_port {
wait_queue_head_t close_wait;
bool suspended; /* port suspended */
bool start_delayed; /* delay start when suspended */
+ struct async_icount icount;
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -257,6 +259,7 @@ __acquires(&port->port_lock)
break;
}
do_tty_wake = true;
+ port->icount.tx += len;
req->length = len;
list_del(&req->list);
@@ -408,6 +411,7 @@ static void gs_rx_push(struct work_struct *work)
size -= n;
}
+ port->icount.rx += size;
count = tty_insert_flip_string(&port->port, packet,
size);
if (count)
@@ -851,6 +855,23 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
return status;
}
+static int gs_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct gs_port *port = tty->driver_data;
+ struct async_icount cnow;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ cnow = port->icount;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+
+ return 0;
+}
+
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
@@ -861,6 +882,7 @@ static const struct tty_operations gs_tty_ops = {
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
+ .get_icount = gs_get_icount,
};
/*-------------------------------------------------------------------------*/
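With gs_get_icount() wired into gs_tty_ops above, userspace can read the accumulated rx/tx counters through the standard TIOCGICOUNT ioctl. A minimal userspace sketch, separate from the patch itself, is shown below; the gadget TTY path /dev/ttyGS0 is only an example and may differ on a given system.

/* Sketch: query the new rx/tx counters via TIOCGICOUNT (example device path). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_icounter_struct icount;
	int fd = open("/dev/ttyGS0", O_RDONLY | O_NOCTTY);

	/* Short-circuit keeps ioctl() from running on a failed open(). */
	if (fd < 0 || ioctl(fd, TIOCGICOUNT, &icount) < 0) {
		perror("TIOCGICOUNT");
		return 1;
	}
	printf("rx=%d tx=%d\n", icount.rx, icount.tx);
	return 0;
}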
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 901d99310bc4..e1274338ea61 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -17,6 +17,10 @@
struct f_serial_opts {
struct usb_function_instance func_inst;
u8 port_num;
+ u8 protocol;
+
+ struct mutex lock; /* protect instances */
+ int instances;
};
/*
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index f7a616760e31..feb6eb76462f 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -52,7 +52,17 @@ struct f_uac1_opts {
int req_number;
unsigned bound:1;
- char function_name[32];
+ char function_name[USB_MAX_STRING_LEN];
+
+ char p_it_name[USB_MAX_STRING_LEN];
+ char p_it_ch_name[USB_MAX_STRING_LEN];
+ char p_ot_name[USB_MAX_STRING_LEN];
+ char p_fu_vol_name[USB_MAX_STRING_LEN];
+
+ char c_it_name[USB_MAX_STRING_LEN];
+ char c_it_ch_name[USB_MAX_STRING_LEN];
+ char c_ot_name[USB_MAX_STRING_LEN];
+ char c_fu_vol_name[USB_MAX_STRING_LEN];
struct mutex lock;
int refcnt;
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 5e81bdd6c5fb..0df808289ded 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -68,7 +68,20 @@ struct f_uac2_opts {
int fb_max;
bool bound;
- char function_name[32];
+ char function_name[USB_MAX_STRING_LEN];
+ char if_ctrl_name[USB_MAX_STRING_LEN];
+ char clksrc_in_name[USB_MAX_STRING_LEN];
+ char clksrc_out_name[USB_MAX_STRING_LEN];
+
+ char p_it_name[USB_MAX_STRING_LEN];
+ char p_it_ch_name[USB_MAX_STRING_LEN];
+ char p_ot_name[USB_MAX_STRING_LEN];
+ char p_fu_vol_name[USB_MAX_STRING_LEN];
+
+ char c_it_name[USB_MAX_STRING_LEN];
+ char c_it_ch_name[USB_MAX_STRING_LEN];
+ char c_ot_name[USB_MAX_STRING_LEN];
+ char c_fu_vol_name[USB_MAX_STRING_LEN];
s16 p_terminal_type;
s16 c_terminal_type;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a024aecb76dc..de1736f834e6 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -121,6 +121,9 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
list_for_each_entry(format, &uvc->header->formats, entry) {
const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+ if (IS_ERR(fmtdesc))
+ continue;
+
if (fmtdesc->fcc == pixelformat) {
uformat = format->fmt;
break;
@@ -240,6 +243,7 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
struct uvc_video *video = &uvc->video;
struct uvcg_format *uformat;
struct uvcg_frame *uframe;
+ const struct uvc_format_desc *fmtdesc;
u8 *fcc;
if (fmt->type != video->queue.queue.type)
@@ -277,7 +281,10 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
fmt->fmt.pix.height = uframe->frame.w_height;
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
- fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
+ fmtdesc = to_uvc_format(uformat);
+ if (IS_ERR(fmtdesc))
+ return PTR_ERR(fmtdesc);
+ fmt->fmt.pix.pixelformat = fmtdesc->fcc;
}
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
@@ -389,6 +396,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return -EINVAL;
fmtdesc = to_uvc_format(uformat);
+ if (IS_ERR(fmtdesc))
+ return PTR_ERR(fmtdesc);
+
f->pixelformat = fmtdesc->fcc;
return 0;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 03179b1880fd..9c7381661016 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -705,7 +705,6 @@ static const struct file_operations ep_io_operations = {
.open = ep_open,
.release = ep_release,
- .llseek = no_llseek,
.unlocked_ioctl = ep_ioctl,
.read_iter = ep_read_iter,
.write_iter = ep_write_iter,
@@ -1939,7 +1938,6 @@ gadget_dev_open (struct inode *inode, struct file *fd)
}
static const struct file_operations ep0_operations = {
- .llseek = no_llseek,
.open = gadget_dev_open,
.read = ep0_read,
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 399fca32a8ac..112fd18d8c99 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -1364,7 +1364,6 @@ static const struct file_operations raw_fops = {
.unlocked_ioctl = raw_ioctl,
.compat_ioctl = raw_ioctl,
.release = raw_release,
- .llseek = no_llseek,
};
static struct miscdevice raw_misc_device = {
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 6aea1ecb3999..115d219c9c00 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -8,8 +8,8 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#include "u_f.h"
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index b76885d78e8a..4928eba19327 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -187,7 +187,6 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
static const struct file_operations queue_dbg_fops = {
.owner = THIS_MODULE,
.open = queue_dbg_open,
- .llseek = no_llseek,
.read = queue_dbg_read,
.release = queue_dbg_release,
};
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 35a652807fca..5149e2b7f050 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -639,6 +639,7 @@ static const struct of_device_id bdc_of_match[] = {
{ .compatible = "brcm,bdc" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, bdc_of_match);
static struct platform_driver bdc_driver = {
.driver = {
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index d394affb7072..62fce42ef2da 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -2033,8 +2033,8 @@ static void cdns2_quiesce(struct cdns2_device *pdev)
set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
/* Disable interrupt. */
- writeb(0, &pdev->interrupt_regs->extien),
- writeb(0, &pdev->interrupt_regs->usbien),
+ writeb(0, &pdev->interrupt_regs->extien);
+ writeb(0, &pdev->interrupt_regs->usbien);
writew(0, &pdev->adma_regs->ep_ien);
/* Clear interrupt line. */
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
index 50c3d0974d9b..b1a8f772467c 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
@@ -15,8 +15,7 @@
#include "cdns2-gadget.h"
#define PCI_DRIVER_NAME "cdns-pci-usbhs"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0120
+#define PCI_DEVICE_ID_CDNS_USB2 0x0120
#define PCI_BAR_DEV 0
#define PCI_DEV_FN_DEVICE 0
@@ -114,8 +113,8 @@ static const struct dev_pm_ops cdns2_pci_pm_ops = {
};
static const struct pci_device_id cdns2_pci_ids[] = {
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB2),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE },
{ 0, }
};
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index f37b0d8386c1..ff7bee78bcc4 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1304,7 +1304,8 @@ static int dummy_urb_enqueue(
/* kick the scheduler, it'll do the rest */
if (!hrtimer_active(&dum_hcd->timer))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+ HRTIMER_MODE_REL_SOFT);
done:
spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
@@ -1325,7 +1326,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
!list_empty(&dum_hcd->urbp_list))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return rc;
@@ -1995,7 +1996,8 @@ return_urb:
dum_hcd->udev = NULL;
} else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
/* want a 1 msec delay here */
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+ HRTIMER_MODE_REL_SOFT);
}
spin_unlock_irqrestore(&dum->lock, flags);
@@ -2389,7 +2391,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
dum_hcd->rh_state = DUMMY_RH_RUNNING;
set_link_state(dum_hcd);
if (!list_empty(&dum_hcd->urbp_list))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
hcd->state = HC_STATE_RUNNING;
}
spin_unlock_irq(&dum_hcd->dum->lock);
@@ -2467,7 +2469,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->timer.function = dummy_timer;
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
@@ -2497,7 +2499,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->timer.function = dummy_timer;
dum_hcd->rh_state = DUMMY_RH_RUNNING;
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index d5f29f8fe481..3bfd889ed56a 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1487,31 +1487,29 @@ static int udc_ep0_out_req(struct lpc32xx_udc *udc)
req = list_entry(ep0->queue.next, struct lpc32xx_request,
queue);
- if (req) {
- if (req->req.length == 0) {
- /* Just dequeue request */
- done(ep0, req, 0);
- udc->ep0state = WAIT_FOR_SETUP;
- return 1;
- }
+ if (req->req.length == 0) {
+ /* Just dequeue request */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
+ }
- /* Get data from FIFO */
- bufferspace = req->req.length - req->req.actual;
- if (bufferspace > ep0->ep.maxpacket)
- bufferspace = ep0->ep.maxpacket;
+ /* Get data from FIFO */
+ bufferspace = req->req.length - req->req.actual;
+ if (bufferspace > ep0->ep.maxpacket)
+ bufferspace = ep0->ep.maxpacket;
- /* Copy data to buffer */
- prefetchw(req->req.buf + req->req.actual);
- tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
- bufferspace);
- req->req.actual += bufferspace;
+ /* Copy data to buffer */
+ prefetchw(req->req.buf + req->req.actual);
+ tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
+ bufferspace);
+ req->req.actual += bufferspace;
- if (tr < ep0->ep.maxpacket) {
- /* This is the last packet */
- done(ep0, req, 0);
- udc->ep0state = WAIT_FOR_SETUP;
- return 1;
- }
+ if (tr < ep0->ep.maxpacket) {
+ /* This is the last packet */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
}
return 0;
@@ -1962,18 +1960,17 @@ static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
/* If there isn't a request waiting, something went wrong */
req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
- if (req) {
- done(ep, req, 0);
- /* Start another request if ready */
- if (!list_empty(&ep->queue)) {
- if (ep->is_in)
- udc_ep_in_req_dma(udc, ep);
- else
- udc_ep_out_req_dma(udc, ep);
- } else
- ep->req_pending = 0;
- }
+ done(ep, req, 0);
+
+ /* Start another request if ready */
+ if (!list_empty(&ep->queue)) {
+ if (ep->is_in)
+ udc_ep_in_req_dma(udc, ep);
+ else
+ udc_ep_out_req_dma(udc, ep);
+ } else
+ ep->req_pending = 0;
}
@@ -1989,10 +1986,6 @@ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
#endif
req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
- if (!req) {
- ep_err(ep, "DMA interrupt on no req!\n");
- return;
- }
dd = req->dd_desc_ptr;
/* DMA descriptor should always be retired for this call */
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 74590f93ea61..ebc45565c33e 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -947,7 +947,7 @@ static int xudc_ep_disable(struct usb_ep *_ep)
ep->desc = NULL;
ep->ep_usb.desc = NULL;
- dev_dbg(udc->dev, "USB Ep %d disable\n ", ep->epnumber);
+ dev_dbg(udc->dev, "USB Ep %d disable\n", ep->epnumber);
/* Disable the endpoint.*/
epcfg = udc->read_fn(udc->addr + ep->offset);
epcfg &= ~XUSB_EP_CFG_VALID_MASK;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4448d0ab06f0..d011d6c753ed 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,11 +40,11 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
- depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
config USB_XHCI_PCI_RENESAS
tristate "Support for additional Renesas xHCI controller with firmware"
+ depends on USB_XHCI_PCI
help
Say 'Y' to enable the support for the Renesas xHCI controller with
firmware. Make sure you have the firmware for the device and
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index 77e42c739c58..68cad0620f1a 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -246,6 +246,7 @@ static const struct of_device_id brcm_ehci_of_match[] = {
{ .compatible = "brcm,bcm7445-ehci", },
{}
};
+MODULE_DEVICE_TABLE(of, brcm_ehci_of_match);
static struct platform_driver ehci_brcm_driver = {
.probe = ehci_brcm_probe,
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index f40bc2a7a124..e3a961d3f5fc 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -48,7 +48,6 @@ struct exynos_ehci_hcd {
static int exynos_ehci_get_phy(struct device *dev,
struct exynos_ehci_hcd *exynos_ehci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -66,26 +65,22 @@ static int exynos_ehci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ehci->legacy_phy = true;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index bfa2eba4e3a7..1379e03644b2 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -37,7 +37,6 @@ struct exynos_ohci_hcd {
static int exynos_ohci_get_phy(struct device *dev,
struct exynos_ohci_hcd *exynos_ohci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -55,26 +54,22 @@ static int exynos_ohci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ohci->legacy_phy = true;
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 8264c454f6bd..5b775e1ea527 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -51,8 +51,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
-static struct clk *usb_host_clk;
-
static void isp1301_configure_lpc32xx(void)
{
/* LPC32XX only supports DAT_SE0 USB mode */
@@ -155,6 +153,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0, irq;
struct device_node *isp1301_node;
+ struct clk *usb_host_clk;
if (pdev->dev.of_node) {
isp1301_node = of_parse_phandle(pdev->dev.of_node,
@@ -180,26 +179,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
/* Enable USB host clock */
- usb_host_clk = devm_clk_get(&pdev->dev, NULL);
+ usb_host_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(usb_host_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
+ dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n");
ret = PTR_ERR(usb_host_clk);
goto fail_disable;
}
- ret = clk_prepare_enable(usb_host_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
- goto fail_disable;
- }
-
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
- goto fail_hcd;
+ goto fail_disable;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -229,8 +222,6 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
-fail_hcd:
- clk_disable_unprepare(usb_host_clk);
fail_disable:
isp1301_i2c_client = NULL;
return ret;
@@ -243,7 +234,6 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
- clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
}
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index f64bfe5f4d4d..a6be061f8653 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -204,10 +204,6 @@ static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
{
.name = "usb",
- .compatible = "ohci-littledian",
- },
- {
- .name = "usb",
.compatible = "ohci-le",
},
#endif
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 9f4bf8c5f8a5..6576515a29cd 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -297,9 +297,9 @@ static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
{
u16 pipenum = pipe->info.pipenum;
- const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
- const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
- const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
+ static const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
+ static const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
+ static const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */
dma_ch = R8A66597_PIPE_NO_DMA;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 161c09953c4e..241d7aa1fbc2 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -173,16 +173,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status)
spin_lock(&dbc->lock);
}
-static void xhci_dbc_flush_single_request(struct dbc_request *req)
+static void trb_to_noop(union xhci_trb *trb)
{
- union xhci_trb *trb = req->trb;
-
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+}
+static void xhci_dbc_flush_single_request(struct dbc_request *req)
+{
+ trb_to_noop(req->trb);
xhci_dbc_giveback(req, -ESHUTDOWN);
}
@@ -649,7 +651,6 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
case DS_DISABLED:
return;
case DS_CONFIGURED:
- case DS_STALLED:
if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc);
break;
@@ -670,6 +671,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
}
static void
+handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
+{
+ if (halted) {
+ dev_info(dbc->dev, "DbC Endpoint halted\n");
+ dep->halted = 1;
+
+ } else if (dep->halted) {
+ dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
+ dep->halted = 0;
+
+ if (!list_empty(&dep->list_pending))
+ writel(DBC_DOOR_BELL_TARGET(dep->direction),
+ &dbc->regs->doorbell);
+ }
+}
+
+static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
u32 portsc;
@@ -697,6 +715,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
struct xhci_ring *ring;
int ep_id;
int status;
+ struct xhci_ep_ctx *ep_ctx;
u32 comp_code;
size_t remain_length;
struct dbc_request *req = NULL, *r;
@@ -706,8 +725,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
dep = (ep_id == EPID_OUT) ?
get_out_ep(dbc) : get_in_ep(dbc);
+ ep_ctx = (ep_id == EPID_OUT) ?
+ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
ring = dep->ring;
+ /* Match the pending request: */
+ list_for_each_entry(r, &dep->list_pending, list_pending) {
+ if (r->trb_dma == event->trans_event.buffer) {
+ req = r;
+ break;
+ }
+ if (r->status == -COMP_STALL_ERROR) {
+ dev_warn(dbc->dev, "Give back stale stalled req\n");
+ ring->num_trbs_free++;
+ xhci_dbc_giveback(r, 0);
+ }
+ }
+
+ if (!req) {
+ dev_warn(dbc->dev, "no matched request\n");
+ return;
+ }
+
+ trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
@@ -718,31 +759,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
case COMP_TRB_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_USB_TRANSACTION_ERROR:
- case COMP_STALL_ERROR:
dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
status = -comp_code;
break;
+ case COMP_STALL_ERROR:
+ dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
+ event->trans_event.buffer, remain_length, ep_ctx->deq);
+ status = 0;
+ dep->halted = 1;
+
+ /*
+ * xHC DbC may trigger a STALL bulk xfer event when host sends a
+ * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
+ * active bulk transfer.
+ *
+ * Don't give back this transfer request as hardware will later
+ * start processing TRBs starting from this 'STALLED' TRB,
+ * causing TRBs and requests to be out of sync.
+ *
+ * If STALL event shows some bytes were transferred then assume
+ * it's an actual transfer issue and give back the request.
+ * In this case mark the TRB as No-Op to prevent hw from using the
+ * TRB again.
+ */
+
+ if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
+ dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
+ if (remain_length == req->length) {
+ dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
+ req->status = -COMP_STALL_ERROR;
+ req->actual = 0;
+ return;
+ }
+ dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
+ trb_to_noop(req->trb);
+ }
+ break;
+
default:
dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
status = -comp_code;
break;
}
- /* Match the pending request: */
- list_for_each_entry(r, &dep->list_pending, list_pending) {
- if (r->trb_dma == event->trans_event.buffer) {
- req = r;
- break;
- }
- }
-
- if (!req) {
- dev_warn(dbc->dev, "no matched request\n");
- return;
- }
-
- trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
-
ring->num_trbs_free++;
req->actual = req->length - remain_length;
xhci_dbc_giveback(req, status);
@@ -762,7 +821,6 @@ static void inc_evt_deq(struct xhci_ring *ring)
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
- struct dbc_ep *dep;
union xhci_trb *evt;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -814,43 +872,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
return EVT_DISC;
}
- /* Handle endpoint stall event: */
+ /* Check and handle changes in endpoint halt status */
ctrl = readl(&dbc->regs->control);
- if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
- (ctrl & DBC_CTRL_HALT_OUT_TR)) {
- dev_info(dbc->dev, "DbC Endpoint stall\n");
- dbc->state = DS_STALLED;
-
- if (ctrl & DBC_CTRL_HALT_IN_TR) {
- dep = get_in_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- if (ctrl & DBC_CTRL_HALT_OUT_TR) {
- dep = get_out_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- return EVT_DONE;
- }
+ handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
+ handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);
/* Clear DbC run change bit: */
if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
writel(ctrl, &dbc->regs->control);
ctrl = readl(&dbc->regs->control);
}
-
break;
- case DS_STALLED:
- ctrl = readl(&dbc->regs->control);
- if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
- !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
- (ctrl & DBC_CTRL_DBC_RUN)) {
- dbc->state = DS_CONFIGURED;
- break;
- }
-
- return EVT_DONE;
default:
dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
break;
@@ -939,7 +971,6 @@ static const char * const dbc_state_strings[DS_MAX] = {
[DS_ENABLED] = "enabled",
[DS_CONNECTED] = "connected",
[DS_CONFIGURED] = "configured",
- [DS_STALLED] = "stalled",
};
static ssize_t dbc_show(struct device *dev,
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 0118c6288a3c..8ec813b6e9fd 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -81,7 +81,6 @@ enum dbc_state {
DS_ENABLED,
DS_CONNECTED,
DS_CONFIGURED,
- DS_STALLED,
DS_MAX
};
@@ -90,6 +89,7 @@ struct dbc_ep {
struct list_head list_pending;
struct xhci_ring *ring;
unsigned int direction:1;
+ unsigned int halted:1;
};
#define DBC_QUEUE_SIZE 16
@@ -110,7 +110,6 @@ struct dbc_port {
struct tasklet_struct push;
struct list_head write_pool;
- struct kfifo write_fifo;
bool registered;
};
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index b74e98e94393..b8e78867e25a 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -24,19 +24,6 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
return dbc->priv;
}
-static unsigned int
-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
-{
- unsigned int len;
-
- len = kfifo_len(&port->write_fifo);
- if (len < size)
- size = len;
- if (size != 0)
- size = kfifo_out(&port->write_fifo, packet, size);
- return size;
-}
-
static int dbc_start_tx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
@@ -49,7 +36,7 @@ static int dbc_start_tx(struct dbc_port *port)
while (!list_empty(pool)) {
req = list_entry(pool->next, struct dbc_request, list_pool);
- len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+ len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET);
if (len == 0)
break;
do_tty_wake = true;
@@ -216,7 +203,7 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
spin_lock_irqsave(&port->port_lock, flags);
if (count)
- count = kfifo_in(&port->write_fifo, buf, count);
+ count = kfifo_in(&port->port.xmit_fifo, buf, count);
dbc_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -230,7 +217,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
int status;
spin_lock_irqsave(&port->port_lock, flags);
- status = kfifo_put(&port->write_fifo, ch);
+ status = kfifo_put(&port->port.xmit_fifo, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -253,7 +240,7 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
- room = kfifo_avail(&port->write_fifo);
+ room = kfifo_avail(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return room;
@@ -266,7 +253,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
- chars = kfifo_len(&port->write_fifo);
+ chars = kfifo_len(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return chars;
@@ -346,7 +333,7 @@ static void dbc_rx_push(struct tasklet_struct *t)
port->n_read = 0;
}
- list_move(&req->list_pool, &port->read_pool);
+ list_move_tail(&req->list_pool, &port->read_pool);
}
if (do_push)
@@ -424,7 +411,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
goto err_idr;
}
- ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+ ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
+ GFP_KERNEL);
if (ret)
goto err_exit_port;
@@ -453,7 +441,7 @@ err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
err_exit_port:
idr_remove(&dbc_tty_minors, port->minor);
err_idr:
@@ -478,7 +466,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
idr_remove(&dbc_tty_minors, port->minor);
mutex_unlock(&dbc_tty_minors_lock);
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->read_queue);
xhci_dbc_free_requests(&port->write_pool);
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 96eb36a58738..67ecf7320c62 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -42,6 +42,7 @@
#define XHCI_EXT_CAPS_DEBUG 10
/* Vendor caps */
#define XHCI_EXT_CAPS_VENDOR_INTEL 192
+#define XHCI_EXT_CAPS_INTEL_SPR_SHADOW 206
/* USB Legacy Support Capability - section 7.1.1 */
#define XHCI_HC_BIOS_OWNED (1 << 16)
#define XHCI_HC_OS_OWNED (1 << 24)
@@ -64,6 +65,10 @@
#define XHCI_HLC (1 << 19)
#define XHCI_BLC (1 << 20)
+/* Intel SPR shadow capability */
+#define XHCI_INTEL_SPR_ESS_PORT_OFFSET 0x8ac4 /* SuperSpeed port control */
+#define XHCI_INTEL_SPR_TUNEN BIT(4) /* Tunnel mode enabled */
+
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
#define XHCI_CMD_RUN (1 << 0)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 61f083de6e19..d27c30ac17fd 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -752,6 +752,42 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}
+/**
+ * xhci_port_is_tunneled() - Check if USB3 connection is tunneled over USB4
+ * @xhci: xhci host controller
+ * @port: USB3 port to be checked.
+ *
+ * Some hosts can detect if a USB3 connection is native USB3 or tunneled over
+ * USB4. Intel hosts expose this via vendor specific extended capability 206
+ * eSS PORT registers TUNEN (tunnel enabled) bit.
+ *
+ * A USB3 device must be connected to the port to detect the tunnel.
+ *
+ * Return: link tunnel mode enum, USB_LINK_UNKNOWN if host is incapable of
+ * detecting USB3 over USB4 tunnels. USB_LINK_NATIVE or USB_LINK_TUNNELED
+ * otherwise.
+ */
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port)
+{
+ void __iomem *base;
+ u32 offset;
+
+ base = &xhci->cap_regs->hc_capbase;
+ offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
+
+ if (offset && offset <= XHCI_INTEL_SPR_ESS_PORT_OFFSET) {
+ offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + port->hcd_portnum * 0x20;
+
+ if (readl(base + offset) & XHCI_INTEL_SPR_TUNEN)
+ return USB_LINK_TUNNELED;
+ else
+ return USB_LINK_NATIVE;
+ }
+
+ return USB_LINK_UNKNOWN;
+}
+
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 937ce5fd5809..d2900197a49e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2332,7 +2332,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
}
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
@@ -2365,6 +2366,11 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
return NULL;
}
+ err = xhci_set_interrupter_moderation(ir, imod_interval);
+ if (err)
+ xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
+ i, imod_interval);
+
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
i, xhci->max_interrupters);
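The extra imod_interval argument lets callers of xhci_create_secondary_interrupter() pick the interrupt moderation of their secondary event ring at creation time instead of relying on a default. A minimal sketch of a hypothetical client helper, assuming the caller already holds the usb_hcd it is offloading from:

/* Sketch of a hypothetical client: 2-segment secondary event ring, 5000 ns moderation. */
static struct xhci_interrupter *example_alloc_event_ring(struct usb_hcd *hcd)
{
	return xhci_create_secondary_interrupter(hcd, 2, 5000);
}

/* The ring is torn down later with xhci_remove_secondary_interrupter(hcd, ir). */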
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 247cc7c2ce70..30cc5a1380a5 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -50,6 +50,8 @@
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
+#define RENESAS_FW_NAME "renesas_usb_fw.mem"
+
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
@@ -573,12 +575,10 @@ exit:
return err;
}
-int renesas_xhci_check_request_fw(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
- struct xhci_driver_data *driver_data =
- (struct xhci_driver_data *)id->driver_data;
- const char *fw_name = driver_data->firmware;
+ const char fw_name[] = RENESAS_FW_NAME;
const struct firmware *fw;
bool has_rom;
int err;
@@ -625,7 +625,41 @@ exit:
release_firmware(fw);
return err;
}
-EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
-MODULE_DESCRIPTION("Support for Renesas xHCI controller with firmware");
+static int
+xhci_pci_renesas_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int retval;
+
+ retval = renesas_xhci_check_request_fw(dev, id);
+ if (retval)
+ return retval;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver xhci_renesas_pci_driver = {
+ .name = "xhci-pci-renesas",
+ .id_table = pci_ids,
+
+ .probe = xhci_pci_renesas_probe,
+ .remove = xhci_pci_remove,
+
+ .shutdown = usb_hcd_pci_shutdown,
+ .driver = {
+ .pm = pm_ptr(&usb_hcd_pci_pm_ops),
+ },
+};
+module_pci_driver(xhci_renesas_pci_driver);
+
+MODULE_DESCRIPTION("Renesas xHCI PCI Host Controller Driver");
+MODULE_FIRMWARE(RENESAS_FW_NAME);
+MODULE_IMPORT_NS(xhci);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dc1e345ab67e..91dccd25a551 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -55,6 +55,9 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
+#define PCI_VENDOR_ID_PHYTIUM 0x1db7
+#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27
+
/* Thunderbolt */
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
@@ -78,6 +81,9 @@
#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
+#define PCI_DEVICE_ID_CADENCE 0x17CD
+#define PCI_DEVICE_ID_CADENCE_SSP 0x0200
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -93,6 +99,10 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.update_hub_device = xhci_pci_update_hub_device,
};
+/*
+ * Primary Legacy and MSI IRQ are synced in suspend_common().
+ * All MSI-X IRQs and secondary MSI IRQs should be synced here.
+ */
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
@@ -105,13 +115,12 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
}
}
-/* Free any IRQs and disable MSI-X */
+/* Legacy IRQ is freed by usb_remove_hcd() or usb_hcd_pci_shutdown() */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- /* return if using legacy interrupt */
if (hcd->irq > 0)
return;
@@ -235,15 +244,6 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct xhci_driver_data *driver_data;
- const struct pci_device_id *id;
-
- id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
-
- if (id && id->driver_data) {
- driver_data = (struct xhci_driver_data *)id->driver_data;
- xhci->quirks |= driver_data->quirks;
- }
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -416,6 +416,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM &&
+ pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
/* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
if (pdev->vendor == PCI_VENDOR_ID_VIA &&
pdev->device == 0x3432)
@@ -473,6 +477,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
}
+ if (pdev->vendor == PCI_DEVICE_ID_CADENCE &&
+ pdev->device == PCI_DEVICE_ID_CADENCE_SSP)
+ xhci->quirks |= XHCI_CDNS_SCTX_QUIRK;
+
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
@@ -534,10 +542,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ u8 sbrn;
xhci = hcd_to_xhci(hcd);
- if (!xhci->sbrn)
- pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
@@ -552,7 +559,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
- xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &sbrn);
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int)sbrn);
/* Find any debug ports */
return xhci_pci_reinit(xhci, pdev);
@@ -572,21 +580,13 @@ static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hd
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
*/
-static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- struct xhci_driver_data *driver_data;
struct reset_control *reset;
- driver_data = (struct xhci_driver_data *)id->driver_data;
- if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
- retval = renesas_xhci_check_request_fw(dev, id);
- if (retval)
- return retval;
- }
-
reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
@@ -651,12 +651,30 @@ put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, xhci);
-static void xhci_pci_remove(struct pci_dev *dev)
+static const struct pci_device_id pci_ids_reject[] = {
+ /* handled by xhci-pci-renesas */
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
+
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ if (pci_match_id(pci_ids_reject, dev))
+ return -ENODEV;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+void xhci_pci_remove(struct pci_dev *dev)
{
struct xhci_hcd *xhci;
+ bool set_power_d3;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
xhci->xhc_state |= XHCI_STATE_REMOVING;
@@ -669,12 +687,13 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci->shared_hcd = NULL;
}
+ usb_hcd_pci_remove(dev);
+
/* Workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ if (set_power_d3)
pci_set_power_state(dev, PCI_D3hot);
-
- usb_hcd_pci_remove(dev);
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_remove, xhci);
/*
* In some Intel xHCI controllers, in order to get D3 working,
@@ -783,7 +802,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int retval = 0;
reset_control_reset(xhci->reset);
@@ -814,8 +832,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- retval = xhci_resume(xhci, msg);
- return retval;
+ return xhci_resume(xhci, msg);
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
@@ -882,19 +899,8 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-static const struct xhci_driver_data reneses_data = {
- .quirks = XHCI_RENESAS_FW_QUIRK,
- .firmware = "renesas_usb_fw.mem",
-};
-
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014),
- .driver_data = (unsigned long)&reneses_data,
- },
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015),
- .driver_data = (unsigned long)&reneses_data,
- },
/* handle any USB 3.0 xHCI controller */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
@@ -902,14 +908,6 @@ static const struct pci_device_id pci_ids[] = {
};
MODULE_DEVICE_TABLE(pci, pci_ids);
-/*
- * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
- * load firmware, so don't encumber the xhci-pci driver with it.
- */
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-MODULE_FIRMWARE("renesas_usb_fw.mem");
-#endif
-
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
.name = hcd_name,
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
index cb9a8f331a44..e87c7d9d76b8 100644
--- a/drivers/usb/host/xhci-pci.h
+++ b/drivers/usb/host/xhci-pci.h
@@ -4,22 +4,7 @@
#ifndef XHCI_PCI_H
#define XHCI_PCI_H
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id);
-
-#else
-static int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return 0;
-}
-
-#endif
-
-struct xhci_driver_data {
- u64 quirks;
- const char *firmware;
-};
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id);
+void xhci_pci_remove(struct pci_dev *dev);
#endif
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 31bdfa52eeb2..ecaa75718e59 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -259,6 +259,12 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk"))
xhci->quirks |= XHCI_WRITE_64_HI_LO;
+ if (device_property_read_bool(tmpdev, "xhci-missing-cas-quirk"))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
+ if (device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk"))
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4ea2c3e072a9..4d664ba53fe9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1399,6 +1399,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+
+ /*
+ * Cadence xHCI controllers store some endpoint state
+ * information within Rsvd0 fields of Stream Endpoint
+ * context. This field is not cleared during Set TR
+ * Dequeue Pointer command which causes XDMA to skip
+ * over transfer ring and leads to data loss on stream
+ * pipe.
+ * To fix this issue driver must clear Rsvd0 field.
+ */
+ if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
+ ctx->reserved[0] = 0;
+ ctx->reserved[1] = 0;
+ }
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
@@ -2521,9 +2535,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->status = 0;
break;
case COMP_SHORT_PACKET:
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- requested, remaining);
td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
@@ -2764,35 +2775,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return 0;
}
- do {
- /* This TRB should be in the TD at the head of this ring's
- * TD list.
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if the ring is empty due to a stopped endpoint generating an
+ * extra completion event while the device was suspended, or an event for the last TRB
+ * of a short TD we already got a short event for. The short TD is already removed
+ * from the TD list.
*/
- if (list_empty(&ep_ring->td_list)) {
- /*
- * Don't print wanings if it's due to a stopped endpoint
- * generating an extra completion event if the device
- * was suspended. Or, a event for the last TRB of a
- * short TD we already got a short event for.
- * The short TD is already removed from the TD list.
- */
-
- if (!(trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
- ep_ring->last_td_was_short)) {
- xhci_warn(xhci, "WARN Event TRB for slot %u ep %d with no TDs queued?\n",
- slot_id, ep_index);
- }
- if (ep->skip) {
- ep->skip = false;
- xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
- slot_id, ep_index);
- }
-
- td = NULL;
- goto check_endpoint_halted;
+ if (trb_comp_code != COMP_STOPPED &&
+ trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
+ !ep_ring->last_td_was_short) {
+ xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
+ slot_id, ep_index);
}
+ ep->skip = false;
+ goto check_endpoint_halted;
+ }
+
+ do {
td = list_first_entry(&ep_ring->td_list, struct xhci_td,
td_list);
@@ -2803,7 +2804,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
skip_isoc_td(xhci, td, ep, status);
- continue;
+ if (!list_empty(&ep_ring->td_list))
+ continue;
+
+ xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
+ slot_id, ep_index);
+ ep->skip = false;
+ td = NULL;
+ goto check_endpoint_halted;
}
/*
@@ -3941,10 +3949,6 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
start_frame_id = (start_frame_id >> 3) & 0x7ff;
end_frame_id = (end_frame_id >> 3) & 0x7ff;
- xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
- __func__, index, readl(&xhci->run_regs->microframe_index),
- start_frame_id, end_frame_id, start_frame);
-
if (start_frame_id < end_frame_id) {
if (start_frame > end_frame_id ||
start_frame < start_frame_id)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index efdf4c228b8c..899c0effb5d3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -347,8 +347,8 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
}
/* interrupt moderation interval imod_interval in nanoseconds */
-static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
- u32 imod_interval)
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
{
u32 imod;
@@ -4525,6 +4525,20 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_port *port;
u32 capability;
+ /* Check if USB3 device at root port is tunneled over USB4 */
+ if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+
+ udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
+ if (udev->tunnel_mode == USB_LINK_UNKNOWN)
+ dev_dbg(&udev->dev, "link tunnel state unknown\n");
+ else if (udev->tunnel_mode == USB_LINK_TUNNELED)
+ dev_dbg(&udev->dev, "tunneled over USB4 link\n");
+ else if (udev->tunnel_mode == USB_LINK_NATIVE)
+ dev_dbg(&udev->dev, "native USB 3.x link\n");
+ return 0;
+ }
+
if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ebd0afd59a60..620502de971a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1498,15 +1498,10 @@ struct xhci_hcd {
spinlock_t lock;
/* packed release number */
- u8 sbrn;
u16 hci_version;
- u8 max_slots;
u16 max_interrupters;
- u8 max_ports;
- u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
/* Valid values are 12 to 20, inclusive */
@@ -1616,7 +1611,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
-#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
+/* Reserved. It was XHCI_RENESAS_FW_QUIRK */
#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
@@ -1628,6 +1623,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
+#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1831,7 +1827,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs);
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
@@ -1871,6 +1868,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1904,10 +1903,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_td *td);
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -1929,7 +1924,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port);
void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index c8098e9b432e..62b5a30edc42 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb)
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
- schedule_delayed_work(&pdata->work, 0);
+ /*
+ * there is a window during which no device
+ * is registered
+ */
+ if (pdata->bd)
+ schedule_delayed_work(&pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
@@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct backlight_properties props;
+ struct backlight_device *backlight;
struct appledisplay *pdata;
struct usb_device *udev = interface_to_usbdev(iface);
struct usb_endpoint_descriptor *endpoint;
@@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface,
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
- pdata->bd = backlight_device_register(bl_name, NULL, pdata,
+ backlight = backlight_device_register(bl_name, NULL, pdata,
&appledisplay_bl_data, &props);
- if (IS_ERR(pdata->bd)) {
+ if (IS_ERR(backlight)) {
dev_err(&iface->dev, "Backlight registration failed\n");
- retval = PTR_ERR(pdata->bd);
+ retval = PTR_ERR(backlight);
goto error;
}
+ pdata->bd = backlight;
/* Try to get brightness */
brightness = appledisplay_bl_get_brightness(pdata->bd);
diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
index 2b2019c19cde..1ce885e4184c 100644
--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
+++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
@@ -335,6 +335,7 @@ static const struct of_device_id brcmstb_usb_pinmap_of_match[] = {
{ .compatible = "brcm,usb-pinmap" },
{ },
};
+MODULE_DEVICE_TABLE(of, brcmstb_usb_pinmap_of_match);
static struct platform_driver brcmstb_usb_pinmap_driver = {
.driver = {
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index cecd7693b741..75f5a740cba3 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
address, data, iobuf, CYPRESS_MAX_REQSIZE,
USB_CTRL_GET_TIMEOUT);
+ /* we must not process garbage */
+ if (retval < 2)
+ goto err_buf;
/* store returned data (more READs to be added) */
switch (request) {
@@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request,
break;
}
+err_buf:
kfree(iobuf);
error:
return retval;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 7cbef74dfc9a..f392d6f84df9 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -627,7 +627,6 @@ static const struct file_operations ld_usb_fops = {
.open = ld_usb_open,
.release = ld_usb_release,
.poll = ld_usb_poll,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
index 56710e6b1653..560591e02d6a 100644
--- a/drivers/usb/misc/onboard_usb_dev.c
+++ b/drivers/usb/misc/onboard_usb_dev.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
+#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -29,6 +30,17 @@
#include "onboard_usb_dev.h"
+/* USB5744 register offset and mask */
+#define USB5744_CMD_ATTACH 0xAA
+#define USB5744_CMD_ATTACH_LSB 0x56
+#define USB5744_CMD_CREG_ACCESS 0x99
+#define USB5744_CMD_CREG_ACCESS_LSB 0x37
+#define USB5744_CREG_MEM_ADDR 0x00
+#define USB5744_CREG_WRITE 0x00
+#define USB5744_CREG_RUNTIMEFLAGS2 0x41
+#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D
+#define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3)
+
static void onboard_dev_attach_usb_driver(struct work_struct *work);
static struct usb_device_driver onboard_dev_usbdev_driver;
@@ -98,6 +110,7 @@ static int onboard_dev_power_on(struct onboard_dev *onboard_dev)
fsleep(onboard_dev->pdata->reset_us);
gpiod_set_value_cansleep(onboard_dev->reset_gpio, 0);
+ fsleep(onboard_dev->pdata->power_on_delay_us);
onboard_dev->is_powered_on = true;
@@ -296,10 +309,50 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work)
pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
}
+static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ struct device *dev = &client->dev;
+ int ret;
+
+ /*
+ * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled
+ * and ready to respond to SMBus runtime commands.
+ * The command writes 5 bytes to memory and a single data byte to
+ * the configuration register.
+ */
+ char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5,
+ USB5744_CREG_WRITE, 1,
+ USB5744_CREG_RUNTIMEFLAGS2,
+ USB5744_CREG_RUNTIMEFLAGS2_LSB,
+ USB5744_CREG_BYPASS_UDC_SUSPEND};
+
+ ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf);
+ if (ret)
+ return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
+
+ ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS,
+ USB5744_CMD_CREG_ACCESS_LSB);
+ if (ret)
+ return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n");
+
+ /* Send SMBus command to boot hub. */
+ ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH,
+ USB5744_CMD_ATTACH_LSB);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n");
+
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+
static int onboard_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct onboard_dev *onboard_dev;
+ struct device_node *i2c_node;
int err;
onboard_dev = devm_kzalloc(dev, sizeof(*onboard_dev), GFP_KERNEL);
@@ -339,6 +392,27 @@ static int onboard_dev_probe(struct platform_device *pdev)
if (err)
return err;
+ i2c_node = of_parse_phandle(pdev->dev.of_node, "i2c-bus", 0);
+ if (i2c_node) {
+ struct i2c_client *client;
+
+ client = of_find_i2c_device_by_node(i2c_node);
+ of_node_put(i2c_node);
+
+ if (!client) {
+ err = -EPROBE_DEFER;
+ goto err_power_off;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") ||
+ of_device_is_compatible(pdev->dev.of_node, "usb424,5744"))
+ err = onboard_dev_5744_i2c_init(client);
+
+ put_device(&client->dev);
+ if (err < 0)
+ goto err_power_off;
+ }
+
/*
* The USB driver might have been detached from the USB devices by
* onboard_dev_remove() (e.g. through an 'unbind' by userspace),
@@ -350,6 +424,10 @@ static int onboard_dev_probe(struct platform_device *pdev)
schedule_work(&attach_usb_driver_work);
return 0;
+
+err_power_off:
+ onboard_dev_power_off(onboard_dev);
+ return err;
}
static void onboard_dev_remove(struct platform_device *pdev)
diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h
index fbba549c0f47..317b3eb99c02 100644
--- a/drivers/usb/misc/onboard_usb_dev.h
+++ b/drivers/usb/misc/onboard_usb_dev.h
@@ -10,6 +10,7 @@
struct onboard_dev_pdata {
unsigned long reset_us; /* reset pulse width in us */
+ unsigned long power_on_delay_us; /* power on delay in us */
unsigned int num_supplies; /* number of supplies */
const char * const supply_names[MAX_SUPPLIES];
bool is_hub;
@@ -24,6 +25,7 @@ static const struct onboard_dev_pdata microchip_usb424_data = {
static const struct onboard_dev_pdata microchip_usb5744_data = {
.reset_us = 0,
+ .power_on_delay_us = 10000,
.num_supplies = 2,
.supply_names = { "vdd", "vdd2" },
.is_hub = true,
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index 26e9b8749d8a..19906301a4eb 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -232,7 +232,7 @@ static void eud_remove(struct platform_device *pdev)
}
static const struct of_device_id eud_dt_match[] = {
- { .compatible = "qcom,sc7280-eud" },
+ { .compatible = "qcom,eud" },
{ }
};
MODULE_DEVICE_TABLE(of, eud_dt_match);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 4745a320eae4..4a9859e03f6b 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -404,7 +404,6 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
struct usb_yurex *dev;
int len = 0;
char in_buffer[MAX_S64_STRLEN];
- unsigned long flags;
dev = file->private_data;
@@ -419,9 +418,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
return -EIO;
}
- spin_lock_irqsave(&dev->lock, flags);
+ spin_lock_irq(&dev->lock);
scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu);
- spin_unlock_irqrestore(&dev->lock, flags);
+ spin_unlock_irq(&dev->lock);
mutex_unlock(&dev->io_mutex);
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
@@ -511,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
__func__, retval);
goto error;
}
- if (set && timeout)
+ if (set && timeout) {
+ spin_lock_irq(&dev->lock);
dev->bbu = c2;
+ spin_unlock_irq(&dev->lock);
+ }
return timeout ? count : -EIO;
error:
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 4e30de4db1c0..afb71c18415d 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1289,7 +1289,6 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations mon_fops_binary = {
.owner = THIS_MODULE,
.open = mon_bin_open,
- .llseek = no_llseek,
.read = mon_bin_read,
/* .write = mon_text_write, */
.poll = mon_bin_poll,
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 3c23805ab1a4..398e02af6a2b 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -62,7 +62,6 @@ static int mon_stat_release(struct inode *inode, struct file *file)
const struct file_operations mon_fops_stat = {
.owner = THIS_MODULE,
.open = mon_stat_open,
- .llseek = no_llseek,
.read = mon_stat_read,
/* .write = mon_stat_write, */
/* .poll = mon_stat_poll, */
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 2fe9b95bac1d..68b9b2b41189 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -685,7 +685,6 @@ static int mon_text_release(struct inode *inode, struct file *file)
static const struct file_operations mon_fops_text_t = {
.owner = THIS_MODULE,
.open = mon_text_open,
- .llseek = no_llseek,
.read = mon_text_read_t,
.release = mon_text_release,
};
@@ -693,7 +692,6 @@ static const struct file_operations mon_fops_text_t = {
static const struct file_operations mon_fops_text_u = {
.owner = THIS_MODULE,
.open = mon_text_open,
- .llseek = no_llseek,
.read = mon_text_read_u,
.release = mon_text_release,
};
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 0a35aab3ab81..63c86c046b98 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -416,10 +416,9 @@ static int mtk_musb_probe(struct platform_device *pdev)
return -ENOMEM;
ret = of_platform_populate(np, NULL, NULL, dev);
- if (ret) {
- dev_err(dev, "failed to create child devices at %p\n", np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to create child devices at %p\n", np);
ret = mtk_musb_clks_get(glue);
if (ret)
@@ -448,23 +447,19 @@ static int mtk_musb_probe(struct platform_device *pdev)
glue->role = USB_ROLE_NONE;
break;
default:
- dev_err(&pdev->dev, "Error 'dr_mode' property\n");
- return -EINVAL;
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Error 'dr_mode' property\n");
}
glue->phy = devm_of_phy_get_by_index(dev, np, 0);
- if (IS_ERR(glue->phy)) {
- dev_err(dev, "fail to getting phy %ld\n",
- PTR_ERR(glue->phy));
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(dev, PTR_ERR(glue->phy),
+ "fail to getting phy\n");
glue->usb_phy = usb_phy_generic_register();
- if (IS_ERR(glue->usb_phy)) {
- dev_err(dev, "fail to registering usb-phy %ld\n",
- PTR_ERR(glue->usb_phy));
- return PTR_ERR(glue->usb_phy);
- }
+ if (IS_ERR(glue->usb_phy))
+ return dev_err_probe(dev, PTR_ERR(glue->usb_phy),
+ "fail to registering usb-phy\n");
glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 29c7e5cdb230..00e13214aa76 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -49,30 +49,6 @@ static const struct musb_hdrc_config mpfs_musb_hdrc_config = {
.ram_bits = MPFS_MUSB_RAM_BITS,
};
-static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
-{
- unsigned long flags;
- irqreturn_t ret = IRQ_NONE;
- struct musb *musb = __hci;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
- musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
- musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
-
- if (musb->int_usb || musb->int_tx || musb->int_rx) {
- musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
- musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
- musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
- ret = musb_interrupt(musb);
- }
-
- spin_unlock_irqrestore(&musb->lock, flags);
-
- return ret;
-}
-
static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
{
u8 devctl;
@@ -111,6 +87,129 @@ static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
musb_readb(musb->mregs, MUSB_DEVCTL));
}
+#define POLL_SECONDS 2
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+ * We poll because PolarFire SoC won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.Session bit).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE: setting the session flag is _supposed_ to trigger
+ * SRP but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void __maybe_unused mpfs_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ del_timer(&musb->dev_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb->dev_timer, timeout);
+}
+
+static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx) {
+ musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+ musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+ musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+ ret = musb_interrupt(musb);
+ }
+
+ /* Poll for ID change */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
static int mpfs_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
@@ -121,6 +220,8 @@ static int mpfs_musb_init(struct musb *musb)
return PTR_ERR(musb->xceiv);
}
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
musb->dyn_fifo = true;
musb->isr = mpfs_musb_interrupt;
@@ -129,14 +230,25 @@ static int mpfs_musb_init(struct musb *musb)
return 0;
}
+static int mpfs_musb_exit(struct musb *musb)
+{
+ del_timer_sync(&musb->dev_timer);
+
+ return 0;
+}
+
static const struct musb_platform_ops mpfs_ops = {
.quirks = MUSB_DMA_INVENTRA,
.init = mpfs_musb_init,
+ .exit = mpfs_musb_exit,
.fifo_mode = 2,
#ifdef CONFIG_USB_INVENTRA_DMA
.dma_init = musbhs_dma_controller_create,
.dma_exit = musbhs_dma_controller_destroy,
#endif
+#ifndef CONFIG_USB_MUSB_HOST
+ .try_idle = mpfs_musb_try_idle,
+#endif
.set_vbus = mpfs_musb_set_vbus
};
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 817c242a76ca..5428b2b67de1 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -374,6 +374,7 @@ static const struct of_device_id gpio_vbus_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, gpio_vbus_of_match);
static struct platform_driver gpio_vbus_driver = {
.driver = {
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 920a32cd094d..cc4156c1b148 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/iopoll.h>
+#include <linux/regulator/consumer.h>
#define DRIVER_NAME "mxs_phy"
@@ -70,6 +71,9 @@
#define BM_USBPHY_PLL_EN_USB_CLKS BIT(6)
/* Anatop Registers */
+#define ANADIG_REG_1P1_SET 0x114
+#define ANADIG_REG_1P1_CLR 0x118
+
#define ANADIG_ANA_MISC0 0x150
#define ANADIG_ANA_MISC0_SET 0x154
#define ANADIG_ANA_MISC0_CLR 0x158
@@ -117,6 +121,14 @@
#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
+/* System Integration Module (SIM) Registers */
+#define SIM_GPR1 0x30
+
+#define USB_PHY_VLLS_WAKEUP_EN BIT(0)
+
+#define BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG BIT(18)
+#define BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP BIT(19)
+
#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
/* Do disconnection between PHY and controller without vbus */
@@ -149,6 +161,15 @@
#define MXS_PHY_TX_D_CAL_MIN 79
#define MXS_PHY_TX_D_CAL_MAX 119
+/*
+ * On imx6q/6sl/6sx, the PHY2 clock is controlled directly by hardware,
+ * e.g. according to the PHY's suspend status. For these PHYs we only need
+ * to enable the clock at initialization and disable it in the shutdown
+ * routine. These PHYs can signal resume without software intervention if
+ * the clock is not gated.
+ */
+#define MXS_PHY_HARDWARE_CONTROL_PHY2_CLK BIT(4)
+
struct mxs_phy_data {
unsigned int flags;
};
@@ -160,12 +181,14 @@ static const struct mxs_phy_data imx23_phy_data = {
static const struct mxs_phy_data imx6q_phy_data = {
.flags = MXS_PHY_SENDING_SOF_TOO_FAST |
MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
- MXS_PHY_NEED_IP_FIX,
+ MXS_PHY_NEED_IP_FIX |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx6sl_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
- MXS_PHY_NEED_IP_FIX,
+ MXS_PHY_NEED_IP_FIX |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data vf610_phy_data = {
@@ -174,11 +197,13 @@ static const struct mxs_phy_data vf610_phy_data = {
};
static const struct mxs_phy_data imx6sx_phy_data = {
- .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx6ul_phy_data = {
- .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx7ulp_phy_data = {
@@ -201,9 +226,11 @@ struct mxs_phy {
struct clk *clk;
const struct mxs_phy_data *data;
struct regmap *regmap_anatop;
+ struct regmap *regmap_sim;
int port_id;
u32 tx_reg_set;
u32 tx_reg_mask;
+ struct regulator *phy_3p0;
};
static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
@@ -221,6 +248,11 @@ static inline bool is_imx7ulp_phy(struct mxs_phy *mxs_phy)
return mxs_phy->data == &imx7ulp_phy_data;
}
+static inline bool is_imx6ul_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6ul_phy_data;
+}
+
/*
* PHY needs some 32K cycles to switch from 32K clock to
* bus (such as AHB/AXI, etc) clock.
@@ -288,6 +320,16 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
if (ret)
goto disable_pll;
+ if (mxs_phy->phy_3p0) {
+ ret = regulator_enable(mxs_phy->phy_3p0);
+ if (ret) {
+ dev_err(mxs_phy->phy.dev,
+ "Failed to enable 3p0 regulator, ret=%d\n",
+ ret);
+ return ret;
+ }
+ }
+
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
@@ -448,6 +490,9 @@ static void mxs_phy_shutdown(struct usb_phy *phy)
if (is_imx7ulp_phy(mxs_phy))
mxs_phy_pll_enable(phy->io_priv, false);
+ if (mxs_phy->phy_3p0)
+ regulator_disable(mxs_phy->phy_3p0);
+
clk_disable_unprepare(mxs_phy->clk);
}
@@ -503,12 +548,19 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
}
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_SET);
- clk_disable_unprepare(mxs_phy->clk);
+ if (!(mxs_phy->port_id == 1 &&
+ (mxs_phy->data->flags &
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK)))
+ clk_disable_unprepare(mxs_phy->clk);
} else {
mxs_phy_clock_switch_delay();
- ret = clk_prepare_enable(mxs_phy->clk);
- if (ret)
- return ret;
+ if (!(mxs_phy->port_id == 1 &&
+ (mxs_phy->data->flags &
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK))) {
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+ }
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_CLR);
writel(0, x->io_priv + HW_USBPHY_PWD);
@@ -738,6 +790,17 @@ static int mxs_phy_probe(struct platform_device *pdev)
}
}
+ /* Currently, only imx7ulp has a SIM module */
+ if (of_get_property(np, "nxp,sim", NULL)) {
+ mxs_phy->regmap_sim = syscon_regmap_lookup_by_phandle
+ (np, "nxp,sim");
+ if (IS_ERR(mxs_phy->regmap_sim)) {
+ dev_dbg(&pdev->dev,
+ "failed to find regmap for sim\n");
+ return PTR_ERR(mxs_phy->regmap_sim);
+ }
+ }
+
/* Precompute which bits of the TX register are to be updated, if any */
if (!of_property_read_u32(np, "fsl,tx-cal-45-dn-ohms", &val) &&
val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
@@ -789,6 +852,17 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->clk = clk;
mxs_phy->data = of_device_get_match_data(&pdev->dev);
+ mxs_phy->phy_3p0 = devm_regulator_get(&pdev->dev, "phy-3p0");
+ if (PTR_ERR(mxs_phy->phy_3p0) == -ENODEV)
+ /* regulator not present */
+ mxs_phy->phy_3p0 = NULL;
+ else if (IS_ERR(mxs_phy->phy_3p0))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mxs_phy->phy_3p0),
+ "Getting regulator error\n");
+
+ if (mxs_phy->phy_3p0)
+ regulator_set_voltage(mxs_phy->phy_3p0, 3200000, 3200000);
+
platform_set_drvdata(pdev, mxs_phy);
device_set_wakeup_capable(&pdev->dev, true);
@@ -804,28 +878,58 @@ static void mxs_phy_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static void mxs_phy_wakeup_enable(struct mxs_phy *mxs_phy, bool on)
+{
+ u32 mask = USB_PHY_VLLS_WAKEUP_EN;
+
+ /* If the SoCs don't have SIM, quit */
+ if (!mxs_phy->regmap_sim)
+ return;
+
+ if (on) {
+ regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, mask);
+ udelay(500);
+ } else {
+ regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, 0);
+ }
+}
+
static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
{
- unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
+ unsigned int reg;
+ u32 value;
/* If the SoCs don't have anatop, quit */
if (!mxs_phy->regmap_anatop)
return;
- if (is_imx6q_phy(mxs_phy))
+ if (is_imx6q_phy(mxs_phy)) {
+ reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
regmap_write(mxs_phy->regmap_anatop, reg,
BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
- else if (is_imx6sl_phy(mxs_phy))
+ } else if (is_imx6sl_phy(mxs_phy)) {
+ reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
regmap_write(mxs_phy->regmap_anatop,
reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
+ } else if (is_imx6ul_phy(mxs_phy)) {
+ reg = on ? ANADIG_REG_1P1_SET : ANADIG_REG_1P1_CLR;
+ value = BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG |
+ BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP;
+ if (mxs_phy_get_vbus_status(mxs_phy) && on)
+ regmap_write(mxs_phy->regmap_anatop, reg, value);
+ else if (!on)
+ regmap_write(mxs_phy->regmap_anatop, reg, value);
+ }
}
static int mxs_phy_system_suspend(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
+ mxs_phy_wakeup_enable(mxs_phy, true);
+ }
return 0;
}
@@ -834,8 +938,10 @@ static int mxs_phy_system_resume(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
+ mxs_phy_wakeup_enable(mxs_phy, false);
+ }
return 0;
}
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index d7aa913ceb8a..c58a12c147f4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -11,6 +11,7 @@
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -21,6 +22,7 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
+ struct lock_class_key key;
struct mutex lock; /* device lock*/
struct module *module; /* the module this device depends on */
enum usb_role role;
@@ -326,6 +328,8 @@ static void usb_role_switch_release(struct device *dev)
{
struct usb_role_switch *sw = to_role_switch(dev);
+ mutex_destroy(&sw->lock);
+ lockdep_unregister_key(&sw->key);
kfree(sw);
}
@@ -364,7 +368,8 @@ usb_role_switch_register(struct device *parent,
if (!sw)
return ERR_PTR(-ENOMEM);
- mutex_init(&sw->lock);
+ lockdep_register_key(&sw->key);
+ mutex_init_with_key(&sw->lock, &sw->key);
sw->allow_userspace_control = desc->allow_userspace_control;
sw->usb2_port = desc->usb2_port;
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index a1df686c3066..aa517242d060 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -138,7 +138,6 @@ static void aircable_process_read_urb(struct urb *urb)
static struct usb_serial_driver aircable_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "aircable",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 67a07cc007f0..800b04fe37fa 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -599,7 +599,6 @@ static void ark3116_process_read_urb(struct urb *urb)
static struct usb_serial_driver ark3116_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ark3116",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index cf47ee4ae5d3..44f5b58beec9 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
/* All of the device info needed for the serial converters */
static struct usb_serial_driver belkin_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "belkin",
},
.description = "Belkin / Peracom / GoHubs USB Serial Adapter",
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 0870c6533f80..02945ccf531d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -837,7 +837,6 @@ static int ch341_reset_resume(struct usb_serial *serial)
static struct usb_serial_driver ch341_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ch341-uart",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 21fd26609252..c24101f0a07a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -299,7 +299,6 @@ struct cp210x_port_private {
static struct usb_serial_driver cp210x_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cp210x",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 51e5aac3bf4c..76dd8e7453b5 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -67,7 +67,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver cyberjack_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cyberjack",
},
.description = "Reiner SCT Cyberjack USB card reader",
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 1e0c028c5ec9..ce9134bb30f3 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -139,7 +139,6 @@ static void cypress_write_int_callback(struct urb *urb);
static struct usb_serial_driver cypress_earthmate_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "earthmate",
},
.description = "DeLorme Earthmate USB",
@@ -166,7 +165,6 @@ static struct usb_serial_driver cypress_earthmate_device = {
static struct usb_serial_driver cypress_hidcom_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cyphidcom",
},
.description = "HID->COM RS232 Adapter",
@@ -192,7 +190,6 @@ static struct usb_serial_driver cypress_hidcom_device = {
static struct usb_serial_driver cypress_ca42v2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "nokiaca42v2",
},
.description = "Nokia CA-42 V2 Adapter",
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index d1dea3850576..a06485965412 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -262,7 +262,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_serial_driver digi_acceleport_2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "digi_2",
},
.description = "Digi 2 port USB adapter",
@@ -293,7 +292,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
static struct usb_serial_driver digi_acceleport_4_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "digi_4",
},
.description = "Digi 4 port USB adapter",
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 405e835e93dd..aedcf7ebd269 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -43,7 +43,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver empeg_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "empeg",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 5f7a46bcace6..530b77fc2f78 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -967,7 +967,6 @@ static int f81232_resume(struct usb_serial *serial)
static struct usb_serial_driver f81232_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81232",
},
.id_table = f81232_id_table,
@@ -994,7 +993,6 @@ static struct usb_serial_driver f81232_device = {
static struct usb_serial_driver f81534a_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81534a",
},
.id_table = f81534a_id_table,
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index ef126cd3d94f..685930ac8383 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1538,7 +1538,6 @@ static int f81534_resume(struct usb_serial *serial)
static struct usb_serial_driver f81534_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81534",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 76a04ab41100..c6f17d732b95 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2871,7 +2871,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
static struct usb_serial_driver ftdi_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ftdi_sio",
.dev_groups = ftdi_groups,
},
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 6d6ec7eed87c..b97ba8ed6801 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1412,7 +1412,6 @@ static void garmin_port_remove(struct usb_serial_port *port)
/* All of the device info needed */
static struct usb_serial_driver garmin_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "garmin_gps",
},
.description = "Garmin GPS usb/tty",
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 15b6dee3a8e5..757f0a586ddb 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -63,7 +63,6 @@ static int usb_serial_generic_calc_num_ports(struct usb_serial *serial,
static struct usb_serial_driver usb_serial_generic_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "generic",
},
.id_table = generic_device_ids,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index abe4bbb0ac65..c7d6b5e3f898 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2978,7 +2978,6 @@ static void edge_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_2",
},
.description = "Edgeport 2 port adapter",
@@ -3013,7 +3012,6 @@ static struct usb_serial_driver edgeport_2port_device = {
static struct usb_serial_driver edgeport_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_4",
},
.description = "Edgeport 4 port adapter",
@@ -3048,7 +3046,6 @@ static struct usb_serial_driver edgeport_4port_device = {
static struct usb_serial_driver edgeport_8port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_8",
},
.description = "Edgeport 8 port adapter",
@@ -3083,7 +3080,6 @@ static struct usb_serial_driver edgeport_8port_device = {
static struct usb_serial_driver epic_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "epic",
},
.description = "EPiC device",
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 7a3a6e539456..7d0584b2a234 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2670,7 +2670,6 @@ static int edge_resume(struct usb_serial *serial)
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_ti_1",
},
.description = "Edgeport TI 1 port adapter",
@@ -2708,7 +2707,6 @@ static struct usb_serial_driver edgeport_1port_device = {
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_ti_2",
},
.description = "Edgeport TI 2 port adapter",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index e11441bac44f..3c6a9b9b9c2b 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -496,7 +496,6 @@ MODULE_DEVICE_TABLE(usb, ipaq_id_table);
/* All of the device info needed for the Compaq iPAQ */
static struct usb_serial_driver ipaq_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ipaq",
},
.description = "PocketPC PDA",
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index d04c7cc5c1c2..b1e672c2f423 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -285,7 +285,6 @@ static void ipw_close(struct usb_serial_port *port)
static struct usb_serial_driver ipw_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ipw",
},
.description = "IPWireless converter",
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 82f108134e6f..a106b71e698f 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -71,7 +71,6 @@ MODULE_DEVICE_TABLE(usb, ir_id_table);
static struct usb_serial_driver ir_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ir-usb",
},
.description = "IR Dongle",
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 77cba71bcccb..c21dcc9b6f05 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1157,7 +1157,6 @@ static int iuu_remove_sysfs_attrs(struct usb_serial_port *port)
static struct usb_serial_driver iuu_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "iuu_phoenix",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 0a783985197c..9129e0282c24 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -3001,7 +3001,6 @@ static void keyspan_port_remove(struct usb_serial_port *port)
/* Structs for the devices, pre and post renumeration. */
static struct usb_serial_driver keyspan_pre_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_no_firm",
},
.description = "Keyspan - (without firmware)",
@@ -3012,7 +3011,6 @@ static struct usb_serial_driver keyspan_pre_device = {
static struct usb_serial_driver keyspan_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_1",
},
.description = "Keyspan 1 port adapter",
@@ -3036,7 +3034,6 @@ static struct usb_serial_driver keyspan_1port_device = {
static struct usb_serial_driver keyspan_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_2",
},
.description = "Keyspan 2 port adapter",
@@ -3060,7 +3057,6 @@ static struct usb_serial_driver keyspan_2port_device = {
static struct usb_serial_driver keyspan_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_4",
},
.description = "Keyspan 4 port adapter",
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 0eef358b314a..e98b479593d3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -676,7 +676,6 @@ static void keyspan_pda_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver keyspan_pda_fake_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_pda_pre",
},
.description = "Keyspan PDA - (prerenumeration)",
@@ -687,7 +686,6 @@ static struct usb_serial_driver keyspan_pda_fake_device = {
static struct usb_serial_driver keyspan_pda_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_pda",
},
.description = "Keyspan PDA",
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 394b3189e003..a2c0bebc041f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -75,7 +75,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kl5kusb105d_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "kl5kusb105d",
},
.description = "KL5KUSB105D / PalmConnect",
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 5e775f68fcb8..464433be2034 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -77,7 +77,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kobil_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "kobil",
},
.description = "KOBIL USB smart card terminal",
@@ -156,8 +155,7 @@ static void kobil_init_termios(struct tty_struct *tty)
{
/* Default to echo off and other sane device settings */
tty->termios.c_lflag = 0;
- tty->termios.c_iflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE);
- tty->termios.c_iflag |= IGNBRK | IGNPAR | IXOFF;
+ tty->termios.c_iflag = IGNBRK | IGNPAR | IXOFF;
/* do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) */
tty->termios.c_oflag &= ~ONLCR;
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 6570c8817a80..e5a139ed5d90 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -69,7 +69,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver mct_u232_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mct_u232",
},
.description = "MCT U232",
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 30ab565e0738..028878292901 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -341,7 +341,6 @@ static void metrousb_unthrottle(struct tty_struct *tty)
static struct usb_serial_driver metrousb_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "metro-usb",
},
.description = "Metrologic USB to Serial",
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 23544074eb1c..e59bfa7c8030 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1724,7 +1724,6 @@ static void mos7720_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver moschip7720_2port_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "moschip7720",
},
.description = "Moschip 2 port adapter",
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 85697466b147..ca3da79afd23 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1782,7 +1782,6 @@ static int mos7840_resume(struct usb_serial *serial)
static struct usb_serial_driver moschip7840_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mos7840",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 942cb0153423..57e4f2b215d8 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1278,7 +1278,6 @@ static int mxuport_resume(struct usb_serial *serial)
static struct usb_serial_driver mxuport_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mxuport",
},
.description = "MOXA UPort",
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 82791fd67c46..4d57a5902292 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -95,7 +95,6 @@ static int navman_write(struct tty_struct *tty, struct usb_serial_port *port,
static struct usb_serial_driver navman_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "navman",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 41f1b872d277..397ebd5a3e74 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -49,7 +49,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net usb",
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index e31a6d77da3a..1ee84ccc4bbd 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -375,7 +375,6 @@ static void opticon_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver opticon_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "opticon",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 176f38750ad5..eb0731992ca9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2381,7 +2381,6 @@ MODULE_DEVICE_TABLE(usb, option_ids);
static struct usb_serial_driver option_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "option1",
},
.description = "GSM modem (1-port)",
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index fa07f6ff9ecc..24068368892c 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -138,7 +138,6 @@ static void oti6858_port_remove(struct usb_serial_port *port);
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "oti6858",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index d93f5d584557..ab48f8875249 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
+ { USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -1234,7 +1235,6 @@ static void pl2303_process_read_urb(struct urb *urb)
static struct usb_serial_driver pl2303_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "pl2303",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 732f9b13ad5d..d60eda7f6eda 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -171,3 +171,7 @@
/* Allied Telesis VT-Kit3 */
#define AT_VENDOR_ID 0x0caa
#define AT_VTKIT3_PRODUCT_ID 0x3001
+
+/* Macrosilicon MS3020 */
+#define MACROSILICON_VENDOR_ID 0x345f
+#define MACROSILICON_MS3020_PRODUCT_ID 0x3020
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 015bb7c5d19d..485ec5b07122 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -72,7 +72,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver qcaux_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "qcaux",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 703a9c563557..c7de9585feb2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -454,7 +454,6 @@ static void qc_release(struct usb_serial *serial)
static struct usb_serial_driver qcdevice = {
.driver = {
- .owner = THIS_MODULE,
.name = "qcserial",
},
.description = "Qualcomm USB modem",
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 821f25e52ec2..4167a45d1be3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -924,7 +924,6 @@ write_out:
static struct usb_serial_driver qt2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "quatech-serial",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 6accbecb6318..238b54993446 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -284,7 +284,6 @@ static int safe_startup(struct usb_serial *serial)
static struct usb_serial_driver safe_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "safe_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 353b2549eaa8..64a2e0bb5723 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1021,7 +1021,6 @@ static int sierra_resume(struct usb_serial *serial)
static struct usb_serial_driver sierra_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "sierra",
},
.description = "Sierra USB modem",
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 6b294bf8bc43..11077beb7232 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -452,7 +452,6 @@ static int spcp8x5_tiocmget(struct tty_struct *tty)
static struct usb_serial_driver spcp8x5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "SPCP8x5",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 1e1888b66305..df21009bdf42 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -500,7 +500,6 @@ static void ssu100_process_read_urb(struct urb *urb)
static struct usb_serial_driver ssu100_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ssu100",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 9aabb087f733..58962bcbf9ba 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -169,7 +169,6 @@ static void symbol_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver symbol_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "symbol",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0fba25abf671..a0c244bc77c0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -417,7 +417,6 @@ static const struct usb_device_id ti_id_table_combined[] = {
static struct usb_serial_driver ti_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ti_usb_3410_5052_1",
},
.description = "TI USB 3410 1 port adapter",
@@ -450,7 +449,6 @@ static struct usb_serial_driver ti_1port_device = {
static struct usb_serial_driver ti_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ti_usb_3410_5052_2",
},
.description = "TI USB 5052 2 port adapter",
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 46952182e04f..15a17bf111f1 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -407,7 +407,6 @@ static void upd78f0730_close(struct usb_serial_port *port)
static struct usb_serial_driver upd78f0730_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "upd78f0730",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 82f4f0b992aa..2c12449ff60c 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -24,7 +24,6 @@ static const struct usb_device_id vendor##_id_table[] = { \
}; \
static struct usb_serial_driver vendor##_device = { \
.driver = { \
- .owner = THIS_MODULE, \
.name = #vendor, \
}, \
.id_table = vendor##_id_table, \
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f1e91eb7f8a4..df6a2ae0bf42 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1459,17 +1459,18 @@ static void usb_serial_deregister(struct usb_serial_driver *device)
}
/**
- * usb_serial_register_drivers - register drivers for a usb-serial module
+ * __usb_serial_register_drivers - register drivers for a usb-serial module
* @serial_drivers: NULL-terminated array of pointers to drivers to be registered
+ * @owner: owning module
* @name: name of the usb_driver for this set of @serial_drivers
* @id_table: list of all devices this @serial_drivers set binds to
*
* Registers all the drivers in the @serial_drivers array, and dynamically
* creates a struct usb_driver with the name @name and id_table of @id_table.
*/
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name,
- const struct usb_device_id *id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table)
{
int rc;
struct usb_driver *udriver;
@@ -1514,6 +1515,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
+ (*sd)->driver.owner = owner;
rc = usb_serial_register(*sd);
if (rc)
goto err_deregister_drivers;
@@ -1532,7 +1534,7 @@ err_free_driver:
kfree(udriver);
return rc;
}
-EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
+EXPORT_SYMBOL_GPL(__usb_serial_register_drivers);
/**
* usb_serial_deregister_drivers - deregister drivers for a usb-serial module
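
The kernel-doc above shows that __usb_serial_register_drivers() now takes an explicit @owner and sets driver.owner for every entry, which is why the explicit ".owner = THIS_MODULE" initializers are dropped from all usb-serial drivers in this series. The following is an illustrative sketch only (not part of the patch) of what a driver looks like after the change; it assumes the usb_serial_register_drivers()/module_usb_serial_driver() wrappers in <linux/usb/serial.h> now forward THIS_MODULE as @owner (that header change is not shown here), and the "example" name and VID/PID are made up:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static struct usb_serial_driver example_device = {
	.driver = {
		.name = "example",	/* no .owner: filled in by the core */
	},
	.id_table = example_id_table,
	.num_ports = 1,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&example_device, NULL
};

/*
 * Expands to module init/exit calling usb_serial_register_drivers(), which
 * is assumed to pass THIS_MODULE through to __usb_serial_register_drivers().
 */
module_usb_serial_driver(serial_drivers, example_id_table);

MODULE_DESCRIPTION("example usb-serial driver (illustration only)");
MODULE_LICENSE("GPL");
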
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 61a8425b7762..ec9fff794b36 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -83,7 +83,6 @@ static void usb_debug_init_termios(struct tty_struct *tty)
static struct usb_serial_driver debug_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "debug",
},
.id_table = id_table,
@@ -96,7 +95,6 @@ static struct usb_serial_driver debug_device = {
static struct usb_serial_driver dbc_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xhci_dbc",
},
.id_table = dbc_id_table,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 4412834db21c..062a38fe0c1c 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined);
and Palm 4.0 devices */
static struct usb_serial_driver handspring_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "visor",
},
.description = "Handspring Visor / Palm OS",
@@ -180,7 +179,6 @@ static struct usb_serial_driver handspring_device = {
/* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */
static struct usb_serial_driver clie_5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "clie_5",
},
.description = "Sony Clie 5.0",
@@ -200,7 +198,6 @@ static struct usb_serial_driver clie_5_device = {
/* device info for the Sony Clie OS version 3.5 */
static struct usb_serial_driver clie_3_5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "clie_3.5",
},
.description = "Sony Clie 3.5",
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index ca48e90a8e81..009faeb2ef55 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -91,7 +91,6 @@ static int whiteheat_break_ctl(struct tty_struct *tty, int break_state);
static struct usb_serial_driver whiteheat_fake_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "whiteheatnofirm",
},
.description = "Connect Tech - WhiteHEAT - (prerenumeration)",
@@ -103,7 +102,6 @@ static struct usb_serial_driver whiteheat_fake_device = {
static struct usb_serial_driver whiteheat_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "whiteheat",
},
.description = "Connect Tech - WhiteHEAT",
diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c
index ff4092f9b33c..670d573f6b63 100644
--- a/drivers/usb/serial/wishbone-serial.c
+++ b/drivers/usb/serial/wishbone-serial.c
@@ -70,7 +70,6 @@ static void wishbone_serial_close(struct usb_serial_port *port)
static struct usb_serial_driver wishbone_serial_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "wishbone_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index 1d9a12628f81..4186e791b384 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -1082,7 +1082,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver xr_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xr_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
index cf262c9a9638..382b3698c1d5 100644
--- a/drivers/usb/serial/xsens_mt.c
+++ b/drivers/usb/serial/xsens_mt.c
@@ -49,7 +49,6 @@ static int xsens_mt_probe(struct usb_serial *serial,
static struct usb_serial_driver xsens_mt_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xsens_mt",
},
.id_table = id_table,
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 40d34cc28344..a9d3c58ce7d9 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -132,7 +132,7 @@ static int init_alauda(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id alauda_usb_ids[] = {
+static const struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
@@ -154,7 +154,7 @@ MODULE_DEVICE_TABLE(usb, alauda_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev alauda_unusual_dev_list[] = {
+static const struct us_unusual_dev alauda_unusual_dev_list[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 98b3ec352a13..30dfd0082474 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -33,7 +33,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id cypress_usb_ids[] = {
+static const struct usb_device_id cypress_usb_ids[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(usb, cypress_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev cypress_unusual_dev_list[] = {
+static const struct us_unusual_dev cypress_unusual_dev_list[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index bcc4a2fad863..3ea5601d16b8 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -80,7 +80,7 @@ static int datafab_determine_lun(struct us_data *us,
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id datafab_usb_ids[] = {
+static const struct usb_device_id datafab_usb_ids[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
@@ -102,7 +102,7 @@ MODULE_DEVICE_TABLE(usb, datafab_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev datafab_unusual_dev_list[] = {
+static const struct us_unusual_dev datafab_unusual_dev_list[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 97c66c0d91f4..a4bfbecbf16c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -43,7 +43,7 @@ MODULE_FIRMWARE(MS_RW_FIRMWARE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)}
-static struct usb_device_id ene_ub6250_usb_ids[] = {
+static const struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -65,7 +65,7 @@ MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
+static const struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -1484,7 +1484,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
u32 bl_num;
- u16 bl_len;
+ u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index c3ce51c2dabd..cab27ba7a32a 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id freecom_usb_ids[] = {
+static const struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
@@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(usb, freecom_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev freecom_unusual_dev_list[] = {
+static const struct us_unusual_dev freecom_unusual_dev_list[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 300aeef160e7..f2254eb3c0d7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -67,7 +67,7 @@ static int isd200_Initialization(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id isd200_usb_ids[] = {
+static const struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
@@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(usb, isd200_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev isd200_unusual_dev_list[] = {
+static const struct us_unusual_dev isd200_unusual_dev_list[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 229bf0c1afc9..0e71a8f33c2b 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -62,7 +62,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id jumpshot_usb_ids[] = {
+static const struct usb_device_id jumpshot_usb_ids[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev jumpshot_unusual_dev_list[] = {
+static const struct us_unusual_dev jumpshot_unusual_dev_list[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 38ddfedef629..d6a5e54f2ca8 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -51,7 +51,7 @@ static int rio_karma_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id karma_usb_ids[] = {
+static const struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
@@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(usb, karma_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev karma_unusual_dev_list[] = {
+static const struct us_unusual_dev karma_unusual_dev_list[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 01f3c2779ccf..f97cf6cadb8e 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -55,7 +55,7 @@ struct usb_onetouch {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id onetouch_usb_ids[] = {
+static const struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
@@ -77,7 +77,7 @@ MODULE_DEVICE_TABLE(usb, onetouch_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev onetouch_unusual_dev_list[] = {
+static const struct us_unusual_dev onetouch_unusual_dev_list[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 51bcd4a43690..03d1b9c69ea1 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -63,7 +63,7 @@ static int usb_stor_sddr09_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id sddr09_usb_ids[] = {
+static const struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(usb, sddr09_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev sddr09_unusual_dev_list[] = {
+static const struct us_unusual_dev sddr09_unusual_dev_list[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 0aa079405d23..b8227478a7ad 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -40,7 +40,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id sddr55_usb_ids[] = {
+static const struct usb_device_id sddr55_usb_ids[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(usb, sddr55_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev sddr55_unusual_dev_list[] = {
+static const struct us_unusual_dev sddr55_unusual_dev_list[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index f0d0ca37163d..e7c224b7c464 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -162,7 +162,7 @@ static int init_usbat_flash(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id usbat_usb_ids[] = {
+static const struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
@@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(usb, usbat_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev usbat_unusual_dev_list[] = {
+static const struct us_unusual_dev usbat_unusual_dev_list[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index c223b4dc1b19..03043d567fa1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -927,7 +927,7 @@ static const struct scsi_host_template uas_host_template = {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id uas_usb_ids[] = {
+static const struct usb_device_id uas_usb_ids[] = {
# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 31e3e9544bc0..d1e7c487ddfb 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -6,6 +6,7 @@
* Copyright(c) 2022, Analogix Semiconductor. All rights reserved.
*
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -884,8 +885,8 @@ static void anx7411_chip_standby(struct anx7411_data *ctx)
OCM_RESET);
ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80);
/* Set TCPC to RD and DRP enable */
- cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
- cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ cc1 = FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
+ cc2 = FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL,
TCPC_ROLE_CTRL_DRP | cc1 | cc2);
@@ -1571,6 +1572,7 @@ static const struct of_device_id anx_match_table[] = {
{.compatible = "analogix,anx7411",},
{},
};
+MODULE_DEVICE_TABLE(of, anx_match_table);
static struct i2c_driver anx7411_driver = {
.driver = {
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index f8504a90da26..22163d8f9eb0 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -5,6 +5,7 @@
* USB-C module to reduce wakeups due to contaminants.
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
@@ -45,14 +46,9 @@ enum fladc_select {
#define READ1_SLEEP_MS 10
#define READ2_SLEEP_MS 5
-#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val))
-
#define IS_CC_OPEN(cc_status) \
- (STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \
- TCPC_CC_STATE_SRC_OPEN) && STATUS_CHECK((cc_status), \
- TCPC_CC_STATUS_CC2_MASK << \
- TCPC_CC_STATUS_CC2_SHIFT, \
- TCPC_CC_STATE_SRC_OPEN))
+ (FIELD_GET(TCPC_CC_STATUS_CC1, cc_status) == TCPC_CC_STATE_SRC_OPEN \
+ && FIELD_GET(TCPC_CC_STATUS_CC2, cc_status) == TCPC_CC_STATE_SRC_OPEN)
static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
bool ua_src, u8 fladc)
@@ -80,8 +76,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s
int ret;
/* Channel & scale select */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK,
- channel << ADC_CHANNEL_OFFSET);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL,
+ FIELD_PREP(ADCINSEL, channel));
if (ret < 0)
return ret;
@@ -100,7 +96,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL,
+ FIELD_PREP(ADCINSEL, 0));
if (ret < 0)
return ret;
@@ -120,13 +117,14 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 ||
channel == CC2_SCALE2) {
/* Enable 1uA current source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
- ULTRA_LOW_POWER_MODE);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL, ULTRA_LOW_POWER_MODE));
if (ret < 0)
return ret;
/* Enable 1uA current source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, UA_1_SRC));
if (ret < 0)
return ret;
@@ -180,7 +178,8 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
int ret;
/* Enable 80uA source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, UA_80_SRC));
if (ret < 0)
return ret;
@@ -213,7 +212,8 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, 0));
if (ret < 0)
return ret;
@@ -284,10 +284,11 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
u8 temp;
int ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, CCWTRDEB_MASK | CCWTRSEL_MASK
- | WTRCYCLE_MASK, CCWTRDEB_1MS << CCWTRDEB_SHIFT |
- CCWTRSEL_1V << CCWTRSEL_SHIFT | WTRCYCLE_4_8_S <<
- WTRCYCLE_SHIFT);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3,
+ CCWTRDEB | CCWTRSEL | WTRCYCLE,
+ FIELD_PREP(CCWTRDEB, CCWTRDEB_1MS)
+ | FIELD_PREP(CCWTRSEL, CCWTRSEL_1V)
+ | FIELD_PREP(WTRCYCLE, WTRCYCLE_4_8_S));
if (ret < 0)
return ret;
@@ -302,8 +303,9 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
- ULTRA_LOW_POWER_MODE);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL,
+ ULTRA_LOW_POWER_MODE));
if (ret < 0)
return ret;
ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp);
@@ -322,11 +324,14 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
return 0;
}
-bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce)
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
+ bool *cc_handled)
{
u8 cc_status, pwr_cntl;
int ret;
+ *cc_handled = true;
+
ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
if (ret < 0)
return false;
@@ -368,9 +373,8 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
return true;
}
}
- return false;
} else if (chip->contaminant_state == DETECTED) {
- if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) {
+ if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) {
chip->contaminant_state = max_contaminant_detect_contaminant(chip);
if (chip->contaminant_state == DETECTED) {
max_contaminant_enable_dry_detection(chip);
@@ -379,6 +383,7 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
}
}
+ *cc_handled = false;
return false;
}
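These conversions, like the TCPCI ones that follow, replace open-coded shift-and-mask arithmetic (and the local STATUS_CHECK() helper) with the <linux/bitfield.h> macros: each register field is described once as a GENMASK(), and FIELD_PREP()/FIELD_GET() derive the shift from that mask at compile time. A minimal sketch of the idiom, using hypothetical field names that are not part of this patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CC1	GENMASK(1, 0)	/* hypothetical 2-bit field at bits 1:0 */
#define EXAMPLE_CC2	GENMASK(3, 2)	/* hypothetical 2-bit field at bits 3:2 */

static u8 example_pack(u8 cc1, u8 cc2)
{
	/* FIELD_PREP() masks the value and shifts it into position */
	return FIELD_PREP(EXAMPLE_CC1, cc1) | FIELD_PREP(EXAMPLE_CC2, cc2);
}

static bool example_both_equal(u8 reg, u8 val)
{
	/* FIELD_GET() extracts a field and shifts it back down to bit 0 */
	return FIELD_GET(EXAMPLE_CC1, reg) == val &&
	       FIELD_GET(EXAMPLE_CC2, reg) == val;
}

Describing the field only by its mask removes the paired *_MASK/*_SHIFT definitions, which is exactly what the tcpci_maxim.h hunk later in this patch does.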
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 3e3dcb983dde..ed32583829be 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -5,6 +5,7 @@
* USB Type-C Port Controller Interface.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -103,45 +104,45 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
switch (cc) {
case TYPEC_CC_RA:
- reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RA)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RA));
break;
case TYPEC_CC_RD:
- reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
break;
case TYPEC_CC_RP_DEF:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF));
break;
case TYPEC_CC_RP_1_5:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5));
break;
case TYPEC_CC_RP_3_0:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0));
break;
case TYPEC_CC_OPEN:
default:
- reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN));
break;
}
if (vconn_pres) {
if (polarity == TYPEC_POLARITY_CC2) {
- reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
- reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC1;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
} else {
- reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
- reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC2;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
}
}
@@ -167,15 +168,11 @@ static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
* APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
* disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
*/
- if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
- TCPC_ROLE_CTRL_CC2_SHIFT) !=
- ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
- TCPC_ROLE_CTRL_CC1_SHIFT))
+ if (FIELD_GET(TCPC_ROLE_CTRL_CC2, reg) != FIELD_GET(TCPC_ROLE_CTRL_CC1, reg))
return 0;
return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
- TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT :
- TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT,
+ TCPC_ROLE_CTRL_CC2 : TCPC_ROLE_CTRL_CC1,
TCPC_ROLE_CTRL_CC_OPEN);
}
@@ -200,25 +197,25 @@ static int tcpci_start_toggling(struct tcpc_dev *tcpc,
switch (cc) {
default:
case TYPEC_CC_RP_DEF:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF);
break;
case TYPEC_CC_RP_1_5:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5);
break;
case TYPEC_CC_RP_3_0:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0);
break;
}
if (cc == TYPEC_CC_RD)
- reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
else
- reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP));
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
@@ -241,12 +238,10 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
if (ret < 0)
return ret;
- *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
- TCPC_CC_STATUS_CC1_MASK,
+ *cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, reg),
reg & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role_control, CC1));
- *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
- TCPC_CC_STATUS_CC2_MASK,
+ *cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, reg),
reg & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role_control, CC2));
@@ -282,28 +277,28 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
reg = reg & ~TCPC_ROLE_CTRL_DRP;
if (polarity == TYPEC_POLARITY_CC2) {
- reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC2;
/* Local port is source */
if (cc2 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
- reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
else
- reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
} else {
- reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC1;
/* Local port is source */
if (cc1 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
- reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP);
else
- reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
}
}
if (polarity == TYPEC_POLARITY_CC2)
- reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
else
- reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
@@ -461,7 +456,7 @@ static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
unsigned int reg;
int ret;
- reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
+ reg = FIELD_PREP(TCPC_MSG_HDR_INFO_REV, PD_REV20);
if (role == TYPEC_SOURCE)
reg |= TCPC_MSG_HDR_INFO_PWR_ROLE;
if (data == TYPEC_HOST)
@@ -612,8 +607,11 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type
}
/* nRetryCount is 3 in PD2.0 spec where 2 in PD3.0 spec */
- reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT)
- << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
+ reg = FIELD_PREP(TCPC_TRANSMIT_RETRY,
+ (negotiated_rev > PD_REV20
+ ? PD_RETRY_COUNT_3_0_OR_HIGHER
+ : PD_RETRY_COUNT_DEFAULT));
+ reg |= FIELD_PREP(TCPC_TRANSMIT_TYPE, type);
ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
if (ret < 0)
return ret;
@@ -709,10 +707,13 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
{
u16 status;
int ret;
+ int irq_ret;
unsigned int raw;
tcpci_read16(tcpci, TCPC_ALERT, &status);
+ irq_ret = status & tcpci->alert_mask;
+process_status:
/*
* Clear alert status for everything except RX_STATUS, which shouldn't
* be cleared until we have successfully retrieved message.
@@ -785,7 +786,12 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
else if (status & TCPC_ALERT_TX_FAILED)
tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
- return IRQ_RETVAL(status & tcpci->alert_mask);
+ tcpci_read16(tcpci, TCPC_ALERT, &status);
+
+ if (status & tcpci->alert_mask)
+ goto process_status;
+
+ return IRQ_RETVAL(irq_ret);
}
EXPORT_SYMBOL_GPL(tcpci_irq);
@@ -917,20 +923,22 @@ static int tcpci_probe(struct i2c_client *client)
chip->data.set_orientation = err;
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- if (IS_ERR(chip->tcpci))
- return PTR_ERR(chip->tcpci);
-
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
- IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), chip);
- if (err < 0) {
- tcpci_unregister_port(chip->tcpci);
+ if (err < 0)
return err;
- }
- return 0;
+ /*
+	 * Disable the irq while registering the port. If the irq is configured
+	 * as an edge irq, this allows a pending irq to be tracked and processed
+	 * as soon as it is re-enabled.
+ */
+ disable_irq(client->irq);
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ enable_irq(client->irq);
+
+ return PTR_ERR_OR_ZERO(chip->tcpci);
}
static void tcpci_remove(struct i2c_client *client)
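Two related changes land in tcpci.c: tcpci_irq() now re-reads TCPC_ALERT before returning and loops back to process_status while new alert bits are set, and the probe path requests the interrupt before registering the port, keeping it disabled across registration so that an edge arriving in between stays pending instead of being lost. A hedged sketch of that probe ordering; the example_* names are invented for illustration only:

#include <linux/i2c.h>
#include <linux/interrupt.h>

/* Hypothetical handler and registration step, standing in for the real ones. */
static irqreturn_t example_irq_thread(int irq, void *data) { return IRQ_HANDLED; }
static int example_register_port(void *chip) { return 0; }

static int example_probe(struct i2c_client *client, void *chip)
{
	int err;

	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					example_irq_thread,
					IRQF_SHARED | IRQF_ONESHOT,
					dev_name(&client->dev), chip);
	if (err < 0)
		return err;

	disable_irq(client->irq);	/* edges arriving now stay pending */
	err = example_register_port(chip);
	enable_irq(client->irq);	/* a pending edge is delivered right here */

	return err;
}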
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 78ff3b73ee7e..76270d5c2838 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -20,28 +20,24 @@
#define SBUOVPDIS BIT(7)
#define CCOVPDIS BIT(6)
#define SBURPCTRL BIT(5)
-#define CCLPMODESEL_MASK GENMASK(4, 3)
-#define ULTRA_LOW_POWER_MODE BIT(3)
-#define CCRPCTRL_MASK GENMASK(2, 0)
+#define CCLPMODESEL GENMASK(4, 3)
+#define ULTRA_LOW_POWER_MODE 1
+#define CCRPCTRL GENMASK(2, 0)
#define UA_1_SRC 1
#define UA_80_SRC 3
#define TCPC_VENDOR_CC_CTRL3 0x8e
-#define CCWTRDEB_MASK GENMASK(7, 6)
-#define CCWTRDEB_SHIFT 6
+#define CCWTRDEB GENMASK(7, 6)
#define CCWTRDEB_1MS 1
-#define CCWTRSEL_MASK GENMASK(5, 3)
-#define CCWTRSEL_SHIFT 3
+#define CCWTRSEL GENMASK(5, 3)
#define CCWTRSEL_1V 0x4
#define CCLADDERDIS BIT(2)
-#define WTRCYCLE_MASK BIT(0)
-#define WTRCYCLE_SHIFT 0
+#define WTRCYCLE GENMASK(0, 0)
#define WTRCYCLE_2_4_S 0
#define WTRCYCLE_4_8_S 1
#define TCPC_VENDOR_ADC_CTRL1 0x91
-#define ADCINSEL_MASK GENMASK(7, 5)
-#define ADC_CHANNEL_OFFSET 5
+#define ADCINSEL GENMASK(7, 5)
#define ADCEN BIT(0)
enum contamiant_state {
@@ -85,6 +81,20 @@ static inline int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg
return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
}
-bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce);
+/**
+ * max_contaminant_is_contaminant - Test if CC was toggled due to contaminant
+ *
+ * @chip: Handle to a struct max_tcpci_chip
+ * @disconnect_while_debounce: Whether the disconnect was detected when CC
+ * pins were debouncing
+ * @cc_handled: Returns whether the CC status update was handled here
+ *
+ * Determine if a contaminant was detected.
+ *
+ * Returns: true if a contaminant was detected, false otherwise. cc_handled is
+ * set when the CC status change was fully handled here, telling the caller
+ * whether any further CC handling is required.
+ */
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
+ bool *cc_handled);
#endif // TCPCI_MAXIM_H_
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 760e2f92b958..fd1b80593367 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -97,11 +97,13 @@ static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
if (ret < 0)
return;
- alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
- TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
- TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
- /* Enable Extended alert for detecting Fast Role Swap Signal */
- TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | TCPC_ALERT_FAULT;
+ alert_mask = (TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS |
+ TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS |
+ TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT);
ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
if (ret < 0) {
@@ -191,9 +193,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
* Read complete, clear RX status alert bit.
* Clear overflow as well if set.
*/
- ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ?
- TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF :
- TCPC_ALERT_RX_STATUS);
+ ret = max_tcpci_write16(chip, TCPC_ALERT,
+ TCPC_ALERT_RX_STATUS | (status & TCPC_ALERT_RX_BUF_OVF));
if (ret < 0)
return;
@@ -295,9 +296,8 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
* be cleared until we have successfully retrieved message.
*/
if (status & ~TCPC_ALERT_RX_STATUS) {
- mask = status & TCPC_ALERT_RX_BUF_OVF ?
- status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) :
- status & ~TCPC_ALERT_RX_STATUS;
+ mask = status & ~(TCPC_ALERT_RX_STATUS
+ | (status & TCPC_ALERT_RX_BUF_OVF));
ret = max_tcpci_write16(chip, TCPC_ALERT, mask);
if (ret < 0) {
dev_err(chip->dev, "ALERT clear failed\n");
@@ -357,12 +357,14 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
tcpm_vbus_change(chip->port);
if (status & TCPC_ALERT_CC_STATUS) {
+ bool cc_handled = false;
+
if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) {
- if (!max_contaminant_is_contaminant(chip, false))
+ if (!max_contaminant_is_contaminant(chip, false, &cc_handled))
tcpm_port_clean(chip->port);
- } else {
- tcpm_cc_change(chip->port);
}
+ if (!cc_handled)
+ tcpm_cc_change(chip->port);
}
if (status & TCPC_ALERT_POWER_STATUS)
@@ -397,7 +399,7 @@ static irqreturn_t max_tcpci_irq(int irq, void *dev_id)
}
while (status) {
irq_return = _max_tcpci_irq(chip, status);
- /* Do not return if the ALERT is already set. */
+ /* Do not return if a (new) ALERT is set (again). */
ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
if (ret < 0)
break;
@@ -455,8 +457,9 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata)
{
struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+ bool cc_handled;
- if (!max_contaminant_is_contaminant(chip, true))
+ if (!max_contaminant_is_contaminant(chip, true, &cc_handled))
tcpm_port_clean(chip->port);
}
@@ -472,6 +475,11 @@ static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct t
return true;
}
+static void max_tcpci_unregister_tcpci_port(void *tcpci)
+{
+ tcpci_unregister_port(tcpci);
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -484,17 +492,17 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->client = client;
chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config);
- if (IS_ERR(chip->data.regmap)) {
- dev_err(&client->dev, "Regmap init failed\n");
- return PTR_ERR(chip->data.regmap);
- }
+ if (IS_ERR(chip->data.regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->data.regmap),
+ "Regmap init failed\n");
chip->dev = &client->dev;
i2c_set_clientdata(client, chip);
ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status);
if (ret < 0)
- return ret;
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read TCPC_POWER_STATUS\n");
/* Chip level tcpci callbacks */
chip->data.set_vbus = max_tcpci_set_vbus;
@@ -511,30 +519,25 @@ static int max_tcpci_probe(struct i2c_client *client)
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
- if (IS_ERR(chip->tcpci)) {
- dev_err(&client->dev, "TCPCI port registration failed\n");
- return PTR_ERR(chip->tcpci);
- }
+ if (IS_ERR(chip->tcpci))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->tcpci),
+ "TCPCI port registration failed\n");
+
+ ret = devm_add_action_or_reset(&client->dev,
+ max_tcpci_unregister_tcpci_port,
+ chip->tcpci);
+ if (ret)
+ return ret;
+
chip->port = tcpci_get_tcpm_port(chip->tcpci);
+
ret = max_tcpci_init_alert(chip, client);
if (ret < 0)
- goto unreg_port;
+ return dev_err_probe(&client->dev, ret,
+ "IRQ initialization failed\n");
device_init_wakeup(chip->dev, true);
return 0;
-
-unreg_port:
- tcpci_unregister_port(chip->tcpci);
-
- return ret;
-}
-
-static void max_tcpci_remove(struct i2c_client *client)
-{
- struct max_tcpci_chip *chip = i2c_get_clientdata(client);
-
- if (!IS_ERR_OR_NULL(chip->tcpci))
- tcpci_unregister_port(chip->tcpci);
}
static const struct i2c_device_id max_tcpci_id[] = {
@@ -557,7 +560,6 @@ static struct i2c_driver max_tcpci_i2c_driver = {
.of_match_table = of_match_ptr(max_tcpci_of_match),
},
.probe = max_tcpci_probe,
- .remove = max_tcpci_remove,
.id_table = max_tcpci_id,
};
module_i2c_driver(max_tcpci_i2c_driver);
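The Maxim probe path now attaches a devm action that unregisters the TCPCI port, so every later failure can simply return (via dev_err_probe()) and the explicit .remove() callback becomes unnecessary. A rough sketch of the pattern, with placeholder names that are not this driver's API:

#include <linux/device.h>
#include <linux/err.h>

struct example_port;
extern struct example_port *example_register_port(struct device *dev);
extern void example_unregister_port(struct example_port *port);

static void example_unregister_action(void *data)
{
	example_unregister_port(data);
}

static int example_probe(struct device *dev)
{
	struct example_port *port;

	port = example_register_port(dev);
	if (IS_ERR(port))
		return dev_err_probe(dev, PTR_ERR(port), "port registration failed\n");

	/* Runs automatically on probe failure and on device removal. */
	return devm_add_action_or_reset(dev, example_unregister_action, port);
}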
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 67422d45eb54..64f6dd0dc660 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -5,6 +5,7 @@
* Richtek RT1711H Type-C Chip Driver
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -195,12 +196,10 @@ static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
if (ret < 0)
return ret;
- cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) &
- TCPC_CC_STATUS_CC1_MASK,
+ cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, status),
status & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role, CC1));
- cc2 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC2_SHIFT) &
- TCPC_CC_STATUS_CC2_MASK,
+ cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, status),
status & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role, CC2));
@@ -233,25 +232,25 @@ static int rt1711h_start_drp_toggling(struct tcpci *tcpci,
switch (cc) {
default:
case TYPEC_CC_RP_DEF:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF);
break;
case TYPEC_CC_RP_1_5:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5);
break;
case TYPEC_CC_RP_3_0:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0);
break;
}
if (cc == TYPEC_CC_RD)
- reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
else
- reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP));
ret = rt1711h_write8(chip, TCPC_ROLE_CTRL, reg);
if (ret < 0)
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index d532670885e4..7ee721a877c1 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -1460,8 +1460,9 @@ static void tps6598x_remove(struct i2c_client *client)
if (!client->irq)
cancel_delayed_work_sync(&tps->wq_poll);
+ else
+ devm_free_irq(tps->dev, client->irq, tps);
- devm_free_irq(tps->dev, client->irq, tps);
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 17155ed17fdf..e0f3925e401b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -38,6 +38,10 @@
void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
{
+ /* Ignore bogus data in CCI if busy indicator is set. */
+ if (cci & UCSI_CCI_BUSY)
+ return;
+
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
@@ -99,24 +103,18 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
*cci = 0;
- /*
- * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
- * reads here.
- */
- if (ucsi->version <= UCSI_VERSION_1_2)
- size = clamp(size, 0, 16);
+ if (size > UCSI_MAX_DATA_LENGTH(ucsi))
+ return -EINVAL;
ret = ucsi->ops->sync_control(ucsi, command);
- if (ret)
- return ret;
+ if (ucsi->ops->read_cci(ucsi, cci))
+ return -EIO;
- ret = ucsi->ops->read_cci(ucsi, cci);
+ if (*cci & UCSI_CCI_BUSY)
+ return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
if (ret)
return ret;
- if (*cci & UCSI_CCI_BUSY)
- return -EBUSY;
-
if (!(*cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
@@ -148,21 +146,10 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
int ret;
command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num);
- ret = ucsi_run_command(ucsi, command, &cci,
- &error, sizeof(error), false);
-
- if (cci & UCSI_CCI_BUSY) {
- ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false);
-
- return ret ? ret : -EBUSY;
- }
-
+ ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false);
if (ret < 0)
return ret;
- if (cci & UCSI_CCI_ERROR)
- return -EIO;
-
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
@@ -238,9 +225,8 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
mutex_lock(&ucsi->ppm_lock);
ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack);
- if (cci & UCSI_CCI_BUSY)
- ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY;
- else if (cci & UCSI_CCI_ERROR)
+
+ if (cci & UCSI_CCI_ERROR)
ret = ucsi_read_error(ucsi, connector_num);
mutex_unlock(&ucsi->ppm_lock);
@@ -752,104 +738,66 @@ static struct usb_power_delivery_capabilities *ucsi_get_pd_caps(struct ucsi_conn
&pd_caps);
}
-static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient,
- u8 offset, u8 bytes, void *resp)
+static int ucsi_get_pd_message(struct ucsi_connector *con, u8 recipient,
+ size_t bytes, void *data, u8 type)
{
- struct ucsi *ucsi = con->ucsi;
+ size_t len = min(bytes, UCSI_MAX_DATA_LENGTH(con->ucsi));
u64 command;
+ u8 offset;
int ret;
- command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
- UCSI_CONNECTOR_NUMBER(con->num);
- command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
- command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
- command |= UCSI_GET_PD_MESSAGE_BYTES(bytes);
- command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
-
- ret = ucsi_send_command(ucsi, command, resp, bytes);
- if (ret < 0)
- dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret);
-
- return ret;
-}
-
-static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient,
- struct usb_pd_identity *id)
-{
- struct ucsi *ucsi = con->ucsi;
- struct ucsi_pd_message_disc_id resp = {};
- int ret;
-
- if (ucsi->version < UCSI_VERSION_2_0) {
- /*
- * Before UCSI v2.0, MESSAGE_IN is 16 bytes which cannot fit
- * the 28 byte identity response including the VDM header.
- * First request the VDM header, ID Header VDO, Cert Stat VDO
- * and Product VDO.
- */
- ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp);
- if (ret < 0)
- return ret;
-
+ for (offset = 0; offset < bytes; offset += len) {
+ len = min(len, bytes - offset);
- /* Then request Product Type VDO1 through Product Type VDO3. */
- ret = ucsi_read_identity(con, recipient, 0x10, 0xc,
- &resp.vdo[0]);
- if (ret < 0)
- return ret;
+ command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
+ command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
+ command |= UCSI_GET_PD_MESSAGE_BYTES(len);
+ command |= UCSI_GET_PD_MESSAGE_TYPE(type);
- } else {
- /*
- * In UCSI v2.0 and after, MESSAGE_IN is large enough to request
- * the large enough to request the full Discover Identity
- * response at once.
- */
- ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp);
+ ret = ucsi_send_command(con->ucsi, command, data + offset, len);
if (ret < 0)
return ret;
}
- id->id_header = resp.id_header;
- id->cert_stat = resp.cert_stat;
- id->product = resp.product;
- id->vdo[0] = resp.vdo[0];
- id->vdo[1] = resp.vdo[1];
- id->vdo[2] = resp.vdo[2];
return 0;
}
static int ucsi_get_partner_identity(struct ucsi_connector *con)
{
+ u32 vdo[7] = {};
int ret;
- ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP,
- &con->partner_identity);
+ ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP, sizeof(vdo), vdo,
+ UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
if (ret < 0)
return ret;
+	/* The VDM header is not part of struct usb_pd_identity, so drop it. */
+ con->partner_identity = *(struct usb_pd_identity *)&vdo[1];
+
ret = typec_partner_set_identity(con->partner);
- if (ret < 0) {
- dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n",
- ret);
- }
+ if (ret < 0)
+ dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n", ret);
return ret;
}
static int ucsi_get_cable_identity(struct ucsi_connector *con)
{
+ u32 vdo[7] = {};
int ret;
- ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P,
- &con->cable_identity);
+ ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP_P, sizeof(vdo), vdo,
+ UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
if (ret < 0)
return ret;
+ con->cable_identity = *(struct usb_pd_identity *)&vdo[1];
+
ret = typec_cable_set_identity(con->cable);
- if (ret < 0) {
- dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n",
- ret);
- }
+ if (ret < 0)
+ dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n", ret);
return ret;
}
@@ -993,7 +941,8 @@ static int ucsi_register_cable(struct ucsi_connector *con)
break;
}
- desc.identity = &con->cable_identity;
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ desc.identity = &con->cable_identity;
desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE & cable_prop.flags);
if (con->ucsi->version >= UCSI_VERSION_2_1)
@@ -1094,7 +1043,8 @@ static int ucsi_register_partner(struct ucsi_connector *con)
if (pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD)
ucsi_register_device_pdos(con);
- desc.identity = &con->partner_identity;
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
@@ -1249,6 +1199,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
mutex_lock(&con->lock);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ dev_err_once(ucsi->dev, "%s entered without EVENT_PENDING\n",
+ __func__);
+
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command_common(ucsi, command, &con->status,
@@ -1341,12 +1295,26 @@ EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
+/*
+ * Hard Reset bit field was defined with value 1 in UCSI spec version 1.0.
+ * Starting with spec version 1.1, Hard Reset bit field was removed from the
+ * CONNECTOR_RESET command, until spec 2.0 reintroduced it with value 0. So, in
+ * effect, the value to pass to the command for a Hard Reset differs depending
+ * on the UCSI version supported by the LPM.
+ *
+ * For performing a Data Reset on LPMs supporting version 2.0 and greater,
+ * this function needs to be called with the second argument set to 0.
+ */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
u64 command;
command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
- command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
+
+ if (con->ucsi->version < UCSI_VERSION_1_1)
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD_VER_1_0 : 0;
+ else if (con->ucsi->version >= UCSI_VERSION_2_0)
+ command |= hard ? 0 : UCSI_CONNECTOR_RESET_DATA_VER_2_0;
return ucsi_send_command(con->ucsi, command, NULL, 0);
}
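ucsi_get_pd_message() above replaces the two hard-coded identity reads with a generic chunking loop: each GET_PD_MESSAGE transfer is capped at UCSI_MAX_DATA_LENGTH() (16 bytes before UCSI 2.0, 255 bytes from 2.0 on), so the 28-byte Discover Identity response takes two commands on an older PPM and one on a newer one. The loop boils down to the following shape; this is an illustrative helper only, not the driver's API:

#include <linux/minmax.h>
#include <linux/types.h>

/* Hedged sketch: fetch `bytes` of a PD message in MESSAGE_IN-sized chunks. */
static int example_read_chunked(size_t bytes, size_t max_chunk,
				int (*read_chunk)(size_t offset, size_t len))
{
	size_t offset, len;
	int ret;

	for (offset = 0; offset < bytes; offset += len) {
		len = min(max_chunk, bytes - offset);
		ret = read_chunk(offset, len);	/* one GET_PD_MESSAGE per chunk */
		if (ret < 0)
			return ret;
	}
	return 0;
}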
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 5a3481d36d7a..4a017eb6a65b 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -29,6 +29,7 @@ struct dentry;
#define UCSIv2_MESSAGE_OUT 272
/* UCSI versions */
+#define UCSI_VERSION_1_1 0x0110
#define UCSI_VERSION_1_2 0x0120
#define UCSI_VERSION_2_0 0x0200
#define UCSI_VERSION_2_1 0x0210
@@ -122,7 +123,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_DEFAULT_GET_CONNECTOR_NUMBER(_cmd_) (((_cmd_) >> 16) & GENMASK(6, 0))
/* CONNECTOR_RESET command bits */
-#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+#define UCSI_CONNECTOR_RESET_HARD_VER_1_0 BIT(23) /* Deprecated in v1.1 */
+#define UCSI_CONNECTOR_RESET_DATA_VER_2_0 BIT(23) /* Redefined in v2.0 */
+
/* ACK_CC_CI bits */
#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
@@ -344,47 +347,12 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 pwr_status[3];
-#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0))
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(1, 0))
#define UCSI_CONSTAT_BC_NOT_CHARGING 0
#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
-#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2)
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
-#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \
- ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6)
-#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6)
-#define UCSI_CONSTAT_ORIENTATION_DIRECT 0
-#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1
-#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7)
-#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
-#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
- u8 pwr_readings[9];
-#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1)
-#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1)
-#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2)
-#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \
- ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5)
-#define UCSI_CONSTAT_AVG_CURRENT(_p_) \
- ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5)
-#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \
- ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5)
-#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \
- ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1)
-} __packed;
-
-/*
- * Data structure filled by PPM in response to GET_PD_MESSAGE command with the
- * Response Message Type set to Discover Identity Response.
- */
-struct ucsi_pd_message_disc_id {
- u32 vdm_header;
- u32 id_header;
- u32 cert_stat;
- u32 product;
- u32 vdo[3];
} __packed;
/* -------------------------------------------------------------------------- */
@@ -435,6 +403,8 @@ struct ucsi {
#define UCSI_DELAY_DEVICE_PDOS BIT(1) /* Reading PDOs fails until the parter is in PD mode */
};
+#define UCSI_MAX_DATA_LENGTH(u) (((u)->version < UCSI_VERSION_2_0) ? 0x10 : 0xff)
+
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 6aace19d595b..03c0fa8edc8d 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -278,7 +278,7 @@ static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv)
case UC_UCSI_USBC_NOTIFY_IND:
schedule_work(&ucsi->notify_work);
break;
- };
+ }
}
static void pmic_glink_ucsi_pdr_notify(void *priv, int state)
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 302a89aeb258..8dac1edc74d4 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -372,7 +372,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
@@ -388,7 +388,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" ClearPortFeature: USB_PORT_FEAT_POWER\n");
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
vhci_hcd->port_status[rhport] &= ~USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] &= ~USB_PORT_STAT_POWER;
@@ -404,19 +404,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetHubDescriptor:
usbip_dbg_vhci_rh(" GetHubDescriptor\n");
- if (hcd->speed == HCD_USB3 &&
+ if (hcd->speed >= HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n");
goto error;
}
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
ss_hub_descriptor((struct usb_hub_descriptor *) buf);
else
hub_descriptor((struct usb_hub_descriptor *) buf);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
goto error;
if ((wValue >> 8) != USB_DT_BOS)
@@ -503,7 +503,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_LINK_STATE:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_LINK_STATE\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_LINK_STATE req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -521,7 +521,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
/* TODO: add suspend/resume support! */
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -531,7 +531,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
/* Applicable only for USB2.0 hub */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
pr_err("USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
@@ -551,7 +551,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pr_err("invalid port number %d\n", wIndex);
goto error;
}
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_POWER;
@@ -564,7 +564,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* Applicable only for USB3.0 hub */
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -578,7 +578,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* if it's already enabled, disable */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
vhci_hcd->port_status[rhport] =
(USB_SS_PORT_STAT_POWER |
@@ -602,7 +602,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
if (wValue >= 32)
goto error;
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
vhci_hcd->port_status[rhport] |= (1 << wValue);
@@ -616,7 +616,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortErrorCount:
usbip_dbg_vhci_rh(" GetPortErrorCount\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("GetPortErrorCount req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -626,7 +626,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case SetHubDepth:
usbip_dbg_vhci_rh(" SetHubDepth\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("SetHubDepth req not supported for "
"USB 2.0 roothub\n");
goto error;
@@ -646,7 +646,7 @@ error:
if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
- hcd->speed == HCD_USB3);
+ hcd->speed >= HCD_USB3);
}
}
usbip_dbg_vhci_rh(" bye\n");
@@ -1157,8 +1157,8 @@ static int vhci_setup(struct usb_hcd *hcd)
} else {
vhci->vhci_hcd_ss = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_ss->vhci = vhci;
- hcd->speed = HCD_USB3;
- hcd->self.root_hub->speed = USB_SPEED_SUPER;
+ hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
}
/*
@@ -1319,7 +1319,7 @@ static const struct hc_driver vhci_hc_driver = {
.product_desc = driver_desc,
.hcd_priv_size = sizeof(struct vhci_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
+ .flags = HCD_USB31 | HCD_SHARED,
.reset = vhci_setup,
.start = vhci_start,
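Because the root hub is now registered as HCD_USB31 with a USB_SPEED_SUPER_PLUS root device, every "is this the USB 3.x hub?" test switches from an equality check against HCD_USB3 to an ordered comparison, which works because the HCD_USB* values grow with the protocol revision (HCD_USB3 < HCD_USB31). A minimal sketch of the predicate these hunks encode; the helper name is illustrative only:

#include <linux/usb/hcd.h>

/* Illustrative helper, not part of the driver: true for USB 3.0/3.1 root hubs. */
static inline bool example_hcd_is_usb3(const struct usb_hcd *hcd)
{
	return hcd->speed >= HCD_USB3;
}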
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e2847cd3e6e3..d5865460e82d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -283,6 +283,7 @@ static int valid_args(__u32 *pdev_nr, __u32 *rhport,
case USB_SPEED_HIGH:
case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
break;
default:
pr_err("Failed attach request for unsupported USB speed: %s\n",
@@ -349,7 +350,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
- if (speed == USB_SPEED_SUPER)
+ if (speed >= USB_SPEED_SUPER)
vdev = &vhci->vhci_hcd_ss->vdev[rhport];
else
vdev = &vhci->vhci_hcd_hs->vdev[rhport];
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 0f347717021a..aa36de361c10 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -112,15 +112,12 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
-void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw);
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
-int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 50aac8fe57ef..2cedf7e2dbc4 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -83,10 +83,28 @@ enum {
MLX5_VDPA_NUM_AS = 2
};
+struct mlx5_vdpa_mr_resources {
+ struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+
+ /* Pre-deletion mr list */
+ struct list_head mr_list_head;
+
+ /* Deferred mr list */
+ struct list_head mr_gc_list_head;
+ struct workqueue_struct *wq_gc;
+ struct delayed_work gc_dwork_ent;
+
+ struct mutex lock;
+
+ atomic_t shutdown;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
struct mlx5_vdpa_resources res;
+ struct mlx5_vdpa_mr_resources mres;
u64 mlx_features;
u64 actual_features;
@@ -95,14 +113,23 @@ struct mlx5_vdpa_dev {
u16 max_idx;
u32 generation;
- struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
- struct list_head mr_list_head;
- /* serialize mr access */
- struct mutex mr_mtx;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
- unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
bool suspended;
+
+ struct mlx5_async_ctx async_ctx;
+};
+
+struct mlx5_vdpa_async_cmd {
+ int err;
+ struct mlx5_async_work cb_work;
+ struct completion cmd_done;
+
+ void *in;
+ size_t inlen;
+
+ void *out;
+ size_t outlen;
};
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
@@ -121,7 +148,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb);
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
@@ -134,6 +163,14 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds);
+
+#define mlx5_vdpa_err(__dev, format, ...) \
+ dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
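struct mlx5_vdpa_async_cmd and mlx5_vdpa_exec_async_cmds() let a caller fire a whole batch of firmware commands through the mlx5 async interface and then pick up each command's status from cmds[i].err, instead of issuing one blocking command per memory key. The real users are create_direct_keys()/destroy_direct_keys() in the mr.c hunks below; as a rough, hedged sketch of just the calling convention (example_exec_batch() is invented, the rest comes from the declarations above):

/* Illustrative only: issue n pre-filled commands as one async batch. */
static int example_exec_batch(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_async_cmd *cmds, int n)
{
	int err, i;

	/* cmds[i].in/inlen and cmds[i].out/outlen must already point at
	 * caller-owned buffers holding the command and reply mailboxes.
	 */
	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, n);
	if (err)
		return err;

	for (i = 0; i < n; i++)
		if (cmds[i].err)	/* per-command completion status */
			return cmds[i].err;

	return 0;
}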
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 4758914ccf86..2dd21e0b399e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -49,17 +49,23 @@ static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
}
}
-static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
-{
- int inlen;
+struct mlx5_create_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(create_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ __be64 mtt[];
+};
+
+struct mlx5_destroy_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(destroy_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(destroy_mkey_in)];
+};
+
+static void fill_create_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ void *in = &mem->in;
void *mkc;
- void *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -76,18 +82,36 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(mr->end - mr->start, mr->log_size));
populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
- err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
- kvfree(in);
- if (err) {
- mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
- return err;
- }
- return 0;
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+}
+
+static void create_direct_mr_end(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ u32 mkey_index = MLX5_GET(create_mkey_out, mem->out, mkey_index);
+
+ mr->mr = mlx5_idx_to_mkey(mkey_index);
+}
+
+static void fill_destroy_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_destroy_mkey_mem *mem)
+{
+ void *in = &mem->in;
+
+ MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr));
}
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
+ if (!mr->mr)
+ return;
+
mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
@@ -179,6 +203,123 @@ static int klm_byte_size(int nklms)
return 16 * ALIGN(nklms, 4);
}
+#define MLX5_VDPA_MTT_ALIGN 16
+
+static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_vdpa_async_cmd *cmds;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ if (!cmds)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+ int mttlen, mttcount;
+
+ mttlen = roundup(MLX5_ST_SZ_BYTES(mtt) * dmr->nsg, MLX5_VDPA_MTT_ALIGN);
+ mttcount = mttlen / sizeof(cmd_mem->mtt[0]);
+ cmd_mem = kvcalloc(1, struct_size(cmd_mem, mtt, mttcount), GFP_KERNEL);
+ if (!cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ cmds[i].out = cmd_mem->out;
+ cmds[i].outlen = sizeof(cmd_mem->out);
+ cmds[i].in = cmd_mem->in;
+ cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount);
+
+ fill_create_direct_mr(mvdev, dmr, cmd_mem);
+
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey creation for direct mrs: %d\n", err);
+ goto done;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmd->out, struct mlx5_create_mkey_mem, out);
+
+ if (!cmd->err) {
+ create_direct_mr_end(mvdev, dmr, cmd_mem);
+ } else {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error creating MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+done:
+ for (i = i-1; i >= 0; i--) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out);
+ kvfree(cmd_mem);
+ }
+
+ kvfree(cmds);
+ return err;
+}
+
+DEFINE_FREE(free_cmds, struct mlx5_vdpa_async_cmd *, kvfree(_T))
+DEFINE_FREE(free_cmd_mem, struct mlx5_destroy_mkey_mem *, kvfree(_T))
+
+static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_destroy_mkey_mem *cmd_mem __free(free_cmd_mem) = NULL;
+ struct mlx5_vdpa_async_cmd *cmds __free(free_cmds) = NULL;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ cmds[i].out = cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ cmds[i].in = cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ fill_destroy_direct_mr(mvdev, dmr, &cmd_mem[i]);
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey deletion for direct mrs: %d\n", err);
+ return err;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+
+ dmr->mr = 0;
+ if (cmd->err) {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error deleting MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+ return err;
+}
+
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen;
@@ -279,14 +420,8 @@ done:
goto err_map;
}
- err = create_direct_mr(mvdev, mr);
- if (err)
- goto err_direct;
-
return 0;
-err_direct:
- dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(&mr->sg_head);
return err;
@@ -401,6 +536,10 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_chain;
+ err = create_direct_keys(mvdev, mr);
+ if (err)
+ goto err_chain;
+
/* Create the memory key that defines the guest's address space. This
* memory key refers to the direct keys that contain the MTT
* translations
@@ -489,6 +628,7 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
struct mlx5_vdpa_direct_mr *n;
destroy_indirect_key(mvdev, mr);
+ destroy_direct_keys(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
@@ -513,22 +653,58 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
kfree(mr);
}
+/* There can be multiple .set_map() operations in quick succession.
+ * This large delay is a simple way to prevent the MR cleanup from blocking
+ * .set_map() MR creation in this scenario.
+ */
+#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
+
+static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
+{
+ struct mlx5_vdpa_mr_resources *mres;
+ struct mlx5_vdpa_mr *mr, *tmp;
+ struct mlx5_vdpa_dev *mvdev;
+
+ mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
+
+ if (atomic_read(&mres->shutdown)) {
+ mutex_lock(&mres->lock);
+ } else if (!mutex_trylock(&mres->lock)) {
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ return;
+ }
+
+ mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
+
+ list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+ }
+
+ mutex_unlock(&mres->lock);
+}
+
static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
if (!mr)
return;
- if (refcount_dec_and_test(&mr->refcount))
- _mlx5_vdpa_destroy_mr(mvdev, mr);
+ if (refcount_dec_and_test(&mr->refcount)) {
+ list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ }
}
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -543,44 +719,47 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_get_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *new_mr,
unsigned int asid)
{
- struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
+ struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, old_mr);
- mvdev->mr[asid] = new_mr;
+ mvdev->mres.mr[asid] = new_mr;
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_mr *mr;
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
- list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+ list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
"mr: %p, mkey: 0x%x, refcount: %u\n",
mr, mr->mkey, refcount_read(&mr->refcount));
}
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
-void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev)
{
+ if (!mvdev->res.valid)
+ return;
+
for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
mlx5_vdpa_update_mr(mvdev, NULL, i);
@@ -613,7 +792,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_iotlb;
- list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+ list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
return 0;
@@ -639,9 +818,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (!mr)
return ERR_PTR(-ENOMEM);
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
if (err)
goto out_err;
@@ -661,7 +840,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
{
int err;
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
return 0;
spin_lock(&mvdev->cvq.iommu_lock);
@@ -703,3 +882,33 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
return 0;
}
+
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
+ if (!mres->wq_gc)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
+
+ mutex_init(&mres->lock);
+
+ INIT_LIST_HEAD(&mres->mr_list_head);
+ INIT_LIST_HEAD(&mres->mr_gc_list_head);
+
+ return 0;
+}
+
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ atomic_set(&mres->shutdown, 1);
+
+ flush_delayed_work(&mres->gc_dwork_ent);
+ destroy_workqueue(mres->wq_gc);
+ mres->wq_gc = NULL;
+ mutex_destroy(&mres->lock);
+}
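
The mr.c changes above defer MR teardown instead of doing it inline: when the last reference is dropped, _mlx5_vdpa_put_mr() only moves the MR onto mres->mr_gc_list_head and queues gc_dwork_ent, and mlx5_vdpa_mr_gc_handler() performs the actual destruction up to MLX5_VDPA_MR_GC_TRIGGER_MS later (or immediately once shutdown is set). A minimal sketch of the caller-visible lifecycle, using only helpers shown in this patch; the wrapper function name is illustrative and not part of the patch:

static void example_replace_map(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb, unsigned int asid)
{
	struct mlx5_vdpa_mr *new_mr;

	/* Build and register the new MR under mres->lock. */
	new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
	if (IS_ERR(new_mr))
		return;

	/*
	 * Installs new_mr for this asid and drops the reference on the old
	 * MR. With this patch the old MR is not destroyed here: it is moved
	 * to the GC list and freed by the delayed work, so a quick follow-up
	 * .set_map() is not blocked by the teardown of the previous mapping.
	 */
	mlx5_vdpa_update_mr(mvdev, new_mr, asid);
}
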
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 5c5a41b64bfc..aeae31d0cefa 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,6 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
- mutex_init(&mvdev->mr_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
@@ -301,7 +300,6 @@ err_pd:
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
- mutex_destroy(&mvdev->mr_mtx);
return err;
}
@@ -318,6 +316,78 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
- mutex_destroy(&mvdev->mr_mtx);
res->valid = false;
}
+
+static void virtqueue_cmd_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5_vdpa_async_cmd *cmd =
+ container_of(context, struct mlx5_vdpa_async_cmd, cb_work);
+
+ cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out);
+ complete(&cmd->cmd_done);
+}
+
+static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int issued,
+ int *completed)
+
+{
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];
+ int err;
+
+retry:
+ err = mlx5_cmd_exec_cb(&mvdev->async_ctx,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen,
+ virtqueue_cmd_callback,
+ &cmd->cb_work);
+ if (err == -EBUSY) {
+ if (*completed < issued) {
+ /* Throttled by own commands: wait for oldest completion. */
+ wait_for_completion(&cmds[*completed].cmd_done);
+ (*completed)++;
+
+ goto retry;
+ } else {
+ /* Throttled by external commands: switch to sync api. */
+ err = mlx5_cmd_exec(mvdev->mdev,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen);
+ if (!err)
+ (*completed)++;
+ }
+ }
+
+ return err;
+}
+
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds)
+{
+ int completed = 0;
+ int issued = 0;
+ int err = 0;
+
+ for (int i = 0; i < num_cmds; i++)
+ init_completion(&cmds[i].cmd_done);
+
+ while (issued < num_cmds) {
+
+ err = issue_async_cmd(mvdev, cmds, issued, &completed);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n",
+ issued, num_cmds, err);
+ break;
+ }
+
+ issued++;
+ }
+
+ while (completed < issued)
+ wait_for_completion(&cmds[completed++].cmd_done);
+
+ return err;
+}
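
mlx5_vdpa_exec_async_cmds() added above is the primitive that the mr.c and mlx5_vnet.c hunks build on: the caller prepares one struct mlx5_vdpa_async_cmd plus an in/out buffer pair per object, submits the whole batch, and then inspects each cmd->err for the per-command status recorded by virtqueue_cmd_callback(). A minimal sketch of that calling pattern, assuming a hypothetical per-command buffer struct and the query layout only as an example; it is not taken verbatim from the patch:

/* Hypothetical per-command buffers; any in/out layout works. */
struct example_cmd_mem {
	u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
	u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
};

static int example_batch(struct mlx5_vdpa_dev *mvdev, int num)
{
	struct mlx5_vdpa_async_cmd *cmds;
	struct example_cmd_mem *mem;
	int err = 0;
	int i;

	cmds = kvcalloc(num, sizeof(*cmds), GFP_KERNEL);
	mem = kvcalloc(num, sizeof(*mem), GFP_KERNEL);
	if (!cmds || !mem) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		cmds[i].in = mem[i].in;
		cmds[i].inlen = sizeof(mem[i].in);
		cmds[i].out = mem[i].out;
		cmds[i].outlen = sizeof(mem[i].out);
		/* fill mem[i].in for command i here */
	}

	/* Issues the batch through mlx5_cmd_exec_cb(), falls back to the
	 * sync API when the async context is saturated, and waits for all
	 * completions before returning.
	 */
	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, num);
	if (err)
		goto out;

	for (i = 0; i < num; i++) {
		if (cmds[i].err)
			err = err ? err : cmds[i].err;
	}
out:
	kvfree(mem);
	kvfree(cmds);
	return err;
}
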
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index fa78e8288ebb..dee019977716 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr &&
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
/* If there is no mr update, make sure that the existing ones are set
* when the vq is modified to ready.
*/
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
}
@@ -1184,40 +1184,92 @@ struct mlx5_virtq_attr {
u16 used_index;
};
-static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
- struct mlx5_virtq_attr *attr)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
- u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
- void *out;
- void *obj_context;
- void *cmd_hdr;
- int err;
+struct mlx5_virtqueue_query_mem {
+ u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
+};
- out = kzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+struct mlx5_virtqueue_modify_mem {
+ u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)];
+};
- cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtqueue_query_mem *cmd)
+{
+ void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
- if (err)
- goto err_cmd;
+}
+
+static void query_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_virtqueue_query_mem *cmd,
+ struct mlx5_virtq_attr *attr)
+{
+ void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context);
- obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
- kfree(out);
- return 0;
+}
-err_cmd:
- kfree(out);
+static int query_virtqueues(struct mlx5_vdpa_net *ndev,
+ int start_vq,
+ int num_vqs,
+ struct mlx5_virtq_attr *attrs)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_query_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
+
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
+
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ cmds[i].in = &cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ cmds[i].out = &cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]);
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n",
+ start_vq, start_vq + num_vqs, err);
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ int vq_idx = start_vq + i;
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
return err;
}
@@ -1251,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
return true;
}
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- int state)
+static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state,
+ struct mlx5_virtqueue_modify_mem *cmd)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
- u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
struct mlx5_vdpa_mr *desc_mr = NULL;
struct mlx5_vdpa_mr *vq_mr = NULL;
- bool state_change = false;
void *obj_context;
void *cmd_hdr;
void *vq_ctx;
- void *in;
- int err;
- if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
- return 0;
-
- if (!modifiable_virtqueue_fields(mvq))
- return -EINVAL;
-
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context);
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
- if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
- if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
- err = -EINVAL;
- goto done;
- }
-
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
MLX5_SET(virtio_net_q_object, obj_context, state, state);
- state_change = true;
- }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
@@ -1323,7 +1354,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1332,7 +1363,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
- desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1341,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
- if (err)
- goto done;
+}
- if (state_change)
- mvq->fw_state = state;
+static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+ struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
mlx5_vdpa_get_mr(mvdev, vq_mr);
mvq->vq_mr = vq_mr;
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+ struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
mlx5_vdpa_get_mr(mvdev, desc_mr);
mvq->desc_mr = desc_mr;
}
- mvq->modified_fields = 0;
-
-done:
- kfree(in);
- return err;
-}
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
+ mvq->fw_state = state;
-static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- unsigned int state)
-{
- mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
- return modify_virtqueue(ndev, mvq, state);
+ mvq->modified_fields = 0;
}
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -1525,53 +1554,136 @@ err_fwqp:
return err;
}
-static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state)
{
- struct mlx5_virtq_attr attr;
- int err;
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_modify_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
- if (!mvq->initialized)
- return 0;
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
- if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
- return 0;
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
- return err;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
+
+ mvq = &ndev->vqs[vq_idx];
+
+ if (!modifiable_virtqueue_fields(mvq)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (mvq->fw_state != state) {
+ if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+ }
+
+ cmd->in = &cmd_mem[i].in;
+ cmd->inlen = sizeof(cmd_mem[i].in);
+ cmd->out = &cmd_mem[i].out;
+ cmd->outlen = sizeof(cmd_mem[i].out);
+ fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
- return err;
+ mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n",
+ start_vq, start_vq + num_vqs);
+ goto done;
}
- mvq->avail_idx = attr.available_index;
- mvq->used_idx = attr.used_index;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
- return 0;
+ mvq = &ndev->vqs[vq_idx];
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n",
+ vq_idx, mvq->fw_state, state, err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ modify_virtqueue_end(ndev, mvq, state);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
+ return err;
}
-static int suspend_vqs(struct mlx5_vdpa_net *ndev)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
- int err = 0;
- int i;
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_virtq_attr *attrs;
+ int vq_idx, i;
+ int err;
+
+ if (start_vq >= ndev->cur_num_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
+ if (!mvq->initialized)
+ return 0;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return 0;
+
+ err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+ if (err)
+ return err;
+
+ attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
- for (i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+ err = query_virtqueues(ndev, start_vq, num_vqs, attrs);
+ if (err)
+ goto done;
- err = local_err ? local_err : err;
+ for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) {
+ mvq = &ndev->vqs[vq_idx];
+ mvq->avail_idx = attrs[i].available_index;
+ mvq->used_idx = attrs[i].used_index;
}
+done:
+ kfree(attrs);
return err;
}
-static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ return suspend_vqs(ndev, mvq->index, 1);
+}
+
+static int resume_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
+ struct mlx5_vdpa_virtqueue *mvq;
int err;
+ if (start_vq >= ndev->mvdev.max_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
if (!mvq->initialized)
return 0;
@@ -1583,13 +1695,9 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
/* Due to a FW quirk we need to modify the VQ fields first then change state.
* This should be fixed soon. After that, a single command can be used.
*/
- err = modify_virtqueue(ndev, mvq, 0);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev,
- "modify vq properties failed for vq %u, err: %d\n",
- mvq->index, err);
+ err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state);
+ if (err)
return err;
- }
break;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
if (!is_resumable(ndev)) {
@@ -1600,30 +1708,17 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return 0;
default:
- mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n",
+ mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n",
mvq->index, mvq->fw_state);
return -EINVAL;
}
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
- if (err)
- mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
- mvq->index, err);
-
- return err;
+ return modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
}
-static int resume_vqs(struct mlx5_vdpa_net *ndev)
+static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
- int err = 0;
-
- for (int i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = resume_vq(ndev, &ndev->vqs[i]);
-
- err = local_err ? local_err : err;
- }
-
- return err;
+ return resume_vqs(ndev, mvq->index, 1);
}
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -2002,13 +2097,13 @@ static int setup_steering(struct mlx5_vdpa_net *ndev)
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n");
return -EOPNOTSUPP;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ndev->rxft)) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n");
return PTR_ERR(ndev->rxft);
}
mlx5_vdpa_add_rx_flow_table(ndev);
@@ -2124,45 +2219,48 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- int cur_qps = ndev->cur_num_vqs / 2;
+ int cur_vqs = ndev->cur_num_vqs;
+ int new_vqs = newqps * 2;
int err;
int i;
- if (cur_qps > newqps) {
- err = modify_rqt(ndev, 2 * newqps);
+ if (cur_vqs > new_vqs) {
+ err = modify_rqt(ndev, new_vqs);
if (err)
return err;
- for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
-
- if (is_resumable(ndev))
- suspend_vq(ndev, mvq);
- else
- teardown_vq(ndev, mvq);
+ if (is_resumable(ndev)) {
+ suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs);
+ } else {
+ for (i = new_vqs; i < cur_vqs; i++)
+ teardown_vq(ndev, &ndev->vqs[i]);
}
- ndev->cur_num_vqs = 2 * newqps;
+ ndev->cur_num_vqs = new_vqs;
} else {
- ndev->cur_num_vqs = 2 * newqps;
- for (i = cur_qps * 2; i < 2 * newqps; i++) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
+ ndev->cur_num_vqs = new_vqs;
- err = mvq->initialized ? resume_vq(ndev, mvq) : setup_vq(ndev, mvq, true);
+ for (i = cur_vqs; i < new_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i], false);
if (err)
goto clean_added;
}
- err = modify_rqt(ndev, 2 * newqps);
+
+ err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs);
+ if (err)
+ goto clean_added;
+
+ err = modify_rqt(ndev, new_vqs);
if (err)
goto clean_added;
}
return 0;
clean_added:
- for (--i; i >= 2 * cur_qps; --i)
+ for (--i; i >= cur_vqs; --i)
teardown_vq(ndev, &ndev->vqs[i]);
- ndev->cur_num_vqs = 2 * cur_qps;
+ ndev->cur_num_vqs = cur_vqs;
return err;
}
@@ -2528,9 +2626,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
return 0;
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err) {
- mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ mlx5_vdpa_err(mvdev, "failed to query virtqueue\n");
return err;
}
state->split.avail_index = attr.used_index;
@@ -2755,6 +2853,9 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
struct mlx5_eqe *eqe = param;
int ret = NOTIFY_DONE;
+ if (ndev->mvdev.suspended)
+ return NOTIFY_DONE;
+
if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
@@ -2879,7 +2980,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
int err;
if (mvq->initialized) {
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err)
return err;
}
@@ -2948,7 +3049,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
bool teardown = !is_resumable(ndev);
int err;
- suspend_vqs(ndev);
+ suspend_vqs(ndev, 0, ndev->cur_num_vqs);
if (teardown) {
err = save_channels_info(ndev);
if (err)
@@ -2973,7 +3074,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
return err;
}
- resume_vqs(ndev);
+ resume_vqs(ndev, 0, ndev->cur_num_vqs);
return 0;
}
@@ -3097,7 +3198,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
teardown_vq_resources(ndev);
if (ndev->setup) {
- err = resume_vqs(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to resume VQs\n");
goto err_driver;
@@ -3122,7 +3223,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
err_driver:
unregister_link_notifier(ndev);
err_setup:
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
@@ -3134,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
/* by default, all groups are mapped to asid 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
- mvdev->group2asid[i] = 0;
+ mvdev->mres.group2asid[i] = 0;
}
static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3174,7 +3275,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
}
if (flags & VDPA_RESET_F_CLEAN_MAP)
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT;
@@ -3189,7 +3290,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_dma_mr(mvdev))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ mlx5_vdpa_err(mvdev, "create MR failed\n");
}
if (vq_reset)
setup_vq_resources(ndev, false);
@@ -3244,7 +3345,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
if (IS_ERR(new_mr)) {
err = PTR_ERR(new_mr);
- mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err);
return err;
}
} else {
@@ -3252,12 +3353,12 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = NULL;
}
- if (!mvdev->mr[asid]) {
+ if (!mvdev->mres.mr[asid]) {
mlx5_vdpa_update_mr(mvdev, new_mr, asid);
} else {
err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
if (err) {
- mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err);
goto out_err;
}
}
@@ -3332,7 +3433,10 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_fixed_resources(ndev);
- mlx5_vdpa_destroy_mr_resources(mvdev);
+ mlx5_vdpa_clean_mrs(mvdev);
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3500,8 +3604,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
mlx5_vdpa_info(mvdev, "suspending device\n");
down_write(&ndev->reslock);
- unregister_link_notifier(ndev);
- err = suspend_vqs(ndev);
+ err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);
mlx5_vdpa_cvq_suspend(mvdev);
mvdev->suspended = true;
up_write(&ndev->reslock);
@@ -3521,8 +3624,8 @@ static int mlx5_vdpa_resume(struct vdpa_device *vdev)
down_write(&ndev->reslock);
mvdev->suspended = false;
- err = resume_vqs(ndev);
- register_link_notifier(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
+ queue_link_work(ndev);
up_write(&ndev->reslock);
return err;
@@ -3537,12 +3640,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
- mvdev->group2asid[group] = asid;
+ mvdev->mres.group2asid[group] = asid;
- mutex_lock(&mvdev->mr_mtx);
- if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
- err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
+ if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+ err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+ mutex_unlock(&mvdev->mres.lock);
return err;
}
@@ -3854,18 +3957,22 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->rqt_size = 1;
}
+ mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
- INIT_LIST_HEAD(&mvdev->mr_list_head);
+ err = mlx5_vdpa_init_mr_resources(mvdev);
+ if (err)
+ goto err_res;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
- goto err_res;
+ goto err_mr_res;
}
err = alloc_fixed_resources(ndev);
@@ -3906,6 +4013,8 @@ err_reg:
err_res2:
free_fixed_resources(ndev);
err_mr:
+ mlx5_vdpa_clean_mrs(mvdev);
+err_mr_res:
mlx5_vdpa_destroy_mr_resources(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
@@ -3937,9 +4046,37 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
mgtdev->ndev = NULL;
}
+static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *add_config)
+{
+ struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
+ int err = -EOPNOTSUPP;
+
+ mvdev = to_mvdev(dev);
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ mdev = mvdev->mdev;
+ config = &ndev->config;
+
+ down_write(&ndev->reslock);
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (!err)
+ ether_addr_copy(config->mac, add_config->net.mac);
+ }
+
+ up_write(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_mgmtdev_ops mdev_ops = {
.dev_add = mlx5_vdpa_dev_add,
.dev_del = mlx5_vdpa_dev_del,
+ .dev_set_attr = mlx5_vdpa_set_attr,
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/vdpa/pds/cmds.h b/drivers/vdpa/pds/cmds.h
index e24d85cb8f1c..6b1bc33356b0 100644
--- a/drivers/vdpa/pds/cmds.h
+++ b/drivers/vdpa/pds/cmds.h
@@ -14,5 +14,4 @@ int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
-int pds_vdpa_cmd_set_features(struct pds_vdpa_device *pdsv, u64 features);
#endif /* _VDPA_CMDS_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 4dbd2e55a288..8a372b51c21a 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -1361,6 +1361,80 @@ dev_err:
return err;
}
+static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev,
+ struct genl_info *info)
+{
+ struct vdpa_dev_set_config set_config = {};
+ struct vdpa_mgmt_dev *mdev = vdev->mdev;
+ struct nlattr **nl_attrs = info->attrs;
+ const u8 *macaddr;
+ int err = -EOPNOTSUPP;
+
+ down_write(&vdev->cf_lock);
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
+ set_config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
+
+ if (is_valid_ether_addr(macaddr)) {
+ ether_addr_copy(set_config.net.mac, macaddr);
+ if (mdev->ops->dev_set_attr) {
+ err = mdev->ops->dev_set_attr(mdev, vdev,
+ &set_config);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Operation not supported by the device.");
+ }
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Invalid MAC address");
+ }
+ }
+ up_write(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_attr_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct device *dev;
+ const char *name;
+ u64 classes;
+ int err = 0;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+
+ down_write(&vdpa_dev_lock);
+ dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL);
+ if (classes & BIT_ULL(VIRTIO_ID_NET)) {
+ err = vdpa_dev_net_device_attr_set(vdev, info);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack, "%s device not supported",
+ name);
+ }
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ up_write(&vdpa_dev_lock);
+ return err;
+}
+
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
@@ -1497,6 +1571,11 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_stats_get_doit,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = VDPA_CMD_DEV_ATTR_SET,
+ .doit = vdpa_nl_cmd_dev_attr_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index cfe962911804..6caf09a1907b 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -414,6 +414,24 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
}
+static int vdpasim_net_set_attr(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config)
+{
+ struct vdpasim *vdpasim = container_of(dev, struct vdpasim, vdpa);
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ mutex_lock(&vdpasim->mutex);
+
+ if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ ether_addr_copy(vio_config->mac, config->net.mac);
+ mutex_unlock(&vdpasim->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&vdpasim->mutex);
+ return -EOPNOTSUPP;
+}
+
static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
const struct vdpa_dev_set_config *config)
{
@@ -510,7 +528,8 @@ static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
.dev_add = vdpasim_net_dev_add,
- .dev_del = vdpasim_net_dev_del
+ .dev_del = vdpasim_net_dev_del,
+ .dev_set_attr = vdpasim_net_set_attr
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 9a3e97108ace..0d632ba5d2a3 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -723,7 +723,6 @@ static const struct file_operations hisi_acc_vf_resume_fops = {
.owner = THIS_MODULE,
.write = hisi_acc_vf_resume_write,
.release = hisi_acc_vf_release_file,
- .llseek = no_llseek,
};
static struct hisi_acc_vf_migration_file *
@@ -845,7 +844,6 @@ static const struct file_operations hisi_acc_vf_save_fops = {
.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = hisi_acc_vf_release_file,
- .llseek = no_llseek,
};
static struct hisi_acc_vf_migration_file *
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 61d9b0f9146d..242c23eef452 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -587,7 +587,6 @@ static const struct file_operations mlx5vf_save_fops = {
.unlocked_ioctl = mlx5vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = mlx5vf_release_file,
- .llseek = no_llseek,
};
static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev)
@@ -1000,7 +999,6 @@ static const struct file_operations mlx5vf_resume_fops = {
.owner = THIS_MODULE,
.write = mlx5vf_resume_write,
.release = mlx5vf_release_file,
- .llseek = no_llseek,
};
static struct mlx5_vf_migration_file *
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index 6b94cc0bf45b..f2673d395236 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -235,7 +235,6 @@ static const struct file_operations pds_vfio_save_fops = {
.owner = THIS_MODULE,
.read = pds_vfio_save_read,
.release = pds_vfio_release_file,
- .llseek = no_llseek,
};
static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio)
@@ -334,7 +333,6 @@ static const struct file_operations pds_vfio_restore_fops = {
.owner = THIS_MODULE,
.write = pds_vfio_restore_write,
.release = pds_vfio_release_file,
- .llseek = no_llseek,
};
static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio)
diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c
index e36740a282e7..be3644ced17b 100644
--- a/drivers/vfio/pci/qat/main.c
+++ b/drivers/vfio/pci/qat/main.c
@@ -220,7 +220,6 @@ static const struct file_operations qat_vf_save_fops = {
.unlocked_ioctl = qat_vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = qat_vf_release_file,
- .llseek = no_llseek,
};
static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
@@ -345,7 +344,6 @@ static const struct file_operations qat_vf_resume_fops = {
.owner = THIS_MODULE,
.write = qat_vf_resume_write,
.release = qat_vf_release_file,
- .llseek = no_llseek,
};
static struct qat_vf_migration_file *
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 478cd46a49ed..5a49b5a6d496 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (irq < 0)
return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
if (unlikely(ret))
@@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
vq->last_avail_idx = vq_state.split.avail_index;
}
break;
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_unsetup_vq_irq(v, idx);
+ vq->call_ctx.producer.token = NULL;
+ }
+ break;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_setup_vq_irq(v, idx);
} else {
cb.callback = NULL;
cb.private = NULL;
cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
- vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
+ vqs[i]->call_ctx.ctx = NULL;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 2e093535884b..e8b4e8c119b5 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -512,8 +512,10 @@ static int search_fb_in_map(int idx)
int i, retval = 0;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map[i] == idx)
+ if (con2fb_map[i] == idx) {
retval = 1;
+ break;
+ }
}
return retval;
}
@@ -523,8 +525,10 @@ static int search_for_mapped_con(void)
int i, retval = 0;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map[i] != -1)
+ if (con2fb_map[i] != -1) {
retval = 1;
+ break;
+ }
}
return retval;
}
@@ -861,6 +865,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
return err;
fbcon_add_cursor_work(info);
+ } else if (vc) {
+ set_blitting_type(vc, info);
}
con2fb_map[unit] = newidx;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index 4040e247e026..d5a43b3bf45e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -129,12 +129,9 @@ omapdss_of_find_source_for_first_ep(struct device_node *node)
return ERR_PTR(-EINVAL);
src_port = of_graph_get_remote_port(ep);
- if (!src_port) {
- of_node_put(ep);
- return ERR_PTR(-EINVAL);
- }
-
of_node_put(ep);
+ if (!src_port)
+ return ERR_PTR(-EINVAL);
src = omap_dss_find_output_by_port_node(src_port);
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 009bf1d92644..75033e6be15a 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -183,7 +183,7 @@ static void sisfb_search_mode(char *name, bool quiet)
{
unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0;
int i = 0;
- char strbuf[16], strbuf1[20];
+ char strbuf[24], strbuf1[20];
char *nameptr = name;
/* We don't know the hardware specs yet and there is no ivideo */
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 2acba56ad42e..d7db6c824e13 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -285,7 +285,6 @@ static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations tdx_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tdx_guest_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice tdx_misc_dev = {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 54469277ca30..b36d2803674e 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -355,6 +355,8 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
unsigned int idx = 0;
+ unsigned int zid;
+ unsigned long stall = 0;
all_vm_events(events);
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
@@ -363,6 +365,22 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
+
+ /* sum all the stall events */
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
+ pages_to_bytes(events[PGSCAN_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
+ pages_to_bytes(events[PGSCAN_DIRECT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_DIRECT]));
#ifdef CONFIG_HUGETLB_PAGE
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index b2d76c1784bd..a2ecbb863c57 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -541,8 +541,8 @@ static void ds2482_remove(struct i2c_client *client)
* Driver data (common to all clients)
*/
static const struct i2c_device_id ds2482_id[] = {
- { "ds2482", 0 },
- { "ds2484", 0 },
+ { "ds2482" },
+ { "ds2484" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds2482_id);
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 53b04abd55b0..08ca18e91124 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -218,7 +218,6 @@ static int acq_close(struct inode *inode, struct file *file)
static const struct file_operations acq_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = acq_write,
.unlocked_ioctl = acq_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 7a0acbc3e4dd..e41cd3ba4e0e 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -217,7 +217,6 @@ static int advwdt_close(struct inode *inode, struct file *file)
static const struct file_operations advwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = advwdt_write,
.unlocked_ioctl = advwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index bfb9a91ca1df..1ecbd1ac5c3a 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -359,7 +359,6 @@ static int __init ali_find_watchdog(void)
static const struct file_operations ali_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4ff7f5afb7aa..9c7cf939ba3d 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -289,7 +289,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 558015f08c7a..17382512a609 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -210,7 +210,6 @@ static ssize_t at91_wdt_write(struct file *file, const char *data,
static const struct file_operations at91wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = at91_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = at91_wdt_open,
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index e5cc30622b12..d16b2c583fa4 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -231,7 +231,6 @@ static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations ath79_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ath79_wdt_write,
.unlocked_ioctl = ath79_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 9f279c0e13a6..f94b84048612 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -185,7 +185,6 @@ static ssize_t cpu5wdt_write(struct file *file, const char __user *buf,
static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = cpu5wdt_open,
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 901b94d456db..8ee81f018dda 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -507,7 +507,6 @@ static const struct file_operations cpwd_fops = {
.write = cpwd_write,
.read = cpwd_read,
.release = cpwd_release,
- .llseek = no_llseek,
};
static int cpwd_probe(struct platform_device *op)
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index e26609ad4c17..10c647b1226a 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -368,7 +368,6 @@ static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations eurwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = eurwdt_write,
.unlocked_ioctl = eurwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 6a1db1c783fa..d854fcfbfa5b 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -245,7 +245,6 @@ static int gef_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations gef_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = gef_wdt_write,
.unlocked_ioctl = gef_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 5186c37ad451..4ed6d139320b 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -196,7 +196,6 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations geodewdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = geodewdt_write,
.unlocked_ioctl = geodewdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 39ea97009abd..b041ad90a62c 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -256,7 +256,6 @@ static int ibwdt_close(struct inode *inode, struct file *file)
static const struct file_operations ibwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 6955c693b5fd..cf845f865945 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -340,7 +340,6 @@ static int asr_release(struct inode *inode, struct file *file)
static const struct file_operations asr_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = asr_write,
.unlocked_ioctl = asr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 9857bb74a723..d3092d261345 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -149,7 +149,6 @@ static int indydog_notify_sys(struct notifier_block *this,
static const struct file_operations indydog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = indydog_write,
.unlocked_ioctl = indydog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 3ce6a58bd81e..b776e6766c9d 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -341,7 +341,6 @@ static int it8712f_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations it8712f_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = it8712f_wdt_write,
.unlocked_ioctl = it8712f_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 062ea3e6497e..26bd073bd375 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -179,7 +179,6 @@ static int m54xx_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations m54xx_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = m54xx_wdt_write,
.unlocked_ioctl = m54xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 73f2221f6222..73d641486909 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -359,7 +359,6 @@ static int zf_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations zf_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = zf_write,
.unlocked_ioctl = zf_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index d387bad377c4..70d9cf84c342 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -224,7 +224,6 @@ static long mixcomwd_ioctl(struct file *file,
static const struct file_operations mixcomwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 06756135033d..11f05024a181 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -177,7 +177,6 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
static const struct file_operations mtx1_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mtx1_wdt_open,
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index ac4a9c16341d..f8eb1f65a59e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -264,7 +264,6 @@ static long nv_tco_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nv_tco_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index c7f745caf203..fbf835d112b8 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -470,7 +470,6 @@ static int pc87413_notify_sys(struct notifier_block *this,
static const struct file_operations pc87413_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pc87413_write,
.unlocked_ioctl = pc87413_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index a793b03a785d..1a4282235aac 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -749,7 +749,6 @@ static int pcwd_temp_close(struct inode *inode, struct file *file)
static const struct file_operations pcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pcwd_write,
.unlocked_ioctl = pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -765,7 +764,6 @@ static struct miscdevice pcwd_miscdev = {
static const struct file_operations pcwd_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pcwd_temp_read,
.open = pcwd_temp_open,
.release = pcwd_temp_close,
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 54d86fcb1837..a489b426f2ba 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -643,7 +643,6 @@ static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations pcipcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pcipcwd_write,
.unlocked_ioctl = pcipcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -659,7 +658,6 @@ static struct miscdevice pcipcwd_miscdev = {
static const struct file_operations pcipcwd_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pcipcwd_temp_read,
.open = pcipcwd_temp_open,
.release = pcipcwd_temp_release,
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 8202f0a6b093..132699e2f247 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -549,7 +549,6 @@ static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations usb_pcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -565,7 +564,6 @@ static struct miscdevice usb_pcwd_miscdev = {
static const struct file_operations usb_pcwd_temperature_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = usb_pcwd_temperature_read,
.open = usb_pcwd_temperature_open,
.release = usb_pcwd_temperature_release,
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 782b8c23d99c..393aa4b1bc13 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -209,7 +209,6 @@ static long pikawdt_ioctl(struct file *file,
static const struct file_operations pikawdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = pikawdt_open,
.release = pikawdt_release,
.write = pikawdt_write,
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 417f9b75679c..efadbb9d7ce7 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -242,7 +242,6 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations rc32434_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = rc32434_wdt_write,
.unlocked_ioctl = rc32434_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 6176f4343fc5..80490316a27f 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -197,7 +197,6 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
static const struct file_operations rdc321x_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = rdc321x_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = rdc321x_wdt_open,
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index b293792a292a..f47d90d01c19 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -160,7 +160,6 @@ static ssize_t riowd_write(struct file *file, const char __user *buf,
static const struct file_operations riowd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = riowd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = riowd_open,
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 34a917221e31..6e91ee3fbfb5 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -164,7 +164,6 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations sa1100dog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sa1100dog_write,
.unlocked_ioctl = sa1100dog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 504be461f992..eaa68b54cf56 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -234,7 +234,6 @@ static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations sbwdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sbwdog_write,
.unlocked_ioctl = sbwdog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 7b974802dfc7..e9bf12918ed8 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -275,7 +275,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index d640b26e18a6..21a1f0b32070 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -205,7 +205,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 4f8b9912fc51..a9fd1615b4c3 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -301,7 +301,6 @@ static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations sbc8360_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sbc8360_write,
.open = sbc8360_open,
.release = sbc8360_close,
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 5e3a9ddb952e..1d291dc0a4a6 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -153,7 +153,6 @@ static int epx_c3_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations epx_c3_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = epx_c3_write,
.unlocked_ioctl = epx_c3_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index b8eb8d5ca1af..ff9e44825423 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -181,7 +181,6 @@ static int fitpc2_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations fitpc2_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fitpc2_wdt_write,
.unlocked_ioctl = fitpc2_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index f22ebe89fe13..76a58715f665 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -304,7 +304,6 @@ static struct notifier_block sc1200wdt_notifier = {
static const struct file_operations sc1200wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sc1200wdt_write,
.unlocked_ioctl = sc1200wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index ca65468f4b9c..e849e1af267b 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -331,7 +331,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 409d49880170..76053158d259 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -334,7 +334,6 @@ static int sch311x_wdt_close(struct inode *inode, struct file *file)
static const struct file_operations sch311x_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sch311x_wdt_write,
.unlocked_ioctl = sch311x_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 7b5e18323f3f..4dd8549e3674 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -198,7 +198,6 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations scx200_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = scx200_wdt_write,
.unlocked_ioctl = scx200_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 7463df479d11..97ca500ec8a8 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -502,7 +502,6 @@ static int wb_smsc_wdt_notify_sys(struct notifier_block *this,
static const struct file_operations wb_smsc_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wb_smsc_wdt_write,
.unlocked_ioctl = wb_smsc_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 3d57670befe1..ac709dc31a65 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -12,6 +12,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/io.h>
@@ -160,10 +161,17 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ts72xx_wdt_of_ids[] = {
+ { .compatible = "technologic,ts7200-wdt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ts72xx_wdt_of_ids);
+
static struct platform_driver ts72xx_wdt_driver = {
.probe = ts72xx_wdt_probe,
.driver = {
.name = "ts72xx-wdt",
+ .of_match_table = ts72xx_wdt_of_ids,
},
};
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index f2650863fd02..1937084c182c 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -299,7 +299,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 31bf21ceaf48..3776030fa7c6 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -443,7 +443,6 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index a8a1ed215e1e..291109349e73 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -227,7 +227,6 @@ static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wafwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wafwdt_write,
.unlocked_ioctl = wafwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index c00627825de8..d4fe0bc82211 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -469,7 +469,6 @@ static int wdrtas_reboot(struct notifier_block *this,
static const struct file_operations wdrtas_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdrtas_write,
.unlocked_ioctl = wdrtas_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -485,7 +484,6 @@ static struct miscdevice wdrtas_miscdev = {
static const struct file_operations wdrtas_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdrtas_temp_read,
.open = wdrtas_temp_open,
.release = wdrtas_temp_close,
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 183876156243..3980d60bacd8 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -520,7 +520,6 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -536,7 +535,6 @@ static struct miscdevice wdt_miscdev = {
static const struct file_operations wdt_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdt_temp_read,
.open = wdt_temp_open,
.release = wdt_temp_release,
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 5b7be7a62d54..78681d9f7d53 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -178,7 +178,6 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index c9b8e863f70f..4f449ac4dda4 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -419,7 +419,6 @@ static int wdt977_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt977_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt977_write,
.unlocked_ioctl = wdt977_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index d5e56b601351..dc5f29560e9b 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -563,7 +563,6 @@ static int wdtpci_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdtpci_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdtpci_write,
.unlocked_ioctl = wdtpci_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -579,7 +578,6 @@ static struct miscdevice wdtpci_miscdev = {
static const struct file_operations wdtpci_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdtpci_temp_read,
.open = wdtpci_temp_open,
.release = wdtpci_temp_release,
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f7d6f47971fd..62035fe16bb8 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -261,6 +261,7 @@ config XEN_SCSI_BACKEND
config XEN_PRIVCMD
tristate "Xen hypercall passthrough driver"
depends on XEN
+ imply XEN_PCIDEV_BACKEND
default m
help
The hypercall passthrough driver allows privileged user programs to
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
index 6893c79fd2a1..9e2096524fbc 100644
--- a/drivers/xen/acpi.c
+++ b/drivers/xen/acpi.c
@@ -30,6 +30,7 @@
* IN THE SOFTWARE.
*/
+#include <linux/pci.h>
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -75,3 +76,52 @@ int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
return xen_acpi_notify_hypervisor_state(sleep_state, val_a,
val_b, true);
}
+
+struct acpi_prt_entry {
+ struct acpi_pci_id id;
+ u8 pin;
+ acpi_handle link;
+ u32 index;
+};
+
+int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out)
+{
+ int gsi;
+ u8 pin;
+ struct acpi_prt_entry *entry;
+ int trigger = ACPI_LEVEL_SENSITIVE;
+ int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
+ ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
+
+ if (!dev || !gsi_out || !trigger_out || !polarity_out)
+ return -EINVAL;
+
+ pin = dev->pin;
+ if (!pin)
+ return -EINVAL;
+
+ entry = acpi_pci_irq_lookup(dev, pin);
+ if (entry) {
+ if (entry->link)
+ gsi = acpi_pci_link_allocate_irq(entry->link,
+ entry->index,
+ &trigger, &polarity,
+ NULL);
+ else
+ gsi = entry->index;
+ } else
+ gsi = -1;
+
+ if (gsi < 0)
+ return -EINVAL;
+
+ *gsi_out = gsi;
+ *trigger_out = trigger;
+ *polarity_out = polarity;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_acpi_get_gsi_info);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9b7fcc7dbb38..7e4a13e632dc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -694,7 +694,6 @@ static const struct file_operations evtchn_fops = {
.fasync = evtchn_fasync,
.open = evtchn_open,
.release = evtchn_release,
- .llseek = no_llseek,
};
static struct miscdevice evtchn_miscdev = {
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index e9ac3b8c4167..4f65b641c054 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -182,7 +182,6 @@ static const struct file_operations xen_mce_chrdev_ops = {
.read = xen_mce_chrdev_read,
.poll = xen_mce_chrdev_poll,
.unlocked_ioctl = xen_mce_chrdev_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice xen_mce_chrdev_device = {
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index a2facd8f7e51..416f231809cb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -173,6 +173,19 @@ static int xen_remove_device(struct device *dev)
return r;
}
+int xen_reset_device(const struct pci_dev *dev)
+{
+ struct pci_device_reset device = {
+ .dev.seg = pci_domain_nr(dev->bus),
+ .dev.bus = dev->bus->number,
+ .dev.devfn = dev->devfn,
+ .flags = PCI_DEVICE_RESET_FLR,
+ };
+
+ return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
+}
+EXPORT_SYMBOL_GPL(xen_reset_device);
+
static int xen_pci_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 54e4f285c0f4..3273cb8c2a66 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -46,6 +46,9 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
+#ifdef CONFIG_XEN_ACPI
+#include <xen/acpi.h>
+#endif
#include "privcmd.h"
@@ -844,6 +847,31 @@ out:
return rc;
}
+static long privcmd_ioctl_pcidev_get_gsi(struct file *file, void __user *udata)
+{
+#if defined(CONFIG_XEN_ACPI)
+ int rc = -EINVAL;
+ struct privcmd_pcidev_get_gsi kdata;
+
+ if (copy_from_user(&kdata, udata, sizeof(kdata)))
+ return -EFAULT;
+
+ if (IS_REACHABLE(CONFIG_XEN_PCIDEV_BACKEND))
+ rc = pcistub_get_gsi_from_sbdf(kdata.sbdf);
+
+ if (rc < 0)
+ return rc;
+
+ kdata.gsi = rc;
+ if (copy_to_user(udata, &kdata, sizeof(kdata)))
+ return -EFAULT;
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
@@ -1543,6 +1571,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_ioeventfd(file, udata);
break;
+ case IOCTL_PRIVCMD_PCIDEV_GET_GSI:
+ ret = privcmd_ioctl_pcidev_get_gsi(file, udata);
+ break;
+
default:
break;
}
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 1948a9700c8f..cf568e899ee2 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -122,7 +122,7 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
if (err)
goto out;
- new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (__force pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 4faebbb84999..2f3da5ac62cd 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -21,6 +21,9 @@
#include <xen/events.h>
#include <xen/pci.h>
#include <xen/xen.h>
+#ifdef CONFIG_XEN_ACPI
+#include <xen/acpi.h>
+#endif
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
@@ -53,6 +56,9 @@ struct pcistub_device {
struct pci_dev *dev;
struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
+#ifdef CONFIG_XEN_ACPI
+ int gsi;
+#endif
};
/* Access to pcistub_devices & seized_devices lists and the initialize_devices
@@ -85,10 +91,23 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
kref_init(&psdev->kref);
spin_lock_init(&psdev->lock);
+#ifdef CONFIG_XEN_ACPI
+ psdev->gsi = -1;
+#endif
return psdev;
}
+static int pcistub_reset_device_state(struct pci_dev *dev)
+{
+ __pci_reset_function_locked(dev);
+
+ if (!xen_pv_domain())
+ return xen_reset_device(dev);
+ else
+ return 0;
+}
+
/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
@@ -107,7 +126,7 @@ static void pcistub_device_release(struct kref *kref)
/* Call the reset function which does not take lock as this
* is called from "unbind" which takes a device_lock mutex.
*/
- __pci_reset_function_locked(dev);
+ pcistub_reset_device_state(dev);
if (dev_data &&
pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
@@ -207,6 +226,25 @@ static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
return pci_dev;
}
+#ifdef CONFIG_XEN_ACPI
+int pcistub_get_gsi_from_sbdf(unsigned int sbdf)
+{
+ struct pcistub_device *psdev;
+ int domain = (sbdf >> 16) & 0xffff;
+ int bus = PCI_BUS_NUM(sbdf);
+ int slot = PCI_SLOT(sbdf);
+ int func = PCI_FUNC(sbdf);
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+
+ if (!psdev)
+ return -ENODEV;
+
+ return psdev->gsi;
+}
+EXPORT_SYMBOL_GPL(pcistub_get_gsi_from_sbdf);
+#endif
+
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func)
@@ -284,7 +322,7 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
* (so it's ready for the next domain)
*/
device_lock_assert(&dev->dev);
- __pci_reset_function_locked(dev);
+ pcistub_reset_device_state(dev);
dev_data = pci_get_drvdata(dev);
ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
@@ -354,11 +392,20 @@ static int pcistub_match(struct pci_dev *dev)
return found;
}
-static int pcistub_init_device(struct pci_dev *dev)
+static int pcistub_init_device(struct pcistub_device *psdev)
{
struct xen_pcibk_dev_data *dev_data;
+ struct pci_dev *dev;
+#ifdef CONFIG_XEN_ACPI
+ int gsi, trigger, polarity;
+#endif
int err = 0;
+ if (!psdev)
+ return -EINVAL;
+
+ dev = psdev->dev;
+
dev_dbg(&dev->dev, "initializing...\n");
/* The PCI backend is not intended to be a module (or to work with
@@ -420,9 +467,26 @@ static int pcistub_init_device(struct pci_dev *dev)
dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
else {
dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
- __pci_reset_function_locked(dev);
+ err = pcistub_reset_device_state(dev);
+ if (err)
+ goto config_release;
pci_restore_state(dev);
}
+
+#ifdef CONFIG_XEN_ACPI
+ if (xen_initial_domain() && xen_pvh_domain()) {
+ err = xen_acpi_get_gsi_info(dev, &gsi, &trigger, &polarity);
+ if (err) {
+ dev_err(&dev->dev, "Failed to get gsi info!\n");
+ goto config_release;
+ }
+ err = xen_pvh_setup_gsi(gsi, trigger, polarity);
+ if (err)
+ goto config_release;
+ psdev->gsi = gsi;
+ }
+#endif
+
/* Now disable the device (this also ensures some private device
* data is setup before we export)
*/
@@ -462,7 +526,7 @@ static int __init pcistub_init_devices_late(void)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- err = pcistub_init_device(psdev->dev);
+ err = pcistub_init_device(psdev);
if (err) {
dev_err(&psdev->dev->dev,
"error %d initializing device\n", err);
@@ -532,7 +596,7 @@ static int pcistub_seize(struct pci_dev *dev,
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
/* don't want irqs disabled when calling pcistub_init_device */
- err = pcistub_init_device(psdev->dev);
+ err = pcistub_init_device(psdev);
spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -757,7 +821,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
- res = (pci_ers_result_t)aer_op->err;
+ res = (__force pci_ers_result_t)aer_op->err;
return res;
}
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 6f56640092a9..46f8916597e5 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -700,7 +700,6 @@ const struct file_operations xen_xenbus_fops = {
.open = xenbus_file_open,
.release = xenbus_file_release,
.poll = xenbus_file_poll,
- .llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);
diff --git a/fs/Kconfig b/fs/Kconfig
index 0e4efec1d92e..949895cff872 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -386,6 +386,29 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
+config NFS_COMMON_LOCALIO_SUPPORT
+ tristate
+ default n
+ default y if NFSD=y || NFS_FS=y
+ default m if NFSD=m && NFS_FS=m
+ select SUNRPC
+
+config NFS_LOCALIO
+ bool "NFS client and server support for LOCALIO auxiliary protocol"
+ depends on NFSD && NFS_FS
+ select NFS_COMMON_LOCALIO_SUPPORT
+ default n
+ help
+ Some NFS servers support an auxiliary NFS LOCALIO protocol
+ that is not an official part of the NFS protocol.
+
+ This option enables support for the LOCALIO protocol in the
+ kernel's NFS server and client. Enable this to permit local
+ NFS clients to bypass the network when issuing reads and
+ writes to the local NFS server.
+
+ If unsure, say N.
+
config NFS_V4_2_SSC_HELPER
bool
default y if NFS_V4_2
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index 9c65ffb8a523..a06296c8827d 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -134,13 +134,4 @@ struct afs_uvldbentry__xdr {
__be32 spares9;
};
-struct afs_address_list {
- refcount_t usage;
- unsigned int version;
- unsigned int nr_addrs;
- struct sockaddr_rxrpc addrs[];
-};
-
-extern void afs_put_address_list(struct afs_address_list *alist);
-
#endif /* AFS_VL_H */
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 492d857a3fa0..6762eff97517 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -420,6 +420,7 @@ const struct netfs_request_ops afs_req_ops = {
.begin_writeback = afs_begin_writeback,
.prepare_write = afs_prepare_write,
.issue_write = afs_issue_write,
+ .retry_request = afs_retry_request,
};
static void afs_add_open_mmap(struct afs_vnode *vnode)
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 3546b087e791..428721bbe4f6 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -201,7 +201,7 @@ void afs_wait_for_operation(struct afs_operation *op)
}
}
- if (op->call_responded)
+ if (op->call_responded && op->server)
set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
if (!afs_op_error(op)) {
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 580de4adaaf6..b516d05b0fef 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -506,10 +506,10 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, struct afs_endpoint_sta
finish_wait(&server->probe_wq, &wait);
dont_wait:
- if (estate->responsive_set & ~exclude)
- return 1;
if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags))
return 0;
+ if (estate->responsive_set & ~exclude)
+ return 1;
if (is_intr && signal_pending(current))
return -ERESTARTSYS;
if (timo == 0)
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ed09d4d4c211..d612983d6f38 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -632,8 +632,10 @@ iterate_address:
wait_for_more_probe_results:
error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
!(op->flags & AFS_OPERATION_UNINTR));
- if (!error)
+ if (error == 1)
goto iterate_address;
+ if (!error)
+ goto restart_from_beginning;
/* We've now had a failure to respond on all of a server's addresses -
* immediately probe them again and consider retrying the server.
@@ -644,10 +646,13 @@ wait_for_more_probe_results:
error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
!(op->flags & AFS_OPERATION_UNINTR));
switch (error) {
- case 0:
+ case 1:
op->flags &= ~AFS_OPERATION_RETRY_SERVER;
- trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 1);
goto retry_server;
+ case 0:
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+ goto restart_from_beginning;
case -ERESTARTSYS:
afs_op_set_error(op, error);
goto failed;
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index e11989a57ca0..47455a85c909 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -501,7 +501,7 @@ found:
prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
bch2_bkey_val_to_text(&buf, c, extent2);
- struct nonce nonce = extent_nonce(extent.k->version, p.crc);
+ struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
trans, dup_backpointer_to_bad_csum_extent,
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index c711d4c27a03..f4151ee51b03 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -594,6 +594,7 @@ struct bch_dev {
#define BCH_FS_FLAGS() \
x(new_fs) \
x(started) \
+ x(clean_recovery) \
x(btree_running) \
x(accounting_replay_done) \
x(may_go_rw) \
@@ -776,7 +777,7 @@ struct bch_fs {
unsigned nsec_per_time_unit;
u64 features;
u64 compat;
- unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
+ unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
u64 btrees_lost_data;
} sb;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 8c4addddd07e..84832c2d4df9 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -217,13 +217,13 @@ struct bkey {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u8 pad[1];
- struct bversion version;
+ struct bversion bversion;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
struct bpos p;
__u32 size; /* extent size, in sectors */
- struct bversion version;
+ struct bversion bversion;
__u8 pad[1];
#endif
@@ -328,8 +328,8 @@ enum bch_bkey_fields {
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
- bkey_format_field(VERSION_HI, version.hi), \
- bkey_format_field(VERSION_LO, version.lo), \
+ bkey_format_field(VERSION_HI, bversion.hi), \
+ bkey_format_field(VERSION_LO, bversion.lo), \
}, \
})
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index e34cb2bf329c..41df24a53d97 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -214,9 +214,9 @@ static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
-static __always_inline int bversion_zero(struct bversion v)
+static __always_inline bool bversion_zero(struct bversion v)
{
- return !bversion_cmp(v, ZERO_VERSION);
+ return bversion_cmp(v, ZERO_VERSION) == 0;
}
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -554,8 +554,8 @@ static inline void bch2_bkey_pack_test(void) {}
x(BKEY_FIELD_OFFSET, p.offset) \
x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
x(BKEY_FIELD_SIZE, size) \
- x(BKEY_FIELD_VERSION_HI, version.hi) \
- x(BKEY_FIELD_VERSION_LO, version.lo)
+ x(BKEY_FIELD_VERSION_HI, bversion.hi) \
+ x(BKEY_FIELD_VERSION_LO, bversion.lo)
struct bkey_format_state {
u64 field_min[BKEY_NR_FIELDS];
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 88d8958281e8..e7ac227ba7e8 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -289,7 +289,7 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
bch2_bpos_to_text(out, k->p);
- prt_printf(out, " len %u ver %llu", k->size, k->version.lo);
+ prt_printf(out, " len %u ver %llu", k->size, k->bversion.lo);
} else {
prt_printf(out, "(null)");
}
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 3df3dd2723a1..018fb72e32d3 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -70,7 +70,7 @@ bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
{
return l->type == r->type &&
- !bversion_cmp(l->version, r->version) &&
+ !bversion_cmp(l->bversion, r->bversion) &&
bpos_eq(l->p, bkey_start_pos(r));
}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index b5e0692f03c6..660d2fa02da2 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -513,6 +513,8 @@ int bch2_check_topology(struct bch_fs *c)
struct bpos pulled_from_scan = POS_MIN;
int ret = 0;
+ bch2_trans_srcu_unlock(trans);
+
for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false;
@@ -599,15 +601,15 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (initial) {
BUG_ON(bch2_journal_seq_verify &&
- k.k->version.lo > atomic64_read(&c->journal.seq));
+ k.k->bversion.lo > atomic64_read(&c->journal.seq));
if (fsck_err_on(btree_id != BTREE_ID_accounting &&
- k.k->version.lo > atomic64_read(&c->key_version),
+ k.k->bversion.lo > atomic64_read(&c->key_version),
trans, bkey_version_in_future,
"key version number higher than recorded %llu\n %s",
atomic64_read(&c->key_version),
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- atomic64_set(&c->key_version, k.k->version.lo);
+ atomic64_set(&c->key_version, k.k->bversion.lo);
}
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index cb48a9477514..1c1448b52207 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1195,6 +1195,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
set_btree_bset(b, b->set, &b->data->keys);
b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
+ memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
+ btree_buf_bytes(b) -
+ sizeof(struct btree_node) -
+ b->nr.live_u64s * sizeof(u64));
u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data;
@@ -1219,7 +1223,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
ret = bch2_bkey_val_validate(c, u.s_c, READ);
if (ret == -BCH_ERR_fsck_delete_bkey ||
(bch2_inject_invalid_keys &&
- !bversion_cmp(u.k->version, MAX_VERSION))) {
+ !bversion_cmp(u.k->bversion, MAX_VERSION))) {
btree_keys_account_key_drop(&b->nr, 0, k);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index b28c649c6838..1e694fedc5da 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -275,7 +275,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
w->ca = ca;
t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
- ret = IS_ERR_OR_NULL(t);
+ ret = PTR_ERR_OR_ZERO(t);
if (ret) {
percpu_ref_put(&ca->io_ref);
closure_put(&cl);
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 91884da4e30a..1a74a1a252ee 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -684,10 +684,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
!(flags & BCH_TRANS_COMMIT_no_journal_res)) {
if (bch2_journal_seq_verify)
trans_for_each_update(trans, i)
- i->k->k.version.lo = trans->journal_res.seq;
+ i->k->k.bversion.lo = trans->journal_res.seq;
else if (bch2_inject_invalid_keys)
trans_for_each_update(trans, i)
- i->k->k.version = MAX_VERSION;
+ i->k->k.bversion = MAX_VERSION;
}
h = trans->hooks;
@@ -700,27 +700,31 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
struct jset_entry *entry = trans->journal_entries;
- if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
- percpu_down_read(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
+
+ for (entry = trans->journal_entries;
+ entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
+ entry = vstruct_next(entry))
+ if (entry->type == BCH_JSET_ENTRY_write_buffer_keys &&
+ entry->start->k.type == KEY_TYPE_accounting) {
+ BUG_ON(!trans->journal_res.ref);
+
+ struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
- for (entry = trans->journal_entries;
- entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- entry = vstruct_next(entry))
- if (jset_entry_is_key(entry) && entry->start->k.type == KEY_TYPE_accounting) {
- struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
+ a->k.bversion = journal_pos_to_bversion(&trans->journal_res,
+ (u64 *) entry - (u64 *) trans->journal_entries);
+ BUG_ON(bversion_zero(a->k.bversion));
- a->k.version = journal_pos_to_bversion(&trans->journal_res,
- (u64 *) entry - (u64 *) trans->journal_entries);
- BUG_ON(bversion_zero(a->k.version));
- ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false, false);
+ if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
+ ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal);
if (ret)
goto revert_fs_usage;
}
- percpu_up_read(&c->mark_lock);
+ }
+ percpu_up_read(&c->mark_lock);
- /* XXX: we only want to run this if deltas are nonzero */
- bch2_trans_account_disk_usage_change(trans);
- }
+ /* XXX: we only want to run this if deltas are nonzero */
+ bch2_trans_account_disk_usage_change(trans);
trans_for_each_update(trans, i)
if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
@@ -735,6 +739,40 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
goto fatal_err;
}
+ trans_for_each_update(trans, i) {
+ enum bch_validate_flags invalid_flags = 0;
+
+ if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
+
+ ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, invalid_flags);
+ if (unlikely(ret)){
+ bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
+ trans->fn, (void *) i->ip_allocated);
+ goto fatal_err;
+ }
+ btree_insert_entry_checks(trans, i);
+ }
+
+ for (struct jset_entry *i = trans->journal_entries;
+ i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
+ i = vstruct_next(i)) {
+ enum bch_validate_flags invalid_flags = 0;
+
+ if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
+
+ ret = bch2_journal_entry_validate(c, NULL, i,
+ bcachefs_metadata_version_current,
+ CPU_BIG_ENDIAN, invalid_flags);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
+ trans->fn);
+ goto fatal_err;
+ }
+ }
+
if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
struct journal *j = &c->journal;
struct jset_entry *entry;
@@ -798,7 +836,7 @@ revert_fs_usage:
struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
bch2_accounting_neg(a);
- bch2_accounting_mem_mod_locked(trans, a.c, false, false);
+ bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal);
bch2_accounting_neg(a);
}
percpu_up_read(&c->mark_lock);
@@ -1019,40 +1057,6 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (ret)
goto out_reset;
- trans_for_each_update(trans, i) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
- i->bkey_type, invalid_flags);
- if (unlikely(ret)){
- bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
- trans->fn, (void *) i->ip_allocated);
- return ret;
- }
- btree_insert_entry_checks(trans, i);
- }
-
- for (struct jset_entry *i = trans->journal_entries;
- i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- i = vstruct_next(i)) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_journal_entry_validate(c, NULL, i,
- bcachefs_metadata_version_current,
- CPU_BIG_ENDIAN, invalid_flags);
- if (unlikely(ret)) {
- bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
- trans->fn);
- return ret;
- }
- }
-
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
ret = do_bch2_trans_commit_to_journal_replay(trans);
goto out_reset;
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 60393e98084d..6a454f2fa005 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -220,7 +220,8 @@ static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *t
if (type && k.k->type != type)
return ERR_PTR(-ENOENT);
- mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
+ /* extra padding for varint_decode_fast... */
+ mut = bch2_trans_kmalloc_nomemzero(trans, bytes + 8);
if (!IS_ERR(mut)) {
bkey_reassemble(mut, k);
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index ef1f74866e23..cbfd88f98472 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -471,7 +471,6 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
static const struct file_operations bcachefs_data_ops = {
.release = bch2_data_job_release,
.read = bch2_data_job_read,
- .llseek = no_llseek,
};
static long bch2_ioctl_data(struct bch_fs *c,
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 757b9884ef55..462b1a2fe1ad 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -639,7 +639,7 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k);
- m->op.version = k.k->version;
+ m->op.version = k.k->bversion;
m->op.target = data_opts.target;
m->op.write_point = wp;
m->op.nr_replicas = 0;
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index e972e2bca546..9f3133e3e7e5 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -134,6 +134,10 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
void *end = &acc_k + 1;
int ret = 0;
+ bkey_fsck_err_on(bversion_zero(k.k->bversion),
+ c, accounting_key_version_0,
+ "accounting key with version=0");
+
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_nr_inodes:
end = field_end(acc_k, nr_inodes);
@@ -291,7 +295,7 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun
struct accounting_mem_entry n = {
.pos = a.k->p,
- .version = a.k->version,
+ .bversion = a.k->bversion,
.nr_counters = bch2_accounting_counters(a.k),
.v[0] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64),
sizeof(u64), GFP_KERNEL),
@@ -319,11 +323,13 @@ err:
return -BCH_ERR_ENOMEM_disk_accounting;
}
-int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
+int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
+ enum bch_accounting_mode mode)
{
struct bch_replicas_padded r;
- if (accounting_to_replicas(&r.e, a.k->p) &&
+ if (mode != BCH_ACCOUNTING_read &&
+ accounting_to_replicas(&r.e, a.k->p) &&
!bch2_replicas_marked_locked(c, &r.e))
return -BCH_ERR_btree_insert_need_mark_replicas;
@@ -566,7 +572,9 @@ int bch2_gc_accounting_done(struct bch_fs *c)
struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
accounting_key_init(&k_i.k, &acc_k, src_v, nr);
- bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false, false);
+ bch2_accounting_mem_mod_locked(trans,
+ bkey_i_to_s_c_accounting(&k_i.k),
+ BCH_ACCOUNTING_normal);
preempt_disable();
struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
@@ -589,30 +597,14 @@ fsck_err:
static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
if (k.k->type != KEY_TYPE_accounting)
return 0;
percpu_down_read(&c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), false, true);
+ int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k),
+ BCH_ACCOUNTING_read);
percpu_up_read(&c->mark_lock);
-
- if (bch2_accounting_key_is_zero(bkey_s_c_to_accounting(k)) &&
- ret == -BCH_ERR_btree_insert_need_mark_replicas)
- ret = 0;
-
- struct disk_accounting_pos acc;
- bpos_to_disk_accounting_pos(&acc, k.k->p);
-
- if (fsck_err_on(ret == -BCH_ERR_btree_insert_need_mark_replicas,
- trans, accounting_replicas_not_marked,
- "accounting not marked in superblock replicas\n %s",
- (bch2_accounting_key_to_text(&buf, &acc),
- buf.buf)))
- ret = bch2_accounting_update_sb_one(c, k.k->p);
-fsck_err:
- printbuf_exit(&buf);
return ret;
}
@@ -624,6 +616,7 @@ int bch2_accounting_read(struct bch_fs *c)
{
struct bch_accounting_mem *acc = &c->accounting;
struct btree_trans *trans = bch2_trans_get(c);
+ struct printbuf buf = PRINTBUF;
int ret = for_each_btree_key(trans, iter,
BTREE_ID_accounting, POS_MIN,
@@ -647,7 +640,7 @@ int bch2_accounting_read(struct bch_fs *c)
accounting_pos_cmp, &k.k->p);
bool applied = idx < acc->k.nr &&
- bversion_cmp(acc->k.data[idx].version, k.k->version) >= 0;
+ bversion_cmp(acc->k.data[idx].bversion, k.k->bversion) >= 0;
if (applied)
continue;
@@ -655,7 +648,7 @@ int bch2_accounting_read(struct bch_fs *c)
if (i + 1 < &darray_top(*keys) &&
i[1].k->k.type == KEY_TYPE_accounting &&
!journal_key_cmp(i, i + 1)) {
- BUG_ON(bversion_cmp(i[0].k->k.version, i[1].k->k.version) >= 0);
+ WARN_ON(bversion_cmp(i[0].k->k.bversion, i[1].k->k.bversion) >= 0);
i[1].journal_seq = i[0].journal_seq;
@@ -674,6 +667,45 @@ int bch2_accounting_read(struct bch_fs *c)
keys->gap = keys->nr = dst - keys->data;
percpu_down_read(&c->mark_lock);
+ for (unsigned i = 0; i < acc->k.nr; i++) {
+ u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
+ bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
+
+ if (bch2_is_zero(v, sizeof(v[0]) * acc->k.data[i].nr_counters))
+ continue;
+
+ struct bch_replicas_padded r;
+ if (!accounting_to_replicas(&r.e, acc->k.data[i].pos))
+ continue;
+
+ /*
+ * If the replicas entry is invalid it'll get cleaned up by
+ * check_allocations:
+ */
+ if (bch2_replicas_entry_validate(&r.e, c, &buf))
+ continue;
+
+ struct disk_accounting_pos k;
+ bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
+
+ if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
+ trans, accounting_replicas_not_marked,
+ "accounting not marked in superblock replicas\n %s",
+ (printbuf_reset(&buf),
+ bch2_accounting_key_to_text(&buf, &k),
+ buf.buf))) {
+ /*
+ * We're not RW yet and still single threaded, dropping
+ * and retaking lock is ok:
+ */
+ percpu_up_read(&c->mark_lock);
+ ret = bch2_mark_replicas(c, &r.e);
+ if (ret)
+ goto fsck_err;
+ percpu_down_read(&c->mark_lock);
+ }
+ }
+
preempt_disable();
struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);
@@ -709,8 +741,10 @@ int bch2_accounting_read(struct bch_fs *c)
}
}
preempt_enable();
+fsck_err:
percpu_up_read(&c->mark_lock);
err:
+ printbuf_exit(&buf);
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index f29fd0dd9581..4ea6c8a092bc 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -36,8 +36,8 @@ static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
dst->v.d[i] += src.v->d[i];
- if (bversion_cmp(dst->k.version, src.k->version) < 0)
- dst->k.version = src.k->version;
+ if (bversion_cmp(dst->k.bversion, src.k->bversion) < 0)
+ dst->k.bversion = src.k->bversion;
}
static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
@@ -103,23 +103,35 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r)
return bpos_cmp(*l, *r);
}
-int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
+enum bch_accounting_mode {
+ BCH_ACCOUNTING_normal,
+ BCH_ACCOUNTING_gc,
+ BCH_ACCOUNTING_read,
+};
+
+int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode);
void bch2_accounting_mem_gc(struct bch_fs *);
/*
* Update in memory counters so they match the btree update we're doing; called
* from transaction commit path
*/
-static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read)
+static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans,
+ struct bkey_s_c_accounting a,
+ enum bch_accounting_mode mode)
{
struct bch_fs *c = trans->c;
+ struct bch_accounting_mem *acc = &c->accounting;
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, a.k->p);
+ bool gc = mode == BCH_ACCOUNTING_gc;
+
+ EBUG_ON(gc && !acc->gc_running);
if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
return 0;
- if (!gc && !read) {
+ if (mode == BCH_ACCOUNTING_normal) {
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
@@ -140,14 +152,11 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
}
}
- struct bch_accounting_mem *acc = &c->accounting;
unsigned idx;
- EBUG_ON(gc && !acc->gc_running);
-
while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
- int ret = bch2_accounting_mem_insert(c, a, gc);
+ int ret = bch2_accounting_mem_insert(c, a, mode);
if (ret)
return ret;
}
@@ -164,7 +173,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{
percpu_down_read(&trans->c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false);
+ int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal);
percpu_up_read(&trans->c->mark_lock);
return ret;
}
diff --git a/fs/bcachefs/disk_accounting_types.h b/fs/bcachefs/disk_accounting_types.h
index 1687a45177a7..b1982131b206 100644
--- a/fs/bcachefs/disk_accounting_types.h
+++ b/fs/bcachefs/disk_accounting_types.h
@@ -6,7 +6,7 @@
struct accounting_mem_entry {
struct bpos pos;
- struct bversion version;
+ struct bversion bversion;
unsigned nr_counters;
u64 __percpu *v[2];
};
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 95afa7bf2020..3a16b535b6c3 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -239,7 +239,19 @@ int __bch2_fsck_err(struct bch_fs *c,
if (!c)
c = trans->c;
- WARN_ON(!trans && bch2_current_has_btree_trans(c));
+ /*
+ * Ugly: if there's a transaction in the current task it has to be
+ * passed in to unlock if we prompt for user input.
+ *
+ * But, plumbing a transaction and transaction restarts into
+ * bkey_validate() is problematic.
+ *
+ * So:
+ * - make all bkey errors AUTOFIX, they're simple anyways (we just
+ * delete the key)
+ * - and we don't need to warn if we're not prompting
+ */
+ WARN_ON(!(flags & FSCK_AUTOFIX) && !trans && bch2_current_has_btree_trans(c));
if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 2f1b86978f36..21ee7211b03e 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -184,7 +184,7 @@ do { \
ret = -BCH_ERR_fsck_delete_bkey; \
goto fsck_err; \
} \
- int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX, \
+ int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX|FSCK_AUTOFIX,\
BCH_FSCK_ERR_##_err_type, \
_err_msg, ##__VA_ARGS__); \
if (_ret != -BCH_ERR_fsck_fix && \
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 9b3470a97546..0d8b782b63fb 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -21,6 +21,49 @@
#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */
+static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
+ struct bkey_s_c_dirent d)
+{
+ return inode->bi_dir == d.k->p.inode &&
+ inode->bi_dir_offset == d.k->p.offset;
+}
+
+static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *inode)
+{
+ if (d.v->d_type == DT_SUBVOL
+ ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
+ : le64_to_cpu(d.v->d_inum) == inode->bi_inum)
+ return 0;
+ return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
+}
+
+static void dirent_inode_mismatch_msg(struct printbuf *out,
+ struct bch_fs *c,
+ struct bkey_s_c_dirent dirent,
+ struct bch_inode_unpacked *inode)
+{
+ prt_str(out, "inode points to dirent that does not point back:");
+ prt_newline(out);
+ bch2_bkey_val_to_text(out, c, dirent.s_c);
+ prt_newline(out);
+ bch2_inode_unpacked_to_text(out, inode);
+}
+
+static int dirent_points_to_inode(struct bch_fs *c,
+ struct bkey_s_c_dirent dirent,
+ struct bch_inode_unpacked *inode)
+{
+ int ret = dirent_points_to_inode_nowarn(dirent, inode);
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+ dirent_inode_mismatch_msg(&buf, c, dirent, inode);
+ bch_warn(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+ return ret;
+}
+
/*
* XXX: this is handling transaction restarts without returning
* -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
@@ -346,14 +389,17 @@ static int reattach_inode(struct btree_trans *trans,
static int remove_backpointer(struct btree_trans *trans,
struct bch_inode_unpacked *inode)
{
- struct btree_iter iter;
- struct bkey_s_c_dirent d;
- int ret;
+ if (!inode->bi_dir)
+ return 0;
- d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
- POS(inode->bi_dir, inode->bi_dir_offset), 0,
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c_dirent d =
+ bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
+ SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot), 0,
dirent);
- ret = bkey_err(d) ?:
+ int ret = bkey_err(d) ?:
+ dirent_points_to_inode(c, d, inode) ?:
__remove_dirent(trans, d.k->p);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -371,7 +417,8 @@ static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume
return ret;
ret = remove_backpointer(trans, &inode);
- bch_err_msg(c, ret, "removing dirent");
+ if (!bch2_err_matches(ret, ENOENT))
+ bch_err_msg(c, ret, "removing dirent");
if (ret)
return ret;
@@ -626,12 +673,12 @@ static int ref_visible2(struct bch_fs *c,
struct inode_walker_entry {
struct bch_inode_unpacked inode;
u32 snapshot;
- bool seen_this_pos;
u64 count;
};
struct inode_walker {
bool first_this_inode;
+ bool have_inodes;
bool recalculate_sums;
struct bpos last_pos;
@@ -669,6 +716,12 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
+ /*
+ * We no longer have inodes for w->last_pos; clear this to avoid
+ * screwing up check_i_sectors/check_subdir_count if we take a
+ * transaction restart here:
+ */
+ w->have_inodes = false;
w->recalculate_sums = false;
w->inodes.nr = 0;
@@ -686,6 +739,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
return ret;
w->first_this_inode = true;
+ w->have_inodes = true;
return 0;
}
@@ -740,9 +794,6 @@ static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
if (ret)
return ERR_PTR(ret);
- } else if (bkey_cmp(w->last_pos, k.k->p)) {
- darray_for_each(w->inodes, i)
- i->seen_this_pos = false;
}
w->last_pos = k.k->p;
@@ -896,21 +947,6 @@ static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
}
-static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
- struct bkey_s_c_dirent d)
-{
- return inode->bi_dir == d.k->p.inode &&
- inode->bi_dir_offset == d.k->p.offset;
-}
-
-static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *inode)
-{
- return d.v->d_type == DT_SUBVOL
- ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
- : le64_to_cpu(d.v->d_inum) == inode->bi_inum;
-}
-
static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
{
struct btree_iter iter;
@@ -920,13 +956,14 @@ static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
return ret;
}
-static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c inode_k,
+static int check_inode_dirent_inode(struct btree_trans *trans,
struct bch_inode_unpacked *inode,
- u32 inode_snapshot, bool *write_inode)
+ bool *write_inode)
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ u32 inode_snapshot = inode->bi_snapshot;
struct btree_iter dirent_iter = {};
struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
int ret = bkey_err(d);
@@ -936,13 +973,13 @@ static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c i
if (fsck_err_on(ret,
trans, inode_points_to_missing_dirent,
"inode points to missing dirent\n%s",
- (bch2_bkey_val_to_text(&buf, c, inode_k), buf.buf)) ||
- fsck_err_on(!ret && !dirent_points_to_inode(d, inode),
+ (bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
+ fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
trans, inode_points_to_wrong_dirent,
- "inode points to dirent that does not point back:\n%s",
- (bch2_bkey_val_to_text(&buf, c, inode_k),
- prt_newline(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
+ "%s",
+ (printbuf_reset(&buf),
+ dirent_inode_mismatch_msg(&buf, c, d, inode),
+ buf.buf))) {
/*
* We just clear the backpointer fields for now. If we find a
* dirent that points to this inode in check_dirents(), we'll
@@ -963,7 +1000,7 @@ fsck_err:
return ret;
}
-static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
+static bool bch2_inode_is_open(struct bch_fs *c, struct bpos p)
{
subvol_inum inum = {
.subvol = snapshot_t(c, p.snapshot)->subvol,
@@ -972,7 +1009,7 @@ static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
/* snapshot tree corruption, can't safely delete */
if (!inum.subvol) {
- bch_err_ratelimited(c, "%s(): snapshot %u has no subvol", __func__, p.snapshot);
+ bch_warn_ratelimited(c, "%s(): snapshot %u has no subvol, unlinked but can't safely delete", __func__, p.snapshot);
return true;
}
@@ -1045,30 +1082,44 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_flags & BCH_INODE_unlinked) {
- ret = check_inode_deleted_list(trans, k.k->p);
- if (ret < 0)
- return ret;
+ if (!test_bit(BCH_FS_started, &c->flags)) {
+ /*
+ * If we're not in online fsck, don't delete unlinked
+ * inodes, just make sure they're on the deleted list.
+ *
+ * They might be referred to by a logged operation -
+ * i.e. we might have crashed in the middle of a
+ * truncate on an unlinked but open file - so we want to
+ * let the delete_dead_inodes kill it after resuming
+ * logged ops.
+ */
+ ret = check_inode_deleted_list(trans, k.k->p);
+ if (ret < 0)
+ return ret;
- fsck_err_on(!ret,
- trans, unlinked_inode_not_on_deleted_list,
- "inode %llu:%u unlinked, but not on deleted list",
- u.bi_inum, k.k->p.snapshot);
- ret = 0;
- }
+ fsck_err_on(!ret,
+ trans, unlinked_inode_not_on_deleted_list,
+ "inode %llu:%u unlinked, but not on deleted list",
+ u.bi_inum, k.k->p.snapshot);
- if (u.bi_flags & BCH_INODE_unlinked &&
- !bch2_inode_open(c, k.k->p) &&
- (!c->sb.clean ||
- fsck_err(trans, inode_unlinked_but_clean,
- "filesystem marked clean, but inode %llu unlinked",
- u.bi_inum))) {
- ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
- bch_err_msg(c, ret, "in fsck deleting inode");
- return ret;
+ ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1);
+ if (ret)
+ goto err;
+ } else {
+ if (fsck_err_on(!bch2_inode_is_open(c, k.k->p),
+ trans, inode_unlinked_and_not_open,
+ "inode %llu:%u unlinked and not open",
+ u.bi_inum, u.bi_snapshot)) {
+ ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
+ bch_err_msg(c, ret, "in fsck deleting inode");
+ return ret;
+ }
+ }
}
+ /* i_size_dirty is vestigial, since we now have logged ops for truncate */
if (u.bi_flags & BCH_INODE_i_size_dirty &&
- (!c->sb.clean ||
+ (!test_bit(BCH_FS_clean_recovery, &c->flags) ||
fsck_err(trans, inode_i_size_dirty_but_clean,
"filesystem marked clean, but inode %llu has i_size dirty",
u.bi_inum))) {
@@ -1097,8 +1148,9 @@ static int check_inode(struct btree_trans *trans,
do_update = true;
}
+ /* i_sectors_dirty is vestigial, i_sectors is always updated transactionally */
if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
- (!c->sb.clean ||
+ (!test_bit(BCH_FS_clean_recovery, &c->flags) ||
fsck_err(trans, inode_i_sectors_dirty_but_clean,
"filesystem marked clean, but inode %llu has i_sectors dirty",
u.bi_inum))) {
@@ -1126,7 +1178,7 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_dir || u.bi_dir_offset) {
- ret = check_inode_dirent_inode(trans, k, &u, k.k->p.snapshot, &do_update);
+ ret = check_inode_dirent_inode(trans, &u, &do_update);
if (ret)
goto err;
}
@@ -1555,10 +1607,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct inode_walker *inode,
struct snapshots_seen *s,
- struct extent_ends *extent_ends)
+ struct extent_ends *extent_ends,
+ struct disk_reservation *res)
{
struct bch_fs *c = trans->c;
- struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -1568,7 +1620,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto out;
}
- if (inode->last_pos.inode != k.k->p.inode) {
+ if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) {
ret = check_i_sectors(trans, inode);
if (ret)
goto err;
@@ -1578,12 +1630,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
goto err;
- i = walk_inode(trans, inode, k);
- ret = PTR_ERR_OR_ZERO(i);
+ struct inode_walker_entry *extent_i = walk_inode(trans, inode, k);
+ ret = PTR_ERR_OR_ZERO(extent_i);
if (ret)
goto err;
- ret = check_key_has_inode(trans, iter, inode, i, k);
+ ret = check_key_has_inode(trans, iter, inode, extent_i, k);
if (ret)
goto err;
@@ -1592,24 +1644,19 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
&inode->recalculate_sums);
if (ret)
goto err;
- }
- /*
- * Check inodes in reverse order, from oldest snapshots to newest,
- * starting from the inode that matches this extent's snapshot. If we
- * didn't have one, iterate over all inodes:
- */
- if (!i)
- i = &darray_last(inode->inodes);
-
- for (;
- inode->inodes.data && i >= inode->inodes.data;
- --i) {
- if (i->snapshot > k.k->p.snapshot ||
- !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
- continue;
+ /*
+ * Check inodes in reverse order, from oldest snapshots to
+ * newest, starting from the inode that matches this extent's
+ * snapshot. If we didn't have one, iterate over all inodes:
+ */
+ for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
+ inode->inodes.data && i >= inode->inodes.data;
+ --i) {
+ if (i->snapshot > k.k->p.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
+ continue;
- if (k.k->type != KEY_TYPE_whiteout) {
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
!bkey_extent_is_reservation(k),
@@ -1629,13 +1676,25 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
iter->k.type = KEY_TYPE_whiteout;
+ break;
}
-
- if (bkey_extent_is_allocation(k.k))
- i->count += k.k->size;
}
+ }
- i->seen_this_pos = true;
+ ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ if (bkey_extent_is_allocation(k.k)) {
+ for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
+ inode->inodes.data && i >= inode->inodes.data;
+ --i) {
+ if (i->snapshot > k.k->p.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
+ continue;
+
+ i->count += k.k->size;
+ }
}
if (k.k->type != KEY_TYPE_whiteout) {
@@ -1666,13 +1725,11 @@ int bch2_check_extents(struct bch_fs *c)
extent_ends_init(&extent_ends);
int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
+ for_each_btree_key(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- &res, NULL,
- BCH_TRANS_COMMIT_no_enospc, ({
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
bch2_disk_reservation_put(c, &res);
- check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
+ check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
check_extent_overbig(trans, &iter, k);
})) ?:
check_i_sectors_notnested(trans, &w));
@@ -1758,6 +1815,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ struct btree_iter bp_iter = { NULL };
int ret = 0;
if (inode_points_to_dirent(target, d))
@@ -1770,7 +1828,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
prt_printf(&buf, "\n "),
bch2_inode_unpacked_to_text(&buf, target),
buf.buf)))
- goto out_noiter;
+ goto err;
if (!target->bi_dir &&
!target->bi_dir_offset) {
@@ -1779,7 +1837,6 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
return __bch2_fsck_write_inode(trans, target, target_snapshot);
}
- struct btree_iter bp_iter = { NULL };
struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
SPOS(target->bi_dir, target->bi_dir_offset, target_snapshot));
ret = bkey_err(bp_dirent);
@@ -1840,7 +1897,6 @@ out:
err:
fsck_err:
bch2_trans_iter_exit(trans, &bp_iter);
-out_noiter:
printbuf_exit(&buf);
bch_err_fn(c, ret);
return ret;
@@ -2075,7 +2131,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (k.k->type == KEY_TYPE_whiteout)
goto out;
- if (dir->last_pos.inode != k.k->p.inode) {
+ if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) {
ret = check_subdir_count(trans, dir);
if (ret)
goto err;
@@ -2137,11 +2193,15 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
goto err;
}
-
- if (d.v->d_type == DT_DIR)
- for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
- i->count++;
}
+
+ ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ if (d.v->d_type == DT_DIR)
+ for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
+ i->count++;
out:
err:
fsck_err:
@@ -2164,12 +2224,9 @@ int bch2_check_dirents(struct bch_fs *c)
snapshots_seen_init(&s);
int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
+ for_each_btree_key(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
- k,
- NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
check_subdir_count_notnested(trans, &dir));
@@ -2314,22 +2371,6 @@ static bool darray_u32_has(darray_u32 *d, u32 v)
return false;
}
-/*
- * We've checked that inode backpointers point to valid dirents; here, it's
- * sufficient to check that the subvolume root has a dirent:
- */
-static int subvol_has_dirent(struct btree_trans *trans, struct bkey_s_c_subvolume s)
-{
- struct bch_inode_unpacked inode;
- int ret = bch2_inode_find_by_inum_trans(trans,
- (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
- &inode);
- if (ret)
- return ret;
-
- return inode.bi_dir != 0;
-}
-
static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
@@ -2348,14 +2389,24 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
- ret = subvol_has_dirent(trans, s);
- if (ret < 0)
+ struct bch_inode_unpacked subvol_root;
+ ret = bch2_inode_find_by_inum_trans(trans,
+ (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
+ &subvol_root);
+ if (ret)
break;
- if (fsck_err_on(!ret,
+ /*
+ * We've checked that inode backpointers point to valid dirents;
+ * here, it's sufficient to check that the subvolume root has a
+ * dirent:
+ */
+ if (fsck_err_on(!subvol_root.bi_dir,
trans, subvol_unreachable,
"unreachable subvolume %s",
(bch2_bkey_val_to_text(&buf, c, s.s_c),
+ prt_newline(&buf),
+ bch2_inode_unpacked_to_text(&buf, &subvol_root),
buf.buf))) {
ret = reattach_subvol(trans, s);
break;
@@ -2450,10 +2501,8 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino
if (ret && !bch2_err_matches(ret, ENOENT))
break;
- if (!ret && !dirent_points_to_inode(d, &inode)) {
+ if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
bch2_trans_iter_exit(trans, &dirent_iter);
- ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
- }
if (bch2_err_matches(ret, ENOENT)) {
ret = 0;
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 6ac0ff7e074b..753c208896c3 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -320,9 +320,11 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
int bch2_inode_unpack(struct bkey_s_c k,
struct bch_inode_unpacked *unpacked)
{
- if (likely(k.k->type == KEY_TYPE_inode_v3))
- return bch2_inode_unpack_v3(k, unpacked);
- return bch2_inode_unpack_slowpath(k, unpacked);
+ unpacked->bi_snapshot = k.k->p.snapshot;
+
+ return likely(k.k->type == KEY_TYPE_inode_v3)
+ ? bch2_inode_unpack_v3(k, unpacked)
+ : bch2_inode_unpack_slowpath(k, unpacked);
}
int bch2_inode_peek_nowarn(struct btree_trans *trans,
@@ -557,7 +559,7 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
{
- prt_printf(out, "inum: %llu ", inode->bi_inum);
+ prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot);
__bch2_inode_unpacked_to_text(out, inode);
}
@@ -1111,7 +1113,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
pos.offset, pos.snapshot))
goto delete;
- if (c->sb.clean &&
+ if (test_bit(BCH_FS_clean_recovery, &c->flags) &&
!fsck_err(trans, deleted_inode_but_clean,
"filesystem marked as clean but have deleted inode %llu:%u",
pos.offset, pos.snapshot)) {
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index f1fcb4c58039..695abd707cb6 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -69,6 +69,7 @@ typedef u64 u96;
struct bch_inode_unpacked {
u64 bi_inum;
+ u32 bi_snapshot;
u64 bi_journal_seq;
__le64 bi_hash_seed;
u64 bi_size;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index b2f50e74bb76..e4fc17c548fd 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -517,7 +517,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
if ((ret = bkey_err(k)))
goto out;
- if (bversion_cmp(k.k->version, rbio->version) ||
+ if (bversion_cmp(k.k->bversion, rbio->version) ||
!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
goto out;
@@ -1031,7 +1031,7 @@ get_bio:
rbio->read_pos = read_pos;
rbio->data_btree = data_btree;
rbio->data_pos = data_pos;
- rbio->version = k.k->version;
+ rbio->version = k.k->bversion;
rbio->promote = promote;
INIT_WORK(&rbio->work, NULL);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d3b5be7fd9bf..b5fe9e0dc155 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -697,7 +697,7 @@ static void init_append_extent(struct bch_write_op *op,
e = bkey_extent_init(op->insert_keys.top);
e->k.p = op->pos;
e->k.size = crc.uncompressed_size;
- e->k.version = version;
+ e->k.bversion = version;
if (crc.csum_type ||
crc.compression_type ||
@@ -1544,7 +1544,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
id = bkey_inline_data_init(op->insert_keys.top);
id->k.p = op->pos;
- id->k.version = op->version;
+ id->k.bversion = op->version;
id->k.size = sectors;
iter = bio->bi_iter;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 30460bce04be..954f6a96e0f4 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -605,7 +605,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
goto out;
}
- if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
+ if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err),
c, version, jset, entry,
journal_entry_data_usage_bad_size,
"invalid journal entry usage: %s", err.buf)) {
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
index f49fdca1d07d..6f4a4e1083c9 100644
--- a/fs/bcachefs/logged_ops.c
+++ b/fs/bcachefs/logged_ops.c
@@ -37,6 +37,14 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk;
u32 restart_count = trans->restart_count;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ fsck_err_on(test_bit(BCH_FS_clean_recovery, &c->flags),
+ trans, logged_op_but_clean,
+ "filesystem marked as clean but have logged op\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k),
+ buf.buf));
if (!fn)
return 0;
@@ -47,8 +55,9 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c);
-
- return trans_was_restarted(trans, restart_count);
+fsck_err:
+ printbuf_exit(&buf);
+ return ret ?: trans_was_restarted(trans, restart_count);
}
int bch2_resume_logged_ops(struct bch_fs *c)
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index be1e7ca4362f..6db72d3bad7d 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -151,7 +151,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u);
/* Has this delta already been applied to the btree? */
- if (bversion_cmp(old.k->version, k->k->k.version) >= 0) {
+ if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) {
ret = 0;
goto out;
}
@@ -717,6 +717,8 @@ int bch2_fs_recovery(struct bch_fs *c)
if (c->opts.fsck)
set_bit(BCH_FS_fsck_running, &c->flags);
+ if (c->sb.clean)
+ set_bit(BCH_FS_clean_recovery, &c->flags);
ret = bch2_blacklist_table_initialize(c);
if (ret) {
@@ -862,6 +864,9 @@ use_clean:
clear_bit(BCH_FS_fsck_running, &c->flags);
+ /* in case we don't run journal replay, i.e. norecovery mode */
+ set_bit(BCH_FS_accounting_replay_done, &c->flags);
+
/* fsync if we fixed errors */
if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h
index 8c7dee5983d2..50406ce0e4ef 100644
--- a/fs/bcachefs/recovery_passes_types.h
+++ b/fs/bcachefs/recovery_passes_types.h
@@ -50,7 +50,7 @@
x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, 31, PASS_FSCK) \
x(resume_logged_ops, 23, PASS_ALWAYS) \
- x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
+ x(delete_dead_inodes, 32, PASS_ALWAYS) \
x(fix_reflink_p, 33, 0) \
x(set_fs_needs_rebalance, 34, 0) \
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index e59c0abb4772..f457925fa362 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -367,7 +367,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
r_v->k.type = bkey_type_to_indirect(&orig->k);
r_v->k.p = reflink_iter.pos;
bch2_key_resize(&r_v->k, orig->k.size);
- r_v->k.version = orig->k.version;
+ r_v->k.bversion = orig->k.bversion;
set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 998c0bd06802..bcb3276747e0 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -66,9 +66,9 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
prt_printf(out, "]");
}
-int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
- struct bch_sb *sb,
- struct printbuf *err)
+static int bch2_replicas_entry_validate_locked(struct bch_replicas_entry_v1 *r,
+ struct bch_sb *sb,
+ struct printbuf *err)
{
if (!r->nr_devs) {
prt_printf(err, "no devices in entry ");
@@ -94,6 +94,16 @@ bad:
return -BCH_ERR_invalid_replicas_entry;
}
+int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
+ struct bch_fs *c,
+ struct printbuf *err)
+{
+ mutex_lock(&c->sb_lock);
+ int ret = bch2_replicas_entry_validate_locked(r, c->disk_sb.sb, err);
+ mutex_unlock(&c->sb_lock);
+ return ret;
+}
+
void bch2_cpu_replicas_to_text(struct printbuf *out,
struct bch_replicas_cpu *r)
{
@@ -676,7 +686,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(cpu_r, i);
- int ret = bch2_replicas_entry_validate(e, sb, err);
+ int ret = bch2_replicas_entry_validate_locked(e, sb, err);
if (ret)
return ret;
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
index 622482559c3d..5aba2c1ce133 100644
--- a/fs/bcachefs/replicas.h
+++ b/fs/bcachefs/replicas.h
@@ -10,7 +10,7 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *);
void bch2_replicas_entry_to_text(struct printbuf *,
struct bch_replicas_entry_v1 *);
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *,
- struct bch_sb *, struct printbuf *);
+ struct bch_fs *, struct printbuf *);
void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
static inline struct bch_replicas_entry_v1 *
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index 025848a9c4c0..005275281804 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -167,6 +167,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
ret = bch2_sb_clean_validate_late(c, clean, READ);
if (ret) {
+ kfree(clean);
mutex_unlock(&c->sb_lock);
return ERR_PTR(ret);
}
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index c7e4cdd3f6a5..5102059a0f1d 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -312,8 +312,7 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
if (!first)
prt_char(out, ',');
first = false;
- unsigned e = le16_to_cpu(i->errors[j]);
- prt_str(out, e < BCH_SB_ERR_MAX ? bch2_sb_error_strs[e] : "(unknown)");
+ bch2_sb_error_id_to_text(out, le16_to_cpu(i->errors[j]));
}
prt_newline(out);
}
@@ -353,7 +352,9 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
for (unsigned i = 0; i < src->nr_errors; i++)
dst->errors[i] = cpu_to_le16(src->errors[i]);
- downgrade_table_extra(c, &table);
+ ret = downgrade_table_extra(c, &table);
+ if (ret)
+ goto out;
if (!dst->recovery_passes[0] &&
!dst->recovery_passes[1] &&
@@ -399,7 +400,7 @@ void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_mi
for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
unsigned e = le16_to_cpu(i->errors[j]);
- if (e < BCH_SB_ERR_MAX)
+ if (e < BCH_FSCK_ERR_MAX)
__set_bit(e, c->sb.errors_silent);
if (e < sizeof(ext->errors_silent) * 8)
__set_bit_le64(e, ext->errors_silent);
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index c1270d790e43..013a96883b4e 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -7,12 +7,12 @@
const char * const bch2_sb_error_strs[] = {
#define x(t, n, ...) [n] = #t,
BCH_SB_ERRS()
- NULL
+#undef x
};
-static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
+void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
{
- if (id < BCH_SB_ERR_MAX)
+ if (id < BCH_FSCK_ERR_MAX)
prt_str(out, bch2_sb_error_strs[id]);
else
prt_printf(out, "(unknown error %u)", id);
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
index 8889001e7db4..b2357b8e6107 100644
--- a/fs/bcachefs/sb-errors.h
+++ b/fs/bcachefs/sb-errors.h
@@ -6,6 +6,8 @@
extern const char * const bch2_sb_error_strs[];
+void bch2_sb_error_id_to_text(struct printbuf *, enum bch_sb_error_id);
+
extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index f0c14702f9e6..ed5dca5e1161 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -210,22 +210,23 @@ enum bch_fsck_flags {
x(inode_snapshot_mismatch, 196, 0) \
x(inode_unlinked_but_clean, 197, 0) \
x(inode_unlinked_but_nlink_nonzero, 198, 0) \
+ x(inode_unlinked_and_not_open, 281, 0) \
x(inode_checksum_type_invalid, 199, 0) \
x(inode_compression_type_invalid, 200, 0) \
x(inode_subvol_root_but_not_dir, 201, 0) \
- x(inode_i_size_dirty_but_clean, 202, 0) \
- x(inode_i_sectors_dirty_but_clean, 203, 0) \
- x(inode_i_sectors_wrong, 204, 0) \
- x(inode_dir_wrong_nlink, 205, 0) \
- x(inode_dir_multiple_links, 206, 0) \
- x(inode_multiple_links_but_nlink_0, 207, 0) \
- x(inode_wrong_backpointer, 208, 0) \
- x(inode_wrong_nlink, 209, 0) \
- x(inode_unreachable, 210, 0) \
- x(deleted_inode_but_clean, 211, 0) \
- x(deleted_inode_missing, 212, 0) \
- x(deleted_inode_is_dir, 213, 0) \
- x(deleted_inode_not_unlinked, 214, 0) \
+ x(inode_i_size_dirty_but_clean, 202, FSCK_AUTOFIX) \
+ x(inode_i_sectors_dirty_but_clean, 203, FSCK_AUTOFIX) \
+ x(inode_i_sectors_wrong, 204, FSCK_AUTOFIX) \
+ x(inode_dir_wrong_nlink, 205, FSCK_AUTOFIX) \
+ x(inode_dir_multiple_links, 206, FSCK_AUTOFIX) \
+ x(inode_multiple_links_but_nlink_0, 207, FSCK_AUTOFIX) \
+ x(inode_wrong_backpointer, 208, FSCK_AUTOFIX) \
+ x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \
+ x(inode_unreachable, 210, FSCK_AUTOFIX) \
+ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \
+ x(deleted_inode_missing, 212, FSCK_AUTOFIX) \
+ x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \
+ x(deleted_inode_not_unlinked, 214, FSCK_AUTOFIX) \
x(extent_overlapping, 215, 0) \
x(key_in_missing_inode, 216, 0) \
x(key_in_wrong_inode_type, 217, 0) \
@@ -255,7 +256,7 @@ enum bch_fsck_flags {
x(dir_loop, 241, 0) \
x(hash_table_key_duplicate, 242, 0) \
x(hash_table_key_wrong_offset, 243, 0) \
- x(unlinked_inode_not_on_deleted_list, 244, 0) \
+ x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
x(reflink_p_front_pad_bad, 245, 0) \
x(journal_entry_dup_same_device, 246, 0) \
x(inode_bi_subvol_missing, 247, 0) \
@@ -270,7 +271,7 @@ enum bch_fsck_flags {
x(subvol_children_not_set, 256, 0) \
x(subvol_children_bad, 257, 0) \
x(subvol_loop, 258, 0) \
- x(subvol_unreachable, 259, 0) \
+ x(subvol_unreachable, 259, FSCK_AUTOFIX) \
x(btree_node_bkey_bad_u64s, 260, 0) \
x(btree_node_topology_empty_interior_node, 261, 0) \
x(btree_ptr_v2_min_key_bad, 262, 0) \
@@ -282,8 +283,8 @@ enum bch_fsck_flags {
x(btree_ptr_v2_written_0, 268, 0) \
x(subvol_snapshot_bad, 269, 0) \
x(subvol_inode_bad, 270, 0) \
- x(alloc_key_stripe_sectors_wrong, 271, 0) \
- x(accounting_mismatch, 272, 0) \
+ x(alloc_key_stripe_sectors_wrong, 271, FSCK_AUTOFIX) \
+ x(accounting_mismatch, 272, FSCK_AUTOFIX) \
x(accounting_replicas_not_marked, 273, 0) \
x(invalid_btree_id, 274, 0) \
x(alloc_key_io_time_bad, 275, 0) \
@@ -292,12 +293,14 @@ enum bch_fsck_flags {
x(accounting_key_replicas_nr_devs_0, 278, FSCK_AUTOFIX) \
x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \
x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
+ x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
+ x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
+ x(MAX, 284, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
BCH_SB_ERRS()
#undef x
- BCH_SB_ERR_MAX
};
struct bch_sb_field_errors {
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 9cbd3c14c94f..617d07e53b20 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
ret = -1 - SIX_LOCK_write;
}
} else if (type == SIX_LOCK_write && lock->readers) {
- if (try) {
+ if (try)
atomic_add(SIX_LOCK_HELD_write, &lock->state);
- smp_mb__after_atomic();
- }
+ /*
+ * Make sure the atomic_add happens before pcpu_read_count, and that
+ * six_set_bitmask in the slow path happens before pcpu_read_count.
+ *
+ * Paired with the smp_mb() in read lock fast path (per-cpu mode)
+ * and the one before atomic_read in read unlock path.
+ */
+ smp_mb();
ret = !pcpu_read_count(lock);
if (try && !ret) {
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 8b18a9b483a4..1809442b00ee 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -469,6 +469,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
u32 id = snapshot_root;
u32 subvol = 0, s;
+ rcu_read_lock();
while (id) {
s = snapshot_t(c, id)->subvol;
@@ -477,6 +478,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
id = bch2_snapshot_tree_next(c, id);
}
+ rcu_read_unlock();
return subvol;
}
@@ -1782,6 +1784,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
new->k.p.snapshot = leaf_id;
ret = bch2_trans_update(trans, &iter, new, 0);
out:
+ bch2_set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index dbe834cb349f..6845dde1b339 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -92,34 +92,32 @@ static int check_subvol(struct btree_trans *trans,
}
struct bch_inode_unpacked inode;
- struct btree_iter inode_iter = {};
- ret = bch2_inode_peek_nowarn(trans, &inode_iter, &inode,
+ ret = bch2_inode_find_by_inum_nowarn_trans(trans,
(subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) },
- 0);
- bch2_trans_iter_exit(trans, &inode_iter);
-
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (fsck_err_on(ret,
- trans, subvol_to_missing_root,
- "subvolume %llu points to missing subvolume root %llu:%u",
- k.k->p.offset, le64_to_cpu(subvol.v->inode),
- le32_to_cpu(subvol.v->snapshot))) {
- ret = bch2_subvolume_delete(trans, iter->pos.offset);
- bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
- return ret ?: -BCH_ERR_transaction_restart_nested;
- }
-
- if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
- trans, subvol_root_wrong_bi_subvol,
- "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
- inode.bi_inum, inode_iter.k.p.snapshot,
- inode.bi_subvol, subvol.k->p.offset)) {
- inode.bi_subvol = subvol.k->p.offset;
- ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot));
- if (ret)
+ &inode);
+ if (!ret) {
+ if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
+ trans, subvol_root_wrong_bi_subvol,
+ "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
+ inode.bi_inum, inode.bi_snapshot,
+ inode.bi_subvol, subvol.k->p.offset)) {
+ inode.bi_subvol = subvol.k->p.offset;
+ ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot));
+ if (ret)
+ goto err;
+ }
+ } else if (bch2_err_matches(ret, ENOENT)) {
+ if (fsck_err(trans, subvol_to_missing_root,
+ "subvolume %llu points to missing subvolume root %llu:%u",
+ k.k->p.offset, le64_to_cpu(subvol.v->inode),
+ le32_to_cpu(subvol.v->snapshot))) {
+ ret = bch2_subvolume_delete(trans, iter->pos.offset);
+ bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
+ ret = ret ?: -BCH_ERR_transaction_restart_nested;
goto err;
+ }
+ } else {
+ goto err;
}
if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
@@ -137,7 +135,7 @@ static int check_subvol(struct btree_trans *trans,
"%s: snapshot tree %u not found", __func__, snapshot_tree);
if (ret)
- return ret;
+ goto err;
if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
trans, subvol_not_master_and_not_snapshot,
@@ -147,7 +145,7 @@ static int check_subvol(struct btree_trans *trans,
bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
ret = PTR_ERR_OR_ZERO(s);
if (ret)
- return ret;
+ goto err;
SET_BCH_SUBVOLUME_SNAP(&s->v, true);
}
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index d86d5dae54c9..ce7410d72089 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -799,8 +799,10 @@ retry:
i < layout.sb_offset + layout.nr_superblocks; i++) {
offset = le64_to_cpu(*i);
- if (offset == opt_get(*opts, sb))
+ if (offset == opt_get(*opts, sb)) {
+ ret = -BCH_ERR_invalid;
continue;
+ }
ret = read_one_super(sb, offset, &err);
if (!ret)
@@ -1188,7 +1190,8 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
prt_printf(out, "Errors to silently fix:\t");
- prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8);
+ prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent,
+ min(BCH_FSCK_ERR_MAX, sizeof(e->errors_silent) * 8));
prt_newline(out);
kfree(errors_silent);
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 01b768c9b767..b2f209743afe 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -394,7 +394,7 @@ static int insert_test_extent(struct bch_fs *c,
k.k_i.k.p.offset = end;
k.k_i.k.p.snapshot = U32_MAX;
k.k_i.k.size = end - start;
- k.k_i.k.version.lo = test_version++;
+ k.k_i.k.bversion.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
bch_err_fn(c, ret);
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index fb3442a7c67f..dea73bc1cb51 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -275,7 +275,6 @@ static long thread_with_stdio_ioctl(struct file *file, unsigned int cmd, unsigne
}
static const struct file_operations thread_with_stdio_fops = {
- .llseek = no_llseek,
.read = thread_with_stdio_read,
.write = thread_with_stdio_write,
.poll = thread_with_stdio_poll,
@@ -285,7 +284,6 @@ static const struct file_operations thread_with_stdio_fops = {
};
static const struct file_operations thread_with_stdout_fops = {
- .llseek = no_llseek,
.read = thread_with_stdio_read,
.poll = thread_with_stdout_poll,
.flush = thread_with_stdio_flush,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 34d0d1e43f36..06dc4a57ba78 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2032,10 +2032,8 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(&elf, e_phnum, &info, cprm)) {
- coredump_report_failure("Error collecting note info");
+ if (!fill_note_info(&elf, e_phnum, &info, cprm))
goto end_coredump;
- }
has_dumped = 1;
@@ -2050,10 +2048,8 @@ static int elf_core_dump(struct coredump_params *cprm)
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
- if (!phdr4note) {
- coredump_report_failure("Error allocating program headers note entry");
+ if (!phdr4note)
goto end_coredump;
- }
fill_elf_note_phdr(phdr4note, sz, offset);
offset += sz;
@@ -2067,24 +2063,18 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
- if (!shdr4extnum) {
- coredump_report_failure("Error allocating extra program headers");
+ if (!shdr4extnum)
goto end_coredump;
- }
fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
- if (!dump_emit(cprm, &elf, sizeof(elf))) {
- coredump_report_failure("Error emitting the ELF headers");
+ if (!dump_emit(cprm, &elf, sizeof(elf)))
goto end_coredump;
- }
- if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note))) {
- coredump_report_failure("Error emitting the program header for notes");
+ if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
- }
/* Write program headers for segments dump */
for (i = 0; i < cprm->vma_count; i++) {
@@ -2107,28 +2097,20 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
- if (!dump_emit(cprm, &phdr, sizeof(phdr))) {
- coredump_report_failure("Error emitting program headers");
+ if (!dump_emit(cprm, &phdr, sizeof(phdr)))
goto end_coredump;
- }
}
- if (!elf_core_write_extra_phdrs(cprm, offset)) {
- coredump_report_failure("Error writing out extra program headers");
+ if (!elf_core_write_extra_phdrs(cprm, offset))
goto end_coredump;
- }
/* write out the notes section */
- if (!write_note_info(&info, cprm)) {
- coredump_report_failure("Error writing out notes");
+ if (!write_note_info(&info, cprm))
goto end_coredump;
- }
/* For cell spufs and x86 xstate */
- if (elf_coredump_extra_notes_write(cprm)) {
- coredump_report_failure("Error writing out extra notes");
+ if (elf_coredump_extra_notes_write(cprm))
goto end_coredump;
- }
/* Align to page */
dump_skip_to(cprm, dataoff);
@@ -2136,22 +2118,16 @@ static int elf_core_dump(struct coredump_params *cprm)
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *meta = cprm->vma_meta + i;
- if (!dump_user_range(cprm, meta->start, meta->dump_size)) {
- coredump_report_failure("Error writing out the process memory");
+ if (!dump_user_range(cprm, meta->start, meta->dump_size))
goto end_coredump;
- }
}
- if (!elf_core_write_extra_data(cprm)) {
- coredump_report_failure("Error writing out extra data");
+ if (!elf_core_write_extra_data(cprm))
goto end_coredump;
- }
if (e_phnum == PN_XNUM) {
- if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum))) {
- coredump_report_failure("Error emitting extra program headers");
+ if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
goto end_coredump;
- }
}
end_coredump:
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f53977169db4..2b3f9935dbb4 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -595,14 +595,12 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
* write and readdir but not lookup or open).
*/
touch_atime(&file->f_path);
- dput(dentry);
return true;
check_failed:
fscache_cookie_lookup_negative(object->cookie);
cachefiles_unmark_inode_in_use(object, file);
fput(file);
- dput(dentry);
if (ret == -ESTALE)
return cachefiles_create_file(object);
return false;
@@ -611,7 +609,6 @@ error_fput:
fput(file);
error:
cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
- dput(dentry);
return false;
}
@@ -654,7 +651,9 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
goto new_file;
}
- if (!cachefiles_open_file(object, dentry))
+ ret = cachefiles_open_file(object, dentry);
+ dput(dentry);
+ if (!ret)
return false;
_leave(" = t [%lu]", file_inode(object->file)->i_ino);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5d9ccda098cc..53fef258c2bc 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -96,7 +96,6 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
/* dirty the head */
spin_lock(&ci->i_ceph_lock);
- BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
if (__ceph_have_pending_cap_snap(ci)) {
struct ceph_cap_snap *capsnap =
list_last_entry(&ci->i_cap_snaps,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 808c9c048276..bed34fc11c91 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -10,6 +10,7 @@
#include <linux/writeback.h>
#include <linux/iversion.h>
#include <linux/filelock.h>
+#include <linux/jiffies.h>
#include "super.h"
#include "mds_client.h"
@@ -4149,7 +4150,7 @@ retry:
ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
} else if (tsession) {
- /* add placeholder for the export tagert */
+ /* add placeholder for the export target */
int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
tcap = new_cap;
ceph_add_cap(inode, tsession, t_cap_id, issued, 0,
@@ -4602,7 +4603,7 @@ flush_cap_releases:
__ceph_queue_cap_release(session, cap);
spin_unlock(&session->s_cap_lock);
}
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
goto done;
bad:
@@ -4659,7 +4660,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
* slowness doesn't block mdsc delayed work,
* preventing send_renew_caps() from running.
*/
- if (jiffies - loop_start >= 5 * HZ)
+ if (time_after_eq(jiffies, loop_start + 5 * HZ))
break;
}
spin_unlock(&mdsc->cap_delay_lock);
@@ -4701,6 +4702,28 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true);
}
+/*
+ * Flush all cap releases to the mds
+ */
+static void flush_cap_releases(struct ceph_mds_session *s)
+{
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&s->s_cap_lock);
+ if (s->s_num_cap_releases)
+ ceph_flush_session_cap_releases(mdsc, s);
+ spin_unlock(&s->s_cap_lock);
+ doutc(cl, "done\n");
+
+}
+
+void ceph_flush_cap_releases(struct ceph_mds_client *mdsc)
+{
+ ceph_mdsc_iterate_sessions(mdsc, flush_cap_releases, true);
+}
+
void __ceph_touch_fmode(struct ceph_inode_info *ci,
struct ceph_mds_client *mdsc, int fmode)
{
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ddec8c9244ee..952109292d69 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -2058,7 +2058,7 @@ static int ceph_d_delete(const struct dentry *dentry)
return 0;
if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
return 0;
- /* vaild lease? */
+ /* valid lease? */
di = ceph_dentry(dentry);
if (di) {
if (__dentry_lease_is_valid(di))
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4a8eec46254b..315ef02f9a3f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1779,7 +1779,7 @@ retry_lookup:
if (err < 0)
goto done;
} else if (rinfo->head->is_dentry && req->r_dentry) {
- /* parent inode is not locked, be carefull */
+ /* parent inode is not locked, be careful */
struct ceph_vino *ptvino = NULL;
dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 276e34ab3e2c..c4a5fd94bbbb 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2266,7 +2266,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
trim_caps - remaining);
}
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
return 0;
}
@@ -2420,7 +2420,7 @@ static void ceph_cap_release_work(struct work_struct *work)
ceph_put_mds_session(session);
}
-void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
@@ -2447,7 +2447,7 @@ void __ceph_queue_cap_release(struct ceph_mds_session *session,
session->s_num_cap_releases++;
if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
- ceph_flush_cap_releases(session->s_mdsc, session);
+ ceph_flush_session_cap_releases(session->s_mdsc, session);
}
static void ceph_cap_reclaim_work(struct work_struct *work)
@@ -4340,7 +4340,7 @@ skip_cap_auths:
/* flush cap releases */
spin_lock(&session->s_cap_lock);
if (session->s_num_cap_releases)
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
spin_unlock(&session->s_cap_lock);
send_flushmsg_ack(mdsc, session, seq);
@@ -4910,7 +4910,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
} else {
recon_state.msg_version = 2;
}
- /* trsaverse this session's caps */
+ /* traverse this session's caps */
err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
spin_lock(&session->s_cap_lock);
@@ -5446,7 +5446,7 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
- ceph_flush_cap_releases(mdsc, s);
+ ceph_flush_session_cap_releases(mdsc, s);
mutex_lock(&s->s_mutex);
if (renew_caps)
@@ -5877,6 +5877,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
mutex_unlock(&mdsc->mutex);
ceph_flush_dirty_caps(mdsc);
+ ceph_flush_cap_releases(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
if (!list_empty(&mdsc->cap_flush_list)) {
@@ -6015,6 +6016,18 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
ceph_caps_finalize(mdsc);
+
+ if (mdsc->s_cap_auths) {
+ int i;
+
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ kfree(mdsc->s_cap_auths[i].match.gids);
+ kfree(mdsc->s_cap_auths[i].match.path);
+ kfree(mdsc->s_cap_auths[i].match.fs_name);
+ }
+ kfree(mdsc->s_cap_auths);
+ }
+
ceph_pool_perm_destroy(mdsc);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 9bcc7f181bfe..3dd54587944a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -559,9 +559,6 @@ extern struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s);
extern void ceph_put_mds_session(struct ceph_mds_session *s);
-extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
- struct ceph_msg *msg, int mds);
-
extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
@@ -602,8 +599,8 @@ extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
struct ceph_cap *cap);
-extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
+extern void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 0cdf84cd1791..73f321b52895 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -126,6 +126,7 @@ static int ceph_sync_fs(struct super_block *sb, int wait)
if (!wait) {
doutc(cl, "(non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
+ ceph_flush_cap_releases(fsc->mdsc);
doutc(cl, "(non-blocking) done\n");
return 0;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6e817bf1337c..2508aa8950b7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1056,8 +1056,6 @@ extern int ceph_fill_trace(struct super_block *sb,
extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session);
-extern int ceph_inode_holds_cap(struct inode *inode, int mask);
-
extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
extern void __ceph_do_pending_vmtruncate(struct inode *inode);
@@ -1208,10 +1206,6 @@ static inline void ceph_init_inode_acls(struct inode *inode,
struct ceph_acl_sec_ctx *as_ctx)
{
}
-static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
-{
- return 0;
-}
static inline void ceph_forget_all_cached_acls(struct inode *inode)
{
@@ -1270,6 +1264,7 @@ extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags);
extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
+extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
int mds, int drop, int unless, int force);
diff --git a/fs/coredump.c b/fs/coredump.c
index 53a78b6bbb5b..45737b43dda5 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -465,17 +465,7 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- if (fatal_signal_pending(current)) {
- coredump_report_failure("interrupted: fatal signal pending");
- return true;
- }
-
- if (freezing(current)) {
- coredump_report_failure("interrupted: freezing");
- return true;
- }
-
- return false;
+ return fatal_signal_pending(current) || freezing(current);
}
static void wait_for_dump_helpers(struct file *file)
@@ -530,7 +520,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return err;
}
-int do_coredump(const kernel_siginfo_t *siginfo)
+void do_coredump(const kernel_siginfo_t *siginfo)
{
struct core_state core_state;
struct core_name cn;
@@ -538,7 +528,7 @@ int do_coredump(const kernel_siginfo_t *siginfo)
struct linux_binfmt * binfmt;
const struct cred *old_cred;
struct cred *cred;
- int retval;
+ int retval = 0;
int ispipe;
size_t *argv = NULL;
int argc = 0;
@@ -562,20 +552,14 @@ int do_coredump(const kernel_siginfo_t *siginfo)
audit_core_dumps(siginfo->si_signo);
binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump) {
- retval = -ENOEXEC;
+ if (!binfmt || !binfmt->core_dump)
goto fail;
- }
- if (!__get_dumpable(cprm.mm_flags)) {
- retval = -EACCES;
+ if (!__get_dumpable(cprm.mm_flags))
goto fail;
- }
cred = prepare_creds();
- if (!cred) {
- retval = -EPERM;
+ if (!cred)
goto fail;
- }
/*
* We cannot trust fsuid as being the "true" uid of the process
* nor do we know its entire history. We only know it was tainted
@@ -604,7 +588,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
if (ispipe < 0) {
coredump_report_failure("format_corename failed, aborting core");
- retval = ispipe;
goto fail_unlock;
}
@@ -625,7 +608,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
* core_pattern process dies.
*/
coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
- retval = -EPERM;
goto fail_unlock;
}
cprm.limit = RLIM_INFINITY;
@@ -633,7 +615,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
dump_count = atomic_inc_return(&core_dump_count);
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
coredump_report_failure("over core_pipe_limit, skipping core dump");
- retval = -E2BIG;
goto fail_dropcount;
}
@@ -641,7 +622,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
GFP_KERNEL);
if (!helper_argv) {
coredump_report_failure("%s failed to allocate memory", __func__);
- retval = -ENOMEM;
goto fail_dropcount;
}
for (argi = 0; argi < argc; argi++)
@@ -667,16 +647,12 @@ int do_coredump(const kernel_siginfo_t *siginfo)
int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW |
O_LARGEFILE | O_EXCL;
- if (cprm.limit < binfmt->min_coredump) {
- coredump_report_failure("over coredump resource limit, skipping core dump");
- retval = -E2BIG;
+ if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
- }
if (need_suid_safe && cn.corename[0] != '/') {
coredump_report_failure(
"this process can only dump core to a fully qualified path, skipping core dump");
- retval = -EPERM;
goto fail_unlock;
}
@@ -722,28 +698,20 @@ int do_coredump(const kernel_siginfo_t *siginfo)
} else {
cprm.file = filp_open(cn.corename, open_flags, 0600);
}
- if (IS_ERR(cprm.file)) {
- retval = PTR_ERR(cprm.file);
+ if (IS_ERR(cprm.file))
goto fail_unlock;
- }
inode = file_inode(cprm.file);
- if (inode->i_nlink > 1) {
- retval = -EMLINK;
+ if (inode->i_nlink > 1)
goto close_fail;
- }
- if (d_unhashed(cprm.file->f_path.dentry)) {
- retval = -EEXIST;
+ if (d_unhashed(cprm.file->f_path.dentry))
goto close_fail;
- }
/*
* AK: actually i see no reason to not allow this for named
* pipes etc, but keep the previous behaviour for now.
*/
- if (!S_ISREG(inode->i_mode)) {
- retval = -EISDIR;
+ if (!S_ISREG(inode->i_mode))
goto close_fail;
- }
/*
* Don't dump core if the filesystem changed owner or mode
* of the file during file creation. This is an issue when
@@ -755,22 +723,17 @@ int do_coredump(const kernel_siginfo_t *siginfo)
current_fsuid())) {
coredump_report_failure("Core dump to %s aborted: "
"cannot preserve file owner", cn.corename);
- retval = -EPERM;
goto close_fail;
}
if ((inode->i_mode & 0677) != 0600) {
coredump_report_failure("Core dump to %s aborted: "
"cannot preserve file permissions", cn.corename);
- retval = -EPERM;
goto close_fail;
}
- if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) {
- retval = -EACCES;
+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
goto close_fail;
- }
- retval = do_truncate(idmap, cprm.file->f_path.dentry,
- 0, 0, cprm.file);
- if (retval)
+ if (do_truncate(idmap, cprm.file->f_path.dentry,
+ 0, 0, cprm.file))
goto close_fail;
}
@@ -786,15 +749,10 @@ int do_coredump(const kernel_siginfo_t *siginfo)
*/
if (!cprm.file) {
coredump_report_failure("Core dump to |%s disabled", cn.corename);
- retval = -EPERM;
goto close_fail;
}
- if (!dump_vma_snapshot(&cprm)) {
- coredump_report_failure("Can't get VMA snapshot for core dump |%s",
- cn.corename);
- retval = -EACCES;
+ if (!dump_vma_snapshot(&cprm))
goto close_fail;
- }
file_start_write(cprm.file);
core_dumped = binfmt->core_dump(&cprm);
@@ -810,21 +768,9 @@ int do_coredump(const kernel_siginfo_t *siginfo)
}
file_end_write(cprm.file);
free_vma_snapshot(&cprm);
- } else {
- coredump_report_failure("Core dump to %s%s has been interrupted",
- ispipe ? "|" : "", cn.corename);
- retval = -EAGAIN;
- goto fail;
}
- coredump_report(
- "written to %s%s: VMAs: %d, size %zu; core: %lld bytes, pos %lld",
- ispipe ? "|" : "", cn.corename,
- cprm.vma_count, cprm.vma_data_size, cprm.written, cprm.pos);
if (ispipe && core_pipe_limit)
wait_for_dump_helpers(cprm.file);
-
- retval = 0;
-
close_fail:
if (cprm.file)
filp_close(cprm.file, NULL);
@@ -839,7 +785,7 @@ fail_unlock:
fail_creds:
put_cred(cred);
fail:
- return retval;
+ return;
}
/*
@@ -859,16 +805,8 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
if (dump_interrupted())
return 0;
n = __kernel_write(file, addr, nr, &pos);
- if (n != nr) {
- if (n < 0)
- coredump_report_failure("failed when writing out, error %zd", n);
- else
- coredump_report_failure(
- "partially written out, only %zd(of %d) bytes written",
- n, nr);
-
+ if (n != nr)
return 0;
- }
file->f_pos = pos;
cprm->written += n;
cprm->pos += n;
@@ -881,16 +819,9 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
static char zeroes[PAGE_SIZE];
struct file *file = cprm->file;
if (file->f_mode & FMODE_LSEEK) {
- int ret;
-
- if (dump_interrupted())
+ if (dump_interrupted() ||
+ vfs_llseek(file, nr, SEEK_CUR) < 0)
return 0;
-
- ret = vfs_llseek(file, nr, SEEK_CUR);
- if (ret < 0) {
- coredump_report_failure("failed when seeking, error %d", ret);
- return 0;
- }
cprm->pos += nr;
return 1;
} else {
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index c6f4a9a98b85..67299e8b734e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1218,7 +1218,6 @@ static const struct file_operations u32_array_fops = {
.open = u32_array_open,
.release = u32_array_release,
.read = u32_array_read,
- .llseek = no_llseek,
};
/**
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 7112958c2e5b..700a0cbb2f14 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -733,7 +733,6 @@ out:
static const struct file_operations dlm_rawmsg_fops = {
.open = simple_open,
.write = dlm_rawmsg_write,
- .llseek = no_llseek,
};
void *dlm_create_debug_comms_file(int nodeid, void *data)
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 7e9961639802..23c51d62f902 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -110,5 +110,4 @@ const struct file_operations efivarfs_file_operations = {
.open = simple_open,
.read = efivarfs_file_read,
.write = efivarfs_file_write,
- .llseek = no_llseek,
};
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 0356c88252bd..ce9be95c9172 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -91,11 +91,8 @@ int exfat_load_bitmap(struct super_block *sb)
return -EIO;
type = exfat_get_entry_type(ep);
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_BITMAP)
- continue;
- if (ep->dentry.bitmap.flags == 0x0) {
+ if (type == TYPE_BITMAP &&
+ ep->dentry.bitmap.flags == 0x0) {
int err;
err = exfat_allocate_bitmap(sb, ep);
@@ -103,6 +100,9 @@ int exfat_load_bitmap(struct super_block *sb)
return err;
}
brelse(bh);
+
+ if (type == TYPE_UNUSED)
+ return -EINVAL;
}
if (exfat_get_next_cluster(sb, &clu.dir))
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index ecc5db952deb..3cdc1de362a9 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -10,6 +10,7 @@
#include <linux/ratelimit.h>
#include <linux/nls.h>
#include <linux/blkdev.h>
+#include <uapi/linux/exfat.h>
#define EXFAT_ROOT_INO 1
@@ -148,6 +149,9 @@ enum {
#define DIR_CACHE_SIZE \
(DIV_ROUND_UP(EXFAT_DEN_TO_B(ES_MAX_ENTRY_NUM), SECTOR_SIZE) + 1)
+/* Superblock flags */
+#define EXFAT_FLAGS_SHUTDOWN 1
+
struct exfat_dentry_namebuf {
char *lfn;
int lfnbuf_len; /* usually MAX_UNINAME_BUF_SIZE */
@@ -267,6 +271,8 @@ struct exfat_sb_info {
unsigned int clu_srch_ptr; /* cluster search pointer */
unsigned int used_clusters; /* number of used clusters */
+ unsigned long s_exfat_flags; /* Exfat superblock flags */
+
struct mutex s_lock; /* superblock lock */
struct mutex bitmap_lock; /* bitmap lock */
struct exfat_mount_options options;
@@ -309,13 +315,6 @@ struct exfat_inode_info {
/* for avoiding the race between alloc and free */
unsigned int cache_valid_id;
- /*
- * NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access.
- * physically allocated size.
- */
- loff_t i_size_ondisk;
- /* block-aligned i_size (used in cont_write_begin) */
- loff_t i_size_aligned;
/* on-disk position of directory entry or 0 */
loff_t i_pos;
loff_t valid_size;
@@ -338,6 +337,11 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
return container_of(inode, struct exfat_inode_info, vfs_inode);
}
+static inline int exfat_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(EXFAT_FLAGS_SHUTDOWN, &EXFAT_SB(sb)->s_exfat_flags);
+}
+
/*
* If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to
* save ATTR_RO instead of ->i_mode.
@@ -417,6 +421,11 @@ static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
}
+static inline loff_t exfat_ondisk_size(const struct inode *inode)
+{
+ return ((loff_t)inode->i_blocks) << 9;
+}
+
/* super.c */
int exfat_set_volume_dirty(struct super_block *sb);
int exfat_clear_volume_dirty(struct super_block *sb);
@@ -461,6 +470,7 @@ int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long exfat_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int exfat_force_shutdown(struct super_block *sb, u32 flags);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index e19469e88000..a25d7eb789f4 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -29,7 +29,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (ret)
return ret;
- num_clusters = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
new_num_clusters = EXFAT_B_TO_CLU_ROUND_UP(size, sbi);
if (new_num_clusters == num_clusters)
@@ -74,8 +74,6 @@ out:
/* Expanded range not zeroed, do not update valid_size */
i_size_write(inode, size);
- ei->i_size_aligned = round_up(size, sb->s_blocksize);
- ei->i_size_ondisk = ei->i_size_aligned;
inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
mark_inode_dirty(inode);
@@ -159,7 +157,7 @@ int __exfat_truncate(struct inode *inode)
exfat_set_volume_dirty(sb);
num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
- num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters_phys = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
@@ -245,8 +243,6 @@ void exfat_truncate(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- unsigned int blocksize = i_blocksize(inode);
- loff_t aligned_size;
int err;
mutex_lock(&sbi->s_lock);
@@ -264,17 +260,6 @@ void exfat_truncate(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
write_size:
- aligned_size = i_size_read(inode);
- if (aligned_size & (blocksize - 1)) {
- aligned_size |= (blocksize - 1);
- aligned_size++;
- }
-
- if (ei->i_size_ondisk > i_size_read(inode))
- ei->i_size_ondisk = aligned_size;
-
- if (ei->i_size_aligned > i_size_read(inode))
- ei->i_size_aligned = aligned_size;
mutex_unlock(&sbi->s_lock);
}
@@ -302,6 +287,9 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
unsigned int ia_valid;
int error;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
@@ -485,6 +473,19 @@ static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
return 0;
}
+static int exfat_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return exfat_force_shutdown(sb, flags);
+}
+
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -495,6 +496,8 @@ long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return exfat_ioctl_get_attributes(inode, user_attr);
case FAT_IOCTL_SET_ATTRIBUTES:
return exfat_ioctl_set_attributes(filp, user_attr);
+ case EXFAT_IOC_SHUTDOWN:
+ return exfat_ioctl_shutdown(inode->i_sb, arg);
case FITRIM:
return exfat_ioctl_fitrim(inode, arg);
default:
@@ -515,6 +518,9 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *inode = filp->f_mapping->host;
int err;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
err = __generic_file_fsync(filp, start, end, datasync);
if (err)
return err;
@@ -526,32 +532,32 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
-static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)
+static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
{
int err;
+ loff_t pos;
struct inode *inode = file_inode(file);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *ops = mapping->a_ops;
- while (start < end) {
- u32 zerofrom, len;
+ pos = ei->valid_size;
+ while (pos < new_valid_size) {
+ u32 len;
struct folio *folio;
- zerofrom = start & (PAGE_SIZE - 1);
- len = PAGE_SIZE - zerofrom;
- if (start + len > end)
- len = end - start;
+ len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
+ if (pos + len > new_valid_size)
+ len = new_valid_size - pos;
- err = ops->write_begin(file, mapping, start, len, &folio, NULL);
+ err = ops->write_begin(file, mapping, pos, len, &folio, NULL);
if (err)
goto out;
- folio_zero_range(folio, offset_in_folio(folio, start), len);
-
- err = ops->write_end(file, mapping, start, len, len, folio, NULL);
+ err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
- start += len;
+ pos += len;
balance_dirty_pages_ratelimited(mapping);
cond_resched();
@@ -579,7 +585,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto unlock;
if (pos > valid_size) {
- ret = exfat_file_zeroed_range(file, valid_size, pos);
+ ret = exfat_extend_valid_size(file, pos);
if (ret < 0 && ret != -ENOSPC) {
exfat_err(inode->i_sb,
"write: fail to zero from %llu to %llu(%zd)",
@@ -613,26 +619,46 @@ unlock:
return ret;
}
-static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
{
- int ret;
+ int err;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct exfat_inode_info *ei = EXFAT_I(inode);
- loff_t start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- loff_t end = min_t(loff_t, i_size_read(inode),
+ loff_t start, end;
+
+ if (!inode_trylock(inode))
+ return VM_FAULT_RETRY;
+
+ start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ end = min_t(loff_t, i_size_read(inode),
start + vma->vm_end - vma->vm_start);
- if ((vma->vm_flags & VM_WRITE) && ei->valid_size < end) {
- ret = exfat_file_zeroed_range(file, ei->valid_size, end);
- if (ret < 0) {
- exfat_err(inode->i_sb,
- "mmap: fail to zero from %llu to %llu(%d)",
- start, end, ret);
- return ret;
+ if (ei->valid_size < end) {
+ err = exfat_extend_valid_size(file, end);
+ if (err < 0) {
+ inode_unlock(inode);
+ return vmf_fs_error(err);
}
}
- return generic_file_mmap(file, vma);
+ inode_unlock(inode);
+
+ return filemap_page_mkwrite(vmf);
+}
+
+static const struct vm_operations_struct exfat_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = exfat_page_mkwrite,
+};
+
+static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &exfat_file_vm_ops;
+ return 0;
}
const struct file_operations exfat_file_operations = {
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 05f0e07b01d0..d724de8f57bf 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -102,6 +102,9 @@ int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
@@ -130,11 +133,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
unsigned int local_clu_offset = clu_offset;
- unsigned int num_to_be_allocated = 0, num_clusters = 0;
+ unsigned int num_to_be_allocated = 0, num_clusters;
- if (ei->i_size_ondisk > 0)
- num_clusters =
- EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
if (clu_offset >= num_clusters)
num_to_be_allocated = clu_offset - num_clusters + 1;
@@ -260,21 +261,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return 0;
}
-static int exfat_map_new_buffer(struct exfat_inode_info *ei,
- struct buffer_head *bh, loff_t pos)
-{
- if (buffer_delay(bh) && pos > ei->i_size_aligned)
- return -EIO;
- set_buffer_new(bh);
-
- /*
- * Adjust i_size_aligned if i_size_ondisk is bigger than it.
- */
- if (ei->i_size_ondisk > ei->i_size_aligned)
- ei->i_size_aligned = ei->i_size_ondisk;
- return 0;
-}
-
static int exfat_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -288,7 +274,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
sector_t last_block;
sector_t phys = 0;
sector_t valid_blks;
- loff_t pos;
mutex_lock(&sbi->s_lock);
last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
@@ -316,12 +301,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
mapped_blocks = sbi->sect_per_clus - sec_offset;
max_blocks = min(mapped_blocks, max_blocks);
- pos = EXFAT_BLK_TO_B((iblock + 1), sb);
- if ((create && iblock >= last_block) || buffer_delay(bh_result)) {
- if (ei->i_size_ondisk < pos)
- ei->i_size_ondisk = pos;
- }
-
map_bh(bh_result, sb, phys);
if (buffer_delay(bh_result))
clear_buffer_delay(bh_result);
@@ -342,13 +321,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
}
/* The area has not been written, map and mark as new. */
- err = exfat_map_new_buffer(ei, bh_result, pos);
- if (err) {
- exfat_fs_error(sb,
- "requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
- pos, ei->i_size_aligned);
- goto unlock_ret;
- }
+ set_buffer_new(bh_result);
ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
mark_inode_dirty(inode);
@@ -371,7 +344,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
* The block has been partially written,
* zero the unwritten part and map the block.
*/
- loff_t size, off;
+ loff_t size, off, pos;
max_blocks = 1;
@@ -382,7 +355,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
if (!bh_result->b_folio)
goto done;
- pos -= sb->s_blocksize;
+ pos = EXFAT_BLK_TO_B(iblock, sb);
size = ei->valid_size - pos;
off = pos & (PAGE_SIZE - 1);
@@ -432,6 +405,9 @@ static void exfat_readahead(struct readahead_control *rac)
static int exfat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
return mpage_writepages(mapping, wbc, exfat_get_block);
}
@@ -452,6 +428,9 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
{
int ret;
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
if (ret < 0)
@@ -469,14 +448,6 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
int err;
err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
-
- if (ei->i_size_aligned < i_size_read(inode)) {
- exfat_fs_error(inode->i_sb,
- "invalid size(size(%llu) > aligned(%llu)\n",
- i_size_read(inode), ei->i_size_aligned);
- return -EIO;
- }
-
if (err < len)
exfat_write_failed(mapping, pos+len);
@@ -504,20 +475,6 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
int rw = iov_iter_rw(iter);
ssize_t ret;
- if (rw == WRITE) {
- /*
- * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
- * so we need to update the ->i_size_aligned to block boundary.
- *
- * But we must fill the remaining area or hole by nul for
- * updating ->i_size_aligned
- *
- * Return 0, and fallback to normal buffered write.
- */
- if (EXFAT_I(inode)->i_size_aligned < size)
- return 0;
- }
-
/*
* Need to use the DIO_LOCKING for avoiding the race
* condition of exfat_get_block() and ->truncate().
@@ -531,8 +488,18 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else
size = pos + ret;
- /* zero the unwritten part in the partially written block */
- if (rw == READ && pos < ei->valid_size && ei->valid_size < size) {
+ if (rw == WRITE) {
+ /*
+ * If the block had been partially written before this write,
+ * ->valid_size will not be updated in exfat_get_block(),
+ * update it here.
+ */
+ if (ei->valid_size < size) {
+ ei->valid_size = size;
+ mark_inode_dirty(inode);
+ }
+ } else if (pos < ei->valid_size && ei->valid_size < size) {
+ /* zero the unwritten part in the partially written block */
iov_iter_revert(iter, size - ei->valid_size);
iov_iter_zero(size - ei->valid_size, iter);
}
@@ -667,15 +634,6 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
i_size_write(inode, size);
- /* ondisk and aligned size should be aligned with block size */
- if (size & (inode->i_sb->s_blocksize - 1)) {
- size |= (inode->i_sb->s_blocksize - 1);
- size++;
- }
-
- ei->i_size_aligned = size;
- ei->i_size_ondisk = size;
-
exfat_save_attr(inode, info->attr);
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 631ad9e8e32a..2c4c44229352 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -372,8 +372,6 @@ static int exfat_find_empty_entry(struct inode *inode,
/* directory inode should be updated in here */
i_size_write(inode, size);
- ei->i_size_ondisk += sbi->cluster_size;
- ei->i_size_aligned += sbi->cluster_size;
ei->valid_size += sbi->cluster_size;
ei->flags = p_dir->flags;
inode->i_blocks += sbi->cluster_size >> 9;
@@ -549,6 +547,9 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
@@ -772,6 +773,9 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
entry = ei->entry;
@@ -825,6 +829,9 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
@@ -915,6 +922,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
@@ -982,6 +992,9 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
struct exfat_entry_set_cache old_es, new_es;
int sync = IS_DIRSYNC(inode);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
num_new_entries = exfat_calc_num_entries(p_uniname);
if (num_new_entries < 0)
return num_new_entries;
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index afdf13c34ff5..1ac011088ce7 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -779,8 +779,11 @@ int exfat_create_upcase_table(struct super_block *sb)
le32_to_cpu(ep->dentry.upcase.checksum));
brelse(bh);
- if (ret && ret != -EIO)
+ if (ret && ret != -EIO) {
+ /* free memory from exfat_load_upcase_table call */
+ exfat_free_upcase_table(sbi);
goto load_default;
+ }
/* load successfully */
return ret;
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 323ecebe6f0e..bd57844414aa 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -46,6 +46,9 @@ static int exfat_sync_fs(struct super_block *sb, int wait)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
int err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return 0;
+
if (!wait)
return 0;
@@ -167,6 +170,41 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+int exfat_force_shutdown(struct super_block *sb, u32 flags)
+{
+ int ret;
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (exfat_forced_shutdown(sb))
+ return 0;
+
+ switch (flags) {
+ case EXFAT_GOING_DOWN_DEFAULT:
+ case EXFAT_GOING_DOWN_FULLSYNC:
+ ret = bdev_freeze(sb->s_bdev);
+ if (ret)
+ return ret;
+ bdev_thaw(sb->s_bdev);
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ case EXFAT_GOING_DOWN_NOSYNC:
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (opts->discard)
+ opts->discard = 0;
+ return 0;
+}
+
+static void exfat_shutdown(struct super_block *sb)
+{
+ exfat_force_shutdown(sb, EXFAT_GOING_DOWN_NOSYNC);
+}
+
static struct inode *exfat_alloc_inode(struct super_block *sb)
{
struct exfat_inode_info *ei;
@@ -193,6 +231,7 @@ static const struct super_operations exfat_sops = {
.sync_fs = exfat_sync_fs,
.statfs = exfat_statfs,
.show_options = exfat_show_options,
+ .shutdown = exfat_shutdown,
};
enum {
@@ -370,8 +409,6 @@ static int exfat_read_root(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- ei->i_size_aligned = i_size_read(inode);
- ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, EXFAT_ATTR_SUBDIR);
ei->i_crtime = simple_inode_init_ts(inode);
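The EXFAT_IOC_SHUTDOWN path added above (fs/exfat/file.c and fs/exfat/super.c) can be exercised from user space roughly as in the sketch below. This is illustrative only: it assumes the ioctl number and the EXFAT_GOING_DOWN_* flag values are exported by the uapi <linux/exfat.h> header this series introduces (see the new #include <uapi/linux/exfat.h> in exfat_fs.h), and that the caller holds CAP_SYS_ADMIN as required by exfat_ioctl_shutdown().

    /* Minimal sketch: force an exfat volume into shutdown state. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/exfat.h>   /* assumed: EXFAT_IOC_SHUTDOWN, EXFAT_GOING_DOWN_* */

    int main(int argc, char **argv)
    {
            /* DEFAULT/FULLSYNC freeze and thaw the block device first; NOSYNC does not. */
            __u32 flags = EXFAT_GOING_DOWN_FULLSYNC;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <file or dir on the exfat volume>\n", argv[0]);
                    return 1;
            }

            fd = open(argv[1], O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* The kernel side uses get_user() on a __u32, so pass a pointer to the flags. */
            if (ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags) < 0)
                    perror("ioctl(EXFAT_IOC_SHUTDOWN)");

            close(fd);
            return 0;
    }

Once the shutdown bit is set, the write-side entry points guarded by exfat_forced_shutdown() (setattr, fsync, write_begin, writepages, write_inode, create, unlink, mkdir, rmdir, rename) return -EIO, sync_fs becomes a no-op, and the discard mount option is cleared; an unrecognized flag value returns -EINVAL.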
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index bdd96329dddd..7f76460b721f 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -99,7 +99,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
- f2fs_handle_page_eio(sbi, page->index, META);
+ f2fs_handle_page_eio(sbi, page_folio(page), META);
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -345,30 +345,31 @@ static int __f2fs_write_meta_page(struct page *page,
enum iostat_type io_type)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
- trace_f2fs_writepage(page_folio(page), META);
+ trace_f2fs_writepage(folio, META);
if (unlikely(f2fs_cp_error(sbi))) {
if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
goto redirty_out;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+ if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
- f2fs_do_write_meta_page(sbi, page, io_type);
+ f2fs_do_write_meta_page(sbi, folio, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim)
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_write(sbi, META);
@@ -1551,7 +1552,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits +
- (i << F2FS_BLKSIZE_BITS), blk + i);
+ F2FS_BLK_TO_BYTES(i), blk + i);
}
/* write out checkpoint buffer at block 0 */
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 990b93689b46..7f26440e8595 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -90,11 +90,13 @@ bool f2fs_is_compressed_page(struct page *page)
static void f2fs_set_compressed_page(struct page *page,
struct inode *inode, pgoff_t index, void *data)
{
- attach_page_private(page, (void *)data);
+ struct folio *folio = page_folio(page);
+
+ folio_attach_private(folio, (void *)data);
/* i_crypto_info and iv index */
- page->index = index;
- page->mapping = inode->i_mapping;
+ folio->index = index;
+ folio->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
@@ -160,17 +162,17 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
cc->cluster_idx = NULL_CLUSTER;
}
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
unsigned int cluster_ofs;
- if (!f2fs_cluster_can_merge_page(cc, page->index))
+ if (!f2fs_cluster_can_merge_page(cc, folio->index))
f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
- cluster_ofs = offset_in_cluster(cc, page->index);
- cc->rpages[cluster_ofs] = page;
+ cluster_ofs = offset_in_cluster(cc, folio->index);
+ cc->rpages[cluster_ofs] = folio_page(folio, 0);
cc->nr_rpages++;
- cc->cluster_idx = cluster_idx(cc, page->index);
+ cc->cluster_idx = cluster_idx(cc, folio->index);
}
#ifdef CONFIG_F2FS_FS_LZO
@@ -879,7 +881,7 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
- if (page->index >= nr_pages)
+ if (page_folio(page)->index >= nr_pages)
return true;
}
return false;
@@ -945,7 +947,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, i;
- for (i = 1, count = 1; i < cluster_size; i++) {
+ for (i = 0, count = 0; i < cluster_size; i++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
@@ -956,8 +958,8 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
return count;
}
-static int __f2fs_cluster_blocks(struct inode *inode,
- unsigned int cluster_idx, bool compr_blks)
+static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
+ enum cluster_check_type type)
{
struct dnode_of_data dn;
unsigned int start_idx = cluster_idx <<
@@ -978,10 +980,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
- if (compr_blks)
- ret = __f2fs_get_cluster_blocks(inode, &dn);
- else
+ if (type == CLUSTER_COMPR_BLKS)
+ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
+ else if (type == CLUSTER_IS_COMPR)
ret = 1;
+ } else if (type == CLUSTER_RAW_BLKS) {
+ ret = __f2fs_get_cluster_blocks(inode, &dn);
}
fail:
f2fs_put_dnode(&dn);
@@ -991,7 +995,16 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
+ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
+ CLUSTER_COMPR_BLKS);
+}
+
+/* return # of raw blocks in non-compressed cluster */
+static int f2fs_decompressed_blocks(struct inode *inode,
+ unsigned int cluster_idx)
+{
+ return __f2fs_cluster_blocks(inode, cluster_idx,
+ CLUSTER_RAW_BLKS);
}
/* return whether cluster is compressed one or not */
@@ -999,7 +1012,16 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
index >> F2FS_I(inode)->i_log_cluster_size,
- false);
+ CLUSTER_IS_COMPR);
+}
+
+/* return whether cluster contains non raw blocks or not */
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
+{
+ unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
+
+ return f2fs_decompressed_blocks(inode, cluster_idx) !=
+ F2FS_I(inode)->i_cluster_size;
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -1093,7 +1115,7 @@ retry:
if (PageUptodate(page))
f2fs_put_page(page, 1);
else
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
}
if (!f2fs_cluster_is_empty(cc)) {
@@ -1123,7 +1145,7 @@ retry:
}
f2fs_wait_on_page_writeback(page, DATA, true, true);
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
if (!PageUptodate(page)) {
release_and_retry:
@@ -1523,7 +1545,8 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
- ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
+ ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
+ &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5dfa0207ad8f..94f7b084f601 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
@@ -355,7 +354,7 @@ static void f2fs_write_end_io(struct bio *bio)
}
f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page->index != nid_of_node(page));
+ page_folio(page)->index != nid_of_node(page));
dec_page_count(sbi, type);
if (f2fs_in_warm_node_list(sbi, page))
@@ -704,7 +703,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio = __bio_alloc(fio, 1);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -803,7 +802,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
fio->page->mapping->host,
- fio->page->index, fio) &&
+ page_folio(fio->page)->index, fio) &&
bio_add_page(*bio, page, PAGE_SIZE, 0) ==
PAGE_SIZE) {
ret = 0;
@@ -903,7 +902,7 @@ alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
@@ -996,13 +995,13 @@ next:
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
!f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- bio_page->index, fio)))
+ page_folio(bio_page)->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- bio_page->index, fio, GFP_NOIO);
+ page_folio(bio_page)->index, fio, GFP_NOIO);
io->fio = *fio;
}
@@ -1087,7 +1086,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
}
/* This can handle encryption stuffs */
-static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
block_t blkaddr, blk_opf_t op_flags,
bool for_write)
{
@@ -1095,14 +1094,14 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
iostat_update_and_unbind_ctx(bio);
if (bio->bi_private)
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
@@ -1270,7 +1269,7 @@ got_it:
return page;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+ err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
op_flags, for_write);
if (err)
goto put_err;
@@ -1713,6 +1712,14 @@ skip:
dn.ofs_in_node = end_offset;
}
+ if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create) {
+ /* the next block to be allocated may not be contiguous. */
+ if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
+ CAP_BLKS_PER_SEC(sbi) - 1)
+ goto sync_out;
+ }
+
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
@@ -1939,7 +1946,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
inode_lock_shared(inode);
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
if (start > maxbytes) {
ret = -EFBIG;
goto out;
@@ -2064,7 +2071,7 @@ out:
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
- return inode->i_sb->s_maxbytes;
+ return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
return i_size_read(inode);
}
@@ -2208,19 +2215,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
+ struct folio *folio;
if (!page)
continue;
- if ((sector_t)page->index >= last_block_in_file) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- } else if (!PageUptodate(page)) {
+
+ folio = page_folio(page);
+ if ((sector_t)folio->index >= last_block_in_file) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ } else if (!folio_test_uptodate(folio)) {
continue;
}
- unlock_page(page);
+ folio_unlock(folio);
if (for_write)
- put_page(page);
+ folio_put(folio);
cc->rpages[i] = NULL;
cc->nr_rpages--;
}
@@ -2280,7 +2290,7 @@ skip_reading_dnode:
}
for (i = 0; i < cc->nr_cpages; i++) {
- struct page *page = dic->cpages[i];
+ struct folio *folio = page_folio(dic->cpages[i]);
block_t blkaddr;
struct bio_post_read_ctx *ctx;
@@ -2290,7 +2300,8 @@ skip_reading_dnode:
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+ if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
+ blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
break;
@@ -2300,7 +2311,7 @@ skip_reading_dnode:
if (bio && (!page_is_mergeable(sbi, bio,
*last_block_in_bio, blkaddr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
submit_and_realloc:
f2fs_submit_read_bio(sbi, bio, DATA);
bio = NULL;
@@ -2309,7 +2320,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
f2fs_ra_op_flags(rac),
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
f2fs_decompress_end_io(dic, ret, true);
@@ -2319,7 +2330,7 @@ submit_and_realloc:
}
}
- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
ctx = get_post_read_ctx(bio);
@@ -2430,7 +2441,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
if (ret)
goto set_error_page;
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
goto next_page;
read_single_page:
@@ -2645,21 +2656,24 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct page *page = fio->page;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(fio->page);
+ struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
bool ipu_force = false;
+ bool atomic_commit;
int err = 0;
/* Use COW inode to make dnode_of_data for atomic write */
- if (f2fs_is_atomic_file(inode))
+ atomic_commit = f2fs_is_atomic_file(inode) &&
+ page_private_atomic(folio_page(folio, 0));
+ if (atomic_commit)
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
else
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_inplace_update(fio) &&
- f2fs_lookup_read_extent_cache_block(inode, page->index,
+ f2fs_lookup_read_extent_cache_block(inode, folio->index,
&fio->old_blkaddr)) {
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
DATA_GENERIC_ENHANCE))
@@ -2674,7 +2688,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
return -EAGAIN;
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
if (err)
goto out;
@@ -2682,8 +2696,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
+ folio_clear_uptodate(folio);
+ clear_page_private_gcing(folio_page(folio, 0));
goto out_writepage;
}
got_it:
@@ -2709,7 +2723,7 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -2717,11 +2731,11 @@ got_it:
if (err) {
if (fscrypt_inode_uses_fs_layer_crypto(inode))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
- end_page_writeback(page);
+ folio_end_writeback(folio);
} else {
set_inode_flag(inode, FI_UPDATE_WRITE);
}
- trace_f2fs_do_write_data_page(page_folio(page), IPU);
+ trace_f2fs_do_write_data_page(folio, IPU);
return err;
}
@@ -2743,15 +2757,17 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
- trace_f2fs_do_write_data_page(page_folio(page), OPU);
+ trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
+ if (atomic_commit)
+ clear_page_private_atomic(folio_page(folio, 0));
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2760,7 +2776,7 @@ out:
return err;
}
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
@@ -2768,12 +2784,13 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks,
bool allow_balance)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
+ struct page *page = folio_page(folio, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
- loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
bool quota_inode = IS_NOQUOTA(inode);
@@ -2797,11 +2814,11 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.last_block = last_block,
};
- trace_f2fs_writepage(page_folio(page), DATA);
+ trace_f2fs_writepage(folio, DATA);
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
+ mapping_set_error(folio->mapping, -EIO);
/*
* don't drop any dirty dentry pages for keeping lastest
* directory structure.
@@ -2819,7 +2836,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index ||
+ if (folio->index < end_index ||
f2fs_verity_in_progress(inode) ||
compr_blocks)
goto write;
@@ -2829,10 +2846,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
- if ((page->index >= end_index + 1) || !offset)
+ if ((folio->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
write:
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || quota_inode) {
@@ -2862,7 +2879,7 @@ write:
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
- err = f2fs_write_inline_data(inode, page);
+ err = f2fs_write_inline_data(inode, folio);
if (!err)
goto out;
}
@@ -2892,7 +2909,7 @@ done:
out:
inode_dec_dirty_pages(inode);
if (err) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
clear_page_private_gcing(page);
}
@@ -2902,7 +2919,7 @@ out:
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
!F2FS_I(inode)->wb_task && allow_balance)
f2fs_balance_fs(sbi, need_balance_fs);
@@ -2920,7 +2937,7 @@ out:
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
/*
* pageout() in MM translates EAGAIN, so calls handle_write_error()
* -> mapping_set_error() -> set_bit(AS_EIO, ...).
@@ -2929,29 +2946,30 @@ redirty_out:
*/
if (!err || wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE;
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
goto out;
if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, page->index)) {
- redirty_page_for_writepage(wbc, page);
+ if (f2fs_is_compressed_cluster(inode, folio->index)) {
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
}
out:
#endif
- return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+ return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
wbc, FS_DATA_IO, 0, true);
}
@@ -3157,11 +3175,11 @@ continue_unlock:
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
folio_get(folio);
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
continue;
}
#endif
- ret = f2fs_write_single_data_page(&folio->page,
+ ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
if (ret == AOP_WRITEPAGE_ACTIVATE)
@@ -3369,11 +3387,11 @@ void f2fs_write_failed(struct inode *inode, loff_t to)
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed)
{
- struct inode *inode = page->mapping->host;
- pgoff_t index = page->index;
+ struct inode *inode = folio->mapping->host;
+ pgoff_t index = folio->index;
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
@@ -3410,13 +3428,13 @@ restart:
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(page_folio(page), ipage);
+ f2fs_do_read_inline_data(folio, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_page_private_inline(ipage);
goto out;
}
- err = f2fs_convert_inline_page(&dn, page);
+ err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
if (err || dn.data_blkaddr != NULL_ADDR)
goto out;
}
@@ -3509,12 +3527,12 @@ unlock_out:
}
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned int len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed, bool *use_cow)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct inode *cow_inode = F2FS_I(inode)->cow_inode;
- pgoff_t index = page->index;
+ pgoff_t index = folio->index;
int err = 0;
block_t ori_blk_addr = NULL_ADDR;
@@ -3620,10 +3638,10 @@ repeat:
*foliop = folio;
if (f2fs_is_atomic_file(inode))
- err = prepare_atomic_write_begin(sbi, &folio->page, pos, len,
+ err = prepare_atomic_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance, &use_cow);
else
- err = prepare_write_begin(sbi, &folio->page, pos, len,
+ err = prepare_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance);
if (err)
goto put_folio;
@@ -3648,7 +3666,7 @@ repeat:
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
- folio_zero_segment(folio, len, PAGE_SIZE);
+ folio_zero_segment(folio, len, folio_size(folio));
return 0;
}
@@ -3662,8 +3680,8 @@ repeat:
goto put_folio;
}
err = f2fs_submit_page_read(use_cow ?
- F2FS_I(inode)->cow_inode : inode, &folio->page,
- blkaddr, 0, true);
+ F2FS_I(inode)->cow_inode : inode,
+ folio, blkaddr, 0, true);
if (err)
goto put_folio;
@@ -3727,6 +3745,9 @@ static int f2fs_write_end(struct file *file,
folio_mark_dirty(folio);
+ if (f2fs_is_atomic_file(inode))
+ set_page_private_atomic(folio_page(folio, 0));
+
if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
f2fs_i_size_write(inode, pos + copied);
@@ -4117,9 +4138,8 @@ const struct address_space_operations f2fs_dblock_aops = {
.swap_deactivate = f2fs_swap_deactivate,
};
-void f2fs_clear_page_cache_dirty_tag(struct page *page)
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct address_space *mapping = folio->mapping;
unsigned long flags;
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 8b0e1e71b667..546b8ba91261 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -275,7 +275,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
- si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+ si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks);
si->base_mem += NM_I(sbi)->nat_blocks *
f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index cbd7a5e96a37..1136539a57a8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -166,7 +166,8 @@ static unsigned long dir_block_index(unsigned int level,
unsigned long bidx = 0;
for (i = 0; i < level; i++)
- bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
+ bidx += mul_u32_u32(dir_buckets(i, dir_level),
+ bucket_blocks(i));
bidx += idx * bucket_blocks(level);
return bidx;
}
@@ -841,6 +842,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ pgoff_t index = page_folio(page)->index;
int i;
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -866,8 +868,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
- !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ !f2fs_truncate_hole(dir, index, index + 1)) {
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
ClearPageUptodate(page);
clear_page_private_all(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index fd1fc06359ee..62ac440d9416 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -366,7 +366,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- if (fofs < et->largest.fofs + et->largest.len &&
+ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
fofs + len > et->largest.fofs) {
et->largest.len = 0;
et->largest_updated = true;
@@ -456,7 +456,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (type == EX_READ &&
et->largest.fofs <= pgofs &&
- et->largest.fofs + et->largest.len > pgofs) {
+ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
*ei = et->largest;
ret = true;
stat_inc_largest_node_hit(sbi);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ac19c61f0c3e..33f5449dc22d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -11,7 +11,6 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
@@ -134,6 +133,12 @@ typedef u32 nid_t;
#define COMPRESS_EXT_NUM 16
+enum blkzone_allocation_policy {
+ BLKZONE_ALLOC_PRIOR_SEQ, /* Prioritize writing to sequential zones */
+ BLKZONE_ALLOC_ONLY_SEQ, /* Only allow writing to sequential zones */
+ BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
+};
+
/*
* An implementation of an rwsem that is explicitly unfair to readers. This
* prevents priority inversion when a low-priority reader acquires the read lock
@@ -285,6 +290,7 @@ enum {
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
TRANS_DIR_INO, /* for transactions dir ino list */
+ XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -784,7 +790,6 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
@@ -802,6 +807,7 @@ enum {
FI_ALIGNED_WRITE, /* enable aligned write */
FI_COW_FILE, /* indicate COW file */
FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
+ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
FI_OPENED_FILE, /* indicate file has been opened */
FI_MAX, /* max flag, never be used */
@@ -1155,6 +1161,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
+ CP_XATTR_DIR,
};
enum iostat_type {
@@ -1293,6 +1300,7 @@ struct f2fs_gc_control {
bool no_bg_gc; /* check the space and stop bg_gc */
bool should_migrate_blocks; /* should migrate blocks */
bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
unsigned int nr_free_secs; /* # of free sections to do GC */
};
@@ -1418,7 +1426,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
* bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
* bit 2 PAGE_PRIVATE_INLINE_INODE
* bit 3 PAGE_PRIVATE_REF_RESOURCE
- * bit 4- f2fs private data
+ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
*
* Layout B: lowest bit should be 0
* page.private is a wrapped pointer.
@@ -1428,6 +1437,7 @@ enum {
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
PAGE_PRIVATE_MAX
};
@@ -1559,6 +1569,8 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
unsigned int max_open_zones; /* max open zone resources of the zoned device */
+ /* For adjust the priority writing position of data in zone UFS */
+ unsigned int blkzone_alloc_policy;
#endif
/* for node-related operations */
@@ -1685,6 +1697,8 @@ struct f2fs_sb_info {
unsigned int max_victim_search;
/* migration granularity of garbage collection, unit: segment */
unsigned int migration_granularity;
+ /* migration window granularity of garbage collection, unit: segment */
+ unsigned int migration_window_granularity;
/*
* for stat information.
@@ -1994,6 +2008,16 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
return (struct f2fs_super_block *)(sbi->raw_super);
}
+static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
+ pgoff_t index)
+{
+ pgoff_t idx_in_folio = index % (1 << folio_order(folio));
+
+ return (struct f2fs_super_block *)
+ (page_address(folio_page(folio, idx_in_folio)) +
+ F2FS_SUPER_OFFSET);
+}
+
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
return (struct f2fs_checkpoint *)(sbi->ckpt);
@@ -2396,14 +2420,17 @@ static inline void clear_page_private_##name(struct page *page) \
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
static inline unsigned long get_page_private_data(struct page *page)
{
@@ -2435,6 +2462,7 @@ static inline void clear_page_private_all(struct page *page)
clear_page_private_reference(page);
clear_page_private_gcing(page);
clear_page_private_inline(page);
+ clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page));
}
@@ -2854,13 +2882,26 @@ static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
return false;
}
+static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
+}
+
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
+ bool zoned_gc = (type == GC_TIME &&
+ F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
+
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (is_inflight_io(sbi, type))
- return false;
+ if (zoned_gc) {
+ if (is_inflight_read_io(sbi))
+ return false;
+ } else {
+ if (is_inflight_io(sbi, type))
+ return false;
+ }
if (sbi->gc_mode == GC_URGENT_MID)
return true;
@@ -2869,6 +2910,9 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
(type == DISCARD_TIME || type == GC_TIME))
return true;
+ if (zoned_gc)
+ return true;
+
return f2fs_time_over(sbi, type);
}
@@ -2900,26 +2944,27 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+static inline unsigned int get_dnode_base(struct inode *inode,
+ struct page *node_page)
{
- struct f2fs_node *raw_node;
- __le32 *addr_array;
- int base = 0;
- bool is_inode = IS_INODE(node_page);
+ if (!IS_INODE(node_page))
+ return 0;
- raw_node = F2FS_NODE(node_page);
+ return inode ? get_extra_isize(inode) :
+ offset_in_addr(&F2FS_NODE(node_page)->i);
+}
- if (is_inode) {
- if (!inode)
- /* from GC path only */
- base = offset_in_addr(&raw_node->i);
- else if (f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
- }
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct page *node_page)
+{
+ return blkaddr_in_node(F2FS_NODE(node_page)) +
+ get_dnode_base(inode, node_page);
+}
- addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[base + offset]);
+static inline block_t data_blkaddr(struct inode *inode,
+ struct page *node_page, unsigned int offset)
+{
+ return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
@@ -3038,10 +3083,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
fallthrough;
case FI_DATA_EXIST:
- case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
- case FI_ATOMIC_COMMITTED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -3163,8 +3206,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
set_bit(FI_DATA_EXIST, fi->flags);
- if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
@@ -3185,8 +3226,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
- ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -3267,11 +3306,6 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(inode, FI_DATA_EXIST);
}
-static inline int f2fs_has_inline_dots(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_INLINE_DOTS);
-}
-
static inline int f2fs_is_mmap_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_MMAP_FILE);
@@ -3292,8 +3326,6 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
__le32 *addr = get_dnode_addr(inode, page);
@@ -3432,17 +3464,6 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
return F2FS_I(inode)->i_inline_xattr_size;
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page)
-{
- int base = 0;
-
- if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
-
- return blkaddr_in_node(F2FS_NODE(node_page)) + base;
-}
-
#define f2fs_get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3495,7 +3516,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly);
+ bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
@@ -3719,7 +3740,7 @@ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3759,8 +3780,7 @@ void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp);
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno);
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
@@ -3868,7 +3888,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
@@ -3877,7 +3897,7 @@ void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_page_cache_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
@@ -3901,7 +3921,7 @@ void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age);
+ unsigned long long age, bool one_time);
/*
* recovery.c
@@ -3987,7 +4007,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_cp_call_count(sbi, foreground) \
atomic_inc(&sbi->cp_call_count[(foreground)])
-#define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++)
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -4172,7 +4192,7 @@ int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
-int f2fs_write_inline_data(struct inode *inode, struct page *page);
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
@@ -4289,6 +4309,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
+enum cluster_check_type {
+ CLUSTER_IS_COMPR, /* check only if compressed cluster */
+ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
+ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
+};
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
@@ -4309,12 +4334,13 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr,
unsigned int llen, unsigned int c_len);
@@ -4401,6 +4427,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
+static inline int f2fs_is_compressed_cluster(
+ struct inode *inode,
+ pgoff_t index) { return 0; }
+static inline bool f2fs_is_sparse_cluster(
+ struct inode *inode,
+ pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
struct inode *inode,
pgoff_t fofs, block_t blkaddr,
@@ -4653,9 +4685,11 @@ static inline void f2fs_io_schedule_timeout(long timeout)
io_schedule_timeout(timeout);
}
-static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
- enum page_type type)
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
+ struct folio *folio, enum page_type type)
{
+ pgoff_t ofs = folio->index;
+
if (unlikely(f2fs_cp_error(sbi)))
return;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 903337f8d21a..9ae54c4c72fe 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
@@ -54,7 +53,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -86,7 +85,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- int ret = f2fs_is_compressed_cluster(inode, page->index);
+ int ret = f2fs_is_compressed_cluster(inode, folio->index);
if (ret < 0) {
err = ret;
@@ -106,11 +105,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode) ||
- !PageUptodate(page))) {
- unlock_page(page);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != inode->i_mapping ||
+ folio_pos(folio) > i_size_read(inode) ||
+ !folio_test_uptodate(folio))) {
+ folio_unlock(folio);
err = -EFAULT;
goto out_sem;
}
@@ -118,9 +117,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_alloc) {
/* block allocation */
- err = f2fs_get_block_locked(&dn, page->index);
+ err = f2fs_get_block_locked(&dn, folio->index);
} else {
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
f2fs_put_dnode(&dn);
if (f2fs_is_pinned_file(inode) &&
!__is_valid_data_blkaddr(dn.data_blkaddr))
@@ -128,11 +127,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
}
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto out_sem;
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -140,18 +139,18 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (folio_test_mappedtodisk(folio))
goto out_sem;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
loff_t offset;
offset = i_size_read(inode) & ~PAGE_MASK;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
}
- set_page_dirty(page);
+ folio_mark_dirty(folio);
f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
f2fs_update_time(sbi, REQ_TIME);
@@ -163,7 +162,7 @@ out_sem:
out:
ret = vmf_fs_error(err);
- trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
+ trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
return ret;
}
@@ -218,6 +217,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
+ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+ XATTR_DIR_INO))
+ cp_reason = CP_XATTR_DIR;
return cp_reason;
}
@@ -373,8 +375,7 @@ sync_nodes:
f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
- (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
+ if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
ret = f2fs_issue_flush(sbi, inode->i_ino);
if (!ret) {
f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -431,7 +432,7 @@ static bool __found_offset(struct address_space *mapping,
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
struct dnode_of_data dn;
pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
@@ -513,10 +514,7 @@ fail:
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
-
- if (f2fs_compressed_file(inode))
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
switch (whence) {
case SEEK_SET:
@@ -1052,6 +1050,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
}
+ /*
+ * wait for inflight dio, blocks should be removed after
+ * IO completion.
+ */
+ if (attr->ia_size < old_size)
+ inode_dio_wait(inode);
+
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -1888,6 +1893,12 @@ static long f2fs_fallocate(struct file *file, int mode,
if (ret)
goto out;
+ /*
+ * wait for inflight dio, blocks should be removed after IO
+ * completion.
+ */
+ inode_dio_wait(inode);
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
goto out;
@@ -2116,10 +2127,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode *pinode;
loff_t isize;
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2149,6 +2162,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
goto out;
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[READ]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2158,27 +2172,18 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
/* Check if the inode already has a COW inode */
if (fi->cow_inode == NULL) {
/* Create a COW inode for atomic write */
- pinode = f2fs_iget(inode->i_sb, fi->i_pino);
- if (IS_ERR(pinode)) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- ret = PTR_ERR(pinode);
- goto out;
- }
+ struct dentry *dentry = file_dentry(filp);
+ struct inode *dir = d_inode(dentry->d_parent);
- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
- iput(pinode);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
+ if (ret)
+ goto out_unlock;
set_inode_flag(fi->cow_inode, FI_COW_FILE);
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
@@ -2187,11 +2192,13 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
F2FS_I(fi->cow_inode)->atomic_inode = inode;
} else {
/* Reuse the already created COW inode */
+ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
+
+ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
+
ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
}
f2fs_write_inode(inode, NULL);
@@ -2210,7 +2217,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
}
f2fs_i_size_write(fi->cow_inode, isize);
+out_unlock:
+ f2fs_up_write(&fi->i_gc_rwsem[READ]);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out;
f2fs_update_time(sbi, REQ_TIME);
fi->atomic_write_task = current;
@@ -2228,6 +2239,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2260,6 +2274,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2279,7 +2296,7 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
}
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly)
+ bool readonly, bool need_lock)
{
struct super_block *sb = sbi->sb;
int ret = 0;
@@ -2326,12 +2343,19 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
if (readonly)
goto out;
+ /* grab sb->s_umount to avoid racing w/ remount() */
+ if (need_lock)
+ down_read(&sbi->sb->s_umount);
+
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi);
clear_opt(sbi, DISCARD);
+ if (need_lock)
+ up_read(&sbi->sb->s_umount);
+
f2fs_update_time(sbi, REQ_TIME);
out:
@@ -2368,7 +2392,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
}
}
- ret = f2fs_do_shutdown(sbi, in, readonly);
+ ret = f2fs_do_shutdown(sbi, in, readonly, true);
if (need_drop)
mnt_drop_write_file(filp);
@@ -2686,7 +2710,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
(range->start + range->len) >> PAGE_SHIFT,
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
+ f2fs_is_atomic_file(inode)) {
err = -EINVAL;
goto unlock_out;
}
@@ -2710,7 +2735,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* block addresses are continuous.
*/
if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
- if (ei.fofs + ei.len >= pg_end)
+ if ((pgoff_t)ei.fofs + ei.len >= pg_end)
goto out;
}
@@ -2793,6 +2818,8 @@ do_map:
goto clear_out;
}
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -2917,6 +2944,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out_unlock;
}
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
goto out_unlock;
@@ -2968,9 +3000,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
f2fs_lock_op(sbi);
- ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
- pos_out >> F2FS_BLKSIZE_BITS,
- len >> F2FS_BLKSIZE_BITS, false);
+ ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
+ F2FS_BYTES_TO_BLK(pos_out),
+ F2FS_BYTES_TO_BLK(len), false);
if (!ret) {
if (dst_max_i_size)
@@ -3300,6 +3332,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
@@ -4193,6 +4230,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
/* It will never fail, when page has pinned above */
f2fs_bug_on(F2FS_I_SB(inode), !page);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -4207,9 +4246,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = fi->i_cluster_size;
- int count, ret;
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4244,10 +4282,15 @@ static int f2fs_ioc_decompress_file(struct file *filp)
goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (!f2fs_is_compressed_cluster(inode, page_idx))
+ continue;
+
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4257,9 +4300,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -4286,9 +4326,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int count, ret;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4322,10 +4362,15 @@ static int f2fs_ioc_compress_file(struct file *filp)
set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
+
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (f2fs_is_sparse_cluster(inode, page_idx))
+ continue;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4335,9 +4380,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -4538,6 +4580,13 @@ static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_down_read(&fi->i_gc_rwsem[READ]);
}
+ /* dio is not compatible w/ atomic file */
+ if (f2fs_is_atomic_file(inode)) {
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/*
* We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
* the higher-level function iomap_dio_rw() in order to ensure that the
@@ -4597,6 +4646,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
iov_iter_count(to), READ);
+ /* In LFS mode, if there is inflight dio, wait for its completion */
+ if (f2fs_lfs_mode(F2FS_I_SB(inode)))
+ inode_dio_wait(inode);
+
if (f2fs_should_use_dio(inode, iocb, to)) {
ret = f2fs_dio_read_iter(iocb, to);
} else {
@@ -4949,6 +5002,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* Determine whether we will do a direct write or a buffered write. */
dio = f2fs_should_use_dio(inode, iocb, from);
+ /* dio is not compatible w/ atomic write */
+ if (dio && f2fs_is_atomic_file(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
/* Possibly preallocate the blocks for the write. */
target_size = iocb->ki_pos + iov_iter_count(from);
preallocated = f2fs_preallocate_blocks(iocb, from, dio);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 724bbcb447d3..9322a7200e31 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
continue;
}
+ gc_control.one_time = false;
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -116,15 +118,30 @@ static int gc_thread_func(void *data)
goto next;
}
- if (has_enough_invalid_blocks(sbi))
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ if (has_enough_free_blocks(sbi,
+ gc_th->no_zoned_gc_percent)) {
+ wait_ms = gc_th->no_gc_sleep_time;
+ f2fs_up_write(&sbi->gc_lock);
+ goto next;
+ }
+ if (wait_ms == gc_th->no_gc_sleep_time)
+ wait_ms = gc_th->max_sleep_time;
+ }
+
+ if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
- else
+ if (f2fs_sb_has_blkzoned(sbi))
+ gc_control.one_time = true;
+ } else {
increase_sleep_time(gc_th, &wait_ms);
+ }
do_gc:
stat_inc_gc_call_count(sbi, foreground ?
FOREGROUND : BACKGROUND);
- sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+ gc_control.one_time;
	/* foreground GC has been triggered via f2fs_balance_fs() */
if (foreground)
@@ -179,9 +196,21 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
return -ENOMEM;
gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
- gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
- gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
- gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
+
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
+ gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
+ gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
+ } else {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->no_zoned_gc_percent = 0;
+ gc_th->boost_zoned_gc_percent = 0;
+ }
gc_th->gc_wake = false;
@@ -339,7 +368,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned char age = 0;
unsigned char u;
unsigned int i;
- unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
for (i = 0; i < usable_segs_per_sec; i++)
mtime += get_seg_entry(sbi, start + i)->mtime;
@@ -368,6 +397,11 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
+ CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
+ 100))
+ return UINT_MAX;
+
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
return get_valid_blocks(sbi, segno, true);
@@ -742,7 +776,7 @@ static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
*/
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age)
+ unsigned long long age, bool one_time)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct sit_info *sm = SIT_I(sbi);
@@ -759,6 +793,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
p.alloc_mode = alloc_mode;
p.age = age;
p.age_threshold = sbi->am.age_threshold;
+ p.one_time_gc = one_time;
retry:
select_policy(sbi, gc_type, type, &p);
@@ -1670,13 +1705,14 @@ next_step:
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
- int gc_type)
+ int gc_type, bool one_time)
{
struct sit_info *sit_i = SIT_I(sbi);
int ret;
down_write(&sit_i->sentry_lock);
- ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
+ ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
+ LFS, 0, one_time);
up_write(&sit_i->sentry_lock);
return ret;
}
@@ -1684,30 +1720,49 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type,
- bool force_migrate)
+ bool force_migrate, bool one_time)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
+ unsigned int sec_end_segno;
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
int submitted = 0;
- if (__is_large_section(sbi))
- end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
+ if (__is_large_section(sbi)) {
+ sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
- /*
- * zone-capacity can be less than zone-size in zoned devices,
- * resulting in less than expected usable segments in the zone,
- * calculate the end segno in the zone which can be garbage collected
- */
- if (f2fs_sb_has_blkzoned(sbi))
- end_segno -= SEGS_PER_SEC(sbi) -
- f2fs_usable_segs_in_sec(sbi, segno);
+ /*
+ * zone-capacity can be less than zone-size in zoned devices,
+ * resulting in less than expected usable segments in the zone,
+ * calculate the end segno in the zone which can be garbage
+ * collected
+ */
+ if (f2fs_sb_has_blkzoned(sbi))
+ sec_end_segno -= SEGS_PER_SEC(sbi) -
+ f2fs_usable_segs_in_sec(sbi);
+
+ if (gc_type == BG_GC || one_time) {
+ unsigned int window_granularity =
+ sbi->migration_window_granularity;
+
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent))
+ window_granularity *=
+ BOOST_GC_MULTIPLE;
+
+ end_segno = start_segno + window_granularity;
+ }
+
+ if (end_segno > sec_end_segno)
+ end_segno = sec_end_segno;
+ }
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
@@ -1786,7 +1841,8 @@ freed:
if (__is_large_section(sbi))
sbi->next_victim_seg[gc_type] =
- (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ (segno + 1 < sec_end_segno) ?
+ segno + 1 : NULL_SEGNO;
skip:
f2fs_put_page(sum_page, 0);
}
@@ -1863,7 +1919,7 @@ gc_more:
goto stop;
}
retry:
- ret = __get_victim(sbi, &segno, gc_type);
+ ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
if (ret) {
/* allow to search victim from sections has pinned data */
if (ret == -ENODATA && gc_type == FG_GC &&
@@ -1875,17 +1931,21 @@ retry:
}
seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
- gc_control->should_migrate_blocks);
+ gc_control->should_migrate_blocks,
+ gc_control->one_time);
if (seg_freed < 0)
goto stop;
total_freed += seg_freed;
- if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
sec_freed++;
total_sec_freed++;
}
+ if (gc_control->one_time)
+ goto stop;
+
if (gc_type == FG_GC) {
sbi->cur_victim_sec = NULL_SEGNO;
@@ -2010,8 +2070,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- do_garbage_collect(sbi, segno, &gc_list, FG_GC,
- dry_run_sections == 0);
+ do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
put_gc_inode(&gc_list);
if (!dry_run && get_valid_blocks(sbi, segno, true))
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index a8ea3301b815..2914b678bf8f 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -15,16 +15,27 @@
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
+/* GC sleep parameters for zoned devices */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED 10
+#define DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED 20
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED 60000
+
/* choose candidates from sections which has age of more than 7 days */
#define DEF_GC_THREAD_AGE_THRESHOLD (60 * 60 * 24 * 7)
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
+#define DEF_GC_THREAD_VALID_THRESH_RATIO 95 /* do not GC over 95% valid block ratio for one time GC */
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define LIMIT_NO_ZONED_GC 60 /* percentage over total user space of no gc for zoned devices */
+#define LIMIT_BOOST_ZONED_GC 25 /* percentage over total user space of boosted gc for zoned devices */
+#define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED 3
+#define BOOST_GC_MULTIPLE 5
+
#define DEF_GC_FAILED_PINNED_FILES 2048
#define MAX_GC_FAILED_PINNED_FILES USHRT_MAX
@@ -51,6 +62,11 @@ struct f2fs_gc_kthread {
* caller of f2fs_balance_fs()
* will wait on this wait queue.
*/
+
+	/* GC control for zoned devices */
+ unsigned int no_zoned_gc_percent;
+ unsigned int boost_zoned_gc_percent;
+ unsigned int valid_thresh_ratio;
};
struct gc_inode_list {
@@ -152,6 +168,12 @@ static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
*wait -= min_time;
}
+static inline bool has_enough_free_blocks(struct f2fs_sb_info *sbi,
+ unsigned int limit_perc)
+{
+ return free_sections(sbi) > ((sbi->total_sections * limit_perc) / 100);
+}
+
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
block_t user_block_count = sbi->user_block_count;
@@ -167,3 +189,10 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
free_user_blocks(sbi) <
limit_free_user_blocks(invalid_user_blocks));
}
+
+static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi)
+{
+ if (f2fs_sb_has_blkzoned(sbi))
+ return !has_enough_free_blocks(sbi, LIMIT_BOOST_ZONED_GC);
+ return has_enough_invalid_blocks(sbi);
+}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index cca7d448e55c..005babf1bed1 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -260,35 +260,34 @@ out:
return err;
}
-int f2fs_write_inline_data(struct inode *inode, struct page *page)
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
- struct dnode_of_data dn;
- int err;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *ipage;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
- if (err)
- return err;
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_dnode(&dn);
+ f2fs_put_page(ipage, 1);
return -EAGAIN;
}
- f2fs_bug_on(F2FS_I_SB(inode), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
- memcpy_from_page(inline_data_addr(inode, dn.inode_page),
- page, 0, MAX_INLINE_DATA(inode));
- set_page_dirty(dn.inode_page);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ memcpy_from_folio(inline_data_addr(inode, ipage),
+ folio, 0, MAX_INLINE_DATA(inode));
+ set_page_dirty(ipage);
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(folio);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_page_private_inline(dn.inode_page);
- f2fs_put_dnode(&dn);
+ clear_page_private_inline(ipage);
+ f2fs_put_page(ipage, 1);
return 0;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index aef57172014f..1ed86df343a5 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
@@ -35,6 +34,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (f2fs_inode_dirtied(inode, sync))
return;
+ if (f2fs_is_atomic_file(inode)) {
+ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ return;
+ }
+
mark_inode_dirty_sync(inode);
}
@@ -175,7 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
if (provided != calculated)
f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
- page->index, ino_of_node(page), provided, calculated);
+ page_folio(page)->index, ino_of_node(page),
+ provided, calculated);
return provided == calculated;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 38b4750475db..57d46e1439de 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -457,62 +457,6 @@ struct dentry *f2fs_get_parent(struct dentry *child)
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
-static int __recover_dot_dentries(struct inode *dir, nid_t pino)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct qstr dot = QSTR_INIT(".", 1);
- struct f2fs_dir_entry *de;
- struct page *page;
- int err = 0;
-
- if (f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
- dir->i_ino, pino);
- return 0;
- }
-
- if (!S_ISDIR(dir->i_mode)) {
- f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
- dir->i_ino, dir->i_mode, pino);
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- return -ENOTDIR;
- }
-
- err = f2fs_dquot_initialize(dir);
- if (err)
- return err;
-
- f2fs_balance_fs(sbi, true);
-
- f2fs_lock_op(sbi);
-
- de = f2fs_find_entry(dir, &dot, &page);
- if (de) {
- f2fs_put_page(page, 0);
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out;
- } else {
- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
- if (err)
- goto out;
- }
-
- de = f2fs_find_entry(dir, &dotdot_name, &page);
- if (de)
- f2fs_put_page(page, 0);
- else if (IS_ERR(page))
- err = PTR_ERR(page);
- else
- err = f2fs_do_add_link(dir, &dotdot_name, NULL, pino, S_IFDIR);
-out:
- if (!err)
- clear_inode_flag(dir, FI_INLINE_DOTS);
-
- f2fs_unlock_op(sbi);
- return err;
-}
-
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
@@ -522,7 +466,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *new;
nid_t ino = -1;
int err = 0;
- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -558,17 +501,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
- err = __recover_dot_dentries(dir, root_ino);
- if (err)
- goto out_iput;
- }
-
- if (f2fs_has_inline_dots(inode)) {
- err = __recover_dot_dentries(inode, dir->i_ino);
- if (err)
- goto out_iput;
- }
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b72ef96f7e33..59b13ff243fa 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -20,7 +20,7 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
@@ -123,7 +123,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
static void clear_node_page_dirty(struct page *page)
{
if (PageDirty(page)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
}
@@ -919,7 +919,7 @@ static int truncate_node(struct dnode_of_data *dn)
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = dn->node_page->index;
+ index = page_folio(dn->node_page)->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
@@ -1369,6 +1369,7 @@ fail:
*/
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
+ struct folio *folio = page_folio(page);
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1381,21 +1382,21 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
};
int err;
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!f2fs_inode_chksum_verify(sbi, page)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -EFSBADCRC;
}
return LOCKED_PAGE;
}
- err = f2fs_get_node_info(sbi, page->index, &ni, false);
+ err = f2fs_get_node_info(sbi, folio->index, &ni, false);
if (err)
return err;
/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -ENOENT;
}
@@ -1492,7 +1493,7 @@ out_err:
out_put_err:
/* ENOENT comes from read_node_page which is not an error. */
if (err != -ENOENT)
- f2fs_handle_page_eio(sbi, page->index, NODE);
+ f2fs_handle_page_eio(sbi, page_folio(page), NODE);
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
@@ -1535,7 +1536,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!clear_page_dirty_for_io(page))
goto page_out;
- ret = f2fs_write_inline_data(inode, page);
+ ret = f2fs_write_inline_data(inode, page_folio(page));
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
if (ret)
@@ -1608,6 +1609,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1624,15 +1626,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
};
unsigned int seq;
- trace_f2fs_writepage(page_folio(page), NODE);
+ trace_f2fs_writepage(folio, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
/* keep node pages in remount-ro mode */
if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
goto redirty_out;
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1646,7 +1648,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* get old block addr of this node page */
nid = nid_of_node(page);
- f2fs_bug_on(sbi, page->index != nid);
+ f2fs_bug_on(sbi, folio->index != nid);
if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
@@ -1660,10 +1662,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
f2fs_up_read(&sbi->node_write);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1674,7 +1676,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out;
}
- if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
+ if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
@@ -1684,7 +1686,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
*seq_id = seq;
}
- set_page_writeback(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
@@ -1697,7 +1699,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, NODE);
@@ -1711,7 +1713,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
@@ -1867,7 +1869,7 @@ continue_unlock:
}
if (!ret && atomic && !marked) {
f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
- ino, last_page->index);
+ ino, page_folio(last_page)->index);
lock_page(last_page);
f2fs_wait_on_page_writeback(last_page, NODE, true, true);
set_page_dirty(last_page);
@@ -3166,7 +3168,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kvzalloc(sbi,
- nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
+ F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -3185,7 +3187,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
if (IS_ERR(page))
return PTR_ERR(page);
- memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
+ memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
page_address(page), F2FS_BLKSIZE);
f2fs_put_page(page, 1);
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 78c3198a6308..1766254279d2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
clear_inode_flag(inode, FI_ATOMIC_REPLACE);
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
stat_dec_atomic_inode(inode);
F2FS_I(inode)->atomic_write_task = NULL;
@@ -366,6 +370,10 @@ out:
} else {
sbi->committed_atomic_block += fi->atomic_write_cnt;
set_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
__complete_revoke_list(inode, &revoke_list, ret ? true : false);
@@ -1282,6 +1290,13 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
wait_list, issued);
return 0;
}
+
+ /*
+ * Issue discard for conventional zones only if the device
+ * supports discard.
+ */
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
}
#endif
@@ -2686,22 +2701,47 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
goto got_it;
}
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* If we format f2fs on zoned storage, let's try to get pinned sections
* from beginning of the storage, which should be a conventional one.
*/
if (f2fs_sb_has_blkzoned(sbi)) {
- segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
+ /* Prioritize writing to conventional zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+ segno = 0;
+ else
+ segno = max(first_zoned_segno(sbi), *newseg);
hint = GET_SEC_FROM_SEG(sbi, segno);
}
+#endif
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+ /* Write only to sequential zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+ hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
+ secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ } else
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+ if (secno >= MAIN_SECS(sbi)) {
+ ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
+ goto out_unlock;
+ }
+ }
+#endif
+
if (secno >= MAIN_SECS(sbi)) {
secno = find_first_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi));
if (secno >= MAIN_SECS(sbi)) {
ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
goto out_unlock;
}
}
@@ -2743,10 +2783,8 @@ got_it:
out_unlock:
spin_unlock(&free_i->segmap_lock);
- if (ret == -ENOSPC) {
+ if (ret == -ENOSPC)
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
- f2fs_bug_on(sbi, 1);
- }
return ret;
}
@@ -3052,7 +3090,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
sanity_check_seg_type(sbi, seg_type);
/* f2fs_need_SSR() already forces to do this */
- if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3079,7 +3118,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == seg_type)
continue;
- if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3522,7 +3562,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (file_is_cold(inode) || f2fs_need_compress_data(inode))
return CURSEG_COLD_DATA;
- type = __get_age_segment_type(inode, fio->page->index);
+ type = __get_age_segment_type(inode,
+ page_folio(fio->page)->index);
if (type != NO_CHECK_TYPE)
return type;
@@ -3781,7 +3822,7 @@ out:
f2fs_up_read(&fio->sbi->io_order_lock);
}
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type)
{
struct f2fs_io_info fio = {
@@ -3790,20 +3831,20 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
.temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
- .old_blkaddr = page->index,
- .new_blkaddr = page->index,
- .page = page,
+ .old_blkaddr = folio->index,
+ .new_blkaddr = folio->index,
+ .page = folio_page(folio, 0),
.encrypted_page = NULL,
.in_list = 0,
};
- if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+ if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
fio.op_flags &= ~REQ_META;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_submit_page_write(&fio);
- stat_inc_meta_count(sbi, page->index);
+ stat_inc_meta_count(sbi, folio->index);
f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
}
@@ -5381,8 +5422,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
return BLKS_PER_SEG(sbi);
}
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
if (f2fs_sb_has_blkzoned(sbi))
return CAP_SEGS_PER_SEC(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bfc01a521cb9..71adb4a43bec 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -188,6 +188,7 @@ struct victim_sel_policy {
unsigned int min_segno; /* segment # having min. cost */
unsigned long long age; /* mtime of GCed section*/
unsigned long long age_threshold;/* age threshold */
+ bool one_time_gc; /* one time GC */
};
struct seg_entry {
@@ -430,7 +431,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
@@ -464,7 +465,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 176b5177c89d..87ab5696bd48 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -11,7 +11,6 @@
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
-#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -707,6 +706,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!strcmp(name, "on")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
} else if (!strcmp(name, "off")) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_warn(sbi, "zoned devices need bggc");
+ kfree(name);
+ return -EINVAL;
+ }
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
} else if (!strcmp(name, "sync")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
@@ -2561,7 +2565,7 @@ restore_opts:
static void f2fs_shutdown(struct super_block *sb)
{
- f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false);
+ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
}
#ifdef CONFIG_QUOTA
@@ -3318,29 +3322,47 @@ loff_t max_file_blocks(struct inode *inode)
* fit within U32_MAX + 1 data units.
*/
- result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
+ result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
return result;
}
-static int __f2fs_commit_super(struct buffer_head *bh,
- struct f2fs_super_block *super)
+static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
+ pgoff_t index, bool update)
{
- lock_buffer(bh);
- if (super)
- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_dirty(bh);
- unlock_buffer(bh);
-
+ struct bio *bio;
/* it's rare case, we can do fua all the time */
- return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
+ int ret;
+
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ if (update)
+ memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
+ sizeof(struct f2fs_super_block));
+ folio_mark_dirty(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
+
+ /* it doesn't need to set crypto context for superblock update */
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));
+
+ if (!bio_add_folio(bio, folio, folio_size(folio), 0))
+ f2fs_bug_on(sbi, 1);
+
+ ret = submit_bio_wait(bio);
+ folio_end_writeback(folio);
+
+ return ret;
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
@@ -3356,9 +3378,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
u64 main_end_blkaddr = main_blkaddr +
- (segment_count_main << log_blocks_per_seg);
+ ((u64)segment_count_main << log_blocks_per_seg);
u64 seg_end_blkaddr = segment0_blkaddr +
- (segment_count << log_blocks_per_seg);
+ ((u64)segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
@@ -3415,7 +3437,7 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally";
} else {
- err = __f2fs_commit_super(bh, NULL);
+ err = __f2fs_commit_super(sbi, folio, index, false);
res = err ? "failed" : "done";
}
f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
@@ -3428,12 +3450,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
block_t total_sections, blocks_per_seg;
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
size_t crc_offset = 0;
__u32 crc = 0;
@@ -3591,7 +3612,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sbi, bh))
+ if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
return 0;
@@ -3786,6 +3807,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = SEGS_PER_SEC(sbi);
+ sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
+ DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
@@ -3938,7 +3961,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
{
struct super_block *sb = sbi->sb;
int block;
- struct buffer_head *bh;
+ struct folio *folio;
struct f2fs_super_block *super;
int err = 0;
@@ -3947,32 +3970,32 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
return -ENOMEM;
for (block = 0; block < 2; block++) {
- bh = sb_bread(sb, block);
- if (!bh) {
+ folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
+ if (IS_ERR(folio)) {
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
- err = -EIO;
+ err = PTR_ERR(folio);
*recovery = 1;
continue;
}
/* sanity checking of raw super */
- err = sanity_check_raw_super(sbi, bh);
+ err = sanity_check_raw_super(sbi, folio, block);
if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- brelse(bh);
+ folio_put(folio);
*recovery = 1;
continue;
}
if (!*raw_super) {
- memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ memcpy(super, F2FS_SUPER_BLOCK(folio, block),
sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
- brelse(bh);
+ folio_put(folio);
}
/* No valid superblock */
@@ -3986,7 +4009,8 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *bh;
+ struct folio *folio;
+ pgoff_t index;
__u32 crc = 0;
int err;
@@ -4004,22 +4028,24 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
}
/* write back-up superblock first */
- bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block ? 0 : 1;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
- bh = sb_bread(sbi->sb, sbi->valid_super_block);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
return err;
}
@@ -4173,12 +4199,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
}
f2fs_warn(sbi, "Remounting filesystem read-only");
+
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+	 * We have already set the CP_ERROR_FLAG flag to stop all updates
+	 * to the filesystem, so there is no need to set SB_RDONLY here,
+	 * because that flag should be set under the sb->s_umount semaphore
+	 * via the remount procedure; otherwise it will confuse code like
+	 * freeze_super() and lead to deadlocks and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
}
static void f2fs_record_error_work(struct work_struct *work)
@@ -4219,6 +4247,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = true;
#ifdef CONFIG_BLK_DEV_ZONED
sbi->max_open_zones = UINT_MAX;
+ sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
#endif
for (i = 0; i < max_devices; i++) {
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index fee7ee45ceaa..c56e8c873935 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -170,6 +170,12 @@ static ssize_t undiscard_blks_show(struct f2fs_attr *a,
SM_I(sbi)->dcc_info->undiscard_blks);
}
+static ssize_t atgc_enabled_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", sbi->am.atgc_enabled ? 1 : 0);
+}
+
static ssize_t gc_mode_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -182,50 +188,50 @@ static ssize_t features_show(struct f2fs_attr *a,
int len = 0;
if (f2fs_sb_has_encrypt(sbi))
- len += scnprintf(buf, PAGE_SIZE - len, "%s",
+ len += sysfs_emit_at(buf, len, "%s",
"encryption");
if (f2fs_sb_has_blkzoned(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "extra_attr");
if (f2fs_sb_has_project_quota(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "projquota");
if (f2fs_sb_has_inode_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_checksum");
if (f2fs_sb_has_flexible_inline_xattr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
if (f2fs_sb_has_quota_ino(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "quota_ino");
if (f2fs_sb_has_inode_crtime(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_crtime");
if (f2fs_sb_has_lost_found(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "lost_found");
if (f2fs_sb_has_verity(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "sb_checksum");
if (f2fs_sb_has_casefold(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "casefold");
if (f2fs_sb_has_readonly(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "readonly");
if (f2fs_sb_has_compression(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
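As a side note on the scnprintf(buf + len, PAGE_SIZE - len, ...) to sysfs_emit_at(buf, len, ...) conversion above: sysfs_emit_at() clamps the output to the one-page sysfs buffer itself, so callers only track the running offset. A minimal sketch of the same append-at-offset pattern, using a hypothetical example_show() attribute that is not part of f2fs:

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	/* Hypothetical attribute, only to show the sysfs_emit_at() append pattern. */
	static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
				    char *buf)
	{
		int len = 0;

		/* sysfs_emit_at() bounds the write to the single-page sysfs
		 * buffer, so the caller no longer passes PAGE_SIZE - len by hand. */
		len += sysfs_emit_at(buf, len, "%s", "feature_a");
		len += sysfs_emit_at(buf, len, "%s%s", len ? ", " : "", "feature_b");
		len += sysfs_emit_at(buf, len, "\n");

		return len;
	}

	static struct kobj_attribute example_attr = __ATTR_RO(example);

A practical benefit of the helper is that it also sanity-checks that buf really is the page-aligned sysfs buffer, which the open-coded scnprintf() pattern could not do.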
@@ -323,17 +329,14 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
int hot_count = sbi->raw_super->hot_ext_count;
int len = 0, i;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "cold file extension:\n");
+ len += sysfs_emit_at(buf, len, "cold file extension:\n");
for (i = 0; i < cold_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "hot file extension:\n");
+ len += sysfs_emit_at(buf, len, "hot file extension:\n");
for (i = cold_count; i < cold_count + hot_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
+
return len;
}
@@ -561,6 +564,11 @@ out:
return -EINVAL;
}
+ if (!strcmp(a->attr.name, "migration_window_granularity")) {
+ if (t == 0 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ }
+
if (!strcmp(a->attr.name, "gc_urgent")) {
if (t == 0) {
sbi->gc_mode = GC_NORMAL;
@@ -627,6 +635,15 @@ out:
}
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
+ if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
+ return -EINVAL;
+ sbi->blkzone_alloc_policy = t;
+ return count;
+ }
+#endif
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (!strcmp(a->attr.name, "compr_written_block") ||
!strcmp(a->attr.name, "compr_saved_block")) {
@@ -775,7 +792,8 @@ out:
if (!strcmp(a->attr.name, "ipu_policy")) {
if (t >= BIT(F2FS_IPU_MAX))
return -EINVAL;
- if (t && f2fs_lfs_mode(sbi))
+ /* in LFS mode, allow only F2FS_IPU_NOCACHE, used for IPU on pinned files */
+ if (f2fs_lfs_mode(sbi) && (t & ~BIT(F2FS_IPU_NOCACHE)))
return -EINVAL;
SM_I(sbi)->ipu_policy = (unsigned int)t;
return count;
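The ipu_policy check above reduces to a mask test: in LFS mode, every policy bit other than F2FS_IPU_NOCACHE must be clear. A small stand-alone illustration of that test (the bit position used here is an assumption for demonstration, not taken from the f2fs headers):

	#include <stdio.h>

	#define BIT(nr)			(1U << (nr))
	#define F2FS_IPU_NOCACHE	6	/* assumed bit position, illustration only */

	/* in LFS mode, every ipu_policy bit other than NOCACHE must be clear */
	static int lfs_ipu_policy_ok(unsigned int t)
	{
		return !(t & ~BIT(F2FS_IPU_NOCACHE));
	}

	int main(void)
	{
		printf("%d\n", lfs_ipu_policy_ok(0));				/* 1: IPU fully disabled */
		printf("%d\n", lfs_ipu_policy_ok(BIT(F2FS_IPU_NOCACHE)));	/* 1: only NOCACHE set */
		printf("%d\n", lfs_ipu_policy_ok(BIT(0) | BIT(F2FS_IPU_NOCACHE)));	/* 0: extra bit rejected */
		return 0;
	}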
@@ -960,6 +978,9 @@ GC_THREAD_RW_ATTR(gc_urgent_sleep_time, urgent_sleep_time);
GC_THREAD_RW_ATTR(gc_min_sleep_time, min_sleep_time);
GC_THREAD_RW_ATTR(gc_max_sleep_time, max_sleep_time);
GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio);
/* SM_INFO ATTR */
SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
@@ -969,6 +990,7 @@ SM_INFO_GENERAL_RW_ATTR(min_fsync_blocks);
SM_INFO_GENERAL_RW_ATTR(min_seq_blocks);
SM_INFO_GENERAL_RW_ATTR(min_hot_blocks);
SM_INFO_GENERAL_RW_ATTR(min_ssr_sections);
+SM_INFO_GENERAL_RW_ATTR(reserved_segments);
/* DCC_INFO ATTR */
DCC_INFO_RW_ATTR(max_small_discards, max_discards);
@@ -1001,6 +1023,7 @@ F2FS_SBI_RW_ATTR(gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_SBI_RW_ATTR(gc_reclaimed_segments, gc_reclaimed_segs);
F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
F2FS_SBI_GENERAL_RW_ATTR(dir_level);
#ifdef CONFIG_F2FS_IOSTAT
F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
@@ -1033,6 +1056,7 @@ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif
/* STAT_INFO ATTR */
@@ -1072,6 +1096,7 @@ F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
+F2FS_GENERAL_RO_ATTR(atgc_enabled);
F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
@@ -1116,6 +1141,9 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_min_sleep_time),
ATTR_LIST(gc_max_sleep_time),
ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_no_zoned_gc_percent),
+ ATTR_LIST(gc_boost_zoned_gc_percent),
+ ATTR_LIST(gc_valid_thresh_ratio),
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
@@ -1138,8 +1166,10 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
+ ATTR_LIST(reserved_segments),
ATTR_LIST(max_victim_search),
ATTR_LIST(migration_granularity),
+ ATTR_LIST(migration_window_granularity),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
@@ -1187,6 +1217,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
#ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST(unusable_blocks_per_sec),
+ ATTR_LIST(blkzone_alloc_policy),
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compr_written_block),
@@ -1200,6 +1231,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(atgc_candidate_count),
ATTR_LIST(atgc_age_weight),
ATTR_LIST(atgc_age_threshold),
+ ATTR_LIST(atgc_enabled),
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
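For completeness, the new read-only atgc_enabled attribute can be checked from userspace like any other f2fs sysfs file. A hypothetical sketch, assuming a device named sda1 under the usual /sys/fs/f2fs/<disk>/ layout:

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		/* "sda1" is an example device; real paths are /sys/fs/f2fs/<disk>/ */
		FILE *f = fopen("/sys/fs/f2fs/sda1/atgc_enabled", "r");

		if (!f) {
			perror("atgc_enabled");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("atgc_enabled: %s", line);	/* prints "1" or "0" */
		fclose(f);
		return 0;
	}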
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 84a33fe49bed..2287f238ae09 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -74,7 +74,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- if (pos + count > inode->i_sb->s_maxbytes)
+ if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
return -EFBIG;
while (count) {
@@ -237,7 +237,8 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
pos = le64_to_cpu(dloc.pos);
/* Get the descriptor */
- if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
+ if (pos + size < pos ||
+ pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
f2fs_handle_error(F2FS_I_SB(inode),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index f290fe9327c4..3f3874943679 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr, *last_base_addr;
int found, newsize;
@@ -772,9 +773,18 @@ retry:
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- if (S_ISDIR(inode->i_mode))
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode))
+ goto same;
+ /*
+ * In strict mode, fsync() always tries to trigger a checkpoint to keep
+ * all metadata consistent; in other modes, it triggers a checkpoint only
+ * when the parent's xattr metadata has been updated.
+ */
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ set_sbi_flag(sbi, SBI_NEED_CP);
+ else
+ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
diff --git a/fs/fsopen.c b/fs/fsopen.c
index ee92ca58429e..6cef3deccded 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -78,7 +78,6 @@ static int fscontext_release(struct inode *inode, struct file *file)
const struct file_operations fscontext_fops = {
.read = fscontext_read,
.release = fscontext_release,
- .llseek = no_llseek,
};
/*
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 6e0228c6d0cb..ce0ff7a9007b 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -3,6 +3,9 @@
# Makefile for the FUSE filesystem.
#
+# Needed for trace events
+ccflags-y = -I$(src)
+
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index 04cfd8fee992..8f484b105f13 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -12,7 +12,6 @@
#include <linux/posix_acl_xattr.h>
static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
- struct mnt_idmap *idmap,
struct inode *inode, int type, bool rcu)
{
int size;
@@ -74,7 +73,7 @@ struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
if (fuse_no_acl(fc, inode))
return ERR_PTR(-EOPNOTSUPP);
- return __fuse_get_acl(fc, idmap, inode, type, false);
+ return __fuse_get_acl(fc, inode, type, false);
}
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
@@ -90,8 +89,7 @@ struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
*/
if (!fc->posix_acl)
return NULL;
-
- return __fuse_get_acl(fc, &nop_mnt_idmap, inode, type, rcu);
+ return __fuse_get_acl(fc, inode, type, rcu);
}
int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
@@ -146,8 +144,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
* be stripped.
*/
if (fc->posix_acl &&
- !in_group_or_capable(&nop_mnt_idmap, inode,
- i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
+ !in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 97ac994ff78f..2a730d88cc3b 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -183,27 +183,23 @@ out:
static const struct file_operations fuse_ctl_abort_ops = {
.open = nonseekable_open,
.write = fuse_conn_abort_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_max_background_ops = {
.open = nonseekable_open,
.read = fuse_conn_max_background_read,
.write = fuse_conn_max_background_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_congestion_threshold_ops = {
.open = nonseekable_open,
.read = fuse_conn_congestion_threshold_read,
.write = fuse_conn_congestion_threshold_write,
- .llseek = no_llseek,
};
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3dc035e419cf..1f64ae6d7a69 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -22,6 +22,9 @@
#include <linux/splice.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include "fuse_trace.h"
+
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -105,11 +108,17 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
static void fuse_put_request(struct fuse_req *req);
-static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
+static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ bool for_background)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
+ bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP);
+ kuid_t fsuid;
+ kgid_t fsgid;
int err;
+
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
@@ -137,19 +146,32 @@ static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
goto out;
}
- req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
- if (unlikely(req->in.h.uid == ((uid_t)-1) ||
- req->in.h.gid == ((gid_t)-1))) {
+ /*
+ * Keep the old behavior when idmapping support has not been
+ * declared by the FUSE server.
+ *
+ * For FUSE servers that support idmapped mounts, we send the
+ * UID/GID only along with "inode creation" fuse requests;
+ * otherwise idmap == &invalid_mnt_idmap and
+ * req->in.h.{u,g}id will be set to FUSE_INVALID_UIDGID.
+ */
+ fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
+ fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
+ req->in.h.uid = from_kuid(fc->user_ns, fsuid);
+ req->in.h.gid = from_kgid(fc->user_ns, fsgid);
+
+ if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
+ req->in.h.gid == ((gid_t)-1))) {
fuse_put_request(req);
return ERR_PTR(-EOVERFLOW);
}
+
return req;
out:
@@ -194,11 +216,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
}
EXPORT_SYMBOL_GPL(fuse_len_args);
-u64 fuse_get_unique(struct fuse_iqueue *fiq)
+static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
+{
+ u64 ret;
+
+ spin_lock(&fiq->lock);
+ ret = fuse_get_unique_locked(fiq);
+ spin_unlock(&fiq->lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +250,70 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
+static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ kfree(forget);
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (list_empty(&req->intr_entry)) {
+ list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from fuse_request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->lock);
+ } else {
+ fuse_dev_wake_and_unlock(fiq);
+ }
+ } else {
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique_locked(fiq);
+ list_add_tail(&req->list, &fiq->pending);
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+ }
+}
+
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
- .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
- .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
- .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+ .send_forget = fuse_dev_queue_forget,
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_dev_queue_req,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
-static void queue_request_and_unlock(struct fuse_iqueue *fiq,
- struct fuse_req *req)
-__releases(fiq->lock)
+static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
- list_add_tail(&req->list, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ trace_fuse_request_send(req);
+ fiq->ops->send_req(fiq, req);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +324,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- fiq->forget_list_tail->next = forget;
- fiq->forget_list_tail = forget;
- fiq->ops->wake_forget_and_unlock(fiq);
- } else {
- kfree(forget);
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_forget(fiq, forget);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +338,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->lock);
- req->in.h.unique = fuse_get_unique(fiq);
- queue_request_and_unlock(fiq, req);
+ fuse_send_one(fiq, req);
}
}
@@ -288,6 +359,7 @@ void fuse_request_end(struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+ trace_fuse_request_end(req);
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below FR_INTERRUPTED check. Pairs with
@@ -337,29 +409,12 @@ static int queue_interrupt(struct fuse_req *req)
{
struct fuse_iqueue *fiq = &req->fm->fc->iq;
- spin_lock(&fiq->lock);
/* Check for we've sent request to interrupt this req */
- if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->lock);
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
- }
- if (list_empty(&req->intr_entry)) {
- list_add_tail(&req->intr_entry, &fiq->interrupts);
- /*
- * Pairs with smp_mb() implied by test_and_set_bit()
- * from fuse_request_end().
- */
- smp_mb();
- if (test_bit(FR_FINISHED, &req->flags)) {
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->lock);
- return 0;
- }
- fiq->ops->wake_interrupt_and_unlock(fiq);
- } else {
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_interrupt(fiq, req);
+
return 0;
}
@@ -414,21 +469,15 @@ static void __fuse_request_send(struct fuse_req *req)
struct fuse_iqueue *fiq = &req->fm->fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->lock);
- if (!fiq->connected) {
- spin_unlock(&fiq->lock);
- req->out.h.error = -ENOTCONN;
- } else {
- req->in.h.unique = fuse_get_unique(fiq);
- /* acquire extra reference, since request is still needed
- after fuse_request_end() */
- __fuse_get_request(req);
- queue_request_and_unlock(fiq, req);
- request_wait_answer(req);
- /* Pairs with smp_wmb() in fuse_request_end() */
- smp_rmb();
- }
+ /* acquire extra reference, since request is still needed after
+ fuse_request_end() */
+ __fuse_get_request(req);
+ fuse_send_one(fiq, req);
+
+ request_wait_answer(req);
+ /* Pairs with smp_wmb() in fuse_request_end() */
+ smp_rmb();
}
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -468,8 +517,14 @@ static void fuse_force_creds(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ } else {
+ req->in.h.uid = FUSE_INVALID_UIDGID;
+ req->in.h.gid = FUSE_INVALID_UIDGID;
+ }
+
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}
@@ -484,7 +539,9 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
__set_bit(FR_ASYNC, &req->flags);
}
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
@@ -501,7 +558,7 @@ ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
__set_bit(FR_FORCE, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -562,7 +619,7 @@ int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
__set_bit(FR_BACKGROUND, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, true);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, true);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -583,9 +640,8 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
{
struct fuse_req *req;
struct fuse_iqueue *fiq = &fm->fc->iq;
- int err = 0;
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -594,16 +650,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- queue_request_and_unlock(fiq, req);
- } else {
- err = -ENODEV;
- spin_unlock(&fiq->lock);
- fuse_put_request(req);
- }
+ fuse_send_one(fiq, req);
- return err;
+ return 0;
}
/*
@@ -1075,9 +1124,9 @@ __releases(fiq->lock)
return err ? err : reqsize;
}
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp)
+static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1145,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
-EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
@@ -1111,7 +1159,7 @@ __releases(fiq->lock)
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1142,7 +1190,7 @@ __releases(fiq->lock)
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1830,7 +1878,7 @@ static void fuse_resend(struct fuse_conn *fc)
}
/* iq and pq requests are both oldest to newest */
list_splice(&to_queue, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ fuse_dev_wake_and_unlock(fiq);
}
static int fuse_notify_resend(struct fuse_conn *fc)
@@ -2408,7 +2456,6 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
- .llseek = no_llseek,
.read_iter = fuse_dev_read,
.splice_read = fuse_dev_splice_read,
.write_iter = fuse_dev_write,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8e96df9fd76c..54104dd48af7 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -545,17 +545,21 @@ static u32 fuse_ext_size(size_t size)
/*
* This adds just a single supplementary group that matches the parent's group.
*/
-static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
+static int get_create_supp_group(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct fuse_in_arg *ext)
{
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_ext_header *xh;
struct fuse_supp_groups *sg;
kgid_t kgid = dir->i_gid;
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
gid_t parent_gid = from_kgid(fc->user_ns, kgid);
+
u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
- if (parent_gid == (gid_t) -1 || gid_eq(kgid, current_fsgid()) ||
- !in_group_p(kgid))
+ if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
+ !vfsgid_in_group_p(vfsgid))
return 0;
xh = extend_arg(ext, sg_len);
@@ -572,7 +576,8 @@ static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
return 0;
}
-static int get_create_ext(struct fuse_args *args,
+static int get_create_ext(struct mnt_idmap *idmap,
+ struct fuse_args *args,
struct inode *dir, struct dentry *dentry,
umode_t mode)
{
@@ -583,7 +588,7 @@ static int get_create_ext(struct fuse_args *args,
if (fc->init_security)
err = get_security_context(dentry, mode, &ext);
if (!err && fc->create_supp_group)
- err = get_create_supp_group(dir, &ext);
+ err = get_create_supp_group(idmap, dir, &ext);
if (!err && ext.size) {
WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
@@ -609,9 +614,9 @@ static void free_ext_value(struct fuse_args *args)
* If the filesystem doesn't support this, then fall back to separate
* 'mknod' + 'open' requests.
*/
-static int fuse_create_open(struct inode *dir, struct dentry *entry,
- struct file *file, unsigned int flags,
- umode_t mode, u32 opcode)
+static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, struct file *file,
+ unsigned int flags, umode_t mode, u32 opcode)
{
int err;
struct inode *inode;
@@ -668,11 +673,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.out_args[1].size = sizeof(*outopenp);
args.out_args[1].value = outopenp;
- err = get_create_ext(&args, dir, entry, mode);
+ err = get_create_ext(idmap, &args, dir, entry, mode);
if (err)
goto out_free_ff;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
free_ext_value(&args);
if (err)
goto out_free_ff;
@@ -729,6 +734,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
umode_t mode)
{
int err;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
@@ -753,7 +759,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, FUSE_CREATE);
+ err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -764,7 +770,7 @@ out_dput:
return err;
mknod:
- err = fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ err = fuse_mknod(idmap, dir, entry, mode, 0);
if (err)
goto out_dput;
no_open:
@@ -774,9 +780,9 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
- struct inode *dir, struct dentry *entry,
- umode_t mode)
+static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
@@ -798,12 +804,12 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
args->out_args[0].value = &outarg;
if (args->opcode != FUSE_LINK) {
- err = get_create_ext(args, dir, entry, mode);
+ err = get_create_ext(idmap, args, dir, entry, mode);
if (err)
goto out_put_forget_req;
}
- err = fuse_simple_request(fm, args);
+ err = fuse_simple_idmap_request(idmap, fm, args);
free_ext_value(args);
if (err)
goto out_put_forget_req;
@@ -864,13 +870,13 @@ static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, mode);
+ return create_new_entry(idmap, fm, &args, dir, entry, mode);
}
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, umode_t mode, bool excl)
{
- return fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ return fuse_mknod(idmap, dir, entry, mode, 0);
}
static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
@@ -882,7 +888,8 @@ static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (fc->no_tmpfile)
return -EOPNOTSUPP;
- err = fuse_create_open(dir, file->f_path.dentry, file, file->f_flags, mode, FUSE_TMPFILE);
+ err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
+ file->f_flags, mode, FUSE_TMPFILE);
if (err == -ENOSYS) {
fc->no_tmpfile = 1;
err = -EOPNOTSUPP;
@@ -909,7 +916,7 @@ static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, S_IFDIR);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
}
static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
@@ -925,7 +932,7 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = entry->d_name.name;
args.in_args[1].size = len;
args.in_args[1].value = link;
- return create_new_entry(fm, &args, dir, entry, S_IFLNK);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -1019,7 +1026,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
return err;
}
-static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags, int opcode, size_t argsize)
{
@@ -1040,7 +1047,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
args.in_args[1].value = oldent->d_name.name;
args.in_args[2].size = newent->d_name.len + 1;
args.in_args[2].value = newent->d_name.name;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
if (!err) {
/* ctime changes */
fuse_update_ctime(d_inode(oldent));
@@ -1086,7 +1093,8 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
if (fc->no_rename2 || fc->minor < 23)
return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
+ olddir, oldent, newdir, newent, flags,
FUSE_RENAME2,
sizeof(struct fuse_rename2_in));
if (err == -ENOSYS) {
@@ -1094,7 +1102,7 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
err = -EINVAL;
}
} else {
- err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
FUSE_RENAME,
sizeof(struct fuse_rename_in));
}
@@ -1119,7 +1127,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
@@ -1128,18 +1136,22 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
return err;
}
-static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
- struct kstat *stat)
+static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct fuse_attr *attr, struct kstat *stat)
{
unsigned int blkbits;
struct fuse_conn *fc = get_fuse_conn(inode);
+ vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
+ make_kuid(fc->user_ns, attr->uid));
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
+ make_kgid(fc->user_ns, attr->gid));
stat->dev = inode->i_sb->s_dev;
stat->ino = attr->ino;
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
stat->nlink = attr->nlink;
- stat->uid = make_kuid(fc->user_ns, attr->uid);
- stat->gid = make_kgid(fc->user_ns, attr->gid);
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->atime.tv_sec = attr->atime;
stat->atime.tv_nsec = attr->atimensec;
@@ -1178,8 +1190,8 @@ static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
attr->blksize = sx->blksize;
}
-static int fuse_do_statx(struct inode *inode, struct file *file,
- struct kstat *stat)
+static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat)
{
int err;
struct fuse_attr attr;
@@ -1232,15 +1244,15 @@ static int fuse_do_statx(struct inode *inode, struct file *file,
stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
stat->btime.tv_sec = sx->btime.tv_sec;
stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
- fuse_fillattr(inode, &attr, stat);
+ fuse_fillattr(idmap, inode, &attr, stat);
stat->result_mask |= STATX_TYPE;
}
return 0;
}
-static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
- struct file *file)
+static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct kstat *stat, struct file *file)
{
int err;
struct fuse_getattr_in inarg;
@@ -1279,15 +1291,15 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
ATTR_TIMEOUT(&outarg),
attr_version);
if (stat)
- fuse_fillattr(inode, &outarg.attr, stat);
+ fuse_fillattr(idmap, inode, &outarg.attr, stat);
}
}
return err;
}
-static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, u32 request_mask,
- unsigned int flags)
+static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1318,17 +1330,17 @@ retry:
forget_all_cached_acls(inode);
/* Try statx if BTIME is requested */
if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
- err = fuse_do_statx(inode, file, stat);
+ err = fuse_do_statx(idmap, inode, file, stat);
if (err == -ENOSYS) {
fc->no_statx = 1;
err = 0;
goto retry;
}
} else {
- err = fuse_do_getattr(inode, stat, file);
+ err = fuse_do_getattr(idmap, inode, stat, file);
}
} else if (stat) {
- generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
if (test_bit(FUSE_I_BTIME, &fi->state)) {
@@ -1342,7 +1354,7 @@ retry:
int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
{
- return fuse_update_get_attr(inode, file, NULL, mask, 0);
+ return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
}
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
@@ -1462,6 +1474,14 @@ static int fuse_access(struct inode *inode, int mask)
BUG_ON(mask & MAY_NOT_BLOCK);
+ /*
+ * We should not send FUSE_ACCESS to userspace when idmapped
+ * mounts are enabled, because in that case we have
+ * fc->default_permissions = 1 and access permission checks
+ * are done on the kernel side.
+ */
+ WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
+
if (fm->fc->no_access)
return 0;
@@ -1486,7 +1506,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
return -ECHILD;
forget_all_cached_acls(inode);
- return fuse_do_getattr(inode, NULL, NULL);
+ return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
}
/*
@@ -1534,7 +1554,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
}
if (fc->default_permissions) {
- err = generic_permission(&nop_mnt_idmap, inode, mask);
+ err = generic_permission(idmap, inode, mask);
/* If permission is denied, try to refresh file
attributes. This is also needed, because the root
@@ -1542,7 +1562,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
if (err == -EACCES && !refreshed) {
err = fuse_perm_getattr(inode, mask);
if (!err)
- err = generic_permission(&nop_mnt_idmap,
+ err = generic_permission(idmap,
inode, mask);
}
@@ -1738,17 +1758,29 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
return true;
}
-static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
- struct fuse_setattr_in *arg, bool trust_local_cmtime)
+static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
+ struct iattr *iattr, struct fuse_setattr_in *arg,
+ bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
- if (ivalid & ATTR_UID)
- arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
- if (ivalid & ATTR_GID)
- arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
+
+ if (ivalid & ATTR_UID) {
+ kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);
+
+ arg->valid |= FATTR_UID;
+ arg->uid = from_kuid(fc->user_ns, fsuid);
+ }
+
+ if (ivalid & ATTR_GID) {
+ kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);
+
+ arg->valid |= FATTR_GID;
+ arg->gid = from_kgid(fc->user_ns, fsgid);
+ }
+
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
@@ -1868,8 +1900,8 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
* vmtruncate() doesn't allow for this case, so do the rlimit checking
* and the actual truncation by hand.
*/
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file)
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file)
{
struct inode *inode = d_inode(dentry);
struct fuse_mount *fm = get_fuse_mount(inode);
@@ -1889,7 +1921,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
- err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err)
return err;
@@ -1948,7 +1980,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
+ iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
if (file) {
struct fuse_file *ff = file->private_data;
inarg.valid |= FATTR_FH;
@@ -2065,7 +2097,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
* ia_mode calculation may have used stale i_mode.
* Refresh and recalculate.
*/
- ret = fuse_do_getattr(inode, NULL, file);
+ ret = fuse_do_getattr(idmap, inode, NULL, file);
if (ret)
return ret;
@@ -2083,7 +2115,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
if (!attr->ia_valid)
return 0;
- ret = fuse_do_setattr(entry, attr, file);
+ ret = fuse_do_setattr(idmap, entry, attr, file);
if (!ret) {
/*
* If filesystem supports acls it may have updated acl xattrs in
@@ -2122,7 +2154,7 @@ static int fuse_getattr(struct mnt_idmap *idmap,
return -EACCES;
}
- return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
+ return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ba6df52a823e..f33fbce86ae0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -448,9 +448,6 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
/*
* Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
*/
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t idx_to)
@@ -458,6 +455,9 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
struct fuse_inode *fi = get_fuse_inode(inode);
bool found;
+ if (RB_EMPTY_ROOT(&fi->writepages))
+ return false;
+
spin_lock(&fi->lock);
found = fuse_find_writeback(fi, idx_from, idx_to);
spin_unlock(&fi->lock);
@@ -1345,7 +1345,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
/* shared locks are not allowed with parallel page cache IO */
if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
- return false;
+ return true;
/* Parallel dio beyond EOF is not supported, at least for now. */
if (fuse_io_past_eof(iocb, from))
@@ -1398,6 +1398,7 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
struct inode *inode = mapping->host;
@@ -1412,7 +1413,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
return err;
if (fc->handle_killpriv_v2 &&
- setattr_should_drop_suidgid(&nop_mnt_idmap,
+ setattr_should_drop_suidgid(idmap,
file_inode(file))) {
goto writethrough;
}
@@ -1762,27 +1763,31 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
for (i = 0; i < ap->num_pages; i++)
__free_page(ap->pages[i]);
- if (wpa->ia.ff)
- fuse_file_put(wpa->ia.ff, false);
+ fuse_file_put(wpa->ia.ff, false);
kfree(ap->pages);
kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_mount *fm,
- struct fuse_writepage_args *wpa)
+static void fuse_writepage_finish_stat(struct inode *inode, struct page *page)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+ dec_node_page_state(page, NR_WRITEBACK_TEMP);
+ wb_writeout_inc(&bdi->wb);
+}
+
+static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- for (i = 0; i < ap->num_pages; i++) {
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
- }
+ for (i = 0; i < ap->num_pages; i++)
+ fuse_writepage_finish_stat(inode, ap->pages[i]);
+
wake_up(&fi->page_waitq);
}
@@ -1829,19 +1834,14 @@ __acquires(fi->lock)
out_free:
fi->writectr--;
rb_erase(&wpa->writepages_entry, &fi->writepages);
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
/* After rb_erase() aux request list is private */
for (aux = wpa->next; aux; aux = next) {
- struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
-
next = aux->next;
aux->next = NULL;
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(aux->inode, aux->ia.ap.pages[0]);
fuse_writepage_free(aux);
}
@@ -1936,7 +1936,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
wpa->next = next->next;
next->next = NULL;
- next->ia.ff = fuse_file_get(wpa->ia.ff);
tree_insert(&fi->writepages, next);
/*
@@ -1965,7 +1964,7 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
fuse_send_writepage(fm, next, inarg->offset + inarg->size);
}
fi->writectr--;
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
fuse_writepage_free(wpa);
}
@@ -2049,49 +2048,77 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
rcu_read_unlock();
}
+static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
+ struct folio *tmp_folio, uint32_t page_index)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+
+ folio_copy(tmp_folio, folio);
+
+ ap->pages[page_index] = &tmp_folio->page;
+ ap->descs[page_index].offset = 0;
+ ap->descs[page_index].length = PAGE_SIZE;
+
+ inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
+ inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP);
+}
+
+static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+ struct fuse_file *ff)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
+
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa)
+ return NULL;
+
+ fuse_writepage_add_to_bucket(fc, wpa);
+ fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->inode = inode;
+ wpa->ia.ff = ff;
+
+ ap = &wpa->ia.ap;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
+
+ return wpa;
+}
+
static int fuse_writepage_locked(struct folio *folio)
{
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_writepage_args *wpa;
struct fuse_args_pages *ap;
struct folio *tmp_folio;
+ struct fuse_file *ff;
int error = -ENOMEM;
- folio_start_writeback(folio);
-
- wpa = fuse_writepage_args_alloc();
- if (!wpa)
- goto err;
- ap = &wpa->ia.ap;
-
tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
if (!tmp_folio)
- goto err_free;
+ goto err;
error = -EIO;
- wpa->ia.ff = fuse_write_file_get(fi);
- if (!wpa->ia.ff)
+ ff = fuse_write_file_get(fi);
+ if (!ff)
goto err_nofile;
- fuse_writepage_add_to_bucket(fc, wpa);
- fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);
+ wpa = fuse_writepage_args_setup(folio, ff);
+ error = -ENOMEM;
+ if (!wpa)
+ goto err_writepage_args;
- folio_copy(tmp_folio, folio);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
+ ap = &wpa->ia.ap;
ap->num_pages = 1;
- ap->pages[0] = &tmp_folio->page;
- ap->descs[0].offset = 0;
- ap->descs[0].length = PAGE_SIZE;
- ap->args.end = fuse_writepage_end;
- wpa->inode = inode;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
+ folio_start_writeback(folio);
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0);
spin_lock(&fi->lock);
tree_insert(&fi->writepages, wpa);
@@ -2103,13 +2130,12 @@ static int fuse_writepage_locked(struct folio *folio)
return 0;
+err_writepage_args:
+ fuse_file_put(ff, false);
err_nofile:
folio_put(tmp_folio);
-err_free:
- kfree(wpa);
err:
mapping_set_error(folio->mapping, error);
- folio_end_writeback(folio);
return error;
}
@@ -2155,7 +2181,6 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
int num_pages = wpa->ia.ap.num_pages;
int i;
- wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
@@ -2210,11 +2235,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
spin_unlock(&fi->lock);
if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(new_wpa->inode, new_ap->pages[0]);
fuse_writepage_free(new_wpa);
}
@@ -2264,24 +2285,17 @@ static int fuse_writepages_fill(struct folio *folio,
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct page *tmp_page;
+ struct folio *tmp_folio;
int err;
- if (!data->ff) {
- err = -EIO;
- data->ff = fuse_write_file_get(fi);
- if (!data->ff)
- goto out_unlock;
- }
-
if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
fuse_writepages_send(data);
data->wpa = NULL;
}
err = -ENOMEM;
- tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
- if (!tmp_page)
+ tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
+ if (!tmp_folio)
goto out_unlock;
/*
@@ -2299,35 +2313,20 @@ static int fuse_writepages_fill(struct folio *folio,
*/
if (data->wpa == NULL) {
err = -ENOMEM;
- wpa = fuse_writepage_args_alloc();
+ wpa = fuse_writepage_args_setup(folio, data->ff);
if (!wpa) {
- __free_page(tmp_page);
+ folio_put(tmp_folio);
goto out_unlock;
}
- fuse_writepage_add_to_bucket(fc, wpa);
-
+ fuse_file_get(wpa->ia.ff);
data->max_pages = 1;
-
ap = &wpa->ia.ap;
- fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
- ap->args.end = fuse_writepage_end;
- ap->num_pages = 0;
- wpa->inode = inode;
}
folio_start_writeback(folio);
- copy_highpage(tmp_page, &folio->page);
- ap->pages[ap->num_pages] = tmp_page;
- ap->descs[ap->num_pages].offset = 0;
- ap->descs[ap->num_pages].length = PAGE_SIZE;
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_pages);
data->orig_pages[ap->num_pages] = &folio->page;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
-
err = 0;
if (data->wpa) {
/*
@@ -2352,13 +2351,13 @@ static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_wb_data data;
int err;
- err = -EIO;
if (fuse_is_bad(inode))
- goto out;
+ return -EIO;
if (wbc->sync_mode == WB_SYNC_NONE &&
fc->num_background >= fc->congestion_threshold)
@@ -2366,7 +2365,9 @@ static int fuse_writepages(struct address_space *mapping,
data.inode = inode;
data.wpa = NULL;
- data.ff = NULL;
+ data.ff = fuse_write_file_get(fi);
+ if (!data.ff)
+ return -EIO;
err = -ENOMEM;
data.orig_pages = kcalloc(fc->max_pages,
@@ -2380,11 +2381,10 @@ static int fuse_writepages(struct address_space *mapping,
WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
}
- if (data.ff)
- fuse_file_put(data.ff, false);
kfree(data.orig_pages);
out:
+ fuse_file_put(data.ff, false);
return err;
}
@@ -2973,7 +2973,7 @@ static void fuse_do_truncate(struct file *file)
attr.ia_file = file;
attr.ia_valid |= ATTR_FILE;
- fuse_do_setattr(file_dentry(file), &attr, file);
+ fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f23919610313..e6cc3d552b13 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -449,22 +449,19 @@ struct fuse_iqueue;
*/
struct fuse_iqueue_ops {
/**
- * Signal that a forget has been queued
+ * Send one forget
*/
- void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
/**
- * Signal that an INTERRUPT request has been queued
+ * Send interrupt for request
*/
- void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
- * Signal that a request has been queued
+ * Send one request
*/
- void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Clean up when fuse_iqueue is destroyed
@@ -869,7 +866,7 @@ struct fuse_conn {
/** Negotiated minor version */
unsigned minor;
- /** Entry on the fuse_mount_list */
+ /** Entry on the fuse_conn_list */
struct list_head entry;
/** Device ID from the root super block */
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp);
-
/*
* Initialize READ or READDIR request
*/
@@ -1154,7 +1147,22 @@ void __exit fuse_ctl_cleanup(void);
/**
* Simple request sending that does request allocation and freeing
*/
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args);
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args);
+
+static inline ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+{
+ return __fuse_simple_request(&invalid_mnt_idmap, fm, args);
+}
+
+static inline ssize_t fuse_simple_idmap_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
+{
+ return __fuse_simple_request(idmap, fm, args);
+}
+
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
gfp_t gfp_flags);
@@ -1330,8 +1338,8 @@ bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written);
int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file);
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file);
void fuse_set_initialized(struct fuse_conn *fc);
diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h
new file mode 100644
index 000000000000..bbe9ddd8c716
--- /dev/null
+++ b/fs/fuse/fuse_trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fuse
+
+#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUSE_H
+
+#include <linux/tracepoint.h>
+
+#define OPCODES \
+ EM( FUSE_LOOKUP, "FUSE_LOOKUP") \
+ EM( FUSE_FORGET, "FUSE_FORGET") \
+ EM( FUSE_GETATTR, "FUSE_GETATTR") \
+ EM( FUSE_SETATTR, "FUSE_SETATTR") \
+ EM( FUSE_READLINK, "FUSE_READLINK") \
+ EM( FUSE_SYMLINK, "FUSE_SYMLINK") \
+ EM( FUSE_MKNOD, "FUSE_MKNOD") \
+ EM( FUSE_MKDIR, "FUSE_MKDIR") \
+ EM( FUSE_UNLINK, "FUSE_UNLINK") \
+ EM( FUSE_RMDIR, "FUSE_RMDIR") \
+ EM( FUSE_RENAME, "FUSE_RENAME") \
+ EM( FUSE_LINK, "FUSE_LINK") \
+ EM( FUSE_OPEN, "FUSE_OPEN") \
+ EM( FUSE_READ, "FUSE_READ") \
+ EM( FUSE_WRITE, "FUSE_WRITE") \
+ EM( FUSE_STATFS, "FUSE_STATFS") \
+ EM( FUSE_RELEASE, "FUSE_RELEASE") \
+ EM( FUSE_FSYNC, "FUSE_FSYNC") \
+ EM( FUSE_SETXATTR, "FUSE_SETXATTR") \
+ EM( FUSE_GETXATTR, "FUSE_GETXATTR") \
+ EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \
+ EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \
+ EM( FUSE_FLUSH, "FUSE_FLUSH") \
+ EM( FUSE_INIT, "FUSE_INIT") \
+ EM( FUSE_OPENDIR, "FUSE_OPENDIR") \
+ EM( FUSE_READDIR, "FUSE_READDIR") \
+ EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \
+ EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \
+ EM( FUSE_GETLK, "FUSE_GETLK") \
+ EM( FUSE_SETLK, "FUSE_SETLK") \
+ EM( FUSE_SETLKW, "FUSE_SETLKW") \
+ EM( FUSE_ACCESS, "FUSE_ACCESS") \
+ EM( FUSE_CREATE, "FUSE_CREATE") \
+ EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \
+ EM( FUSE_BMAP, "FUSE_BMAP") \
+ EM( FUSE_DESTROY, "FUSE_DESTROY") \
+ EM( FUSE_IOCTL, "FUSE_IOCTL") \
+ EM( FUSE_POLL, "FUSE_POLL") \
+ EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \
+ EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \
+ EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \
+ EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \
+ EM( FUSE_RENAME2, "FUSE_RENAME2") \
+ EM( FUSE_LSEEK, "FUSE_LSEEK") \
+ EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \
+ EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \
+ EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \
+ EM( FUSE_SYNCFS, "FUSE_SYNCFS") \
+ EM( FUSE_TMPFILE, "FUSE_TMPFILE") \
+ EM( FUSE_STATX, "FUSE_STATX") \
+ EMe(CUSE_INIT, "CUSE_INIT")
+
+/*
+ * This will turn the above table into TRACE_DEFINE_ENUM() for each of the
+ * entries.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+OPCODES
+
+/* Now we redefine it with the table that __print_symbolic() needs. */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+TRACE_EVENT(fuse_request_send,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(enum fuse_opcode, opcode)
+ __field(uint32_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->opcode = req->in.h.opcode;
+ __entry->len = req->in.h.len;
+ ),
+
+ TP_printk("connection %u req %llu opcode %u (%s) len %u ",
+ __entry->connection, __entry->unique, __entry->opcode,
+ __print_symbolic(__entry->opcode, OPCODES), __entry->len)
+);
+
+TRACE_EVENT(fuse_request_end,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(uint32_t, len)
+ __field(int32_t, error)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->len = req->out.h.len;
+ __entry->error = req->out.h.error;
+ ),
+
+ TP_printk("connection %u req %llu len %u error %d", __entry->connection,
+ __entry->unique, __entry->len, __entry->error)
+);
+
+#endif /* _TRACE_FUSE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE fuse_trace
+#include <trace/define_trace.h>
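The OPCODES table above is an X-macro: it is expanded once with EM()/EMe() defined as TRACE_DEFINE_ENUM() statements, then re-expanded as the { value, "name" } pairs that __print_symbolic() consumes. A minimal stand-alone sketch of the second expansion, with example opcodes and values that are not the real FUSE ones:

	#include <stdio.h>

	/* Same X-macro idea as OPCODES, reduced to plain C. */
	#define OPS \
		EM( OP_READ,  "OP_READ")  \
		EMe(OP_WRITE, "OP_WRITE")

	enum { OP_READ = 1, OP_WRITE = 2 };	/* example values only */

	/* expand the table into the { value, name } pairs a symbolic printer needs */
	#define EM(a, b)  { a, b },
	#define EMe(a, b) { a, b }
	static const struct { int op; const char *name; } op_names[] = { OPS };

	static const char *op_name(int op)
	{
		for (unsigned int i = 0; i < sizeof(op_names) / sizeof(op_names[0]); i++)
			if (op_names[i].op == op)
				return op_names[i].name;
		return "UNKNOWN";
	}

	int main(void)
	{
		printf("%s\n", op_name(OP_WRITE));	/* prints OP_WRITE */
		return 0;
	}

Writing the table once keeps the enum registration and the pretty-printing list from drifting apart, which is why the tracepoint header takes this shape.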
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index bebd89002328..fd3321e29a3e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1348,6 +1348,12 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_NO_EXPORT_SUPPORT)
fm->sb->s_export_op = &fuse_export_fid_operations;
+ if (flags & FUSE_ALLOW_IDMAP) {
+ if (fc->default_permissions)
+ fm->sb->s_iflags &= ~SB_I_NOIDMAP;
+ else
+ ok = false;
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -1395,7 +1401,7 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
@@ -1572,6 +1578,7 @@ static void fuse_sb_defaults(struct super_block *sb)
sb->s_time_gran = 1;
sb->s_export_op = &fuse_export_operations;
sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ sb->s_iflags |= SB_I_NOIDMAP;
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
@@ -1984,7 +1991,7 @@ static void fuse_kill_sb_anon(struct super_block *sb)
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
- .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
+ .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
@@ -2005,7 +2012,7 @@ static struct file_system_type fuseblk_fs_type = {
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
- .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
+ .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 9666d13884ce..62aee8289d11 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -228,16 +228,13 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
if (map->flags || map->padding)
goto out;
- file = fget(map->fd);
+ file = fget_raw(map->fd);
res = -EBADF;
if (!file)
goto out;
- res = -EOPNOTSUPP;
- if (!file->f_op->read_iter || !file->f_op->write_iter)
- goto out_fput;
-
backing_sb = file_inode(file)->i_sb;
+ pr_info("%s: %x:%pD %i\n", __func__, backing_sb->s_dev, file, backing_sb->s_stack_depth);
res = -ELOOP;
if (backing_sb->s_stack_depth >= fc->max_stack_depth)
goto out_fput;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index dd5260141615..6404a189e989 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -56,12 +56,14 @@ struct virtio_fs_vq {
bool connected;
long in_flight;
struct completion in_flight_zero; /* No inflight requests */
+ struct kobject *kobj;
char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
/* A virtio-fs device instance */
struct virtio_fs {
struct kobject kobj;
+ struct kobject *mqs_kobj;
struct list_head list; /* on virtio_fs_instances */
char *tag;
struct virtio_fs_vq *vqs;
@@ -200,19 +202,94 @@ static const struct kobj_type virtio_fs_ktype = {
.default_groups = virtio_fs_groups,
};
+static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
+ struct kobject *kobj)
+{
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ if (kobj == fs->vqs[i].kobj)
+ return &fs->vqs[i];
+ }
+ return NULL;
+}
+
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+
+ if (!fsvq)
+ return -EINVAL;
+ return sysfs_emit(buf, "%s\n", fsvq->name);
+}
+
+static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);
+
+static ssize_t cpu_list_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+ unsigned int cpu, qid;
+ const size_t size = PAGE_SIZE - 1;
+ bool first = true;
+ int ret = 0, pos = 0;
+
+ if (!fsvq)
+ return -EINVAL;
+
+ qid = fsvq->vq->index;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) {
+ if (first)
+ ret = snprintf(buf + pos, size - pos, "%u", cpu);
+ else
+ ret = snprintf(buf + pos, size - pos, ", %u", cpu);
+
+ if (ret >= size - pos)
+ break;
+ first = false;
+ pos += ret;
+ }
+ }
+ ret = snprintf(buf + pos, size + 1 - pos, "\n");
+ return pos + ret;
+}
+
+static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);
+
+static struct attribute *virtio_fs_vq_attrs[] = {
+ &virtio_fs_vq_name_attr.attr,
+ &virtio_fs_vq_cpu_list_attr.attr,
+ NULL
+};
+
+static struct attribute_group virtio_fs_vq_attr_group = {
+ .attrs = virtio_fs_vq_attrs,
+};
+
/* Make sure virtiofs_mutex is held */
-static void virtio_fs_put(struct virtio_fs *fs)
+static void virtio_fs_put_locked(struct virtio_fs *fs)
{
+ lockdep_assert_held(&virtio_fs_mutex);
+
kobject_put(&fs->kobj);
}
+static void virtio_fs_put(struct virtio_fs *fs)
+{
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put_locked(fs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
struct virtio_fs *vfs = fiq->priv;
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(vfs);
- mutex_unlock(&virtio_fs_mutex);
}
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
@@ -273,6 +350,50 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs)
}
}
+static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ kobject_put(fsvq->kobj);
+ }
+}
+
+static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ char buff[12];
+ int i, j, ret;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+
+ sprintf(buff, "%d", i);
+ fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
+ if (!fsvq->kobj) {
+ ret = -ENOMEM;
+ goto out_del;
+ }
+
+ ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
+ if (ret) {
+ kobject_put(fsvq->kobj);
+ goto out_del;
+ }
+ }
+
+ return 0;
+
+out_del:
+ for (j = 0; j < i; j++) {
+ fsvq = &fs->vqs[j];
+ kobject_put(fsvq->kobj);
+ }
+ return ret;
+}
+
/* Add a new instance to the list or return -EEXIST if tag name exists*/
static int virtio_fs_add_instance(struct virtio_device *vdev,
struct virtio_fs *fs)
@@ -296,17 +417,22 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
*/
fs->kobj.kset = virtio_fs_kset;
ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
- if (ret < 0) {
- mutex_unlock(&virtio_fs_mutex);
- return ret;
+ if (ret < 0)
+ goto out_unlock;
+
+ fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
+ if (!fs->mqs_kobj) {
+ ret = -ENOMEM;
+ goto out_del;
}
ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
- if (ret < 0) {
- kobject_del(&fs->kobj);
- mutex_unlock(&virtio_fs_mutex);
- return ret;
- }
+ if (ret < 0)
+ goto out_put;
+
+ ret = virtio_fs_add_queues_sysfs(fs);
+ if (ret)
+ goto out_remove;
list_add_tail(&fs->list, &virtio_fs_instances);
@@ -315,6 +441,16 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
kobject_uevent(&fs->kobj, KOBJ_ADD);
return 0;
+
+out_remove:
+ sysfs_remove_link(&fs->kobj, "device");
+out_put:
+ kobject_put(fs->mqs_kobj);
+out_del:
+ kobject_del(&fs->kobj);
+out_unlock:
+ mutex_unlock(&virtio_fs_mutex);
+ return ret;
}
/* Return the virtio_fs with a given tag, or NULL */
@@ -1043,7 +1179,9 @@ static void virtio_fs_remove(struct virtio_device *vdev)
mutex_lock(&virtio_fs_mutex);
/* This device is going away. No one should get new reference */
list_del_init(&fs->list);
+ virtio_fs_delete_queues_sysfs(fs);
sysfs_remove_link(&fs->kobj, "device");
+ kobject_put(fs->mqs_kobj);
kobject_del(&fs->kobj);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
@@ -1052,7 +1190,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
vdev->priv = NULL;
/* Put device reference on virtio_fs object */
- virtio_fs_put(fs);
+ virtio_fs_put_locked(fs);
mutex_unlock(&virtio_fs_mutex);
}
@@ -1091,22 +1229,13 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
-static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
- struct fuse_forget_link *link;
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
- struct virtio_fs *fs;
- struct virtio_fs_vq *fsvq;
- u64 unique;
-
- link = fuse_dequeue_forget(fiq, 1, NULL);
- unique = fuse_get_unique(fiq);
-
- fs = fiq->priv;
- fsvq = &fs->vqs[VQ_HIPRIO];
- spin_unlock(&fiq->lock);
+ struct virtio_fs *fs = fiq->priv;
+ struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
+ u64 unique = fuse_get_unique(fiq);
/* Allocate a buffer for the request */
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1255,7 @@ __releases(fiq->lock)
kfree(link);
}
-static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
/*
* TODO interrupts.
@@ -1136,7 +1264,6 @@ __releases(fiq->lock)
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
* with shared lock between host and guest.
*/
- spin_unlock(&fiq->lock);
}
/* Count number of scatter-gather elements required */
@@ -1341,21 +1468,17 @@ out:
return ret;
}
-static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
unsigned int queue_id;
struct virtio_fs *fs;
- struct fuse_req *req;
struct virtio_fs_vq *fsvq;
int ret;
- WARN_ON(list_empty(&fiq->pending));
- req = list_last_entry(&fiq->pending, struct fuse_req, list);
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
clear_bit(FR_PENDING, &req->flags);
- list_del_init(&req->list);
- WARN_ON(!list_empty(&fiq->pending));
- spin_unlock(&fiq->lock);
fs = fiq->priv;
queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1516,10 @@ __releases(fiq->lock)
}
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
- .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
- .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
- .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
- .release = virtio_fs_fiq_release,
+ .send_forget = virtio_fs_send_forget,
+ .send_interrupt = virtio_fs_send_interrupt,
+ .send_req = virtio_fs_send_req,
+ .release = virtio_fs_fiq_release,
};
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
@@ -1596,9 +1719,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
out_err:
kfree(fc);
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(fs);
- mutex_unlock(&virtio_fs_mutex);
return err;
}
@@ -1628,6 +1749,7 @@ static struct file_system_type virtio_fs_type = {
.name = "virtiofs",
.init_fs_context = virtio_fs_init_fs_context,
.kill_sb = virtio_kill_sb,
+ .fs_flags = FS_ALLOW_IDMAP,
};
static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 79491663dbc0..7b1df8cc2821 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -32,6 +32,15 @@ struct mnt_idmap nop_mnt_idmap = {
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
+/*
+ * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
+ * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
+ */
+struct mnt_idmap invalid_mnt_idmap = {
+ .count = REFCOUNT_INIT(1),
+};
+EXPORT_SYMBOL_GPL(invalid_mnt_idmap);
+
/**
* initial_idmapping - check whether this is the initial mapping
* @ns: idmapping to check
@@ -75,6 +84,8 @@ vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSUIDT_INIT(kuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSUID;
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
else
@@ -112,6 +123,8 @@ vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSGIDT_INIT(kgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSGID;
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
else
@@ -140,6 +153,8 @@ kuid_t from_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KUIDT(vfsuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_UID;
uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
if (uid == (uid_t)-1)
return INVALID_UID;
@@ -167,6 +182,8 @@ kgid_t from_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KGIDT(vfsgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_GID;
gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
if (gid == (gid_t)-1)
return INVALID_GID;
@@ -296,7 +313,7 @@ struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
*/
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap)
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
refcount_inc(&idmap->count);
return idmap;
@@ -312,7 +329,8 @@ EXPORT_SYMBOL_GPL(mnt_idmap_get);
*/
void mnt_idmap_put(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count))
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
+ refcount_dec_and_test(&idmap->count))
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
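
(invalid_mnt_idmap follows the same pattern as nop_mnt_idmap: a statically
allocated sentinel that is recognised by pointer comparison and exempt from
reference counting. A minimal stand-alone C sketch of that pattern follows;
struct mapping, mapping_get(), mapping_put() and the two sentinel objects are
made-up names used only for illustration:

	#include <stdio.h>
	#include <stdlib.h>

	struct mapping {
		int refcount;
	};

	/* Static sentinels: never allocated, never freed, recognised by address. */
	static struct mapping nop_mapping = { .refcount = 1 };
	static struct mapping invalid_mapping = { .refcount = 1 };

	static struct mapping *mapping_get(struct mapping *m)
	{
		if (m != &nop_mapping && m != &invalid_mapping)
			m->refcount++;
		return m;
	}

	static void mapping_put(struct mapping *m)
	{
		if (m != &nop_mapping && m != &invalid_mapping &&
		    --m->refcount == 0)
			free(m);
	}

	int main(void)
	{
		struct mapping *m = malloc(sizeof(*m));

		if (!m)
			return 1;
		m->refcount = 1;
		mapping_get(m);                 /* dynamic object: 1 -> 2 */
		mapping_put(m);                 /* 2 -> 1 */
		mapping_put(m);                 /* 1 -> 0, freed */
		mapping_put(&invalid_mapping);  /* sentinel: no-op */
		printf("done\n");
		return 0;
	}

In mnt_idmapping.c the lookup helpers additionally short-circuit on the invalid
sentinel and return INVALID_VFS{U,G}ID / INVALID_{U,G}ID, so every id translated
through it comes back invalid.)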
diff --git a/fs/namespace.c b/fs/namespace.c
index 6ba9c434cc9f..93c377816d75 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4471,6 +4471,10 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
+ /* The filesystem has turned off idmapped mounts. */
+ if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
+ return -EINVAL;
+
/* We're not controlling the superblock. */
if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
return -EPERM;
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index d7eae597e54d..b3910dfcb56d 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -552,6 +552,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
netfs_set_group(folio, netfs_group);
file_update_time(file);
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
if (ictx->ops->post_modify)
ictx->ops->post_modify(inode);
ret = VM_FAULT_LOCKED;
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index c9f0ed24cb7b..c562aec3b483 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -58,6 +58,7 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
bool needs_put);
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 0ad0982ce0e2..63280791de3b 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -9,34 +9,66 @@
#include "internal.h"
/*
- * Append a folio to the rolling queue.
+ * Make sure there's space in the rolling queue.
*/
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put)
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
{
- struct folio_queue *tail = rreq->buffer_tail;
- unsigned int slot, order = folio_order(folio);
+ struct folio_queue *tail = rreq->buffer_tail, *prev;
+ unsigned int prev_nr_slots = 0;
if (WARN_ON_ONCE(!rreq->buffer && tail) ||
WARN_ON_ONCE(rreq->buffer && !tail))
- return -EIO;
-
- if (!tail || folioq_full(tail)) {
- tail = kmalloc(sizeof(*tail), GFP_NOFS);
- if (!tail)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(tail);
- tail->prev = rreq->buffer_tail;
- if (tail->prev)
- tail->prev->next = tail;
- rreq->buffer_tail = tail;
- if (!rreq->buffer) {
- rreq->buffer = tail;
- iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+ return ERR_PTR(-EIO);
+
+ prev = tail;
+ if (prev) {
+ if (!folioq_full(tail))
+ return tail;
+ prev_nr_slots = folioq_nr_slots(tail);
+ }
+
+ tail = kmalloc(sizeof(*tail), GFP_NOFS);
+ if (!tail)
+ return ERR_PTR(-ENOMEM);
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(tail);
+ tail->prev = prev;
+ if (prev)
+ /* [!] NOTE: After we set prev->next, the consumer is entirely
+ * at liberty to delete prev.
+ */
+ WRITE_ONCE(prev->next, tail);
+
+ rreq->buffer_tail = tail;
+ if (!rreq->buffer) {
+ rreq->buffer = tail;
+ iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+ } else {
+ /* Make sure we don't leave the master iterator pointing to a
+ * block that might get immediately consumed.
+ */
+ if (rreq->io_iter.folioq == prev &&
+ rreq->io_iter.folioq_slot == prev_nr_slots) {
+ rreq->io_iter.folioq = tail;
+ rreq->io_iter.folioq_slot = 0;
}
- rreq->buffer_tail_slot = 0;
}
+ rreq->buffer_tail_slot = 0;
+ return tail;
+}
+
+/*
+ * Append a folio to the rolling queue.
+ */
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+ bool needs_put)
+{
+ struct folio_queue *tail;
+ unsigned int slot, order = folio_order(folio);
+
+ tail = netfs_buffer_make_space(rreq);
+ if (IS_ERR(tail))
+ return PTR_ERR(tail);
rreq->io_iter.count += PAGE_SIZE << order;
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 04e66d587f77..6293f547e4c3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -153,12 +153,22 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
loff_t start)
{
struct netfs_io_subrequest *subreq;
+ struct iov_iter *wreq_iter = &wreq->io_iter;
+
+ /* Make sure we don't point the iterator at a used-up folio_queue
+ * struct being used as a placeholder to prevent the queue from
+ * collapsing. In such a case, extend the queue.
+ */
+ if (iov_iter_is_folioq(wreq_iter) &&
+ wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
+ netfs_buffer_make_space(wreq);
+ }
subreq = netfs_alloc_subrequest(wreq);
subreq->source = stream->source;
subreq->start = start;
subreq->stream_nr = stream->stream_nr;
- subreq->io_iter = wreq->io_iter;
+ subreq->io_iter = *wreq_iter;
_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
@@ -307,6 +317,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
struct netfs_io_stream *stream;
struct netfs_group *fgroup; /* TODO: Use this with ceph */
struct netfs_folio *finfo;
+ size_t iter_off = 0;
size_t fsize = folio_size(folio), flen = fsize, foff = 0;
loff_t fpos = folio_pos(folio), i_size;
bool to_eof = false, streamw = false;
@@ -462,7 +473,12 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (choose_s < 0)
break;
stream = &wreq->io_streams[choose_s];
- wreq->io_iter.iov_offset = stream->submit_off;
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
stream->submit_extendable_to = fsize - stream->submit_off;
@@ -477,8 +493,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
debug = true;
}
- wreq->io_iter.iov_offset = 0;
- iov_iter_advance(&wreq->io_iter, fsize);
+ if (fsize > iter_off)
+ iov_iter_advance(&wreq->io_iter, fsize - iter_off);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 57249f040dfc..0eb20012792f 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -4,6 +4,7 @@ config NFS_FS
depends on INET && FILE_LOCKING && MULTIUSER
select LOCKD
select SUNRPC
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFS_V3_ACL
help
Choose Y here if you want to access files residing on other
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 5f6db37f461e..9fb2f2cac87e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -13,6 +13,7 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
+nfs-$(CONFIG_NFS_LOCALIO) += localio.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8286edd6062d..a1d21c4be0ac 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -178,6 +178,14 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
clp->cl_net = get_net(cl_init->net);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ seqlock_init(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ clp->cl_uuid.net = NULL;
+ clp->cl_uuid.dom = NULL;
+ spin_lock_init(&clp->cl_localio_lock);
+#endif /* CONFIG_NFS_LOCALIO */
+
clp->cl_principal = "*";
clp->cl_xprtsec = cl_init->xprtsec;
return clp;
@@ -233,6 +241,8 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
+ nfs_local_disable(clp);
+
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -424,7 +434,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- return rpc_ops->init_client(new, cl_init);
+ new = rpc_ops->init_client(new, cl_init);
+ if (!IS_ERR(new))
+ nfs_local_probe(new);
+ return new;
}
spin_unlock(&nn->nfs_client_lock);
@@ -997,8 +1010,8 @@ struct nfs_server *nfs_alloc_server(void)
init_waitqueue_head(&server->write_congestion_wait);
atomic_long_set(&server->writeback, 0);
- ida_init(&server->openowner_id);
- ida_init(&server->lockowner_id);
+ atomic64_set(&server->owner_ctr, 0);
+
pnfs_init_server(server);
rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
@@ -1037,8 +1050,6 @@ void nfs_free_server(struct nfs_server *server)
}
ida_free(&s_sysfs_ids, server->s_sysfs_id);
- ida_destroy(&server->lockowner_id);
- ida_destroy(&server->openowner_id);
put_cred(server->cred);
nfs_release_automount_timer();
call_rcu(&server->rcu, delayed_free);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4cb97ef41350..492cffd9d3d8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -151,7 +151,7 @@ struct nfs_cache_array {
unsigned char folio_full : 1,
folio_is_eof : 1,
cookies_are_ordered : 1;
- struct nfs_cache_array_entry array[];
+ struct nfs_cache_array_entry array[] __counted_by(size);
};
struct nfs_readdir_descriptor {
@@ -328,7 +328,8 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
goto out;
}
- cache_entry = &array->array[array->size];
+ array->size++;
+ cache_entry = &array->array[array->size - 1];
cache_entry->cookie = array->last_cookie;
cache_entry->ino = entry->ino;
cache_entry->d_type = entry->d_type;
@@ -337,7 +338,6 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
array->last_cookie = entry->cookie;
if (array->last_cookie <= cache_entry->cookie)
array->cookies_are_ordered = 0;
- array->size++;
if (entry->eof != 0)
nfs_readdir_array_set_eof(array);
out:
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b6e9aeaf4ce2..d39a1f58e18d 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -488,7 +488,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -530,7 +530,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -1011,7 +1011,7 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
data->args.fh = fh;
return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
- RPC_TASK_SOFTCONN);
+ RPC_TASK_SOFTCONN, NULL);
out_err:
pnfs_generic_prepare_to_resend_writes(data);
pnfs_generic_commit_release(data);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 39ba9f4208aa..f78115c6c2c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -11,6 +11,7 @@
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
+#include <linux/file.h>
#include <linux/sched/mm.h>
#include <linux/sunrpc/metrics.h>
@@ -162,6 +163,21 @@ decode_name(struct xdr_stream *xdr, u32 *id)
return 0;
}
+static struct nfsd_file *
+ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, fmode_t mode)
+{
+ if (mode & FMODE_WRITE) {
+ /*
+ * Always request read and write access since this corresponds
+ * to a rw layout.
+ */
+ mode |= FMODE_READ;
+ }
+
+ return nfs_local_open_fh(clp, cred, fh, mode);
+}
+
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
const struct nfs4_ff_layout_mirror *m2)
{
@@ -237,7 +253,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
- const struct cred *cred;
+ const struct cred *cred;
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
@@ -1756,6 +1772,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1802,11 +1819,18 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->args.offset = offset;
hdr->mds_offset = offset;
+ /* Start IO accounting for local read */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1826,6 +1850,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1870,11 +1895,19 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
*/
hdr->args.offset = offset;
+ /* Start IO accounting for local write */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1908,6 +1941,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
u32 idx;
@@ -1946,10 +1980,18 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
if (fh)
data->args.fh = fh;
+ /* Start IO accounting for local commit */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ data->task.tk_start = ktime_get();
+ ff_layout_commit_record_layoutstats_start(&data->task, data);
+ }
+
ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
- how, RPC_TASK_SOFTCONN);
+ how, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return ret;
out_err:
@@ -2087,12 +2129,6 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
}
static void
-encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
-static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
const nfs4_stateid *stateid,
const struct nfs42_layoutstat_devinfo *devinfo)
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e028f5a0ef5f..e58bedfb1dcc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -395,6 +395,12 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
/* connect success, check rsize/wsize limit */
if (!status) {
+ /*
+ * ds_clp is put in destroy_ds().
+ * keep ds_clp even if DS is local, so that if local IO cannot
+ * proceed somehow, we can fall back to NFS whenever we want.
+ */
+ nfs_local_probe(ds->ds_clp);
max_payload =
nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
NULL);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 6c9f3f6645dd..7e000d782e28 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -49,6 +49,7 @@ enum nfs_param {
Opt_bsize,
Opt_clientaddr,
Opt_cto,
+ Opt_alignwrite,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -149,6 +150,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("bsize", Opt_bsize),
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
+ fsparam_flag_no("alignwrite", Opt_alignwrite),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -592,6 +594,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
else
ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
break;
+ case Opt_alignwrite:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE;
+ else
+ ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE;
+ break;
case Opt_ac:
if (result.negated)
ctx->flags |= NFS_MOUNT_NOAC;
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 11ff2b2e060f..f13d25d95b85 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
}
/*
- * get an NFS2/NFS3 root dentry from the root filehandle
+ * get a root dentry from the root filehandle
*/
int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b4914a11c3c2..542c7d97b235 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2461,35 +2461,54 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
+struct workqueue_struct *nfslocaliod_workqueue;
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
- * start up the nfsiod workqueue
+ * Destroy the nfsiod workqueues
*/
-static int nfsiod_start(void)
+static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
- dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
- if (wq == NULL)
- return -ENOMEM;
- nfsiod_workqueue = wq;
- return 0;
+
+ wq = nfsiod_workqueue;
+ if (wq != NULL) {
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ wq = nfslocaliod_workqueue;
+ if (wq != NULL) {
+ nfslocaliod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#endif /* CONFIG_NFS_LOCALIO */
}
/*
- * Destroy the nfsiod workqueue
+ * Start the nfsiod workqueues
*/
-static void nfsiod_stop(void)
+static int nfsiod_start(void)
{
- struct workqueue_struct *wq;
-
- wq = nfsiod_workqueue;
- if (wq == NULL)
- return;
- nfsiod_workqueue = NULL;
- destroy_workqueue(wq);
+ dprintk("RPC: creating workqueue nfsiod\n");
+ nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (nfsiod_workqueue == NULL)
+ return -ENOMEM;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /*
+ * localio writes need to use a normal (non-memreclaim) workqueue.
+ * When we start getting low on space, XFS goes and calls flush_work() on
+ * a non-memreclaim work queue, which causes a priority inversion problem.
+ */
+ dprintk("RPC: creating workqueue nfslocaliod\n");
+ nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
+ if (unlikely(nfslocaliod_workqueue == NULL)) {
+ nfsiod_stop();
+ return -ENOMEM;
+ }
+#endif /* CONFIG_NFS_LOCALIO */
+ return 0;
}
unsigned int nfs_net_id;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5902a9beca1f..430733e3eff2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -9,6 +9,7 @@
#include <linux/crc32.h>
#include <linux/sunrpc/addr.h>
#include <linux/nfs_page.h>
+#include <linux/nfslocalio.h>
#include <linux/wait_bit.h>
#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
@@ -308,7 +309,8 @@ void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags);
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
@@ -438,6 +440,7 @@ int nfs_check_flags(int);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
+extern struct workqueue_struct *nfslocaliod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_free_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
@@ -449,6 +452,51 @@ extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/* localio.c */
+extern void nfs_local_disable(struct nfs_client *);
+extern void nfs_local_probe(struct nfs_client *);
+extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
+ const struct cred *,
+ struct nfs_fh *,
+ const fmode_t);
+extern int nfs_local_doio(struct nfs_client *,
+ struct nfsd_file *,
+ struct nfs_pgio_header *,
+ const struct rpc_call_ops *);
+extern int nfs_local_commit(struct nfsd_file *,
+ struct nfs_commit_data *,
+ const struct rpc_call_ops *, int);
+extern bool nfs_server_is_local(const struct nfs_client *clp);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfs_local_disable(struct nfs_client *clp) {}
+static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ return NULL;
+}
+static inline int nfs_local_doio(struct nfs_client *clp,
+ struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ return -EINVAL;
+}
+static inline int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ return -EINVAL;
+}
+static inline bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return false;
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
/* super.c */
extern const struct super_operations nfs_sops;
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
@@ -505,7 +553,6 @@ extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
struct nfs_open_context *ctx,
struct folio *folio);
extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
-extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
@@ -528,7 +575,8 @@ extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags);
+ int how, int flags,
+ struct nfsd_file *localio);
extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
struct pnfs_layout_segment *lseg,
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
new file mode 100644
index 000000000000..c29cdf51c458
--- /dev/null
+++ b/fs/nfs/localio.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS client support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/vfs.h>
+#include <linux/file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/bvec.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "internal.h"
+#include "pnfs.h"
+#include "nfstrace.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+struct nfs_local_kiocb {
+ struct kiocb kiocb;
+ struct bio_vec *bvec;
+ struct nfs_pgio_header *hdr;
+ struct work_struct work;
+ struct nfsd_file *localio;
+};
+
+struct nfs_local_fsync_ctx {
+ struct nfsd_file *localio;
+ struct nfs_commit_data *data;
+ struct work_struct work;
+ struct kref kref;
+ struct completion *done;
+};
+static void nfs_local_fsync_work(struct work_struct *work);
+
+static bool localio_enabled __read_mostly = true;
+module_param(localio_enabled, bool, 0644);
+
+static inline bool nfs_client_is_local(const struct nfs_client *clp)
+{
+ return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+}
+
+bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return nfs_client_is_local(clp) && localio_enabled;
+}
+EXPORT_SYMBOL_GPL(nfs_server_is_local);
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static void localio_xdr_enc_uuidargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const u8 *uuid = data;
+
+ encode_opaque_fixed(xdr, uuid, UUID_SIZE);
+}
+
+static int localio_xdr_dec_uuidres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ void *result)
+{
+ /* void return */
+ return 0;
+}
+
+static const struct rpc_procinfo nfs_localio_procedures[] = {
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .p_proc = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_encode = localio_xdr_enc_uuidargs,
+ .p_decode = localio_xdr_dec_uuidres,
+ .p_arglen = XDR_QUADLEN(UUID_SIZE),
+ .p_replen = 0,
+ .p_statidx = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_name = "UUID_IS_LOCAL",
+ },
+};
+
+static unsigned int nfs_localio_counts[ARRAY_SIZE(nfs_localio_procedures)];
+static const struct rpc_version nfslocalio_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nfs_localio_procedures),
+ .procs = nfs_localio_procedures,
+ .counts = nfs_localio_counts,
+};
+
+static const struct rpc_version *nfslocalio_version[] = {
+ [1] = &nfslocalio_version1,
+};
+
+extern const struct rpc_program nfslocalio_program;
+static struct rpc_stat nfslocalio_rpcstat = { &nfslocalio_program };
+
+const struct rpc_program nfslocalio_program = {
+ .name = "nfslocalio",
+ .number = NFS_LOCALIO_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfslocalio_version),
+ .version = nfslocalio_version,
+ .stats = &nfslocalio_rpcstat,
+};
+
+/*
+ * nfs_local_enable - enable local i/o for an nfs_client
+ */
+static void nfs_local_enable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+ trace_nfs_local_enable(clp);
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_local_disable - disable local i/o for an nfs_client
+ */
+void nfs_local_disable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) {
+ trace_nfs_local_disable(clp);
+ nfs_uuid_invalidate_one_client(&clp->cl_uuid);
+ }
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_init_localioclient - Initialise an NFS localio client connection
+ */
+static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
+{
+ struct rpc_clnt *rpcclient_localio;
+
+ rpcclient_localio = rpc_bind_new_program(clp->cl_rpcclient,
+ &nfslocalio_program, 1);
+
+ dprintk_rcu("%s: server (%s) %s NFS LOCALIO.\n",
+ __func__, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ (IS_ERR(rpcclient_localio) ? "does not support" : "supports"));
+
+ return rpcclient_localio;
+}
+
+static bool nfs_server_uuid_is_local(struct nfs_client *clp)
+{
+ u8 uuid[UUID_SIZE];
+ struct rpc_message msg = {
+ .rpc_argp = &uuid,
+ };
+ struct rpc_clnt *rpcclient_localio;
+ int status;
+
+ rpcclient_localio = nfs_init_localioclient(clp);
+ if (IS_ERR(rpcclient_localio))
+ return false;
+
+ export_uuid(uuid, &clp->cl_uuid.uuid);
+
+ msg.rpc_proc = &nfs_localio_procedures[LOCALIOPROC_UUID_IS_LOCAL];
+ status = rpc_call_sync(rpcclient_localio, &msg, 0);
+ dprintk("%s: NFS reply UUID_IS_LOCAL: status=%d\n",
+ __func__, status);
+ rpc_shutdown_client(rpcclient_localio);
+
+ /* Server is only local if it initialized required struct members */
+ if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom)
+ return false;
+
+ return true;
+}
+
+/*
+ * nfs_local_probe - probe local i/o support for an nfs_server and nfs_client
+ * - called after alloc_client and init_client (so cl_rpcclient exists)
+ * - this function is idempotent, it can be called for old or new clients
+ */
+void nfs_local_probe(struct nfs_client *clp)
+{
+ /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */
+ if (!localio_enabled ||
+ clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
+ nfs_local_disable(clp);
+ return;
+ }
+
+ if (nfs_client_is_local(clp)) {
+ /* If already enabled, disable and re-enable */
+ nfs_local_disable(clp);
+ }
+
+ nfs_uuid_begin(&clp->cl_uuid);
+ if (nfs_server_uuid_is_local(clp))
+ nfs_local_enable(clp);
+ nfs_uuid_end(&clp->cl_uuid);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe);
+
+/*
+ * nfs_local_open_fh - open a local filehandle in terms of nfsd_file
+ *
+ * Returns a pointer to a struct nfsd_file or NULL
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ struct nfsd_file *localio;
+ int status;
+
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
+ cred, fh, mode);
+ if (IS_ERR(localio)) {
+ status = PTR_ERR(localio);
+ trace_nfs_local_open_fh(fh, mode, status);
+ switch (status) {
+ case -ENOMEM:
+ case -ENXIO:
+ case -ENOENT:
+ /* Revalidate localio, will disable if unsupported */
+ nfs_local_probe(clp);
+ }
+ return NULL;
+ }
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_local_open_fh);
+
+static struct bio_vec *
+nfs_bvec_alloc_and_import_pagevec(struct page **pagevec,
+ unsigned int npages, gfp_t flags)
+{
+ struct bio_vec *bvec, *p;
+
+ bvec = kmalloc_array(npages, sizeof(*bvec), flags);
+ if (bvec != NULL) {
+ for (p = bvec; npages > 0; p++, pagevec++, npages--) {
+ p->bv_page = *pagevec;
+ p->bv_len = PAGE_SIZE;
+ p->bv_offset = 0;
+ }
+ }
+ return bvec;
+}
+
+static void
+nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
+{
+ kfree(iocb->bvec);
+ kfree(iocb);
+}
+
+static struct nfs_local_kiocb *
+nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_kiocb *iocb;
+
+ iocb = kmalloc(sizeof(*iocb), flags);
+ if (iocb == NULL)
+ return NULL;
+ iocb->bvec = nfs_bvec_alloc_and_import_pagevec(hdr->page_array.pagevec,
+ hdr->page_array.npages, flags);
+ if (iocb->bvec == NULL) {
+ kfree(iocb);
+ return NULL;
+ }
+ init_sync_kiocb(&iocb->kiocb, nfs_to->nfsd_file_file(localio));
+ iocb->kiocb.ki_pos = hdr->args.offset;
+ iocb->localio = localio;
+ iocb->hdr = hdr;
+ iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ return iocb;
+}
+
+static void
+nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ iov_iter_bvec(i, dir, iocb->bvec, hdr->page_array.npages,
+ hdr->args.count + hdr->args.pgbase);
+ if (hdr->args.pgbase != 0)
+ iov_iter_advance(i, hdr->args.pgbase);
+}
+
+static void
+nfs_local_hdr_release(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ call_ops->rpc_call_done(&hdr->task, hdr);
+ call_ops->rpc_release(hdr);
+}
+
+static void
+nfs_local_pgio_init(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ hdr->task.tk_ops = call_ops;
+ if (!hdr->task.tk_start)
+ hdr->task.tk_start = ktime_get();
+}
+
+static void
+nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+{
+ if (status >= 0) {
+ hdr->res.count = status;
+ hdr->res.op_status = NFS4_OK;
+ hdr->task.tk_status = 0;
+ } else {
+ hdr->res.op_status = nfs4_stat_to_errno(status);
+ hdr->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ nfs_to->nfsd_file_put_local(iocb->localio);
+ nfs_local_iocb_free(iocb);
+ nfs_local_hdr_release(hdr, hdr->task.tk_ops);
+}
+
+static void
+nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct file *filp = iocb->kiocb.ki_filp;
+
+ nfs_local_pgio_done(hdr, status);
+
+ if (hdr->res.count != hdr->args.count ||
+ hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ hdr->res.eof = true;
+
+ dprintk("%s: read %ld bytes eof %d.\n", __func__,
+ status > 0 ? status : 0, hdr->res.eof);
+}
+
+static void nfs_local_call_read(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, READ);
+
+ status = filp->f_op->read_iter(&iocb->kiocb, &iter);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_read_done(iocb, status);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+}
+
+static int
+nfs_do_local_read(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_read count=%u pos=%llu\n",
+ __func__, hdr->args.count, hdr->args.offset);
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_KERNEL);
+ if (iocb == NULL)
+ return -ENOMEM;
+
+ nfs_local_pgio_init(hdr, call_ops);
+ hdr->res.eof = false;
+
+ INIT_WORK(&iocb->work, nfs_local_call_read);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+static void
+nfs_copy_boot_verifier(struct nfs_write_verifier *verifier, struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ u32 *verf = (u32 *)verifier->data;
+ int seq = 0;
+
+ do {
+ read_seqbegin_or_lock(&clp->cl_boot_lock, &seq);
+ verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec;
+ verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec;
+ } while (need_seqretry(&clp->cl_boot_lock, seq));
+ done_seqretry(&clp->cl_boot_lock, seq);
+}
+
+static void
+nfs_reset_boot_verifier(struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ write_seqlock(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ write_sequnlock(&clp->cl_boot_lock);
+}
+
+static void
+nfs_set_local_verifier(struct inode *inode,
+ struct nfs_writeverf *verf,
+ enum nfs3_stable_how how)
+{
+ nfs_copy_boot_verifier(&verf->verifier, inode);
+ verf->committed = how;
+}
+
+/* Factored out from fs/nfsd/vfs.h:fh_getattr() */
+static int __vfs_getattr(struct path *p, struct kstat *stat, int version)
+{
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (version == 4)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+ return vfs_getattr(p, stat, request_mask, AT_STATX_SYNC_AS_STAT);
+}
+
+/* Copied from fs/nfsd/nfsfh.c:nfsd4_change_attribute() */
+static u64 __nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode)
+{
+ u64 chattr;
+
+ if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ chattr = stat->change_cookie;
+ if (S_ISREG(inode->i_mode) &&
+ !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ chattr += (u64)stat->ctime.tv_sec << 30;
+ chattr += stat->ctime.tv_nsec;
+ }
+ } else {
+ chattr = time_to_chattr(&stat->ctime);
+ }
+ return chattr;
+}
+
+static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
+{
+ struct kstat stat;
+ struct file *filp = iocb->kiocb.ki_filp;
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct nfs_fattr *fattr = hdr->res.fattr;
+ int version = NFS_PROTO(hdr->inode)->version;
+
+ if (unlikely(!fattr) || __vfs_getattr(&filp->f_path, &stat, version))
+ return;
+
+ fattr->valid = (NFS_ATTR_FATTR_FILEID |
+ NFS_ATTR_FATTR_CHANGE |
+ NFS_ATTR_FATTR_SIZE |
+ NFS_ATTR_FATTR_ATIME |
+ NFS_ATTR_FATTR_MTIME |
+ NFS_ATTR_FATTR_CTIME |
+ NFS_ATTR_FATTR_SPACE_USED);
+
+ fattr->fileid = stat.ino;
+ fattr->size = stat.size;
+ fattr->atime = stat.atime;
+ fattr->mtime = stat.mtime;
+ fattr->ctime = stat.ctime;
+ if (version == 4) {
+ fattr->change_attr =
+ __nfsd4_change_attribute(&stat, file_inode(filp));
+ } else
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+ fattr->du.nfs3.used = stat.blocks << 9;
+}
+
+static void
+nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct inode *inode = hdr->inode;
+
+ dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+
+ /* Handle short writes as if they are ENOSPC */
+ if (status > 0 && status < hdr->args.count) {
+ hdr->mds_offset += status;
+ hdr->args.offset += status;
+ hdr->args.pgbase += status;
+ hdr->args.count -= status;
+ nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
+ status = -ENOSPC;
+ }
+ if (status < 0)
+ nfs_reset_boot_verifier(inode);
+ else if (nfs_should_remove_suid(inode)) {
+ /* Deal with the suid/sgid bit corner case */
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ spin_unlock(&inode->i_lock);
+ }
+ nfs_local_pgio_done(hdr, status);
+}
+
+static void nfs_local_call_write(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ unsigned long old_flags = current->flags;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, WRITE);
+
+ file_start_write(filp);
+ status = filp->f_op->write_iter(&iocb->kiocb, &iter);
+ file_end_write(filp);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_write_done(iocb, status);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+ current->flags = old_flags;
+}
+
+static int
+nfs_do_local_write(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_write count=%u pos=%llu %s\n",
+ __func__, hdr->args.count, hdr->args.offset,
+ (hdr->args.stable == NFS_UNSTABLE) ? "unstable" : "stable");
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_NOIO);
+ if (iocb == NULL)
+ return -ENOMEM;
+
+ switch (hdr->args.stable) {
+ default:
+ break;
+ case NFS_DATA_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC;
+ break;
+ case NFS_FILE_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ }
+ nfs_local_pgio_init(hdr, call_ops);
+
+ nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+
+ INIT_WORK(&iocb->work, nfs_local_call_write);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ int status = 0;
+ struct file *filp = nfs_to->nfsd_file_file(localio);
+
+ if (!hdr->args.count)
+ return 0;
+ /* Don't support filesystems without read_iter/write_iter */
+ if (!filp->f_op->read_iter || !filp->f_op->write_iter) {
+ nfs_local_disable(clp);
+ status = -EAGAIN;
+ goto out;
+ }
+
+ switch (hdr->rw_mode) {
+ case FMODE_READ:
+ status = nfs_do_local_read(hdr, localio, call_ops);
+ break;
+ case FMODE_WRITE:
+ status = nfs_do_local_write(hdr, localio, call_ops);
+ break;
+ default:
+ dprintk("%s: invalid mode: %d\n", __func__,
+ hdr->rw_mode);
+ status = -EINVAL;
+ }
+out:
+ if (status != 0) {
+ nfs_to->nfsd_file_put_local(localio);
+ hdr->task.tk_status = status;
+ nfs_local_hdr_release(hdr, call_ops);
+ }
+ return status;
+}
+
+static void
+nfs_local_init_commit(struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ data->task.tk_ops = call_ops;
+}
+
+static int
+nfs_local_run_commit(struct file *filp, struct nfs_commit_data *data)
+{
+ loff_t start = data->args.offset;
+ loff_t end = LLONG_MAX;
+
+ if (data->args.count > 0) {
+ end = start + data->args.count - 1;
+ if (end < start)
+ end = LLONG_MAX;
+ }
+
+ dprintk("%s: commit %llu - %llu\n", __func__, start, end);
+ return vfs_fsync_range(filp, start, end, 0);
+}
+
+static void
+nfs_local_commit_done(struct nfs_commit_data *data, int status)
+{
+ if (status >= 0) {
+ nfs_set_local_verifier(data->inode,
+ data->res.verf,
+ NFS_FILE_SYNC);
+ data->res.op_status = NFS4_OK;
+ data->task.tk_status = 0;
+ } else {
+ nfs_reset_boot_verifier(data->inode);
+ data->res.op_status = nfs4_stat_to_errno(status);
+ data->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_release_commit_data(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ nfs_to->nfsd_file_put_local(localio);
+ call_ops->rpc_call_done(&data->task, data);
+ call_ops->rpc_release(data);
+}
+
+static struct nfs_local_fsync_ctx *
+nfs_local_fsync_ctx_alloc(struct nfs_commit_data *data,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_fsync_ctx *ctx = kmalloc(sizeof(*ctx), flags);
+
+ if (ctx != NULL) {
+ ctx->localio = localio;
+ ctx->data = data;
+ INIT_WORK(&ctx->work, nfs_local_fsync_work);
+ kref_init(&ctx->kref);
+ ctx->done = NULL;
+ }
+ return ctx;
+}
+
+static void
+nfs_local_fsync_ctx_kref_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct nfs_local_fsync_ctx, kref));
+}
+
+static void
+nfs_local_fsync_ctx_put(struct nfs_local_fsync_ctx *ctx)
+{
+ kref_put(&ctx->kref, nfs_local_fsync_ctx_kref_free);
+}
+
+static void
+nfs_local_fsync_ctx_free(struct nfs_local_fsync_ctx *ctx)
+{
+ nfs_local_release_commit_data(ctx->localio, ctx->data,
+ ctx->data->task.tk_ops);
+ nfs_local_fsync_ctx_put(ctx);
+}
+
+static void
+nfs_local_fsync_work(struct work_struct *work)
+{
+ struct nfs_local_fsync_ctx *ctx;
+ int status;
+
+ ctx = container_of(work, struct nfs_local_fsync_ctx, work);
+
+ status = nfs_local_run_commit(nfs_to->nfsd_file_file(ctx->localio),
+ ctx->data);
+ nfs_local_commit_done(ctx->data, status);
+ if (ctx->done != NULL)
+ complete(ctx->done);
+ nfs_local_fsync_ctx_free(ctx);
+}
+
+int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ struct nfs_local_fsync_ctx *ctx;
+
+ ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_KERNEL);
+ if (!ctx) {
+ nfs_local_commit_done(data, -ENOMEM);
+ nfs_local_release_commit_data(localio, data, call_ops);
+ return -ENOMEM;
+ }
+
+ nfs_local_init_commit(data, call_ops);
+ kref_get(&ctx->kref);
+ if (how & FLUSH_SYNC) {
+ DECLARE_COMPLETION_ONSTACK(done);
+ ctx->done = &done;
+ queue_work(nfsiod_workqueue, &ctx->work);
+ wait_for_completion(&done);
+ } else
+ queue_work(nfsiod_workqueue, &ctx->work);
+ nfs_local_fsync_ctx_put(ctx);
+ return 0;
+}
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index c19093814296..6e75c6c2d234 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -22,14 +22,12 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -64,8 +62,6 @@
#define NFS_readdirres_sz (1+NFS_pagepad_sz)
#define NFS_statfsres_sz (1+NFS_info_sz)
-static int nfs_stat_to_errno(enum nfs_stat);
-
/*
* Encode/decode NFSv2 basic data types
*
@@ -1054,70 +1050,6 @@ out_default:
return nfs_stat_to_errno(status);
}
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 60f032be805a..4ae01c10b7e2 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -21,14 +21,13 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
+#include <linux/nfs_common.h>
+
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -91,8 +90,6 @@
NFS3_pagepad_sz)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
-static int nfs3_stat_to_errno(enum nfs_stat);
-
/*
* Map file type to S_IFMT bits
*/
@@ -1406,7 +1403,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1445,7 +1442,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1495,7 +1492,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1537,7 +1534,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1578,7 +1575,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1658,7 +1655,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1728,7 +1725,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1795,7 +1792,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1835,7 +1832,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1881,7 +1878,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1926,7 +1923,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/**
@@ -2101,7 +2098,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2167,7 +2164,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2243,7 +2240,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2304,7 +2301,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2350,7 +2347,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2416,7 +2413,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2435,76 +2432,11 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs3_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs3_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
-
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c2045a2a9d0f..7d383d29a995 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -83,7 +83,7 @@ struct nfs4_minor_version_ops {
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
- int owner_id;
+ u64 owner_id;
int flags;
u32 counter;
spinlock_t lock; /* Protects the list */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b8ffbe52ba15..cd2fbde2e6d7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3904,6 +3904,18 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
+#define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
+ (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
+static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
+{
+ u32 share_access_want = res->open_caps.oa_share_access_want[0];
+ u32 attr_bitmask = res->attr_bitmask[2];
+
+ return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
+ ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
+ FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
+}
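
The helper above deliberately compares the masked value against the full mask rather than testing for a non-zero result: the server must advertise both delegated timestamp attributes. A tiny sketch of the difference, using stand-in bit values rather than the real FATTR4_WORD2_* constants:

#include <assert.h>

#define BIT_MODIFY 0x1u
#define BIT_ACCESS 0x2u
#define BOTH_BITS  (BIT_MODIFY | BIT_ACCESS)

int main(void)
{
	unsigned int only_modify = BIT_MODIFY;

	assert((only_modify & BOTH_BITS) != 0);		/* a bare '&' would wrongly pass */
	assert((only_modify & BOTH_BITS) != BOTH_BITS);	/* the '== mask' test correctly fails */
	assert((BOTH_BITS & BOTH_BITS) == BOTH_BITS);	/* both bits set: capability granted */
	return 0;
}
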
+
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
u32 minorversion = server->nfs_client->cl_minorversion;
@@ -3982,8 +3994,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
#endif
if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
server->caps |= NFS_CAP_FS_LOCATIONS;
- if (res.attr_bitmask[2] & FATTR4_WORD2_TIME_DELEG_MODIFY)
- server->caps |= NFS_CAP_DELEGTIME;
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -4011,6 +4021,8 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
if (res.open_caps.oa_share_access_want[0] &
NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
server->caps |= NFS_CAP_OPEN_XOR;
+ if (nfs4_server_delegtime_capable(&res))
+ server->caps |= NFS_CAP_DELEGTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 877f682b45f2..581864a15888 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -501,11 +501,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
sp = kzalloc(sizeof(*sp), gfp_flags);
if (!sp)
return NULL;
- sp->so_seqid.owner_id = ida_alloc(&server->openowner_id, gfp_flags);
- if (sp->so_seqid.owner_id < 0) {
- kfree(sp);
- return NULL;
- }
+ sp->so_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
sp->so_server = server;
sp->so_cred = get_cred(cred);
spin_lock_init(&sp->so_lock);
@@ -536,7 +532,6 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_cred(sp->so_cred);
- ida_free(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -879,19 +874,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
lsp->ls_owner = owner;
- lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT);
- if (lsp->ls_seqid.owner_id < 0)
- goto out_free;
+ lsp->ls_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
-out_free:
- kfree(lsp);
- return NULL;
}
void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- ida_free(&server->lockowner_id, lsp->ls_seqid.owner_id);
nfs4_destroy_seqid_counter(&lsp->ls_seqid);
kfree(lsp);
}
@@ -1957,6 +1946,7 @@ restart:
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
status = nfs4_recovery_handle_error(clp, status);
+ nfs4_free_state_owners(&freeme);
return (status != 0) ? status : -EAGAIN;
}
@@ -2023,6 +2013,12 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
nfs_mark_client_ready(clp, -EPERM);
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
return -EPERM;
+ case -ETIMEDOUT:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
+ }
+ fallthrough;
case -EACCES:
case -NFS4ERR_DELAY:
case -EAGAIN:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 61190d6a5a77..e8ac3f615f93 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfs4_fs.h"
#include "nfs4trace.h"
@@ -63,11 +64,7 @@
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
struct compound_hdr;
-static int nfs4_stat_to_errno(int);
static void encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr);
@@ -975,11 +972,6 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
return p;
}
-static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0);
@@ -1424,12 +1416,12 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
- p = reserve_space(xdr, 36);
+ p = reserve_space(xdr, 40);
p = xdr_encode_hyper(p, arg->clientid);
- *p++ = cpu_to_be32(24);
+ *p++ = cpu_to_be32(28);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
*p++ = cpu_to_be32(arg->server->s_dev);
- *p++ = cpu_to_be32(arg->id.uniquifier);
+ p = xdr_encode_hyper(p, arg->id.uniquifier);
xdr_encode_hyper(p, arg->id.create_time);
}
@@ -4408,14 +4400,6 @@ static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
return 0;
}
-static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
-{
- ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
- if (unlikely(ret < 0))
- return -EIO;
- return 0;
-}
-
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
@@ -7620,72 +7604,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return 0;
}
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS4_OK, 0 },
- { NFS4ERR_PERM, -EPERM },
- { NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
- { NFS4ERR_NXIO, -ENXIO },
- { NFS4ERR_ACCESS, -EACCES },
- { NFS4ERR_EXIST, -EEXIST },
- { NFS4ERR_XDEV, -EXDEV },
- { NFS4ERR_NOTDIR, -ENOTDIR },
- { NFS4ERR_ISDIR, -EISDIR },
- { NFS4ERR_INVAL, -EINVAL },
- { NFS4ERR_FBIG, -EFBIG },
- { NFS4ERR_NOSPC, -ENOSPC },
- { NFS4ERR_ROFS, -EROFS },
- { NFS4ERR_MLINK, -EMLINK },
- { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
- { NFS4ERR_DQUOT, -EDQUOT },
- { NFS4ERR_STALE, -ESTALE },
- { NFS4ERR_BADHANDLE, -EBADHANDLE },
- { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
- { NFS4ERR_NOTSUPP, -ENOTSUPP },
- { NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
- { NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
- { NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_NOXATTR, -ENODATA },
- { NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
-};
-
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
- */
-static int
-nfs4_stat_to_errno(int stat)
-{
- int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == stat)
- return nfs_errtbl[i].errno;
- }
- if (stat <= 10000 || stat > 10100) {
- /* The server is looney tunes. */
- return -EREMOTEIO;
- }
- /* If we cannot translate the error, the recovery routines should
- * handle it.
- * Note: remaining NFSv4 error codes have values > 10000, so should
- * not conflict with native Linux error codes.
- */
- return -stat;
-}
-
#ifdef CONFIG_NFS_V4_2
#include "nfs42xdr.c"
#endif /* CONFIG_NFS_V4_2 */
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 352fdaed4075..1eab98c277fa 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1685,6 +1685,67 @@ TRACE_EVENT(nfs_mount_path,
TP_printk("path='%s'", __get_str(path))
);
+TRACE_EVENT(nfs_local_open_fh,
+ TP_PROTO(
+ const struct nfs_fh *fh,
+ fmode_t fmode,
+ int error
+ ),
+
+ TP_ARGS(fh, fmode, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->fmode = (__force unsigned int)fmode;
+ ),
+
+ TP_printk(
+ "error=%d fhandle=0x%08x mode=%s",
+ __entry->error,
+ __entry->fhandle,
+ show_fs_fmode_flags(__entry->fmode)
+ )
+);
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable);
+
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 04124f226665..e27c07bd8929 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -731,7 +731,8 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags)
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -761,6 +762,10 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
hdr->args.count,
(unsigned long long)hdr->args.offset);
+ if (localio)
+ return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
+ localio, hdr, call_ops);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -953,6 +958,12 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0) {
+ struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
+
+ struct nfsd_file *localio =
+ nfs_local_open_fh(clp, hdr->cred,
+ hdr->args.fh, hdr->args.context->mode);
+
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
@@ -961,7 +972,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
NFS_PROTO(hdr->inode),
desc->pg_rpc_callops,
desc->pg_ioflags,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags,
+ localio);
}
return ret;
}
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index a74ee69a2fa6..dbef837e871a 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -490,7 +490,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF);
+ RPC_TASK_CRED_NOREF, NULL);
} else {
nfs_init_commit(data, NULL, data->lseg, cinfo);
initiate_commit(data, how);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a6103333b666..81bd1b9aba17 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -48,8 +48,7 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
- if (rhdr->res.scratch != NULL)
- kfree(rhdr->res.scratch);
+ kfree(rhdr->res.scratch);
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 97b386032b71..9723b6c53397 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -551,6 +551,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
else
seq_puts(m, ",local_lock=posix");
+ if (nfss->flags & NFS_MOUNT_NO_ALIGNWRITE)
+ seq_puts(m, ",noalignwrite");
+
if (nfss->flags & NFS_MOUNT_WRITE_EAGER) {
if (nfss->flags & NFS_MOUNT_WRITE_WAIT)
seq_puts(m, ",write=wait");
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d074d0ceb4f0..ead2dc55952d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -772,8 +772,7 @@ static void nfs_inode_add_request(struct nfs_page *req)
nfs_lock_request(req);
spin_lock(&mapping->i_private_lock);
set_bit(PG_MAPPED, &req->wb_flags);
- folio_set_private(folio);
- folio->private = req;
+ folio_attach_private(folio, req);
spin_unlock(&mapping->i_private_lock);
atomic_long_inc(&nfsi->nrequests);
/* this a head request for a page group - mark it as having an
@@ -797,8 +796,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
spin_lock(&mapping->i_private_lock);
if (likely(folio)) {
- folio->private = NULL;
- folio_clear_private(folio);
+ folio_detach_private(folio);
clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
spin_unlock(&mapping->i_private_lock);
@@ -1297,7 +1295,10 @@ static int nfs_can_extend_write(struct file *file, struct folio *folio,
struct file_lock_context *flctx = locks_inode_context(inode);
struct file_lock *fl;
int ret;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
+ return 0;
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_folio_write_uptodate(folio, pagelen))
@@ -1663,7 +1664,8 @@ EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags)
+ int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1692,6 +1694,9 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
dprintk("NFS: initiated commit call\n");
+ if (localio)
+ return nfs_local_commit(localio, data, call_ops, how);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1791,6 +1796,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
struct nfs_commit_info *cinfo)
{
struct nfs_commit_data *data;
+ struct nfsd_file *localio;
unsigned short task_flags = 0;
/* another commit raced with us */
@@ -1807,9 +1813,12 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
nfs_init_commit(data, head, NULL, cinfo);
if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
+
+ localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
+ data->args.fh, data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags, localio);
}
/*
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index 119c75ab9fd0..a5e54809701e 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,5 +6,10 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
+nfs_localio-objs := nfslocalio.o
+
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
+
+obj-$(CONFIG_NFS_COMMON) += common.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
new file mode 100644
index 000000000000..34a115176f97
--- /dev/null
+++ b/fs/nfs_common/common.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/nfs_common.h>
+#include <linux/nfs4.h>
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ return nfs_errtbl[i].errno;
+}
+EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
+
+/*
+ * We need to translate between nfs v4 status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { NFS4ERR_NOXATTR, -ENODATA },
+ { NFS4ERR_XATTR2BIG, -E2BIG },
+ { -1, -EIO }
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used by NFSv4.
+ */
+int nfs4_stat_to_errno(int stat)
+{
+ int i;
+ for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
+ if (nfs4_errtbl[i].stat == stat)
+ return nfs4_errtbl[i].errno;
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return -stat;
+}
+EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
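
The two converters above share one idiom: scan a sentinel-terminated table for known codes, and for NFSv4 let unrecognized codes in the protocol's 10000..10100 range pass through negated so recovery code can see them. A standalone sketch of that idiom, using local stand-in status values instead of the real NFS4ERR_* constants (and a renamed field, since 'errno' is a macro in userspace):

#include <errno.h>
#include <stdio.h>

static const struct {
	int stat;
	int errno_val;
} errtbl[] = {
	{ 0,  0 },		/* stand-in for NFS4_OK */
	{ 1,  -EPERM },		/* stand-in for NFS4ERR_PERM */
	{ 2,  -ENOENT },	/* stand-in for NFS4ERR_NOENT */
	{ -1, -EIO },		/* sentinel: table ends here */
};

static int stat_to_errno(int stat)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == stat)
			return errtbl[i].errno_val;
	if (stat <= 10000 || stat > 10100)
		return -EREMOTEIO;	/* not a plausible NFSv4 status at all */
	return -stat;			/* unknown v4 code: hand it to recovery */
}

int main(void)
{
	printf("%d %d %d\n",
	       stat_to_errno(2),	/* -ENOENT */
	       stat_to_errno(10095),	/* passed through as -10095 */
	       stat_to_errno(42));	/* unknown, out of range -> -EREMOTEIO */
	return 0;
}
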
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
new file mode 100644
index 000000000000..42b479b9191f
--- /dev/null
+++ b/fs/nfs_common/nfslocalio.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/nfslocalio.h>
+#include <net/netns/generic.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFS localio protocol bypass support");
+
+static DEFINE_SPINLOCK(nfs_uuid_lock);
+
+/*
+ * Global list of nfs_uuid_t instances
+ * that is protected by nfs_uuid_lock.
+ */
+static LIST_HEAD(nfs_uuids);
+
+void nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
+{
+ nfs_uuid->net = NULL;
+ nfs_uuid->dom = NULL;
+ uuid_gen(&nfs_uuid->uuid);
+
+ spin_lock(&nfs_uuid_lock);
+ list_add_tail_rcu(&nfs_uuid->list, &nfs_uuids);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_begin);
+
+void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net == NULL) {
+ spin_lock(&nfs_uuid_lock);
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_end);
+
+static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ list_for_each_entry(nfs_uuid, &nfs_uuids, list)
+ if (uuid_equal(&nfs_uuid->uuid, uuid))
+ return nfs_uuid;
+
+ return NULL;
+}
+
+static struct module *nfsd_mod;
+
+void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
+ struct net *net, struct auth_domain *dom,
+ struct module *mod)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid = nfs_uuid_lookup_locked(uuid);
+ if (nfs_uuid) {
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on a list so the net pointer can be
+ * invalidated.
+ */
+ list_move(&nfs_uuid->list, list);
+ rcu_assign_pointer(nfs_uuid->net, net);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+ }
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
+
+static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ module_put(nfsd_mod);
+ nfs_uuid->net = NULL;
+ }
+ if (nfs_uuid->dom) {
+ auth_domain_put(nfs_uuid->dom);
+ nfs_uuid->dom = NULL;
+ }
+ list_del_init(&nfs_uuid->list);
+}
+
+void nfs_uuid_invalidate_clients(struct list_head *list)
+{
+ nfs_uuid_t *nfs_uuid, *tmp;
+
+ spin_lock(&nfs_uuid_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, list, list)
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients);
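
The ownership scheme here is that a client record never pins the net; instead it links itself onto the owner's list so teardown can walk the list and clear every back-pointer first. A plain-C sketch of that pattern (assumption: a simple singly linked list standing in for the kernel's list/RCU machinery):

#include <stddef.h>
#include <stdio.h>

struct owner;			/* stands in for struct net */

struct client {			/* stands in for nfs_uuid_t */
	struct owner *owner;
	struct client *next;
};

struct owner {
	struct client *clients;	/* stands in for nn->local_clients */
};

static void attach(struct owner *o, struct client *c)
{
	c->owner = o;
	c->next = o->clients;
	o->clients = c;
}

static void owner_shutdown(struct owner *o)	/* nfsd_net_pre_exit() analog */
{
	for (struct client *c = o->clients; c; c = c->next)
		c->owner = NULL;		/* invalidate the back-pointer */
	o->clients = NULL;
}

int main(void)
{
	struct owner net = { 0 };
	struct client a = { 0 }, b = { 0 };

	attach(&net, &a);
	attach(&net, &b);
	owner_shutdown(&net);
	printf("%p %p\n", (void *)a.owner, (void *)b.owner);	/* both NULL */
	return 0;
}
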
+
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ struct net *net;
+ struct nfsd_file *localio;
+
+ /*
+ * Not running in nfsd context, so must safely get reference on nfsd_serv.
+ * But the server may already be shutting down, if so disallow new localio.
+ * uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
+ * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe
+ * and if it succeeds we will have an implied reference to the net.
+ *
+ * Otherwise NFS may not have ref on NFSD and therefore cannot safely
+ * make 'nfs_to' calls.
+ */
+ rcu_read_lock();
+ net = rcu_dereference(uuid->net);
+ if (!net || !nfs_to->nfsd_serv_try_get(net)) {
+ rcu_read_unlock();
+ return ERR_PTR(-ENXIO);
+ }
+ rcu_read_unlock();
+ /* We have an implied reference to net thanks to nfsd_serv_try_get */
+ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
+ cred, nfs_fh, fmode);
+ if (IS_ERR(localio))
+ nfs_to->nfsd_serv_put(net);
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_open_local_fh);
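
The key property the comment above relies on is a conditional reference grab: the reference is taken only while the server is still live, so a server that has begun shutting down can never be resurrected. A userspace sketch of such an increment-unless-zero helper with C11 atomics (the kernel's nfsd_serv_try_get() uses a percpu_ref, not this):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool tryget(atomic_uint *refs)
{
	unsigned int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference taken */
		/* 'old' was reloaded by the failed exchange; retry */
	}
	return false;			/* already zero: object is going away */
}

int main(void)
{
	atomic_uint live = 1, dead = 0;

	printf("live: %d, dead: %d\n", tryget(&live), tryget(&dead));
	return 0;
}
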
+
+/*
+ * The NFS LOCALIO code needs to call into NFSD using various symbols,
+ * but cannot be statically linked, because that will make the NFS
+ * module always depend on the NFSD module.
+ *
+ * 'nfs_to' provides NFS access to NFSD functions needed for LOCALIO,
+ * its lifetime is tightly coupled to the NFSD module and will always
+ * be available to NFS LOCALIO because any successful client<->server
+ * LOCALIO handshake results in a reference on the NFSD module (above),
+ * so NFS implicitly holds a reference to the NFSD module and its
+ * functions in the 'nfs_to' nfsd_localio_operations cannot disappear.
+ *
+ * If the last NFS client using LOCALIO disconnects (and its reference
+ * on NFSD dropped) then NFSD could be unloaded, resulting in 'nfs_to'
+ * functions being invalid pointers. But if NFSD isn't loaded then NFS
+ * will not be able to handshake with NFSD and will have no cause to
+ * try to call 'nfs_to' function pointers. If/when NFSD is reloaded it
+ * will reinitialize the 'nfs_to' function pointers and make LOCALIO
+ * possible.
+ */
+const struct nfsd_localio_operations *nfs_to;
+EXPORT_SYMBOL_GPL(nfs_to);
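
The coupling described above boils down to a familiar pattern: the consumer holds only a pointer to an operations table, and the optional provider fills that pointer in when it registers. A minimal userspace sketch of the pattern (assumption: plain C with illustrative names, not the real nfsd_localio_operations):

#include <stdio.h>

struct provider_ops {
	int (*do_io)(int fd);
};

static const struct provider_ops *ops;	/* plays the role of 'nfs_to' */

static int provider_do_io(int fd)
{
	printf("direct io on fd %d\n", fd);
	return 0;
}

static const struct provider_ops provider = { .do_io = provider_do_io };

static void provider_register(void)	/* nfsd_localio_ops_init() analog */
{
	ops = &provider;
}

int main(void)
{
	provider_register();
	if (ops)		/* consumer calls through the table only once it exists */
		ops->do_io(3);
	return 0;
}
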
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index ec2ab6429e00..c0bd1509ccd4 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -7,6 +7,7 @@ config NFSD
select LOCKD
select SUNRPC
select EXPORTFS
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFSD_V2_ACL
select NFS_ACL_SUPPORT if NFSD_V3_ACL
depends on MULTIUSER
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index b8736a82e57c..18cbd3fa7691 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -23,3 +23,4 @@ nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
+nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 7bb4f2075ac5..c82d8e3e0d4f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1074,10 +1074,30 @@ static struct svc_export *exp_find(struct cache_detail *cd,
return exp;
}
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svc_xprt *xprt;
+
+ /*
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. So there is no need for further checks here.
+ */
+ if (!rqstp)
+ return nfs_ok;
+
+ xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
@@ -1098,17 +1118,17 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
- return 0;
+ return nfs_ok;
/* ip-address based client; check sec= export option: */
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
- return 0;
+ return nfs_ok;
}
/* defaults in absence of sec= options: */
if (exp->ex_nflavors == 0) {
if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
- return 0;
+ return nfs_ok;
}
/* If the compound op contains a spo_must_allowed op,
@@ -1118,7 +1138,7 @@ ok:
*/
if (nfsd4_spo_must_allow(rqstp))
- return 0;
+ return nfs_ok;
denied:
return nfserr_wrongsec;
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 24e8f1fbcebb..19bb88c7eebd 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -52,7 +52,7 @@
#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
-#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
+#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE|NFSD_MAY_LOCALIO)
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
@@ -390,6 +390,34 @@ nfsd_file_put(struct nfsd_file *nf)
nfsd_file_free(nf);
}
+/**
+ * nfsd_file_put_local - put the reference to nfsd_file and local nfsd_serv
+ * @nf: nfsd_file of which to put the references
+ *
+ * First put the reference of the nfsd_file and then put the
+ * reference to the associated nn->nfsd_serv.
+ */
+void
+nfsd_file_put_local(struct nfsd_file *nf)
+{
+ struct net *net = nf->nf_net;
+
+ nfsd_file_put(nf);
+ nfsd_serv_put(net);
+}
+
+/**
+ * nfsd_file_file - get the backing file of an nfsd_file
+ * @nf: nfsd_file of which to access the backing file.
+ *
+ * Return backing file for @nf.
+ */
+struct file *
+nfsd_file_file(struct nfsd_file *nf)
+{
+ return nf->nf_file;
+}
+
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
@@ -982,12 +1010,14 @@ nfsd_file_is_cached(struct inode *inode)
}
static __be32
-nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf, bool want_gc)
{
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
- struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
bool stale_retry = true;
bool open_retry = true;
@@ -996,8 +1026,13 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
int ret;
retry:
- status = fh_verify(rqstp, fhp, S_IFREG,
- may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (rqstp) {
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ } else {
+ status = fh_verify_local(net, cred, client, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ }
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
@@ -1143,7 +1178,8 @@ __be32
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, true);
}
/**
@@ -1167,7 +1203,55 @@ __be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, false);
+}
+
+/**
+ * nfsd_file_acquire_local - Get a struct nfsd_file with an open file for localio
+ * @net: The network namespace in which to perform a lookup
+ * @cred: the user credential with which to validate access
+ * @client: the auth_domain for LOCALIO lookup
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * This file lookup interface provides access to a file given the
+ * filehandle and credential. No connection-based authorisation
+ * is performed and in that way it is quite different to other
+ * file access mediated by nfsd. It allows a kernel module such as the NFS
+ * client to reach across network and filesystem namespaces to access
+ * a file. The security implications of this should be carefully
+ * considered before use.
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * and garbage-collected. The object is retained for a few
+ * seconds after the final nfsd_file_put() in case the caller
+ * wants to re-use it.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ /*
+ * Save creds before calling nfsd_file_do_acquire() (which calls
+ * nfsd_setuser). Important because caller (LOCALIO) is from
+ * client context.
+ */
+ const struct cred *save_cred = get_current_cred();
+ __be32 beres;
+
+ beres = nfsd_file_do_acquire(NULL, net, cred, client,
+ fhp, may_flags, NULL, pnf, true);
+ revert_creds(save_cred);
+ return beres;
}
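
The cred handling above follows a save/override/revert discipline: the caller's identity is captured before the acquire path switches identities, and restored unconditionally afterwards so the LOCALIO client task is never left running with nfsd's credentials. A userspace stand-in for that discipline (not the kernel cred API):

#include <stdio.h>

struct cred { const char *who; };

static const struct cred client_cred = { "nfs-client" };
static const struct cred export_cred = { "export-mapped-user" };
static const struct cred *current_cred = &client_cred;

static void do_acquire(void)
{
	current_cred = &export_cred;	/* what nfsd_setuser() effectively does */
	printf("acquiring as %s\n", current_cred->who);
}

int main(void)
{
	const struct cred *saved = current_cred;	/* get_current_cred() analog */

	do_acquire();
	current_cred = saved;				/* revert_creds() analog */
	printf("back to %s\n", current_cred->who);
	return 0;
}
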
/**
@@ -1193,7 +1277,8 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, file, pnf, false);
}
/*
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 3fbec24eea6c..cadf3c2689c4 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -55,7 +55,9 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
+void nfsd_file_put_local(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
void nfsd_file_net_dispose(struct nfsd_net *nn);
bool nfsd_file_is_cached(struct inode *inode);
@@ -66,5 +68,8 @@ __be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf);
int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
#endif /* _FS_NFSD_FILECACHE_H */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
new file mode 100644
index 000000000000..291e9c69cae4
--- /dev/null
+++ b/fs/nfsd/localio.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS server support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/exportfs.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+#include <linux/string.h>
+
+#include "nfsd.h"
+#include "vfs.h"
+#include "netns.h"
+#include "filecache.h"
+#include "cache.h"
+
+static const struct nfsd_localio_operations nfsd_localio_ops = {
+ .nfsd_serv_try_get = nfsd_serv_try_get,
+ .nfsd_serv_put = nfsd_serv_put,
+ .nfsd_open_local_fh = nfsd_open_local_fh,
+ .nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_file = nfsd_file_file,
+};
+
+void nfsd_localio_ops_init(void)
+{
+ nfs_to = &nfsd_localio_ops;
+}
+
+/**
+ * nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file
+ *
+ * @net: 'struct net' to get the proper nfsd_net required for LOCALIO access
+ * @dom: 'struct auth_domain' required for LOCALIO access
+ * @rpc_clnt: rpc_clnt that the client established
+ * @cred: cred that the client established
+ * @nfs_fh: filehandle to lookup
+ * @fmode: fmode_t to use for open
+ *
+ * This function maps a local fh to a path on a local filesystem.
+ * This is useful when the nfs client has the local server mounted - it can
+ * avoid all the NFS overhead with reads, writes and commits.
+ *
+ * On successful return, returned nfsd_file will have its nf_net member
+ * set. Caller (NFS client) is responsible for calling nfsd_serv_put and
+ * nfsd_file_put (via nfs_to->nfsd_file_put_local).
+ */
+struct nfsd_file *
+nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ int mayflags = NFSD_MAY_LOCALIO;
+ struct svc_cred rq_cred;
+ struct svc_fh fh;
+ struct nfsd_file *localio;
+ __be32 beres;
+
+ if (nfs_fh->size > NFS4_FHSIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* nfs_fh -> svc_fh */
+ fh_init(&fh, NFS4_FHSIZE);
+ fh.fh_handle.fh_size = nfs_fh->size;
+ memcpy(fh.fh_handle.fh_raw, nfs_fh->data, nfs_fh->size);
+
+ if (fmode & FMODE_READ)
+ mayflags |= NFSD_MAY_READ;
+ if (fmode & FMODE_WRITE)
+ mayflags |= NFSD_MAY_WRITE;
+
+ svcauth_map_clnt_to_svc_cred_local(rpc_clnt, cred, &rq_cred);
+
+ beres = nfsd_file_acquire_local(net, &rq_cred, dom,
+ &fh, mayflags, &localio);
+ if (beres)
+ localio = ERR_PTR(nfs_stat_to_errno(be32_to_cpu(beres)));
+
+ fh_put(&fh);
+ if (rq_cred.cr_group_info)
+ put_group_info(rq_cred.cr_group_info);
+
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfsd_open_local_fh);
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static __be32 localio_proc_null(struct svc_rqst *rqstp)
+{
+ return rpc_success;
+}
+
+struct localio_uuidarg {
+ uuid_t uuid;
+};
+
+static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ net, rqstp->rq_client, THIS_MODULE);
+
+ return rpc_success;
+}
+
+static bool localio_decode_uuidarg(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ u8 uuid[UUID_SIZE];
+
+ if (decode_opaque_fixed(xdr, uuid, UUID_SIZE))
+ return false;
+ import_uuid(&argp->uuid, uuid);
+
+ return true;
+}
+
+static const struct svc_procedure localio_procedures1[] = {
+ [LOCALIOPROC_NULL] = {
+ .pc_func = localio_proc_null,
+ .pc_decode = nfssvc_decode_voidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 0,
+ .pc_name = "NULL",
+ },
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .pc_func = localio_proc_uuid_is_local,
+ .pc_decode = localio_decode_uuidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct localio_uuidarg),
+ .pc_argzero = sizeof(struct localio_uuidarg),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_name = "UUID_IS_LOCAL",
+ },
+};
+
+#define LOCALIO_NR_PROCEDURES ARRAY_SIZE(localio_procedures1)
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ localio_count[LOCALIO_NR_PROCEDURES]);
+const struct svc_version localio_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = LOCALIO_NR_PROCEDURES,
+ .vs_proc = localio_procedures1,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_count = localio_count,
+ .vs_xdrsize = XDR_QUADLEN(UUID_SIZE),
+ .vs_hidden = true,
+};
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 37b8bfdcfeea..26f7b34d1a03 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -13,6 +13,7 @@
#include <linux/filelock.h>
#include <linux/nfs4.h>
#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
@@ -139,7 +140,9 @@ struct nfsd_net {
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
-
+ struct percpu_ref nfsd_serv_ref;
+ struct completion nfsd_serv_confirm_done;
+ struct completion nfsd_serv_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -214,6 +217,10 @@ struct nfsd_net {
/* last time an admin-revoke happened for NFSv4.0 */
time64_t nfs40_last_revoke;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /* Local clients to be invalidated when net is shut down */
+ struct list_head local_clients;
+#endif
};
/* Simple check to find out if a given net was properly initialized */
@@ -222,6 +229,9 @@ struct nfsd_net {
extern bool nfsd_support_version(int vers);
extern unsigned int nfsd_net_id;
+bool nfsd_serv_try_get(struct net *net);
+void nfsd_serv_put(struct net *net);
+
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1c9e5b4bcb0a..3adbc05ebaac 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
+#include <linux/nfslocalio.h>
#include "idmap.h"
#include "nfsd.h"
@@ -2246,7 +2247,7 @@ static __net_init int nfsd_net_init(struct net *net)
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
- nn->nfsd_svcstats.program = &nfsd_program;
+ nn->nfsd_svcstats.program = &nfsd_programs[0];
for (i = 0; i < sizeof(nn->nfsd_versions); i++)
nn->nfsd_versions[i] = nfsd_support_version(i);
for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
@@ -2257,7 +2258,9 @@ static __net_init int nfsd_net_init(struct net *net)
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
nfsd_proc_stat_init(net);
-
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ INIT_LIST_HEAD(&nn->local_clients);
+#endif
return 0;
out_repcache_error:
@@ -2268,6 +2271,22 @@ out_export_error:
return retval;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/**
+ * nfsd_net_pre_exit - Disconnect localio clients from net namespace
+ * @net: a network namespace that is about to be destroyed
+ *
+ * This invalidates the ->net pointers held by localio clients
+ * while they can still safely access nn->counter.
+ */
+static __net_exit void nfsd_net_pre_exit(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_invalidate_clients(&nn->local_clients);
+}
+#endif
+
/**
* nfsd_net_exit - Release the nfsd_net portion of a net namespace
* @net: a network namespace that is about to be destroyed
@@ -2285,6 +2304,9 @@ static __net_exit void nfsd_net_exit(struct net *net)
static struct pernet_operations nfsd_net_ops = {
.init = nfsd_net_init,
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ .pre_exit = nfsd_net_pre_exit,
+#endif
.exit = nfsd_net_exit,
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
@@ -2322,6 +2344,7 @@ static int __init init_nfsd(void)
retval = genl_register_family(&nfsd_nl_family);
if (retval)
goto out_free_all;
+ nfsd_localio_ops_init();
return 0;
out_free_all:
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 4ccbf014a2c7..4b56ba1e8e48 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -85,7 +85,7 @@ struct nfsd_genl_rqstp {
u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
};
-extern struct svc_program nfsd_program;
+extern struct svc_program nfsd_programs[];
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
extern spinlock_t nfsd_drc_lock;
@@ -146,6 +146,10 @@ extern const struct svc_version nfsd_acl_version3;
#endif
#endif
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+extern const struct svc_version localio_version1;
+#endif
+
struct nfsd_net;
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 50d23d56f403..40ad58a6a036 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -87,23 +87,24 @@ nfsd_mode_check(struct dentry *dentry, umode_t requested)
return nfserr_wrong_type;
}
-static bool nfsd_originating_port_ok(struct svc_rqst *rqstp, int flags)
+static bool nfsd_originating_port_ok(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
+ struct svc_export *exp)
{
- if (flags & NFSEXP_INSECURE_PORT)
+ if (nfsexp_flags(cred, exp) & NFSEXP_INSECURE_PORT)
return true;
/* We don't require gss requests to use low ports: */
- if (rqstp->rq_cred.cr_flavor >= RPC_AUTH_GSS)
+ if (cred->cr_flavor >= RPC_AUTH_GSS)
return true;
return test_bit(RQ_SECURE, &rqstp->rq_flags);
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
struct svc_export *exp)
{
- int flags = nfsexp_flags(&rqstp->rq_cred, exp);
-
/* Check if the request originated from a secure port. */
- if (!nfsd_originating_port_ok(rqstp, flags)) {
+ if (rqstp && !nfsd_originating_port_ok(rqstp, cred, exp)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -111,7 +112,7 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
}
/* Set user creds for this exportpoint */
- return nfserrno(nfsd_setuser(&rqstp->rq_cred, exp));
+ return nfserrno(nfsd_setuser(cred, exp));
}
static inline __be32 check_pseudo_root(struct dentry *dentry,
@@ -141,7 +142,11 @@ static inline __be32 check_pseudo_root(struct dentry *dentry,
* dentry. On success, the results are used to set fh_export and
* fh_dentry.
*/
-static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
struct fid *fid = NULL;
@@ -183,8 +188,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
data_left -= len;
if (data_left < 0)
return error;
- exp = rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
- rqstp->rq_client, rqstp->rq_gssclient,
+ exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL,
+ net, client, gssclient,
fh->fh_fsid_type, fh->fh_fsid);
fid = (struct fid *)(fh->fh_fsid + len);
@@ -219,7 +224,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
put_cred(override_creds(new));
put_cred(new);
} else {
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
}
@@ -266,20 +271,20 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- switch (rqstp->rq_vers) {
- case 4:
+ switch (fhp->fh_maxsize) {
+ case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
fhp->fh_no_atomic_attr = true;
fhp->fh_64bit_cookies = true;
break;
- case 3:
+ case NFS3_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOWCC)
fhp->fh_no_wcc = true;
fhp->fh_64bit_cookies = true;
if (exp->ex_flags & NFSEXP_V4ROOT)
goto out;
break;
- case 2:
+ case NFS_FHSIZE:
fhp->fh_no_wcc = true;
if (EX_WGATHER(exp))
fhp->fh_use_wgather = true;
@@ -294,42 +299,33 @@ out:
}
/**
- * fh_verify - filehandle lookup and access checking
- * @rqstp: pointer to current rpc request
+ * __fh_verify - filehandle lookup and access checking
+ * @rqstp: RPC transaction context, or NULL
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @gssclient: RPC GSS auth domain, or NULL
* @fhp: filehandle to be verified
* @type: expected type of object pointed to by filehandle
* @access: type of access needed to object
*
- * Look up a dentry from the on-the-wire filehandle, check the client's
- * access to the export, and set the current task's credentials.
- *
- * Regardless of success or failure of fh_verify(), fh_put() should be
- * called on @fhp when the caller is finished with the filehandle.
- *
- * fh_verify() may be called multiple times on a given filehandle, for
- * example, when processing an NFSv4 compound. The first call will look
- * up a dentry using the on-the-wire filehandle. Subsequent calls will
- * skip the lookup and just perform the other checks and possibly change
- * the current task's credentials.
- *
- * @type specifies the type of object expected using one of the S_IF*
- * constants defined in include/linux/stat.h. The caller may use zero
- * to indicate that it doesn't care, or a negative integer to indicate
- * that it expects something not of the given type.
- *
- * @access is formed from the NFSD_MAY_* constants defined in
- * fs/nfsd/vfs.h.
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
*/
-__be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+static __be32
+__fh_verify(struct svc_rqst *rqstp,
+ struct net *net, struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp, umode_t type, int access)
{
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
if (!fhp->fh_dentry) {
- error = nfsd_set_fh_dentry(rqstp, fhp);
+ error = nfsd_set_fh_dentry(rqstp, net, cred, client,
+ gssclient, fhp);
if (error)
goto out;
}
@@ -358,7 +354,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
if (error)
goto out;
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
@@ -388,7 +384,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
skip_pseudoflavor_check:
/* Finally, check access permissions. */
- error = nfsd_permission(&rqstp->rq_cred, exp, dentry, access);
+ error = nfsd_permission(cred, exp, dentry, access);
out:
trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
if (error == nfserr_stale)
@@ -396,6 +392,63 @@ out:
return error;
}
+/**
+ * fh_verify_local - filehandle lookup and access checking
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * This API can be used by callers who do not have an RPC
+ * transaction context (ie are not running in an nfsd thread).
+ *
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
+ */
+__be32
+fh_verify_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ umode_t type, int access)
+{
+ return __fh_verify(NULL, net, cred, client, NULL,
+ fhp, type, access);
+}
+
+/**
+ * fh_verify - filehandle lookup and access checking
+ * @rqstp: pointer to current rpc request
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * Look up a dentry from the on-the-wire filehandle, check the client's
+ * access to the export, and set the current task's credentials.
+ *
+ * Regardless of success or failure of fh_verify(), fh_put() should be
+ * called on @fhp when the caller is finished with the filehandle.
+ *
+ * fh_verify() may be called multiple times on a given filehandle, for
+ * example, when processing an NFSv4 compound. The first call will look
+ * up a dentry using the on-the-wire filehandle. Subsequent calls will
+ * skip the lookup and just perform the other checks and possibly change
+ * the current task's credentials.
+ *
+ * @type specifies the type of object expected using one of the S_IF*
+ * constants defined in include/linux/stat.h. The caller may use zero
+ * to indicate that it doesn't care, or a negative integer to indicate
+ * that it expects something not of the given type.
+ *
+ * @access is formed from the NFSD_MAY_* constants defined in
+ * fs/nfsd/vfs.h.
+ */
+__be32
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+{
+ return __fh_verify(rqstp, SVC_NET(rqstp), &rqstp->rq_cred,
+ rqstp->rq_client, rqstp->rq_gssclient,
+ fhp, type, access);
+}
/*
* Compose a file handle for an NFS reply.
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 8d46e203d139..5b7394801dc4 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -217,6 +217,8 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
* Function prototypes
*/
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
+__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
+ struct svc_fh *, umode_t, int);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
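For illustration only (not part of the patch): a minimal sketch of how a caller without an RPC transaction context might use the new fh_verify_local() entry point declared above. The function name and the access-flag choice are assumptions made for the example.

	/*
	 * Hypothetical LOCALIO-style caller: verify a filehandle outside of
	 * an nfsd thread, then release it with fh_put() as the kernel-doc
	 * requires regardless of success or failure (sketch only).
	 */
	static __be32 example_localio_verify(struct net *net,
					     struct svc_cred *cred,
					     struct auth_domain *client,
					     struct svc_fh *fhp)
	{
		__be32 status;

		/* No svc_rqst here, so use the non-RPC entry point. */
		status = fh_verify_local(net, cred, client, fhp,
					 S_IFREG, NFSD_MAY_READ);

		/* ... on success, fhp->fh_dentry is usable here ... */

		fh_put(fhp);
		return status;
	}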
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index defc430f912f..e236135ddc63 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
+#include <linux/nfslocalio.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
@@ -35,7 +36,6 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
-extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int nfsd_acl_rpcbind_set(struct net *,
@@ -80,6 +80,15 @@ DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long nfsd_drc_max_mem;
unsigned long nfsd_drc_mem_used;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+static const struct svc_version *localio_versions[] = {
+ [1] = &localio_version1,
+};
+
+#define NFSD_LOCALIO_NRVERS ARRAY_SIZE(localio_versions)
+
+#endif /* CONFIG_NFS_LOCALIO */
+
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static const struct svc_version *nfsd_acl_version[] = {
# if defined(CONFIG_NFSD_V2_ACL)
@@ -90,20 +99,9 @@ static const struct svc_version *nfsd_acl_version[] = {
# endif
};
-#define NFSD_ACL_MINVERS 2
+#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
-static struct svc_program nfsd_acl_program = {
- .pg_prog = NFS_ACL_PROGRAM,
- .pg_nvers = NFSD_ACL_NRVERS,
- .pg_vers = nfsd_acl_version,
- .pg_name = "nfsacl",
- .pg_class = "nfsd",
- .pg_authenticate = &svc_set_client,
- .pg_init_request = nfsd_acl_init_request,
- .pg_rpcbind_set = nfsd_acl_rpcbind_set,
-};
-
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
@@ -116,18 +114,41 @@ static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
#endif
};
-struct svc_program nfsd_program = {
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
- .pg_next = &nfsd_acl_program,
-#endif
+struct svc_program nfsd_programs[] = {
+ {
.pg_prog = NFS_PROGRAM, /* program number */
.pg_nvers = NFSD_MAXVERS+1, /* nr of entries in nfsd_version */
.pg_vers = nfsd_version, /* version table */
.pg_name = "nfsd", /* program name */
.pg_class = "nfsd", /* authentication class */
- .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_authenticate = svc_set_client, /* export authentication */
.pg_init_request = nfsd_init_request,
.pg_rpcbind_set = nfsd_rpcbind_set,
+ },
+#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ {
+ .pg_prog = NFS_ACL_PROGRAM,
+ .pg_nvers = NFSD_ACL_NRVERS,
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ },
+#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ {
+ .pg_prog = NFS_LOCALIO_PROGRAM,
+ .pg_nvers = NFSD_LOCALIO_NRVERS,
+ .pg_vers = localio_versions,
+ .pg_name = "nfslocalio",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+ }
+#endif /* CONFIG_NFS_LOCALIO */
};
bool nfsd_support_version(int vers)
@@ -193,6 +214,34 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
+bool nfsd_serv_try_get(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
+}
+
+void nfsd_serv_put(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ percpu_ref_put(&nn->nfsd_serv_ref);
+}
+
+static void nfsd_serv_done(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_confirm_done);
+}
+
+static void nfsd_serv_free(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_free_done);
+}
+
/*
* Maximum number of nfsd processes
*/
@@ -392,6 +441,7 @@ static void nfsd_shutdown_net(struct net *net)
lockd_down(net);
nn->lockd_up = false;
}
+ percpu_ref_exit(&nn->nfsd_serv_ref);
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
@@ -471,6 +521,13 @@ void nfsd_destroy_serv(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv = nn->nfsd_serv;
+ lockdep_assert_held(&nfsd_mutex);
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
+ wait_for_completion(&nn->nfsd_serv_confirm_done);
+ wait_for_completion(&nn->nfsd_serv_free_done);
+ /* percpu_ref_exit is called in nfsd_shutdown_net */
+
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
@@ -595,10 +652,18 @@ int nfsd_create_serv(struct net *net)
if (nn->nfsd_serv)
return 0;
+ error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
+ 0, GFP_KERNEL);
+ if (error)
+ return error;
+ init_completion(&nn->nfsd_serv_free_done);
+ init_completion(&nn->nfsd_serv_confirm_done);
+
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
- serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
+ serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
+ &nn->nfsd_svcstats,
nfsd_max_blksize, nfsd);
if (serv == NULL)
return -ENOMEM;
@@ -905,7 +970,7 @@ nfsd(void *vrqstp)
}
/**
- * nfsd_dispatch - Process an NFS or NFSACL Request
+ * nfsd_dispatch - Process an NFS or NFSACL or LOCALIO Request
* @rqstp: incoming request
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
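For context, a minimal sketch (not part of the patch) of the guard pattern the new per-net nfsd_serv_ref enables; the caller name below is hypothetical.

	/*
	 * Hypothetical non-nfsd caller (e.g. a LOCALIO client path) pinning
	 * the per-net nfsd server: nfsd_serv_try_get() fails once
	 * nfsd_destroy_serv() has killed the ref, and the destroy path waits
	 * for the confirm and free completions before percpu_ref_exit() runs
	 * in nfsd_shutdown_net().
	 */
	static int example_with_nfsd_serv(struct net *net)
	{
		int ret;

		if (!nfsd_serv_try_get(net))
			return -ENXIO;

		/* ... safe to use this net's nfsd server here ... */
		ret = 0;

		nfsd_serv_put(net);
		return ret;
	}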
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 7ab66497e261..c625966cfcf3 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -86,7 +86,8 @@ DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
{ NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" }, \
{ NFSD_MAY_BYPASS_GSS, "BYPASS_GSS" }, \
{ NFSD_MAY_READ_IF_EXEC, "READ_IF_EXEC" }, \
- { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" })
+ { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" }, \
+ { NFSD_MAY_LOCALIO, "LOCALIO" })
TRACE_EVENT(nfsd_compound,
TP_PROTO(
@@ -193,7 +194,7 @@ TRACE_EVENT(nfsd_compound_encode_err,
{ S_IFIFO, "FIFO" }, \
{ S_IFSOCK, "SOCK" })
-TRACE_EVENT(nfsd_fh_verify,
+TRACE_EVENT_CONDITION(nfsd_fh_verify,
TP_PROTO(
const struct svc_rqst *rqstp,
const struct svc_fh *fhp,
@@ -201,6 +202,7 @@ TRACE_EVENT(nfsd_fh_verify,
int access
),
TP_ARGS(rqstp, fhp, type, access),
+ TP_CONDITION(rqstp != NULL),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -239,7 +241,7 @@ TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
__be32 error
),
TP_ARGS(rqstp, fhp, type, access, error),
- TP_CONDITION(error),
+ TP_CONDITION(rqstp != NULL && error),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -295,12 +297,13 @@ DECLARE_EVENT_CLASS(nfsd_fh_err_class,
__entry->status)
)
-#define DEFINE_NFSD_FH_ERR_EVENT(name) \
-DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
- TP_PROTO(struct svc_rqst *rqstp, \
- struct svc_fh *fhp, \
- int status), \
- TP_ARGS(rqstp, fhp, status))
+#define DEFINE_NFSD_FH_ERR_EVENT(name) \
+DEFINE_EVENT_CONDITION(nfsd_fh_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *fhp, \
+ int status), \
+ TP_ARGS(rqstp, fhp, status), \
+ TP_CONDITION(rqstp != NULL))
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 01947561d375..3ff146522556 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -33,6 +33,8 @@
#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
+#define NFSD_MAY_LOCALIO 0x2000 /* for tracing, reflects when localio used */
+
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 67ee176b8824..c675fc40ce2d 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -22,7 +22,6 @@ static struct vfsmount *nsfs_mnt;
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg);
static const struct file_operations ns_file_operations = {
- .llseek = no_llseek,
.unlocked_ioctl = ns_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1fea43c33b6b..db72b3e924b3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -156,9 +156,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
&ext_flags);
if (err) {
- mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
- "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
- (unsigned long long)p_blkno);
+ mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
+ "block: %llu\n", inode, (unsigned long long)iblock);
goto bail;
}
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 70a768b623cf..f7672472fa82 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -973,7 +973,13 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
}
while (done < nr) {
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
+ rc = -EAGAIN;
+ mlog(ML_ERROR,
+ "Inode #%llu ip_alloc_sem is temporarily unavailable\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ break;
+ }
rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
&p_block, &p_count, NULL);
up_read(&OCFS2_I(inode)->ip_alloc_sem);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 4f85508538fc..004393b13c0a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -25,6 +25,7 @@
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"
+#include "symlink.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -4148,8 +4149,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
int ret;
struct inode *inode = d_inode(old_dentry);
struct buffer_head *new_bh = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
- if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+ if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
@@ -4175,6 +4177,26 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
goto out_unlock;
}
+ if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) &&
+ (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+ /*
+ * Adjust extent record count to reserve space for extended attribute.
+ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+ */
+ struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode);
+
+ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+ !(ocfs2_inode_is_fast_symlink(new_inode))) {
+ struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data;
+ struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data;
+ struct ocfs2_extent_list *el = &new_di->id2.i_list;
+ int inline_size = le16_to_cpu(old_di->i_xattr_inline_size);
+
+ le16_add_cpu(&el->l_count, -(inline_size /
+ sizeof(struct ocfs2_extent_rec)));
+ }
+ }
+
ret = ocfs2_create_reflink_node(inode, old_bh,
new_inode, new_bh, preserve);
if (ret) {
@@ -4182,7 +4204,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
goto inode_unlock;
}
- if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+ if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
ret = ocfs2_reflink_xattrs(inode, old_bh,
new_inode, new_bh,
preserve);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 0e58a5ce539e..dd0a05365e79 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -6511,16 +6511,7 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
}
new_oi = OCFS2_I(args->new_inode);
- /*
- * Adjust extent record count to reserve space for extended attribute.
- * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
- */
- if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
- !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
- struct ocfs2_extent_list *el = &new_di->id2.i_list;
- le16_add_cpu(&el->l_count, -(inline_size /
- sizeof(struct ocfs2_extent_rec)));
- }
+
spin_lock(&new_oi->ip_lock);
new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 2b7a5a3a7a2f..4504493b20be 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -117,7 +117,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
struct file *f = ovl_open_realfile(file, &realpath);
if (IS_ERR(f))
return PTR_ERR(f);
- real->word = (unsigned long)ovl_open_realfile(file, &realpath) | FDPUT_FPUT;
+ real->word = (unsigned long)f | FDPUT_FPUT;
return 0;
}
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 7ffdc88dfb52..80675b6bf884 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -120,6 +120,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct nsproxy *nsp __free(put_nsproxy) = NULL;
struct pid *pid = pidfd_pid(file);
struct ns_common *ns_common = NULL;
+ struct pid_namespace *pid_ns;
if (arg)
return -EINVAL;
@@ -202,7 +203,9 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PIDFD_GET_PID_NAMESPACE:
if (IS_ENABLED(CONFIG_PID_NS)) {
rcu_read_lock();
- ns_common = to_ns_common( get_pid_ns(task_active_pid_ns(task)));
+ pid_ns = task_active_pid_ns(task);
+ if (pid_ns)
+ ns_common = to_ns_common(get_pid_ns(pid_ns));
rcu_read_unlock();
}
break;
diff --git a/fs/pipe.c b/fs/pipe.c
index 4083ba492cb6..12b22c2723b7 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1231,7 +1231,6 @@ err:
const struct file_operations pipefifo_fops = {
.open = fifo_open,
- .llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
.poll = pipe_poll,
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 7481b21a0489..2d851f596a72 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -416,7 +416,7 @@ find_timestamp(struct cifs_ses *ses)
}
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
- const struct nls_table *nls_cp)
+ const struct nls_table *nls_cp, struct shash_desc *hmacmd5)
{
int rc = 0;
int len;
@@ -425,34 +425,26 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
wchar_t *domain;
wchar_t *server;
- if (!ses->server->secmech.hmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash, nls_cp);
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, nt_hash,
- CIFS_NTHASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, nt_hash, CIFS_NTHASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__);
+ cifs_dbg(VFS, "%s: Could not set NT hash as a key, rc=%d\n", __func__, rc);
return rc;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
return rc;
}
/* convert ses->user_name to unicode */
len = ses->user_name ? strlen(ses->user_name) : 0;
user = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (user == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (user == NULL)
+ return -ENOMEM;
if (len) {
len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
@@ -461,11 +453,10 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
*(u16 *)user = 0;
}
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)user, 2 * len);
+ rc = crypto_shash_update(hmacmd5, (char *)user, 2 * len);
kfree(user);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with user\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with user, rc=%d\n", __func__, rc);
return rc;
}
@@ -474,19 +465,15 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = strlen(ses->domainName);
domain = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (domain == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (domain == NULL)
+ return -ENOMEM;
+
len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
nls_cp);
- rc =
- crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)domain, 2 * len);
+ rc = crypto_shash_update(hmacmd5, (char *)domain, 2 * len);
kfree(domain);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with domain\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not update with domain, rc=%d\n", __func__, rc);
return rc;
}
} else {
@@ -494,33 +481,27 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = strlen(ses->ip_addr);
server = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (server == NULL) {
- rc = -ENOMEM;
- return rc;
- }
- len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
- nls_cp);
- rc =
- crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)server, 2 * len);
+ if (server == NULL)
+ return -ENOMEM;
+
+ len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len, nls_cp);
+ rc = crypto_shash_update(hmacmd5, (char *)server, 2 * len);
kfree(server);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with server\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not update with server, rc=%d\n", __func__, rc);
return rc;
}
}
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ntlmv2_hash);
+ rc = crypto_shash_final(hmacmd5, ntlmv2_hash);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
return rc;
}
static int
-CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
+CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_desc *hmacmd5)
{
int rc;
struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
@@ -531,43 +512,33 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
offsetof(struct ntlmv2_resp, challenge.key[0]));
- if (!ses->server->secmech.hmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
return rc;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
return rc;
}
if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
- memcpy(ntlmv2->challenge.key,
- ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+ memcpy(ntlmv2->challenge.key, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
else
- memcpy(ntlmv2->challenge.key,
- ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- ntlmv2->challenge.key, hash_len);
+ memcpy(ntlmv2->challenge.key, ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+
+ rc = crypto_shash_update(hmacmd5, ntlmv2->challenge.key, hash_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
return rc;
}
/* Note that the MD5 digest overwrites anon.challenge_key.key */
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ntlmv2->ntlmv2_hash);
+ rc = crypto_shash_final(hmacmd5, ntlmv2->ntlmv2_hash);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
return rc;
}
@@ -575,6 +546,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
+ struct shash_desc *hmacmd5 = NULL;
int rc;
int baselen;
unsigned int tilen;
@@ -640,55 +612,51 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
cifs_server_lock(ses->server);
- rc = cifs_alloc_hash("hmac(md5)", &ses->server->secmech.hmacmd5);
+ rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
if (rc) {
+ cifs_dbg(VFS, "Could not allocate HMAC-MD5, rc=%d\n", rc);
goto unlock;
}
/* calculate ntlmv2_hash */
- rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp, hmacmd5);
if (rc) {
- cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
+ cifs_dbg(VFS, "Could not get NTLMv2 hash, rc=%d\n", rc);
goto unlock;
}
/* calculate first part of the client response (CR1) */
- rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+ rc = CalcNTLMv2_response(ses, ntlmv2_hash, hmacmd5);
if (rc) {
- cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+ cifs_dbg(VFS, "Could not calculate CR1, rc=%d\n", rc);
goto unlock;
}
/* now calculate the session key for NTLMv2 */
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- ntlmv2->ntlmv2_hash,
- CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_update(hmacmd5, ntlmv2->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ses->auth_key.response);
+ rc = crypto_shash_final(hmacmd5, ses->auth_key.response);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
unlock:
cifs_server_unlock(ses->server);
+ cifs_free_hash(&hmacmd5);
setup_ntlmv2_rsp_ret:
kfree_sensitive(tiblob);
@@ -732,16 +700,19 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
cifs_free_hash(&server->secmech.aes_cmac);
cifs_free_hash(&server->secmech.hmacsha256);
cifs_free_hash(&server->secmech.md5);
- cifs_free_hash(&server->secmech.sha512);
- cifs_free_hash(&server->secmech.hmacmd5);
- if (server->secmech.enc) {
- crypto_free_aead(server->secmech.enc);
- server->secmech.enc = NULL;
- }
+ if (!SERVER_IS_CHAN(server)) {
+ if (server->secmech.enc) {
+ crypto_free_aead(server->secmech.enc);
+ server->secmech.enc = NULL;
+ }
- if (server->secmech.dec) {
- crypto_free_aead(server->secmech.dec);
+ if (server->secmech.dec) {
+ crypto_free_aead(server->secmech.dec);
+ server->secmech.dec = NULL;
+ }
+ } else {
+ server->secmech.enc = NULL;
server->secmech.dec = NULL;
}
}
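The NTLMv2 changes above stop caching an hmacmd5 shash in server->secmech and instead allocate one per setup_ntlmv2_rsp() call. A condensed sketch (not part of the patch; simplified, no server locking) of that allocate/use/free pattern:

	/* Per-call HMAC-MD5 descriptor, released before returning (sketch). */
	static int example_hmac_md5(const u8 *key, unsigned int klen,
				    const void *data, unsigned int len, u8 *out)
	{
		struct shash_desc *hmacmd5 = NULL;
		int rc;

		rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
		if (rc)
			return rc;

		rc = crypto_shash_setkey(hmacmd5->tfm, key, klen);
		if (!rc)
			rc = crypto_shash_init(hmacmd5);
		if (!rc)
			rc = crypto_shash_update(hmacmd5, data, len);
		if (!rc)
			rc = crypto_shash_final(hmacmd5, out);

		cifs_free_hash(&hmacmd5);	/* always release the descriptor */
		return rc;
	}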
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 61ded59b858f..71b720dbb2ce 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -146,6 +146,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 50
-#define CIFS_VERSION "2.50"
+#define SMB3_PRODUCT_BUILD 51
+#define CIFS_VERSION "2.51"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index a71a988a92f9..315aac5dec05 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -178,10 +178,8 @@ struct session_key {
/* crypto hashing related structure/fields, not specific to a sec mech */
struct cifs_secmech {
- struct shash_desc *hmacmd5; /* hmacmd5 hash function, for NTLMv2/CR1 hashes */
struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
- struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
@@ -821,6 +819,7 @@ struct TCP_Server_Info {
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
+ bool dfs_conn:1;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1059,6 +1058,7 @@ struct cifs_ses {
struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
+ struct list_head dlist; /* dfs list */
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
@@ -1287,6 +1287,7 @@ struct cifs_tcon {
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
+ struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index c69e3f48a60c..68c716e6261b 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -724,15 +724,9 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
-/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
- struct cifs_ses *next;
-
- do {
- next = ses->dfs_root_ses;
- __cifs_put_smb_ses(ses);
- } while ((ses = next));
+ __cifs_put_smb_ses(ses);
}
/* Get an active reference of @ses and its children.
@@ -746,9 +740,7 @@ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
-
- for (; ses; ses = ses->dfs_root_ses)
- ses->ses_count++;
+ ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 08a41c7aaf72..adf8758847f6 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -811,13 +811,9 @@ cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter
unsigned int to_read)
{
struct msghdr smb_msg = { .msg_iter = *iter };
- int ret;
iov_iter_truncate(&smb_msg.msg_iter, to_read);
- ret = cifs_readv_from_socket(server, &smb_msg);
- if (ret > 0)
- iov_iter_advance(iter, ret);
- return ret;
+ return cifs_readv_from_socket(server, &smb_msg);
}
static bool
@@ -1530,6 +1526,9 @@ static int match_server(struct TCP_Server_Info *server,
if (server->nosharesock)
return 0;
+ if (!match_super && (ctx->dfs_conn || server->dfs_conn))
+ return 0;
+
/* If multidialect negotiation see if existing sessions match one */
if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
if (server->vals->protocol_id < SMB30_PROT_ID)
@@ -1723,6 +1722,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (ctx->nosharesock)
tcp_ses->nosharesock = true;
+ tcp_ses->dfs_conn = ctx->dfs_conn;
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
@@ -1873,13 +1873,15 @@ out_err:
}
/* this function must be called with ses_lock and chan_lock held */
-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+static int match_session(struct cifs_ses *ses,
+ struct smb3_fs_context *ctx,
+ bool match_super)
{
if (ctx->sectype != Unspecified &&
ctx->sectype != ses->sectype)
return 0;
- if (ctx->dfs_root_ses != ses->dfs_root_ses)
+ if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
return 0;
/*
@@ -1998,7 +2000,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
- if (match_session(ses, ctx)) {
+ if (match_session(ses, ctx, false)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
ret = ses;
@@ -2058,8 +2060,7 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
if (do_logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
- if (rc)
- cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ cifs_server_dbg(FYI, "%s: Session Logoff: rc=%d\n",
__func__, rc);
_free_xid(xid);
}
@@ -2382,8 +2383,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
- if (ctx->dfs_root_ses)
- cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2458,6 +2457,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
+ LIST_HEAD(ses_list);
/*
* IPC tcon share the lifetime of their session and are
@@ -2482,6 +2482,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
list_del_init(&tcon->tcon_list);
tcon->status = TID_EXITING;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ list_replace_init(&tcon->dfs_ses_list, &ses_list);
+#endif
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2509,6 +2512,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_put_root_smb_sessions(&ses_list);
+#endif
}
/**
@@ -2892,7 +2898,7 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, true) ||
- !match_session(ses, ctx) ||
+ !match_session(ses, ctx, true) ||
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcon, mnt_data)) {
rc = 0;
@@ -3623,13 +3629,12 @@ out:
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
- bool isdfs;
int rc;
- rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ rc = dfs_mount_share(&mnt_ctx);
if (rc)
goto error;
- if (!isdfs)
+ if (!ctx->dfs_conn)
goto out;
/*
@@ -4034,7 +4039,7 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
-__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
@@ -4132,17 +4137,6 @@ out:
return tcon;
}
-static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
-{
- struct cifs_tcon *ret;
-
- cifs_mount_lock();
- ret = __cifs_construct_tcon(cifs_sb, fsuid);
- cifs_mount_unlock();
- return ret;
-}
-
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
@@ -4212,9 +4206,9 @@ tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
- int ret;
- kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
+ kuid_t fsuid = current_fsuid();
+ int err;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
@@ -4249,9 +4243,9 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
- ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
+ err = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
TASK_INTERRUPTIBLE);
- if (ret) {
+ if (err) {
cifs_put_tlink(tlink);
return ERR_PTR(-ERESTARTSYS);
}
@@ -4262,8 +4256,9 @@ wait_for_construction:
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
+ err = PTR_ERR(tlink->tl_tcon);
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
@@ -4275,8 +4270,11 @@ wait_for_construction:
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
+ err = PTR_ERR(tlink->tl_tcon);
+ if (err == -ENOKEY)
+ err = -EACCES;
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
return tlink;
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 3ec965547e3d..3f6077c68d68 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -69,7 +69,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
* Get an active reference of @ses so that next call to cifs_put_tcon() won't
* release it as any new DFS referrals must go through its IPC tcon.
*/
-static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void set_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses = mnt_ctx->ses;
@@ -95,7 +95,7 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
return rc;
}
-static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
+static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
struct dfs_info3_param *tgt,
struct dfs_ref_walk *rw)
{
@@ -120,6 +120,7 @@ static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
}
ref_walk_path(rw) = ref_path;
ref_walk_fpath(rw) = full_path;
+ ref_walk_ses(rw) = ctx->dfs_root_ses;
return 0;
}
@@ -128,11 +129,11 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param tgt = {};
- bool is_refsrv;
int rc = -ENOENT;
again:
do {
+ ctx->dfs_root_ses = ref_walk_ses(rw);
if (ref_walk_empty(rw)) {
rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1,
NULL, ref_walk_tl(rw));
@@ -158,10 +159,7 @@ again:
if (rc)
continue;
- is_refsrv = tgt.server_type == DFS_TYPE_ROOT ||
- DFS_INTERLINK(tgt.flags);
ref_walk_set_tgt_hint(rw);
-
if (tgt.flags & DFSREF_STORAGE_SERVER) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (!rc)
@@ -172,12 +170,10 @@ again:
continue;
}
- if (is_refsrv)
- add_root_smb_session(mnt_ctx);
-
+ set_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
- rc = set_ref_paths(mnt_ctx, &tgt, rw);
+ rc = setup_dfs_ref(mnt_ctx, &tgt, rw);
if (!rc) {
rc = -EREMOTE;
goto again;
@@ -193,20 +189,22 @@ out:
return rc;
}
-static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx)
+static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+ struct dfs_ref_walk **rw)
{
- struct dfs_ref_walk *rw;
int rc;
- rw = ref_walk_alloc();
- if (IS_ERR(rw))
- return PTR_ERR(rw);
+ *rw = ref_walk_alloc();
+ if (IS_ERR(*rw)) {
+ rc = PTR_ERR(*rw);
+ *rw = NULL;
+ return rc;
+ }
- ref_walk_init(rw);
- rc = set_ref_paths(mnt_ctx, NULL, rw);
+ ref_walk_init(*rw);
+ rc = setup_dfs_ref(mnt_ctx, NULL, *rw);
if (!rc)
- rc = __dfs_referral_walk(mnt_ctx, rw);
- ref_walk_free(rw);
+ rc = __dfs_referral_walk(mnt_ctx, *rw);
return rc;
}
@@ -214,16 +212,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_ref_walk *rw = NULL;
struct cifs_tcon *tcon;
char *origin_fullpath;
- bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
if (IS_ERR(origin_fullpath))
return PTR_ERR(origin_fullpath);
- rc = dfs_referral_walk(mnt_ctx);
+ rc = dfs_referral_walk(mnt_ctx, &rw);
if (!rc) {
/*
* Prevent superblock from being created with any missing
@@ -241,21 +239,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
spin_lock(&tcon->tc_lock);
- if (!tcon->origin_fullpath) {
- tcon->origin_fullpath = origin_fullpath;
- origin_fullpath = NULL;
- } else {
- new_tcon = false;
- }
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
+ ref_walk_set_tcon(rw, tcon);
spin_unlock(&tcon->tc_lock);
-
- if (new_tcon) {
- queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
- dfs_cache_get_ttl() * HZ);
- }
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
out:
kfree(origin_fullpath);
+ ref_walk_free(rw);
return rc;
}
@@ -279,7 +272,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
return rc;
}
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
bool nodfs = ctx->nodfs;
@@ -289,7 +282,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
- *isdfs = false;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
@@ -317,10 +309,15 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
return rc;
}
- *isdfs = true;
- add_root_smb_session(mnt_ctx);
- rc = __dfs_mount_share(mnt_ctx);
- dfs_put_root_smb_sessions(mnt_ctx);
+ if (!ctx->dfs_conn) {
+ ctx->dfs_conn = true;
+ cifs_mount_put_conns(mnt_ctx);
+ rc = get_session(mnt_ctx, NULL);
+ }
+ if (!rc) {
+ set_root_smb_session(mnt_ctx);
+ rc = __dfs_mount_share(mnt_ctx);
+ }
return rc;
}
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index e5c4dcf83750..1aa2bc65b3bc 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -19,6 +19,7 @@
struct dfs_ref {
char *path;
char *full_path;
+ struct cifs_ses *ses;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *tit;
};
@@ -38,6 +39,7 @@ struct dfs_ref_walk {
#define ref_walk_path(w) (ref_walk_cur(w)->path)
#define ref_walk_fpath(w) (ref_walk_cur(w)->full_path)
#define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
+#define ref_walk_ses(w) (ref_walk_cur(w)->ses)
static inline struct dfs_ref_walk *ref_walk_alloc(void)
{
@@ -60,14 +62,19 @@ static inline void __ref_walk_free(struct dfs_ref *ref)
kfree(ref->path);
kfree(ref->full_path);
dfs_cache_free_tgts(&ref->tl);
+ if (ref->ses)
+ cifs_put_smb_ses(ref->ses);
memset(ref, 0, sizeof(*ref));
}
static inline void ref_walk_free(struct dfs_ref_walk *rw)
{
- struct dfs_ref *ref = ref_walk_start(rw);
+ struct dfs_ref *ref;
- for (; ref <= ref_walk_end(rw); ref++)
+ if (!rw)
+ return;
+
+ for (ref = ref_walk_start(rw); ref <= ref_walk_end(rw); ref++)
__ref_walk_free(ref);
kfree(rw);
}
@@ -116,9 +123,22 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
+static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw,
+ struct cifs_tcon *tcon)
+{
+ struct dfs_ref *ref = ref_walk_start(rw);
+
+ for (; ref <= ref_walk_cur(rw); ref++) {
+ if (WARN_ON_ONCE(!ref->ses))
+ continue;
+ list_add(&ref->ses->dlist, &tcon->dfs_ses_list);
+ ref->ses = NULL;
+ }
+}
+
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx);
static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
{
@@ -142,20 +162,14 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
* references of all DFS root sessions that were used across the mount process
* in dfs_mount_share().
*/
-static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
{
- const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct cifs_ses *ses = ctx->dfs_root_ses;
- struct cifs_ses *cur;
-
- if (!ses)
- return;
+ struct cifs_ses *ses, *n;
- for (cur = ses; cur; cur = cur->dfs_root_ses) {
- if (cur->dfs_root_ses)
- cifs_put_smb_ses(cur->dfs_root_ses);
+ list_for_each_entry_safe(ses, n, head, dlist) {
+ list_del_init(&ses->dlist);
+ cifs_put_smb_ses(ses);
}
- cifs_put_smb_ses(ses);
}
#endif /* _CIFS_DFS_H */
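Taken together with the connect.c changes, DFS root sessions now live on the tcon's dfs_ses_list rather than being chained through dfs_root_ses refcounts. A simplified sketch (not part of the patch; locking and the surrounding tcon teardown elided) of the drop path implied above:

	/*
	 * cifs_put_tcon() detaches the DFS root-session list under tc_lock
	 * and, after the tcon itself is freed, drops each session via
	 * dfs_put_root_smb_sessions() (sketch only).
	 */
	static void example_drop_tcon_dfs_sessions(struct cifs_tcon *tcon)
	{
		LIST_HEAD(ses_list);

		list_replace_init(&tcon->dfs_ses_list, &ses_list);
		/* ... tconInfoFree(tcon, ...) happens here in the real code ... */
		dfs_put_root_smb_sessions(&ses_list);
	}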
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 11c8efecf7aa..110f03df012a 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce)
static inline void flush_cache_ent(struct cache_entry *ce)
{
+ cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
hlist_del_init(&ce->hlist);
kfree(ce->path);
free_tgts(ce);
@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n
return ce;
}
-static void remove_oldest_entry_locked(void)
+/* Remove all referrals that have a single target or oldest entry */
+static void purge_cache(void)
{
int i;
struct cache_entry *ce;
- struct cache_entry *to_del = NULL;
-
- WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+ struct cache_entry *oldest = NULL;
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
+ struct hlist_node *n;
- hlist_for_each_entry(ce, l, hlist) {
+ hlist_for_each_entry_safe(ce, n, l, hlist) {
if (hlist_unhashed(&ce->hlist))
continue;
- if (!to_del || timespec64_compare(&ce->etime,
- &to_del->etime) < 0)
- to_del = ce;
+ if (ce->numtgts == 1)
+ flush_cache_ent(ce);
+ else if (!oldest ||
+ timespec64_compare(&ce->etime,
+ &oldest->etime) < 0)
+ oldest = ce;
}
}
- if (!to_del) {
- cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
- return;
- }
-
- cifs_dbg(FYI, "%s: removing entry\n", __func__);
- dump_ce(to_del);
- flush_cache_ent(to_del);
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
+ flush_cache_ent(oldest);
}
/* Add a new DFS cache entry */
@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
- remove_oldest_entry_locked();
+ purge_cache();
}
rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
@@ -1095,16 +1093,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
- char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct sockaddr_storage ss;
const char *host;
+ const char *s2 = &tcon->tree_name[1];
size_t hostlen;
- struct sockaddr_storage ss;
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
bool match;
int rc;
- if (strcasecmp(s1, s2))
+ if (strcasecmp(s2, s1))
return false;
/*
@@ -1128,34 +1128,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
return match;
}
-/*
- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
- * target shares in @refs.
- */
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
- const char *path,
- struct dfs_cache_tgt_list *old_tl,
- struct dfs_cache_tgt_list *new_tl)
-{
- struct dfs_cache_tgt_iterator *oit, *nit;
-
- for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
- oit = dfs_cache_get_next_tgt(old_tl, oit)) {
- for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
- nit = dfs_cache_get_next_tgt(new_tl, nit)) {
- if (target_share_equal(server,
- dfs_cache_get_tgt_name(oit),
- dfs_cache_get_tgt_name(nit))) {
- dfs_cache_noreq_update_tgthint(path, nit);
- return;
- }
- }
- }
-
- cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_signal_cifsd_for_reconnect(server, true);
-}
-
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
@@ -1172,41 +1144,35 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+static char *get_ses_refpath(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
- DFS_CACHE_TGT_LIST(old_tl);
- DFS_CACHE_TGT_LIST(new_tl);
- bool needs_refresh = false;
- struct cache_entry *ce;
- unsigned int xid;
- char *path = NULL;
- int rc = 0;
-
- xid = get_xid();
+ char *path = ERR_PTR(-ENOENT);
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath) {
path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
if (!path)
- rc = -ENOMEM;
+ path = ERR_PTR(-ENOMEM);
}
mutex_unlock(&server->refpath_lock);
- if (!path)
- goto out;
+ return path;
+}
- down_read(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &old_tl);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- }
- up_read(&htable_rw_lock);
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
+{
+ struct cache_entry *ce;
+ unsigned int xid;
+ char *path;
+ int rc = 0;
- if (!needs_refresh) {
- rc = 0;
+ xid = get_xid();
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
goto out;
}
@@ -1217,29 +1183,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
goto out;
}
- ce = cache_refresh_path(xid, ses, path, true);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &new_tl);
+ ce = cache_refresh_path(xid, ses, path, false);
+ if (!IS_ERR(ce))
up_read(&htable_rw_lock);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
- }
+ else
+ rc = PTR_ERR(ce);
out:
free_xid(xid);
- dfs_cache_free_tgts(&old_tl);
- dfs_cache_free_tgts(&new_tl);
kfree(path);
}
-static inline void refresh_ses_referral(struct cifs_ses *ses)
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+ const char *path,
+ struct dfs_info3_param *refs,
+ int numrefs, bool force_refresh)
{
- __refresh_ses_referral(ses, false);
+ struct cache_entry *ce;
+ bool reconnect = force_refresh;
+ int rc = 0;
+ int i;
+
+ if (unlikely(!numrefs))
+ return 0;
+
+ if (force_refresh) {
+ for (i = 0; i < numrefs; i++) {
+ /* TODO: include prefix paths in the matching */
+ if (target_share_equal(tcon, refs[i].node_name)) {
+ reconnect = false;
+ break;
+ }
+ }
+ }
+
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (force_refresh || cache_entry_expired(ce))
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ } else if (PTR_ERR(ce) == -ENOENT) {
+ ce = add_cache_entry_locked(refs, numrefs);
+ }
+ up_write(&htable_rw_lock);
+
+ if (IS_ERR(ce))
+ rc = PTR_ERR(ce);
+ if (reconnect) {
+ cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+ cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+ }
+ return rc;
}
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
{
- __refresh_ses_referral(ses, true);
+ struct dfs_info3_param *refs = NULL;
+ struct cache_entry *ce;
+ struct cifs_ses *ses;
+ unsigned int xid;
+ bool needs_refresh;
+ char *path;
+ int numrefs = 0;
+ int rc = 0;
+
+ xid = get_xid();
+ ses = tcon->ses;
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!needs_refresh) {
+ up_read(&htable_rw_lock);
+ goto out;
+ }
+ up_read(&htable_rw_lock);
+
+ ses = CIFS_DFS_ROOT_SES(ses);
+ if (!is_ses_good(ses)) {
+ cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+ __func__);
+ goto out;
+ }
+
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ if (!rc) {
+ rc = __refresh_tcon_referral(tcon, path, refs,
+ numrefs, force_refresh);
+ }
+
+out:
+ free_xid(xid);
+ kfree(path);
+ free_dfs_info_array(refs, numrefs);
}
/**
@@ -1280,7 +1323,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- force_refresh_ses_referral(tcon->ses);
+ refresh_tcon_referral(tcon, true);
return 0;
}
@@ -1292,8 +1335,9 @@ void dfs_cache_refresh(struct work_struct *work)
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+ list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
refresh_ses_referral(ses);
+ refresh_tcon_referral(tcon, false);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index cf577ec0dd0a..69f9d938b336 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -284,6 +284,7 @@ struct smb3_fs_context {
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
enum cifs_reparse_type reparse_type;
+ bool dfs_conn:1; /* set for dfs mounts */
};
extern const struct fs_parameter_spec smb3_fs_parameters[];
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 331a86074ae7..647f9bedd9fc 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -834,10 +834,6 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
fattr->cf_dtype = DT_REG;
- /* clear write bits if ATTR_READONLY is set */
- if (fattr->cf_cifsattrs & ATTR_READONLY)
- fattr->cf_mode &= ~(S_IWUGO);
-
/*
* Don't accept zero nlink from non-unix servers unless
* delete is pending. Instead mark it as unknown.
@@ -850,6 +846,10 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
}
}
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
out_reparse:
if (S_ISLNK(fattr->cf_mode)) {
if (likely(data->symlink_target))
@@ -1267,11 +1267,14 @@ handle_mnt_opt:
__func__, rc);
goto out;
}
- }
-
- /* fill in remaining high mode bits e.g. SUID, VTX */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ /* fill in remaining high mode bits e.g. SUID, VTX */
cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
+ else if (!(tcon->posix_extensions))
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index dab526191b07..054f10ebf65a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -145,6 +145,9 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
mutex_init(&ret_buf->fscache_lock);
#endif
trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
return ret_buf;
}
@@ -1108,7 +1111,8 @@ static void tcon_super_cb(struct super_block *sb, void *arg)
t2 = cifs_sb_master_tcon(cifs_sb);
spin_lock(&t2->tc_lock);
- if (t1->ses == t2->ses &&
+ if ((t1->ses == t2->ses ||
+ t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
t1->ses->server == t2->ses->server &&
t2->origin_fullpath &&
dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 4a517b280f2b..0f788031b740 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -240,7 +240,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
ctx->source = NULL;
goto out;
}
- ctx->dfs_automount = is_dfs_mount(mntpt);
+ ctx->dfs_automount = ctx->dfs_conn = is_dfs_mount(mntpt);
cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
__func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 48c27581ec51..3b48a093cfb1 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -108,8 +108,8 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
buf->InodeType = cpu_to_le64(type);
buf->ReparseDataLength = cpu_to_le16(len + dlen -
sizeof(struct reparse_data_buffer));
- *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MAJOR(dev) << 32) |
- MINOR(dev));
+ *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MINOR(dev) << 32) |
+ MAJOR(dev));
iov->iov_base = buf;
iov->iov_len = len + dlen;
return 0;
@@ -468,7 +468,7 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
- fattr->cf_rdev = wsl_mkdev(v);
+ fattr->cf_rdev = reparse_mkdev(v);
} while (next);
out:
fattr->cf_dtype = S_DT(fattr->cf_mode);
@@ -485,11 +485,11 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
switch (le64_to_cpu(buf->InodeType)) {
case NFS_SPECFILE_CHR:
fattr->cf_mode |= S_IFCHR;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_BLK:
fattr->cf_mode |= S_IFBLK;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_FIFO:
fattr->cf_mode |= S_IFIFO;
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 2c0644bc4e65..158e7b7aae64 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -18,14 +18,7 @@
*/
#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
-static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
-{
- u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
-
- return MKDEV(v >> 32, v & 0xffffffff);
-}
-
-static inline dev_t wsl_mkdev(void *ptr)
+static inline dev_t reparse_mkdev(void *ptr)
{
u64 v = le64_to_cpu(*(__le64 *)ptr);
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 3216f786908f..03c0b484a4b5 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -624,7 +624,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
* to sign packets before we generate the channel signing key
* (we sign with the session key)
*/
- rc = smb311_crypto_shash_allocate(chan->server);
+ rc = smb3_crypto_shash_allocate(chan->server);
if (rc) {
cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
mutex_unlock(&ses->session_mutex);
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index f3c4b70b77b9..bdeb12ff53e3 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -906,41 +906,41 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
|| (hdr->Status !=
cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
return 0;
-
ok:
- rc = smb311_crypto_shash_allocate(server);
- if (rc)
+ rc = cifs_alloc_hash("sha512", &sha512);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not allocate SHA512 shash, rc=%d\n", __func__, rc);
return rc;
+ }
- sha512 = server->secmech.sha512;
rc = crypto_shash_init(sha512);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not init SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not update SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
for (i = 0; i < nvec; i++) {
rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
- __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not update SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
}
rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
if (rc) {
- cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
- __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not finalize SHA12 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
+err_free:
+ cifs_free_hash(&sha512);
return 0;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 7381ec333c6d..177173072bfa 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4309,7 +4309,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
*/
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
- struct smb_rqst *rqst, int enc)
+ struct smb_rqst *rqst, int enc, struct crypto_aead *tfm)
{
struct smb2_transform_hdr *tr_hdr =
(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
@@ -4320,8 +4320,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req;
u8 *iv;
- DECLARE_CRYPTO_WAIT(wait);
- struct crypto_aead *tfm;
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
size_t sensitive_size;
@@ -4333,14 +4331,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
- rc = smb3_crypto_aead_allocate(server);
- if (rc) {
- cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
- return rc;
- }
-
- tfm = enc ? server->secmech.enc : server->secmech.dec;
-
if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
@@ -4380,11 +4370,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
aead_request_set_crypt(req, sg, sg, crypt_len, iv);
aead_request_set_ad(req, assoc_data_len);
- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
-
- rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
- : crypto_aead_decrypt(req), &wait);
+ rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
@@ -4526,7 +4512,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
/* fill the 1st iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
- rc = crypt_message(server, num_rqst, new_rq, 1);
+ rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc);
cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
if (rc)
goto err_free;
@@ -4551,8 +4537,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
unsigned int buf_data_size, struct iov_iter *iter,
bool is_offloaded)
{
- struct kvec iov[2];
+ struct crypto_aead *tfm;
struct smb_rqst rqst = {NULL};
+ struct kvec iov[2];
size_t iter_size = 0;
int rc;
@@ -4568,9 +4555,31 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
iter_size = iov_iter_count(iter);
}
- rc = crypt_message(server, 1, &rqst, 0);
+ if (is_offloaded) {
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ else
+ tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc);
+
+ return rc;
+ }
+ } else {
+ if (unlikely(!server->secmech.dec))
+ return -EIO;
+
+ tfm = server->secmech.dec;
+ }
+
+ rc = crypt_message(server, 1, &rqst, 0, tfm);
cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
+ if (is_offloaded)
+ crypto_free_aead(tfm);
+
if (rc)
return rc;
@@ -4869,9 +4878,12 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
goto discard_data;
server->total_read += rc;
- if (rc < len)
- iov_iter_zero(len - rc, &iter);
- iov_iter_revert(&iter, len);
+ if (rc < len) {
+ struct iov_iter tmp = iter;
+
+ iov_iter_advance(&tmp, rc);
+ iov_iter_zero(len - rc, &tmp);
+ }
iov_iter_truncate(&iter, dw->len);
rc = cifs_discard_remaining_data(server);
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 2cb1bf65a172..02828b9c3cb3 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1266,6 +1266,16 @@ SMB2_negotiate(const unsigned int xid,
else
cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
+
+ if (server->cipher_type && !rc) {
+ if (!SERVER_IS_CHAN(server)) {
+ rc = smb3_crypto_aead_allocate(server);
+ } else {
+ /* For channels, just reuse the primary server crypto secmech. */
+ server->secmech.enc = server->primary_server->secmech.enc;
+ server->secmech.dec = server->primary_server->secmech.dec;
+ }
+ }
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -4866,7 +4876,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
#endif
if (result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
- trace_smb3_write_err(wdata->xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->subreq.start,
wdata->subreq.len, wdata->result);
@@ -4874,7 +4886,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
pr_warn_once("Out of space writing to %s\n",
tcon->tree_name);
} else
- trace_smb3_write_done(0 /* no xid */,
+ trace_smb3_write_done(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
wdata->subreq.start, wdata->subreq.len);
@@ -4952,7 +4966,9 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(wdata->xid,
+ trace_smb3_write_enter(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5032,7 +5048,9 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
wdata, flags, &wdata->credits);
/* Can't touch wdata if rc == 0 */
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5112,7 +5130,7 @@ replay_again:
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(xid, io_parms->persistent_fid,
+ trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
io_parms->tcon->tid, io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length);
@@ -5133,7 +5151,7 @@ replay_again:
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5142,7 +5160,7 @@ replay_again:
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
- trace_smb3_write_done(xid,
+ trace_smb3_write_done(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index c7e1b149877a..56a896ff7cd9 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -291,7 +291,7 @@ extern int smb2_validate_and_copy_iov(unsigned int offset,
extern void smb2_copy_fs_info_to_kstatfs(
struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst);
-extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
+extern int smb3_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct TCP_Server_Info *server,
struct kvec *iov, int nvec);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index e4636fca821d..f7e04c40d22e 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -26,8 +26,7 @@
#include "../common/smb2status.h"
#include "smb2glob.h"
-static int
-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
+int smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
{
struct cifs_secmech *p = &server->secmech;
int rc;
@@ -46,33 +45,6 @@ err:
return rc;
}
-int
-smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
-{
- struct cifs_secmech *p = &server->secmech;
- int rc = 0;
-
- rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
- if (rc)
- return rc;
-
- rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
- if (rc)
- goto err;
-
- rc = cifs_alloc_hash("sha512", &p->sha512);
- if (rc)
- goto err;
-
- return 0;
-
-err:
- cifs_free_hash(&p->aes_cmac);
- cifs_free_hash(&p->hmacsha256);
- return rc;
-}
-
-
static
int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
{
@@ -242,7 +214,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
if (unlikely(!ses)) {
- cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
+ cifs_server_dbg(FYI, "%s: Could not find session\n", __func__);
return -ENOENT;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 8e9964001e2a..0b52d22a91a0 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -157,6 +157,7 @@ DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len, rc))
DEFINE_SMB3_RW_ERR_EVENT(read_err);
+DEFINE_SMB3_RW_ERR_EVENT(write_err);
/* For logging errors in other file I/O ops */
DECLARE_EVENT_CLASS(smb3_other_err_class,
@@ -202,7 +203,6 @@ DEFINE_EVENT(smb3_other_err_class, smb3_##name, \
int rc), \
TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
-DEFINE_SMB3_OTHER_ERR_EVENT(write_err);
DEFINE_SMB3_OTHER_ERR_EVENT(query_dir_err);
DEFINE_SMB3_OTHER_ERR_EVENT(zero_err);
DEFINE_SMB3_OTHER_ERR_EVENT(falloc_err);
@@ -370,6 +370,8 @@ DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
DEFINE_SMB3_RW_DONE_EVENT(read_enter);
DEFINE_SMB3_RW_DONE_EVENT(read_done);
+DEFINE_SMB3_RW_DONE_EVENT(write_enter);
+DEFINE_SMB3_RW_DONE_EVENT(write_done);
/* For logging successful other op */
DECLARE_EVENT_CLASS(smb3_other_done_class,
@@ -411,11 +413,9 @@ DEFINE_EVENT(smb3_other_done_class, smb3_##name, \
__u32 len), \
TP_ARGS(xid, fid, tid, sesid, offset, len))
-DEFINE_SMB3_OTHER_DONE_EVENT(write_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_enter);
-DEFINE_SMB3_OTHER_DONE_EVENT(write_done);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_done);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_done);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_done);
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index fd5a85d43759..91812150186c 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -1817,11 +1817,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = data_len; /* An RDMA read is already done. */
else
#endif
- {
length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
data_len);
- iov_iter_revert(&rdata->subreq.io_iter, data_len);
- }
if (length > 0)
rdata->got_bytes += length;
server->total_read += length;
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index c769f9dbc0b4..9f272cc8f566 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -6,7 +6,7 @@
* Note that, due to trying to use names similar to the protocol specifications,
* there are many mixed case field names in the structures below. Although
* this does not match typical Linux kernel style, it is necessary to be
- * able to match against the protocol specfication.
+ * able to match against the protocol specification.
*
* SMB2 commands
* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
@@ -491,7 +491,7 @@ struct smb2_encryption_neg_context {
__le16 ContextType; /* 2 */
__le16 DataLength;
__le32 Reserved;
- /* CipherCount usally 2, but can be 3 when AES256-GCM enabled */
+ /* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
__le16 CipherCount; /* AES128-GCM and AES128-CCM by default */
__le16 Ciphers[];
} __packed;
@@ -1061,7 +1061,7 @@ struct smb2_server_client_notification {
#define IL_IMPERSONATION cpu_to_le32(0x00000002)
#define IL_DELEGATE cpu_to_le32(0x00000003)
-/* File Attrubutes */
+/* File Attributes */
#define FILE_ATTRIBUTE_READONLY 0x00000001
#define FILE_ATTRIBUTE_HIDDEN 0x00000002
#define FILE_ATTRIBUTE_SYSTEM 0x00000004
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index cac80e7bfefc..aa2a37a7ce84 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -25,7 +25,7 @@ DECLARE_RWSEM(conn_list_lock);
/**
* ksmbd_conn_free() - free resources of the connection instance
*
- * @conn: connection instance to be cleand up
+ * @conn: connection instance to be cleaned up
*
* During the thread termination, the corresponding conn instance
* resources(sock/memory) are released and finally the conn object is freed.
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index f4e55199938d..38e6fd2da3b8 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -213,7 +213,7 @@ struct ksmbd_tree_connect_response {
};
/*
- * IPC Request struture to disconnect tree connection.
+ * IPC Request structure to disconnect tree connection.
*/
struct ksmbd_tree_disconnect_request {
__u64 session_id; /* session id */
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 246cde380dfb..4142c7ad5fa9 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -796,7 +796,7 @@ out:
/**
* smb2_lease_break_noti() - break lease when a new client request
* write lease
- * @opinfo: conains lease state information
+ * @opinfo: contains lease state information
*
* Return: 0 on success, otherwise error
*/
@@ -1484,7 +1484,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
}
/**
- * parse_lease_state() - parse lease context containted in file open request
+ * parse_lease_state() - parse lease context contained in file open request
* @open_req: buffer containing smb2 file open(create) request
*
* Return: allocated lease context object on success, otherwise NULL
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index c402d4abe826..231d2d224656 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -279,7 +279,7 @@ static void handle_ksmbd_work(struct work_struct *wk)
/**
* queue_ksmbd_work() - queue a smb request to worker thread queue
- * for proccessing smb command and sending response
+ * for processing smb command and sending response
* @conn: connection instance
*
* read remaining data from socket create and submit work.
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index e6bdc1b20727..7460089c186f 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1335,8 +1335,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
return rc;
sz = le16_to_cpu(rsp->SecurityBufferOffset);
- chgblob =
- (struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz);
+ chgblob = (struct challenge_message *)rsp->Buffer;
memset(chgblob, 0, sizeof(struct challenge_message));
if (!work->conn->use_spnego) {
@@ -1369,9 +1368,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
goto out;
}
- sz = le16_to_cpu(rsp->SecurityBufferOffset);
- unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len,
- /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
+ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
out:
@@ -1453,10 +1450,7 @@ static int ntlm_authenticate(struct ksmbd_work *work,
if (rc)
return -ENOMEM;
- sz = le16_to_cpu(rsp->SecurityBufferOffset);
- unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob,
- spnego_blob_len,
- /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
+ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
kfree(spnego_blob);
}
@@ -2058,18 +2052,20 @@ out_err1:
* @access: file access flags
* @disposition: file disposition flags
* @may_flags: set with MAY_ flags
- * @is_dir: is creating open flags for directory
+ * @coptions: file creation options
+ * @mode: file mode
*
* Return: file open flags
*/
static int smb2_create_open_flags(bool file_present, __le32 access,
__le32 disposition,
int *may_flags,
- bool is_dir)
+ __le32 coptions,
+ umode_t mode)
{
int oflags = O_NONBLOCK | O_LARGEFILE;
- if (is_dir) {
+ if (coptions & FILE_DIRECTORY_FILE_LE || S_ISDIR(mode)) {
access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
ksmbd_debug(SMB, "Discard write access to a directory\n");
}
@@ -2086,7 +2082,7 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
*may_flags = MAY_OPEN | MAY_READ;
}
- if (access == FILE_READ_ATTRIBUTES_LE)
+ if (access == FILE_READ_ATTRIBUTES_LE || S_ISBLK(mode) || S_ISCHR(mode))
oflags |= O_PATH;
if (file_present) {
@@ -3181,8 +3177,8 @@ int smb2_open(struct ksmbd_work *work)
open_flags = smb2_create_open_flags(file_present, daccess,
req->CreateDisposition,
&may_flags,
- req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
- (file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
+ req->CreateOptions,
+ file_present ? d_inode(path.dentry)->i_mode : 0);
if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
if (open_flags & (O_CREAT | O_TRUNC)) {
@@ -3531,8 +3527,9 @@ int smb2_open(struct ksmbd_work *work)
memcpy(fp->create_guid, dh_info.CreateGuid,
SMB2_CREATE_GUID_SIZE);
if (dh_info.timeout)
- fp->durable_timeout = min(dh_info.timeout,
- DURABLE_HANDLE_MAX_TIMEOUT);
+ fp->durable_timeout =
+ min_t(unsigned int, dh_info.timeout,
+ DURABLE_HANDLE_MAX_TIMEOUT);
else
fp->durable_timeout = 60;
}
@@ -4586,7 +4583,7 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
- if (le32_to_cpu(req->InputBufferLength) <
+ if (le32_to_cpu(req->InputBufferLength) <=
sizeof(struct smb2_ea_info_req))
return -EINVAL;
@@ -8090,7 +8087,7 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
}
- if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+ if (in_buf_len <= sizeof(struct copychunk_ioctl_req)) {
ret = -EINVAL;
goto out;
}
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 3be7d5ae65a8..73aff20e22d0 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -194,7 +194,7 @@ struct copychunk_ioctl_req {
__le64 ResumeKey[3];
__le32 ChunkCount;
__le32 Reserved;
- __u8 Chunks[1]; /* array of srv_copychunk */
+ __u8 Chunks[]; /* array of srv_copychunk */
} __packed;
struct srv_copychunk {
@@ -370,7 +370,7 @@ struct smb2_file_attr_tag_info {
struct smb2_ea_info_req {
__le32 NextEntryOffset;
__u8 EaNameLength;
- char name[1];
+ char name[];
} __packed; /* level 15 Query */
struct smb2_ea_info {
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index cc4bb2377cbd..5b8d75e78ffb 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -488,7 +488,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
* @shortname: destination short filename
*
* Return: shortname length or 0 when source long name is '.' or '..'
- * TODO: Though this function comforms the restriction of 8.3 Filename spec,
+ * TODO: Though this function conforms the restriction of 8.3 Filename spec,
* but the result is different with Windows 7's one. need to check.
*/
int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index b0f6d0f94cb8..5bbb179736c2 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -100,8 +100,8 @@ struct ksmbd_file {
struct list_head blocked_works;
struct list_head lock_list;
- int durable_timeout;
- int durable_scavenger_timeout;
+ unsigned int durable_timeout;
+ unsigned int durable_scavenger_timeout;
/* if ls is happening on directory, below is valid*/
struct ksmbd_readdir_data readdir_data;
diff --git a/fs/smb/server/xattr.h b/fs/smb/server/xattr.h
index fa3e27d6971b..505101a8104c 100644
--- a/fs/smb/server/xattr.h
+++ b/fs/smb/server/xattr.h
@@ -99,7 +99,7 @@ struct xattr_ntacl {
__u8 posix_acl_hash[XATTR_SD_HASH_SIZE]; /* 64bytes hash for posix acl */
};
-/* DOS ATTRIBUITE XATTR PREFIX */
+/* DOS ATTRIBUTE XATTR PREFIX */
#define DOS_ATTRIBUTE_PREFIX "DOSATTRIB"
#define DOS_ATTRIBUTE_PREFIX_LEN (sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
#define XATTR_NAME_DOS_ATTRIBUTE (XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index d91cec93d968..5cc69beaa62e 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2807,7 +2807,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
/**
@@ -2952,7 +2951,6 @@ static const struct file_operations dfs_global_fops = {
.read = dfs_global_file_read,
.write = dfs_global_file_write,
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
/**
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 19ec49a9179b..eeadbaeccf88 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -919,6 +919,10 @@
#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
+#define RUNTIME_CONST_VARIABLES \
+ RUNTIME_CONST(shift, d_hash_shift) \
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
diff --git a/include/linux/einj-cxl.h b/include/cxl/einj.h
index 624ff6ff41f9..624ff6ff41f9 100644
--- a/include/linux/einj-cxl.h
+++ b/include/cxl/einj.h
diff --git a/include/linux/cxl-event.h b/include/cxl/event.h
index 0bea1afbd747..0bea1afbd747 100644
--- a/include/linux/cxl-event.h
+++ b/include/cxl/event.h
diff --git a/include/cxl/mailbox.h b/include/cxl/mailbox.h
new file mode 100644
index 000000000000..bacd111e75f1
--- /dev/null
+++ b/include/cxl/mailbox.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. */
+#ifndef __CXL_MBOX_H__
+#define __CXL_MBOX_H__
+#include <linux/rcuwait.h>
+
+struct cxl_mbox_cmd;
+
+/**
+ * struct cxl_mailbox - context for CXL mailbox operations
+ * @host: device that hosts the mailbox
+ * @payload_size: Size of space for payload
+ * (CXL 3.1 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: mutex protects device mailbox and firmware
+ * @mbox_wait: rcuwait for mailbox
+ * @mbox_send: @dev specific transport for transmitting mailbox commands
+ */
+struct cxl_mailbox {
+ struct device *host;
+ size_t payload_size;
+ struct mutex mbox_mutex; /* lock to protect mailbox context */
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd);
+};
+
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);
+
+#endif
diff --git a/include/dt-bindings/clock/cirrus,ep9301-syscon.h b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
new file mode 100644
index 000000000000..6bb8f532e7d0
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+#define DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+
+#define EP93XX_CLK_PLL1 0
+#define EP93XX_CLK_PLL2 1
+
+#define EP93XX_CLK_FCLK 2
+#define EP93XX_CLK_HCLK 3
+#define EP93XX_CLK_PCLK 4
+
+#define EP93XX_CLK_UART 5
+#define EP93XX_CLK_SPI 6
+#define EP93XX_CLK_PWM 7
+#define EP93XX_CLK_USB 8
+
+#define EP93XX_CLK_M2M0 9
+#define EP93XX_CLK_M2M1 10
+
+#define EP93XX_CLK_M2P0 11
+#define EP93XX_CLK_M2P1 12
+#define EP93XX_CLK_M2P2 13
+#define EP93XX_CLK_M2P3 14
+#define EP93XX_CLK_M2P4 15
+#define EP93XX_CLK_M2P5 16
+#define EP93XX_CLK_M2P6 17
+#define EP93XX_CLK_M2P7 18
+#define EP93XX_CLK_M2P8 19
+#define EP93XX_CLK_M2P9 20
+
+#define EP93XX_CLK_UART1 21
+#define EP93XX_CLK_UART2 22
+#define EP93XX_CLK_UART3 23
+
+#define EP93XX_CLK_ADC 24
+#define EP93XX_CLK_ADC_EN 25
+
+#define EP93XX_CLK_KEYPAD 26
+
+#define EP93XX_CLK_VIDEO 27
+
+#define EP93XX_CLK_I2S_MCLK 28
+#define EP93XX_CLK_I2S_SCLK 29
+#define EP93XX_CLK_I2S_LRCLK 30
+
+#endif /* DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H */
diff --git a/include/dt-bindings/iio/adi,ad4695.h b/include/dt-bindings/iio/adi,ad4695.h
new file mode 100644
index 000000000000..9fbef542bf67
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad4695.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD4695_H
+#define _DT_BINDINGS_ADI_AD4695_H
+
+#define AD4695_COMMON_MODE_REFGND 0xFF
+#define AD4695_COMMON_MODE_COM 0xFE
+
+#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8937.h b/include/dt-bindings/interconnect/qcom,msm8937.h
new file mode 100644
index 000000000000..98b8a4637aab
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8937.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8937 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB_HS1 3
+#define MAS_XI_USB_HS1 4
+#define MAS_CRYPTO 5
+#define MAS_SDCC_1 6
+#define MAS_SDCC_2 7
+#define MAS_SNOC_PCNOC 8
+#define PCNOC_M_0 9
+#define PCNOC_M_1 10
+#define PCNOC_INT_0 11
+#define PCNOC_INT_1 12
+#define PCNOC_INT_2 13
+#define PCNOC_INT_3 14
+#define PCNOC_S_0 15
+#define PCNOC_S_1 16
+#define PCNOC_S_2 17
+#define PCNOC_S_3 18
+#define PCNOC_S_4 19
+#define PCNOC_S_6 20
+#define PCNOC_S_7 21
+#define PCNOC_S_8 22
+#define SLV_SDCC_2 23
+#define SLV_SPDM 24
+#define SLV_PDM 25
+#define SLV_PRNG 26
+#define SLV_TCSR 27
+#define SLV_SNOC_CFG 28
+#define SLV_MESSAGE_RAM 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_TLMM 34
+#define SLV_BLSP_1 35
+#define SLV_BLSP_2 36
+#define SLV_PMIC_ARB 37
+#define SLV_SDCC_1 38
+#define SLV_CRYPTO_0_CFG 39
+#define SLV_USB_HS 40
+#define SLV_TCU 41
+#define SLV_PCNOC_SNOC 42
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define QDSS_INT 4
+#define SNOC_INT_0 5
+#define SNOC_INT_1 6
+#define SNOC_INT_2 7
+#define SLV_KPSS_AHB 8
+#define SLV_WCSS 9
+#define SLV_SNOC_BIMC_1 10
+#define SLV_IMEM 11
+#define SLV_SNOC_PCNOC 12
+#define SLV_QDSS_STM 13
+#define SLV_CATS_1 14
+#define SLV_LPASS 15
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8976.h b/include/dt-bindings/interconnect/qcom,msm8976.h
new file mode 100644
index 000000000000..4ea90f22320e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8976.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8976 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_SMMNOC_BIMC 1
+#define MAS_SNOC_BIMC 2
+#define MAS_TCU_0 3
+#define SLV_EBI 4
+#define SLV_BIMC_SNOC 5
+
+/* PCNOC fabric */
+#define MAS_USB_HS2 0
+#define MAS_BLSP_1 1
+#define MAS_USB_HS1 2
+#define MAS_BLSP_2 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SDCC_3 7
+#define MAS_SNOC_PCNOC 8
+#define MAS_LPASS_AHB 9
+#define MAS_SPDM 10
+#define MAS_DEHR 11
+#define MAS_XM_USB_HS1 12
+#define PCNOC_M_0 13
+#define PCNOC_M_1 14
+#define PCNOC_INT_0 15
+#define PCNOC_INT_1 16
+#define PCNOC_INT_2 17
+#define PCNOC_S_1 18
+#define PCNOC_S_2 19
+#define PCNOC_S_3 20
+#define PCNOC_S_4 21
+#define PCNOC_S_8 22
+#define PCNOC_S_9 23
+#define SLV_TCSR 24
+#define SLV_TLMM 25
+#define SLV_CRYPTO_0_CFG 26
+#define SLV_MESSAGE_RAM 27
+#define SLV_PDM 28
+#define SLV_PRNG 29
+#define SLV_PMIC_ARB 30
+#define SLV_SNOC_CFG 31
+#define SLV_DCC_CFG 32
+#define SLV_CAMERA_SS_CFG 33
+#define SLV_DISP_SS_CFG 34
+#define SLV_VENUS_CFG 35
+#define SLV_SDCC_1 36
+#define SLV_BLSP_1 37
+#define SLV_USB_HS 38
+#define SLV_SDCC_3 39
+#define SLV_SDCC_2 40
+#define SLV_GPU_CFG 41
+#define SLV_USB_HS2 42
+#define SLV_BLSP_2 43
+#define SLV_PCNOC_SNOC 44
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define MAS_LPASS_PROC 4
+#define MAS_IPA 5
+#define QDSS_INT 6
+#define SNOC_INT_0 7
+#define SNOC_INT_1 8
+#define SNOC_INT_2 9
+#define SLV_KPSS_AHB 10
+#define SLV_SNOC_BIMC 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_0 15
+#define SLV_CATS_1 16
+#define SLV_LPASS 17
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_OXILI 1
+#define MAS_MDP0 2
+#define MAS_MDP1 3
+#define MAS_VENUS_0 4
+#define MAS_VENUS_1 5
+#define MAS_VFE_0 6
+#define MAS_VFE_1 7
+#define MAS_CPP 8
+#define MM_INT_0 9
+#define SLV_SMMNOC_BIMC 10
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H */
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
index c7f7ed315aeb..2282f93607bc 100644
--- a/include/dt-bindings/interconnect/qcom,sm8350.h
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -119,9 +119,6 @@
#define SLAVE_SERVICE_GEM_NOC_1 16
#define SLAVE_SERVICE_GEM_NOC_2 17
#define SLAVE_SERVICE_GEM_NOC 18
-#define MASTER_MNOC_HF_MEM_NOC_DISP 19
-#define MASTER_MNOC_SF_MEM_NOC_DISP 20
-#define SLAVE_LLCC_DISP 21
#define MASTER_CNOC_LPASS_AG_NOC 0
#define SLAVE_LPASS_CORE_CFG 1
@@ -133,8 +130,6 @@
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
-#define MASTER_LLCC_DISP 2
-#define SLAVE_EBI1_DISP 3
#define MASTER_CAMNOC_HF 0
#define MASTER_CAMNOC_ICP 1
@@ -149,11 +144,6 @@
#define SLAVE_MNOC_HF_MEM_NOC 10
#define SLAVE_MNOC_SF_MEM_NOC 11
#define SLAVE_SERVICE_MNOC 12
-#define MASTER_MDP0_DISP 13
-#define MASTER_MDP1_DISP 14
-#define MASTER_ROTATOR_DISP 15
-#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
-#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
#define MASTER_CDSP_NOC_CFG 0
#define MASTER_CDSP_PROC 1
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1655c4c23a78..4d5ee84c468b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -363,6 +363,7 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index e4004d1e6725..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -61,14 +61,8 @@ int attribute_container_device_trigger_safe(struct device *dev,
int (*undo)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 662b8ae54b6a..31762324bcc9 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -271,6 +271,6 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d3b66d77df7a..262b6596eca5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -203,12 +203,12 @@ unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
* the bit offset of all zero areas this function finds is multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
@@ -228,7 +228,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -238,7 +238,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
memset(dst, 0, len);
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -248,8 +248,8 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -262,8 +262,8 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
/*
* Copy bitmap and clear tail bits in last word.
*/
-static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
@@ -318,16 +318,18 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
-static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -335,8 +337,9 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -344,16 +347,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -368,8 +372,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline bool bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -388,10 +392,9 @@ static inline bool bitmap_equal(const unsigned long *src1,
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
-static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
@@ -399,9 +402,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline bool bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2,
- unsigned int nbits)
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -409,8 +411,8 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline bool bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -418,7 +420,8 @@ static inline bool bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -426,7 +429,8 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -460,8 +464,8 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
@@ -476,8 +480,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
@@ -492,8 +496,9 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -501,8 +506,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -510,11 +516,12 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
@@ -557,8 +564,9 @@ static inline void bitmap_replace(unsigned long *dst,
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
* See bitmap_scatter() for details related to this relationship.
*/
-static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -611,8 +619,9 @@ static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_scatter(res, result, mask, n) will lead to the same res value.
*/
-static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -623,9 +632,9 @@ static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
__assign_bit(n++, dst, test_bit(bit, src));
}
-static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
@@ -640,7 +649,8 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
-static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
@@ -656,7 +666,8 @@ static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
@@ -680,7 +691,8 @@ static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
@@ -734,7 +746,7 @@ static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bi
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
@@ -749,9 +761,8 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
-static inline unsigned long bitmap_read(const unsigned long *map,
- unsigned long start,
- unsigned long nbits)
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
@@ -784,8 +795,9 @@ static inline unsigned long bitmap_read(const unsigned long *map,
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_write(unsigned long *map, unsigned long value,
- unsigned long start, unsigned long nbits)
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 0eb24d21aac2..60044b608817 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __GENMASK_U128() depends on _BIT128() which would not work
+ * in the asm code, as it shifts an 'unsigned __init128' data
+ * type instead of direct representation of 128 bit constants
+ * such as long and unsigned long. The fundamental problem is
+ * that a 128 bit constant will get silently truncated by the
+ * gcc compiler.
+ */
+#define GENMASK_U128(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
+#endif
+
#endif /* __LINUX_BITS_H */
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index de98049b7ded..676f8f860c47 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -25,9 +25,10 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -96,12 +97,18 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
{
return 0;
}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
struct scatterlist *s)
{
return 0;
}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes,
+ u32 seed)
+{
+ return -EINVAL;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8d304b1d16b1..4fecf46ef681 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -149,10 +149,7 @@ struct request {
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
unsigned short nr_integrity_segments;
-#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *crypt_ctx;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 413ebdff974b..dce7615c35e7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -251,11 +251,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 643c9020a35a..50c3b959da28 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -968,8 +968,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
/*
* Access functions for manipulating queue properties
*/
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0c3893c47171..19d8ca8ac960 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2246,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f66f6aac74f6..d7941478158c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -449,8 +449,6 @@ extern int ceph_osdc_init(struct ceph_osd_client *osdc,
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index a3d3e888cf1f..038b2d523bf8 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -4,6 +4,142 @@
#include <linux/compiler.h>
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe the lock is held for the remainder of the "if ()" block not
+ * the remainder of "func()".
+ *
+ * Now, when a function uses both __free() and guard(), or multiple
+ * instances of __free(), the LIFO order of variable definition order
+ * matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the file. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ * LIST_HEAD_INIT(&obj->node);
+ * list_add(obj->node, &list):
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
/*
* DEFINE_FREE(name, type, free):
* simple helper macro that defines the required wrapper for a __free()
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index ec55bcce4146..4d4e23b6e3e7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
#else /* !CONFIG_OBJTOOL */
#define annotate_reachable()
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index edeb8532ce0f..45e598fe3476 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -42,7 +42,7 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
-extern int do_coredump(const kernel_siginfo_t *siginfo);
+extern void do_coredump(const kernel_siginfo_t *siginfo);
/*
* Logging for the coredump code, ratelimited.
@@ -62,11 +62,7 @@ extern int do_coredump(const kernel_siginfo_t *siginfo);
#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
#else
-static inline int do_coredump(const kernel_siginfo_t *siginfo)
-{
- /* Coredump support is not available, can't fail. */
- return 0;
-}
+static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#define coredump_report(...)
#define coredump_report_failure(...)
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 51ac441a37c3..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
- * [59:08] - Unused (SBZ)
- * [63:60] - Version
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
-#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
-#define CS_AUX_HW_ID_CURR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
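+
+/*
+ * Illustrative sketch (not part of this change): a trace source could
+ * compose the payload with the bitfield helpers from <linux/bitfield.h>;
+ * sink_id and trace_id below are hypothetical locals:
+ *
+ *	u64 hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK, CS_AUX_HW_ID_MAJOR_VERSION) |
+ *		    FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK, CS_AUX_HW_ID_MINOR_VERSION) |
+ *		    FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, sink_id) |
+ *		    FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
+ */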
#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f09ace92176e..c13342594278 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -218,6 +218,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
+/* architecturally we have 128 IDs, some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * struct coresight_trace_id_map - Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ spinlock_t lock;
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -271,6 +289,7 @@ struct coresight_device {
bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
+ struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
@@ -365,7 +384,7 @@ struct coresight_ops_link {
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode);
+ enum cs_mode mode, struct coresight_trace_id_map *id_map);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 53158de44b83..9278a50d514f 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -30,7 +30,7 @@
extern unsigned int nr_cpu_ids;
#endif
-static inline void set_nr_cpu_ids(unsigned int nr)
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
@@ -149,7 +149,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
*
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -160,7 +160,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -172,7 +172,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
@@ -186,7 +186,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
@@ -201,7 +201,7 @@ unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -213,7 +213,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
@@ -229,7 +229,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -239,18 +240,21 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
-static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
-static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -269,9 +273,9 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
-static inline
+static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -291,7 +295,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
#if NR_CPUS == 1
-static inline
+static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
@@ -394,7 +398,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* Often used to find any cpu but smp_processor_id() in a mask.
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
@@ -414,7 +418,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
*
* Returns >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
unsigned int cpu)
@@ -436,7 +440,8 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
@@ -449,7 +454,7 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -465,7 +470,7 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -508,12 +513,14 @@ unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -557,7 +564,8 @@ static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp,
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -571,7 +579,8 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -585,7 +594,8 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -594,7 +604,7 @@ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
@@ -607,7 +617,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
@@ -620,9 +630,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -634,8 +644,9 @@ static inline bool cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -647,9 +658,9 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -663,9 +674,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -678,8 +689,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
*
* Return: true if the cpumasks are equal, false if not
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -694,9 +705,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
-static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
@@ -710,8 +721,8 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -724,8 +735,8 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline bool cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -737,7 +748,7 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
*
* Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -748,7 +759,7 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
*
* Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -759,7 +770,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*
* Return: count of bits set in *srcp
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -771,8 +782,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -784,8 +795,9 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -796,8 +808,8 @@ static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
@@ -809,8 +821,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -821,8 +833,8 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
@@ -858,8 +870,8 @@ static inline void cpumask_copy(struct cpumask *dstp,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -872,8 +884,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -886,7 +898,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -898,7 +910,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*
* Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -908,7 +920,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*
* Return: size to allocate for a &struct cpumask in bytes
*/
-static inline unsigned int cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
@@ -920,7 +932,7 @@ static inline unsigned int cpumask_size(void)
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-static inline
+static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
@@ -938,13 +950,13 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
*
* Return: %true if allocation succeeded, %false if not
*/
-static inline
+static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
-static inline
+static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
@@ -954,7 +966,7 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
@@ -964,43 +976,43 @@ static inline bool cpumask_available(cpumask_var_t mask)
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
@@ -1058,7 +1070,7 @@ void set_cpu_online(unsigned int cpu, bool online);
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -1073,7 +1085,7 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
@@ -1100,32 +1112,32 @@ static __always_inline unsigned int num_online_cpus(void)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_enabled_mask);
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
@@ -1138,32 +1150,32 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_present_cpus() 1U
#define num_active_cpus() 1U
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
@@ -1197,7 +1209,7 @@ static inline bool cpu_dying(unsigned int cpu)
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
@@ -1220,9 +1232,9 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
@@ -1242,9 +1254,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c9c65b132c0f..0928a6c8ae1e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -57,7 +57,6 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
- .llseek = no_llseek, \
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 53ca3a913d06..8321f65897f3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -524,7 +524,6 @@ int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 807831d6bf0f..cdc4757217f9 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -126,6 +126,9 @@ struct bus_attribute {
int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
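+
+/*
+ * Illustrative sketch (not part of this change): any of the generic
+ * device_match_*() helpers below can be passed wherever a device_match_t
+ * is expected; the "soc" name and the @dev local are hypothetical:
+ *
+ *	dev = bus_find_device(&platform_bus_type, NULL, "soc", device_match_name);
+ */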
+
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
int device_match_of_node(struct device *dev, const void *np);
@@ -139,8 +142,7 @@ int device_match_any(struct device *dev, const void *unused);
int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(const struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
+ const void *data, device_match_t match);
/**
* bus_find_device_by_name - device iterator for locating a particular device
* of a specific name.
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index c576b49c55c2..518c9c83d64b 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -95,7 +95,7 @@ void class_dev_iter_exit(struct class_dev_iter *iter);
int class_for_each_device(const struct class *class, const struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data, int (*match)(struct device *, const void *));
+ const void *data, device_match_t match);
/**
* class_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 1fc8b68786de..5c04b8e3833b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -157,7 +157,7 @@ int __must_check driver_for_each_device(struct device_driver *drv, struct device
void *data, int (*fn)(struct device *dev, void *));
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
/**
* driver_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6bf3c4fe8511..e28d88066033 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -764,8 +764,6 @@ extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 01bee2b289c2..b0b821edfd97 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,7 +19,6 @@
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -28,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -278,7 +278,7 @@ struct node_footer {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
diff --git a/include/linux/find.h b/include/linux/find.h
index 5dfca4225fef..68685714bc18 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -52,7 +52,7 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -81,7 +81,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -112,7 +112,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -142,7 +142,7 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_or_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -171,7 +171,7 @@ unsigned long find_next_or_bit(const unsigned long *addr1,
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -198,7 +198,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -224,7 +224,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number of the N'th set bit.
* If no such, returns >= @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
if (n >= size)
@@ -249,7 +249,7 @@ unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsign
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -276,7 +276,7 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -332,7 +332,7 @@ unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
@@ -357,7 +357,7 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
@@ -381,7 +381,7 @@ unsigned long find_first_and_and_bit(const unsigned long *addr1,
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -402,7 +402,7 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
*
* Returns the bit number of the last set bit, or size.
*/
-static inline
+static __always_inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -425,7 +425,7 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size, unsigned long offset)
@@ -448,7 +448,7 @@ unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit_wrap(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
@@ -465,7 +465,7 @@ unsigned long find_next_bit_wrap(const unsigned long *addr,
* Helper for for_each_set_bit_wrap(). Make sure you're doing right thing
* before using it alone.
*/
-static inline
+static __always_inline
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
unsigned long start, unsigned long n)
{
@@ -506,20 +506,20 @@ extern unsigned long find_next_clump8(unsigned long *clump,
#if defined(__LITTLE_ENDIAN)
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
return find_first_zero_bit(addr, size);
}
@@ -527,7 +527,7 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
-static inline
+static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
@@ -546,7 +546,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
#endif
#ifndef find_first_zero_bit_le
-static inline
+static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -560,7 +560,7 @@ unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
#endif
#ifndef find_next_bit_le
-static inline
+static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index 955680c3bb5f..af871405ae55 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -3,6 +3,12 @@
*
* Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
*/
#ifndef _LINUX_FOLIO_QUEUE_H
@@ -33,6 +39,13 @@ struct folio_queue {
#endif
};
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ *
+ * Initialise a folio queue segment. Note that the folio pointers are
+ * left uninitialised.
+ */
static inline void folioq_init(struct folio_queue *folioq)
{
folio_batch_init(&folioq->vec);
@@ -43,62 +56,155 @@ static inline void folioq_init(struct folio_queue *folioq)
folioq->marks3 = 0;
}
+/**
+ * folioq_nr_slots: Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
{
return PAGEVEC_SIZE;
}
+/**
+ * folioq_count: Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
static inline unsigned int folioq_count(struct folio_queue *folioq)
{
return folio_batch_count(&folioq->vec);
}
+/**
+ * folioq_full: Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
static inline bool folioq_full(struct folio_queue *folioq)
{
//return !folio_batch_space(&folioq->vec);
return folioq_count(folioq) >= folioq_nr_slots(folioq);
}
+/**
+ * folioq_is_marked: Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks);
}
+/**
+ * folioq_mark: Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks);
}
+/**
+ * folioq_unmark: Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks);
}
+/**
+ * folioq_is_marked2: Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_mark2: Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_unmark2: Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_is_marked3: Check third folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the third mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks3);
}
+/**
+ * folioq_mark3: Set the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks3);
}
+/**
+ * folioq_unmark3: Clear the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks3);
@@ -111,6 +217,19 @@ static inline unsigned int __folio_order(struct folio *folio)
return folio->_flags_1 & 0xff;
}
+/**
+ * folioq_append: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
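+ *
+ * A minimal calling sketch (illustrative only)::
+ *
+ *	if (!folioq_full(folioq))
+ *		slot = folioq_append(folioq, folio);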
+ */
static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
@@ -120,6 +239,19 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli
return slot;
}
+/**
+ * folioq_append_mark: Add a folio to a folio queue segment and set the first mark
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second and third marks are left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
@@ -130,21 +262,57 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct
return slot;
}
+/**
+ * folioq_folio: Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and if the slot hasn't been added into yet, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->vec.folios[slot];
}
+/**
+ * folioq_folio_order: Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the order returned will be 0.
+ */
static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->orders[slot];
}
+/**
+ * folioq_folio_size: Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the size returned will be PAGE_SIZE.
+ */
static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
{
return PAGE_SIZE << folioq_folio_order(folioq, slot);
}
+/**
+ * folioq_clear: Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
{
folioq->vec.folios[slot] = NULL;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 776298fbfcb4..e3c603d01337 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1229,6 +1229,7 @@ extern int send_sigurg(struct file *file);
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
/* Possible states of 'frozen' field */
enum {
@@ -3233,7 +3234,6 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 083c860fd28e..c90ec889bfc2 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -436,7 +436,7 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);
-extern struct bus_type fsl_mc_bus_type;
+extern const struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
extern struct device_type fsl_mc_bus_dpni_type;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 98c47c394b89..e4697539b665 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -692,6 +692,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -1060,6 +1063,13 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
}
static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ return NULL;
+}
+
+static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback)
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
index 8099759d7242..37d56914d485 100644
--- a/include/linux/iio/backend.h
+++ b/include/linux/iio/backend.h
@@ -3,6 +3,7 @@
#define _IIO_BACKEND_H_
#include <linux/types.h>
+#include <linux/iio/iio.h>
struct iio_chan_spec;
struct fwnode_handle;
@@ -17,11 +18,13 @@ enum iio_backend_data_type {
};
enum iio_backend_data_source {
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE,
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
IIO_BACKEND_EXTERNAL,
IIO_BACKEND_DATA_SOURCE_MAX
};
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
/**
* IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
* @_name: Attribute name
@@ -54,6 +57,8 @@ enum iio_backend_test_pattern {
IIO_BACKEND_NO_TEST_PATTERN,
/* modified prbs9 */
IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
IIO_BACKEND_TEST_PATTERN_MAX
};
@@ -81,6 +86,9 @@ enum iio_backend_sample_trigger {
* @extend_chan_spec: Extend an IIO channel.
* @ext_info_set: Extended info setter.
* @ext_info_get: Extended info getter.
+ * @read_raw: Read a channel attribute from a backend device
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
**/
struct iio_backend_ops {
int (*enable)(struct iio_backend *back);
@@ -113,11 +121,31 @@ struct iio_backend_ops {
const char *buf, size_t len);
int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
};
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
const struct iio_backend_data_fmt *data);
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
@@ -141,17 +169,41 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
const char *buf, size_t len);
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
-
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan);
void *iio_backend_get_priv(const struct iio_backend *conv);
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
struct fwnode_handle *fwnode);
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv);
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
#endif
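
For orientation only, here is a minimal sketch, not taken from the patch, of how a backend driver could register itself now that devm_iio_backend_register() takes a struct iio_backend_info rather than a bare ops pointer; the state structure, callback and backend name below are hypothetical.

struct example_backend_state { int dummy; };	/* placeholder private data */

static int example_backend_enable(struct iio_backend *back)
{
	/* hypothetical hardware enable */
	return 0;
}

static const struct iio_backend_ops example_backend_ops = {
	.enable = example_backend_enable,
};

static const struct iio_backend_info example_backend_info = {
	.name = "example-backend",
	.ops = &example_backend_ops,
};

static int example_backend_probe(struct platform_device *pdev)
{
	struct example_backend_state *st;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* st is retrievable later via iio_backend_get_priv() */
	return devm_iio_backend_register(&pdev->dev, &example_backend_info, st);
}
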
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 894309294182..18779b631e90 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -609,7 +609,7 @@ struct iio_dev {
int scan_bytes;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned __private masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
struct iio_trigger *trig;
@@ -810,6 +810,23 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
}
#endif
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
+
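
As a usage illustration (the driver name is invented, not part of this patch), the new helpers are meant to bracket a driver's system-sleep callbacks so the attached trigger is quiesced while the hardware is powered down:

static int example_adc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	/* stop trigger activity before cutting power to the ADC */
	return iio_device_suspend_triggering(indio_dev);
}

static int example_adc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return iio_device_resume_triggering(indio_dev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_adc_pm_ops,
				example_adc_suspend, example_adc_resume);
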
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation,
@@ -855,6 +872,26 @@ static inline const struct iio_scan_type
return &chan->scan_type;
}
+/**
+ * iio_get_masklength - Get length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel - Iterate over active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
+
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
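
A hedged sketch of the new iterator in a buffered-capture path; the state structure, scan layout and example_read_channel() helper are illustrative assumptions, only iio_for_each_active_channel() comes from this patch.

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct example_state *st = iio_priv(indio_dev);
	unsigned int bit, i = 0;

	/* visit only the channels set in the active scan mask */
	iio_for_each_active_channel(indio_dev, bit)
		st->scan.data[i++] = example_read_channel(st, bit);

	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
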
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5fcbc254d186..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -269,15 +269,6 @@ extern unsigned long __stop_kprobe_blacklist[];
extern struct kretprobe_blackpoint kretprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else /* !CONFIG_KPROBES_SANITY_TEST */
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
-
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0d5125a3e31a..db567d26f7b9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1529,8 +1529,22 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
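
To make the split between the system-wide and per-CPU hooks concrete, here is an illustrative arch-side skeleton; it only mirrors the contract spelled out in the comments above and does not reflect any real architecture's implementation.

void kvm_arch_enable_virtualization(void)
{
	/* one-shot, system-wide setup on the 0=>1 kvm_usage_count transition */
}

void kvm_arch_disable_virtualization(void)
{
	/* system-wide teardown on the 1=>0 transition */
}

int kvm_arch_enable_virtualization_cpu(void)
{
	/* per-CPU hardware enable; runs on every online CPU and on hotplug */
	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	/* per-CPU hardware disable, mirroring the enable path */
}
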
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 1d59513bf230..9eca013aa5e1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index fc4d75c6cec3..673d5cae7c81 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -467,6 +467,7 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index cd4d5c8781f5..b1b219bc3422 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -9,6 +9,7 @@ struct mnt_idmap;
struct user_namespace;
extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a561c629d89f..2bf91b57591b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -49,7 +49,6 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
-#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -65,6 +64,18 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
+/**
+ * mutex_init_with_key - initialize a mutex with a given lockdep key
+ * @mutex: the mutex to be initialized
+ * @key: the lockdep key to be associated with the mutex
+ *
+ * Initialize the mutex to the unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
+
+#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -111,12 +122,6 @@ do { \
__mutex_rt_init((mutex), name, key); \
} while (0)
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
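
For illustration (the structure and key names are hypothetical), the new mutex_init_with_key() lets a group of mutexes share one explicitly managed lockdep class instead of the per-callsite static key that mutex_init() creates:

struct example_bucket {
	struct mutex lock;
};

static struct lock_class_key example_bucket_key;

static void example_buckets_init(struct example_bucket *buckets, unsigned int n)
{
	unsigned int i;

	/* all bucket locks are tracked under a single lockdep class */
	for (i = 0; i < n; i++)
		mutex_init_with_key(&buckets[i].lock, &example_bucket_key);
}
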
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2683b2b77612..2b8aac2c70ad 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -376,15 +376,11 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
-enum ip_conntrack_dir;
struct nf_nat_hook {
int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
const struct nlattr *attr);
void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
- unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir);
void (*remove_nat_bysrc)(struct nf_conn *ct);
};
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index ceb70a926b95..9ad727ddfedb 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,11 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..5fc02df88252
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1df86ab98c77..853df3fcd4c2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -49,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_LOCAL_IO 10 /* - client is local */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -125,6 +127,13 @@ struct nfs_client {
struct net *cl_net;
struct list_head pending_cb_stateids;
struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ spinlock_t cl_localio_lock;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -158,6 +167,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -234,8 +244,7 @@ struct nfs_server {
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 45623af3e7b8..12d8e47bc5a3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -446,7 +446,7 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
};
struct nfs4_open_delegation {
@@ -1854,6 +1854,24 @@ struct nfs_rpc_ops {
};
/*
+ * Helper functions used by NFS client and/or server
+ */
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
+
+/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
@@ -1866,4 +1884,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
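
A small usage sketch of the shared helpers (the verifier size and function names are illustrative): both sides of an XDR routine can now use the same fixed-size opaque wrappers instead of open-coding them.

static void example_encode_verifier(struct xdr_stream *xdr, const u8 *verf)
{
	encode_opaque_fixed(xdr, verf, 8);
}

static int example_decode_verifier(struct xdr_stream *xdr, u8 *verf)
{
	/* returns -EIO if the stream does not hold 8 more opaque bytes */
	return decode_opaque_fixed(xdr, verf, 8);
}
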
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..b353abe00357
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+/* nfsd_file structure is purposely kept opaque to NFS client */
+struct nfsd_file;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+/*
+ * Useful to allow a client to negotiate if localio
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ struct list_head list;
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+} nfs_uuid_t;
+
+void nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *,
+ struct net *, struct auth_domain *, struct module *);
+void nfs_uuid_invalidate_clients(struct list_head *list);
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+extern struct nfsd_file *
+nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *,
+ const struct cred *, const struct nfs_fh *,
+ const fmode_t) __must_hold(rcu);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_serv_try_get)(struct net *);
+ void (*nfsd_serv_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ const fmode_t);
+ void (*nfsd_file_put_local)(struct nfsd_file *);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, const fmode_t);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfsd_localio_ops_init(void)
+{
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b61438313a73..9fd7a0ce9c1a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -107,11 +107,11 @@ extern nodemask_t _unused_nodemask_arg_;
*/
#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
__nodemask_pr_bits(maskp)
-static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? MAX_NUMNODES : 0;
}
-static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : NULL;
}
@@ -132,19 +132,19 @@ static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
}
#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -154,14 +154,14 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline bool __node_test_and_set(int node, nodemask_t *addr)
+static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -169,7 +169,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -177,7 +177,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -185,7 +185,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -193,7 +193,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
+static __always_inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
@@ -201,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_equal(const nodemask_t *src1p,
+static __always_inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -209,7 +209,7 @@ static inline bool __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_intersects(const nodemask_t *src1p,
+static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -217,33 +217,33 @@ static inline bool __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_subset(const nodemask_t *src1p,
+static __always_inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
+static __always_inline void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
@@ -251,7 +251,7 @@ static inline void __nodes_shift_right(nodemask_t *dstp,
#define nodes_shift_left(dst, src, n) \
__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
+static __always_inline void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
@@ -261,13 +261,13 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline unsigned int __first_node(const nodemask_t *srcp)
+static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline unsigned int __next_node(int n, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
@@ -277,7 +277,7 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
@@ -286,7 +286,7 @@ static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
return ret;
}
-static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
@@ -304,7 +304,7 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline unsigned int __first_unset_node(const nodemask_t *maskp)
+static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
@@ -338,21 +338,21 @@ static inline unsigned int __first_unset_node(const nodemask_t *maskp)
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse_user(const char __user *buf, int len,
+static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
-static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
-static inline int __node_remap(int oldbit,
+static __always_inline int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
@@ -360,7 +360,7 @@ static inline int __node_remap(int oldbit,
#define nodes_remap(dst, src, old, new) \
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
-static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
@@ -368,7 +368,7 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#define nodes_onto(dst, orig, relmap) \
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
-static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
@@ -376,7 +376,7 @@ static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
#define nodes_fold(dst, orig, sz) \
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
-static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
@@ -418,22 +418,22 @@ enum node_states {
extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
@@ -443,11 +443,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline unsigned int next_online_node(int nid)
+static __always_inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline unsigned int next_memory_node(int nid)
+static __always_inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -455,13 +455,13 @@ static inline unsigned int next_memory_node(int nid)
extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;
-static inline void node_set_online(int nid)
+static __always_inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
-static inline void node_set_offline(int nid)
+static __always_inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
@@ -469,20 +469,20 @@ static inline void node_set_offline(int nid)
#else
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node == 0;
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return 1;
}
@@ -502,7 +502,7 @@ static inline int num_node_state(enum node_states state)
#endif
-static inline int node_random(const nodemask_t *maskp)
+static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
int w, bit;
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
deleted file mode 100644
index d687ef5726c2..000000000000
--- a/include/linux/platform_data/ad5449.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
- * Converter driver.
- *
- * Copyright 2012 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
-#define __LINUX_PLATFORM_DATA_AD5449_H__
-
-/**
- * enum ad5449_sdo_mode - AD5449 SDO pin configuration
- * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
- * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength.
- * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
- * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
- * read back from the device.
- */
-enum ad5449_sdo_mode {
- AD5449_SDO_DRIVE_FULL = 0x0,
- AD5449_SDO_DRIVE_WEAK = 0x1,
- AD5449_SDO_OPEN_DRAIN = 0x2,
- AD5449_SDO_DISABLED = 0x3,
-};
-
-/**
- * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
- * @sdo_mode: SDO pin mode
- * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
- * outputs to midscale (true) or to zero scale(false).
- */
-struct ad5449_platform_data {
- enum ad5449_sdo_mode sdo_mode;
- bool hardware_clear_to_midscale;
-};
-
-#endif
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
deleted file mode 100644
index eb9805bb3fe8..000000000000
--- a/include/linux/platform_data/dma-ep93xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-
-/*
- * M2P channels.
- *
- * Note that these values are also directly used for setting the PPALLOC
- * register.
- */
-#define EP93XX_DMA_I2S1 0
-#define EP93XX_DMA_I2S2 1
-#define EP93XX_DMA_AAC1 2
-#define EP93XX_DMA_AAC2 3
-#define EP93XX_DMA_AAC3 4
-#define EP93XX_DMA_I2S3 5
-#define EP93XX_DMA_UART1 6
-#define EP93XX_DMA_UART2 7
-#define EP93XX_DMA_UART3 8
-#define EP93XX_DMA_IRDA 9
-/* M2M channels */
-#define EP93XX_DMA_SSP 10
-#define EP93XX_DMA_IDE 11
-
-/**
- * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
- * @port: peripheral which is requesting the channel
- * @direction: TX/RX channel
- * @name: optional name for the channel, this is displayed in /proc/interrupts
- *
- * This information is passed as private channel parameter in a filter
- * function. Note that this is only needed for slave/cyclic channels. For
- * memcpy channels %NULL data should be passed.
- */
-struct ep93xx_dma_data {
- int port;
- enum dma_transfer_direction direction;
- const char *name;
-};
-
-/**
- * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
- * @name: name of the channel, used for getting the right clock for the channel
- * @base: mapped registers
- * @irq: interrupt number used by this channel
- */
-struct ep93xx_dma_chan_data {
- const char *name;
- void __iomem *base;
- int irq;
-};
-
-/**
- * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
- * @channels: array of channels which are passed to the driver
- * @num_channels: number of channels in the array
- *
- * This structure is passed to the DMA engine driver via platform data. For
- * M2P channels, contract is that even channels are for TX and odd for RX.
- * There is no requirement for the M2M channels.
- */
-struct ep93xx_dma_platform_data {
- struct ep93xx_dma_chan_data *channels;
- size_t num_channels;
-};
-
-static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
-}
-
-/**
- * ep93xx_dma_chan_direction - returns direction the channel can be used
- * @chan: channel
- *
- * This function can be used in filter functions to find out whether the
- * channel supports given DMA direction. Only M2P channels have such
- * limitation, for M2M channels the direction is configurable.
- */
-static inline enum dma_transfer_direction
-ep93xx_dma_chan_direction(struct dma_chan *chan)
-{
- if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_TRANS_NONE;
-
- /* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-}
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
deleted file mode 100644
index 8eef637a804d..000000000000
--- a/include/linux/platform_data/eth-ep93xx.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
-#define _LINUX_PLATFORM_DATA_ETH_EP93XX
-
-struct ep93xx_eth_data {
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
-#endif
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
deleted file mode 100644
index 3054fced8509..000000000000
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KEYPAD_EP93XX_H
-#define __KEYPAD_EP93XX_H
-
-struct matrix_keymap_data;
-
-/* flags for the ep93xx_keypad driver */
-#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
-#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
-#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
-#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
-
-/**
- * struct ep93xx_keypad_platform_data - platform specific device structure
- * @keymap_data: pointer to &matrix_keymap_data
- * @debounce: debounce start count; terminal count is 0xff
- * @prescale: row/column counter pre-scaler load value
- * @flags: see above
- */
-struct ep93xx_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
- unsigned int debounce;
- unsigned int prescale;
- unsigned int flags;
- unsigned int clk_rate;
-};
-
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
-
-#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
deleted file mode 100644
index b439f2a896e0..000000000000
--- a/include/linux/platform_data/spi-ep93xx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_EP93XX_SPI_H
-#define __ASM_MACH_EP93XX_SPI_H
-
-struct spi_device;
-
-/**
- * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @use_dma: use DMA for the transfers
- */
-struct ep93xx_spi_info {
- bool use_dma;
-};
-
-#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d422db6eec63..7132623e4658 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -52,7 +52,7 @@ struct platform_device {
extern int platform_device_register(struct platform_device *);
extern void platform_device_unregister(struct platform_device *);
-extern struct bus_type platform_bus_type;
+extern const struct bus_type platform_bus_type;
extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index c09cdcc99471..189140bf11fc 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -40,7 +40,7 @@ struct sbitmap_word {
/**
* @swap_lock: serializes simultaneous updates of ->word and ->cleared
*/
- spinlock_t swap_lock;
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
diff --git a/include/linux/security.h b/include/linux/security.h
index c37c32ebbdcd..b86ec2afc691 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2182,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path);
+ const struct path *path);
extern void security_bpf_token_free(struct bpf_token *token);
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
@@ -2222,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return 0;
}
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index fd59ed2cca53..e0717c8393d7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -193,7 +193,7 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac);
+ unsigned int quot);
int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 1672cf0810ef..102aa33d956c 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -246,24 +246,28 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
-#define APPLE_S5L_UCON_RXTO_ENA 9
-#define APPLE_S5L_UCON_RXTHRESH_ENA 12
-#define APPLE_S5L_UCON_TXTHRESH_ENA 13
-#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
-#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
-#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA 11
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_LEGACY_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_TXTHRESH_ENA)
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK | \
APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
-#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
-#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
-#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
-#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+#define APPLE_S5L_UTRSTAT_RXTO_LEGACY BIT(3)
+#define APPLE_S5L_UTRSTAT_RXTHRESH BIT(4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH BIT(5)
+#define APPLE_S5L_UTRSTAT_RXTO BIT(9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS GENMASK(9, 3)
#ifndef __ASSEMBLY__
diff --git a/include/linux/soc/cirrus/ep93xx.h b/include/linux/soc/cirrus/ep93xx.h
index 56fbe2dc59b1..3e6cf2b25a97 100644
--- a/include/linux/soc/cirrus/ep93xx.h
+++ b/include/linux/soc/cirrus/ep93xx.h
@@ -2,7 +2,18 @@
#ifndef _SOC_EP93XX_H
#define _SOC_EP93XX_H
-struct platform_device;
+struct regmap;
+struct spinlock_t;
+
+enum ep93xx_soc_model {
+ EP93XX_9301_SOC,
+ EP93XX_9307_SOC,
+ EP93XX_9312_SOC,
+};
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
#define EP93XX_CHIP_REV_D0 3
#define EP93XX_CHIP_REV_D1 4
@@ -10,28 +21,18 @@ struct platform_device;
#define EP93XX_CHIP_REV_E1 6
#define EP93XX_CHIP_REV_E2 7
-#ifdef CONFIG_ARCH_EP93XX
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
-unsigned int ep93xx_chip_revision(void);
+struct ep93xx_regmap_adev {
+ struct auxiliary_device adev;
+ struct regmap *map;
+ void __iomem *base;
+ spinlock_t *lock;
+ void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
+ unsigned int val);
+ void (*update_bits)(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask, unsigned int val);
+};
-#else
-static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_i2s_acquire(void) { return 0; }
-static inline void ep93xx_i2s_release(void) {}
-static inline unsigned int ep93xx_chip_revision(void) { return 0; }
-
-#endif
+#define to_ep93xx_regmap_adev(_adev) \
+ container_of((_adev), struct ep93xx_regmap_adev, adev)
#endif
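
A hedged sketch of a consumer, assuming an auxiliary driver bound to one of the new ep93xx regmap auxiliary devices; the register offsets and values are invented.

static int example_ep93xx_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
{
	struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
	unsigned int val;

	regmap_read(rdev->map, 0x00, &val);		/* offset is made up */
	rdev->write(rdev->map, rdev->lock, 0x04, 0x1);	/* locked write helper */
	return 0;
}
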
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 0f038a1a0330..c3bca9c0bf2c 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -88,11 +88,15 @@ struct geni_se {
#define SE_GENI_M_IRQ_STATUS 0x610
#define SE_GENI_M_IRQ_EN 0x614
#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_M_IRQ_EN_SET 0x61c
+#define SE_GENI_M_IRQ_EN_CLEAR 0x620
#define SE_GENI_S_CMD0 0x630
#define SE_GENI_S_CMD_CTRL_REG 0x634
#define SE_GENI_S_IRQ_STATUS 0x640
#define SE_GENI_S_IRQ_EN 0x644
#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_S_IRQ_EN_SET 0x64c
+#define SE_GENI_S_IRQ_EN_CLEAR 0x650
#define SE_GENI_TX_FIFOn 0x700
#define SE_GENI_RX_FIFOn 0x780
#define SE_GENI_TX_FIFO_STATUS 0x800
@@ -101,6 +105,8 @@ struct geni_se {
#define SE_GENI_RX_WATERMARK_REG 0x810
#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
#define SE_GENI_IOS 0x908
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
#define SE_DMA_TX_IRQ_STAT 0xc40
#define SE_DMA_TX_IRQ_CLR 0xc44
#define SE_DMA_TX_FSM_RST 0xc58
@@ -234,6 +240,9 @@ struct geni_se {
#define IO2_DATA_IN BIT(1)
#define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */
+#define GP_LENGTH GENMASK(31, 0)
+
/* SE_DMA_TX_IRQ_STAT Register fields */
#define TX_DMA_DONE BIT(0)
#define TX_EOT BIT(1)
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 0c77ba488bba..fec1e8a1570c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -151,13 +151,15 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+ RPC_TASK_SIGNALLED,
+};
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index c419a61f60e5..e68fecf6eab5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -67,9 +67,10 @@ enum {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -360,10 +361,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -441,6 +441,7 @@ bool svc_rqst_replace_page(struct svc_rqst *rqstp,
void svc_rqst_release_pages(struct svc_rqst *rqstp);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
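
To show the new calling convention (aside from the program numbers, the names below are placeholders), callers now hand svc_create_pooled() an array of programs plus its length instead of chaining them via pg_next:

static struct svc_program example_programs[] = {
	{ .pg_prog = 100003 },			/* NFS */
	{ .pg_prog = NFS_LOCALIO_PROGRAM },
};

static struct svc_serv *example_create_serv(void)
{
	return svc_create_pooled(example_programs, ARRAY_SIZE(example_programs),
				 &example_stats, 64 * 1024, example_threadfn);
}
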
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 63cf6fb26dcc..2e111153f7cd 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -157,6 +158,10 @@ extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6be396bb4297..93a9f3070b48 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -64,6 +64,13 @@ struct tp_module {
bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
+void for_each_tracepoint_in_module(struct module *,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
@@ -79,6 +86,19 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
+static inline
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
#endif /* CONFIG_MODULES */
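
A brief sketch of the new iteration helpers; the counting callback is illustrative, and its signature matches the declarations above.

static void example_count_tp(struct tracepoint *tp, struct module *mod,
			     void *priv)
{
	(*(unsigned int *)priv)++;
}

static unsigned int example_module_tp_count(struct module *mod)
{
	unsigned int count = 0;

	for_each_tracepoint_in_module(mod, example_count_tp, &count);
	return count;
}
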
/*
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 832997a9da0a..672d8fc2abdb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -495,6 +495,12 @@ struct usb_dev_state;
struct usb_tt;
+enum usb_link_tunnel_mode {
+ USB_LINK_UNKNOWN = 0,
+ USB_LINK_NATIVE,
+ USB_LINK_TUNNELED,
+};
+
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -605,6 +611,7 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
+ * @tunnel_mode: Connection native or tunneled over USB4
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -714,6 +721,7 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
+ enum usb_link_tunnel_mode tunnel_mode;
int slot_id;
struct usb2_lpm_parameters l1_params;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index af3cd2aae4bc..6e38fb9d2117 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -256,7 +256,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
int usb_func_wakeup(struct usb_function *func);
-#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
+#define MAX_CONFIG_INTERFACES 32
/**
* struct usb_configuration - represents one gadget configuration
diff --git a/drivers/usb/gadget/u_f.h b/include/linux/usb/func_utils.h
index e313c3b8dcb1..c8795c965109 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/include/linux/usb/func_utils.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * u_f.h
+ * func_utils.h
*
* Utility definitions for USB functions
*
@@ -10,8 +10,8 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#ifndef __U_F_H__
-#define __U_F_H__
+#ifndef _FUNC_UTILS_H_
+#define _FUNC_UTILS_H_
#include <linux/usb/gadget.h>
#include <linux/overflow.h>
@@ -83,4 +83,4 @@ static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
usb_ep_free_request(ep, req);
}
-#endif /* __U_F_H__ */
+#endif /* _FUNC_UTILS_H_ */
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
index d61aebd68128..6b5d6838f865 100644
--- a/include/linux/usb/gadget_configfs.h
+++ b/include/linux/usb/gadget_configfs.h
@@ -4,9 +4,6 @@
#include <linux/configfs.h>
-int check_user_usb_string(const char *name,
- struct usb_gadget_strings *stringtab_dev);
-
#define GS_STRINGS_W(__struct, __name) \
static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
@@ -37,7 +34,7 @@ static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
}; \
\
-static struct config_item_type struct_in##_langid_type = { \
+static const struct config_item_type struct_in##_langid_type = { \
.ct_item_ops = &struct_in##_langid_item_ops, \
.ct_attrs = struct_in##_langid_attrs, \
.ct_owner = THIS_MODULE, \
@@ -94,7 +91,7 @@ static struct configfs_group_operations struct_in##_strings_ops = { \
.drop_item = &struct_in##_strings_drop, \
}; \
\
-static struct config_item_type struct_in##_strings_type = { \
+static const struct config_item_type struct_in##_strings_type = { \
.ct_group_ops = &struct_in##_strings_ops, \
.ct_owner = THIS_MODULE, \
}
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1a0a4dc87980..75b2b763f1ba 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -311,8 +311,11 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name, const struct usb_device_id *id_table);
+#define usb_serial_register_drivers(serial_drivers, name, id_table) \
+ __usb_serial_register_drivers(serial_drivers, THIS_MODULE, name, id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table);
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
void usb_serial_port_softint(struct usb_serial_port *port);
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 0ab39b6ea205..f7f5cfbdef12 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -63,15 +63,12 @@
#define TCPC_ROLE_CTRL 0x1a
#define TCPC_ROLE_CTRL_DRP BIT(6)
-#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
-#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL GENMASK(5, 4)
#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
-#define TCPC_ROLE_CTRL_CC2_SHIFT 2
-#define TCPC_ROLE_CTRL_CC2_MASK 0x3
-#define TCPC_ROLE_CTRL_CC1_SHIFT 0
-#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC2 GENMASK(3, 2)
+#define TCPC_ROLE_CTRL_CC1 GENMASK(1, 0)
#define TCPC_ROLE_CTRL_CC_RA 0x0
#define TCPC_ROLE_CTRL_CC_RP 0x1
#define TCPC_ROLE_CTRL_CC_RD 0x2
@@ -92,11 +89,9 @@
#define TCPC_CC_STATUS_TERM BIT(4)
#define TCPC_CC_STATUS_TERM_RP 0
#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATUS_CC2 GENMASK(3, 2)
+#define TCPC_CC_STATUS_CC1 GENMASK(1, 0)
#define TCPC_CC_STATE_SRC_OPEN 0
-#define TCPC_CC_STATUS_CC2_SHIFT 2
-#define TCPC_CC_STATUS_CC2_MASK 0x3
-#define TCPC_CC_STATUS_CC1_SHIFT 0
-#define TCPC_CC_STATUS_CC1_MASK 0x3
#define TCPC_POWER_STATUS 0x1e
#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
@@ -134,9 +129,8 @@
#define TCPC_MSG_HDR_INFO 0x2e
#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_REV GENMASK(2, 1)
#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
-#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
-#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
#define TCPC_RX_DETECT 0x2f
#define TCPC_RX_DETECT_HARD_RESET BIT(5)
@@ -154,10 +148,8 @@
#define TCPC_RX_DATA 0x34 /* through 0x4f */
#define TCPC_TRANSMIT 0x50
-#define TCPC_TRANSMIT_RETRY_SHIFT 4
-#define TCPC_TRANSMIT_RETRY_MASK 0x3
-#define TCPC_TRANSMIT_TYPE_SHIFT 0
-#define TCPC_TRANSMIT_TYPE_MASK 0x7
+#define TCPC_TRANSMIT_RETRY GENMASK(5, 4)
+#define TCPC_TRANSMIT_TYPE GENMASK(2, 0)
#define TCPC_TX_BYTE_CNT 0x51
#define TCPC_TX_HDR 0x52
@@ -178,8 +170,7 @@
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+ FIELD_GET(TCPC_ROLE_CTRL_## cc, reg) == TCPC_ROLE_CTRL_CC_RD)
struct tcpci;
@@ -190,7 +181,7 @@ struct tcpci;
* Optional; Callback to perform chip specific operations when FRS
* is sourcing vbus.
* @auto_discharge_disconnect:
- * Optional; Enables TCPC to autonously discharge vbus on disconnect.
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
* @vbus_vsafe0v:
* optional; Set when TCPC can detect whether vbus is at VSAFE0V.
* @set_partner_usb_comm_capable:
@@ -256,7 +247,7 @@ static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
if (sink)
return TYPEC_CC_RP_3_0;
fallthrough;
- case 0x0:
+ case TCPC_CC_STATE_SRC_OPEN:
default:
return TYPEC_CC_OPEN;
}
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9f08a584d707..0b9f1e598e3a 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -76,8 +76,23 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+/* This one is special, as it indicates that the device is going away;
+ * there are cyclic dependencies between tasklet, timer and bh
+ * that must be broken.
+ */
+# define EVENT_UNPLUG 31
};
+static inline bool usbnet_going_away(struct usbnet *ubn)
+{
+ return test_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
+static inline void usbnet_mark_going_away(struct usbnet *ubn)
+{
+ set_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
static inline struct usb_driver *driver_of(struct usb_interface *intf)
{
return to_usb_driver(intf->dev.driver);
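
A usage sketch, with invented function names, of how the new flag helpers are intended to break the tasklet/timer/bh cycles mentioned in the comment:

static void example_usbnet_bh(struct usbnet *dev)
{
	/* once disconnect has flagged the device, stop rearming work */
	if (usbnet_going_away(dev))
		return;

	/* ... refill RX queue, kick the TX tasklet, restart timers ... */
}

static void example_usbnet_disconnect(struct usbnet *dev)
{
	usbnet_mark_going_away(dev);
}
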
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..2e7a30fe6b92 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -582,11 +582,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attributes after it was created
+ * @mdev: parent device to use for device
+ * @dev: vdpa device structure
+ * @config: Attributes to be set for the device.
+ * The driver needs to check the mask in the structure and then apply
+ * the corresponding settings to the vdpa device. The driver must
+ * return 0 on success.
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+ int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config);
};
/**
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f77f812bfbe7..d1948d357dad 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2435,9 +2435,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ if (likely(skb)) {
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ } else {
+ WARN_ONCE(1,
+ "rtx queue emtpy: "
+ "out:%u sacked:%u lost:%u retrans:%u "
+ "tlp_high_seq:%u sk_state:%u ca_state:%u "
+ "advmss:%u mss_cache:%u pmtu:%u\n",
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+ return jiffies_to_usecs(rto);
+ }
+
}
/*
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index f57f05331d73..569f86a44aaa 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -176,9 +176,9 @@ TRACE_EVENT(dma_free,
);
TRACE_EVENT(dma_map_sg,
- TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
int ents, enum dma_data_direction dir, unsigned long attrs),
- TP_ARGS(dev, sg, nents, ents, dir, attrs),
+ TP_ARGS(dev, sgl, nents, ents, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
@@ -190,17 +190,17 @@ TRACE_EVENT(dma_map_sg,
),
TP_fast_assign(
+ struct scatterlist *sg;
int i;
__assign_str(device);
- for (i = 0; i < nents; i++)
- ((u64 *)__get_dynamic_array(phys_addrs))[i] =
- sg_phys(sg + i);
- for (i = 0; i < ents; i++) {
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ for_each_sg(sgl, sg, ents, i) {
((u64 *)__get_dynamic_array(dma_addrs))[i] =
- sg_dma_address(sg + i);
+ sg_dma_address(sg);
((unsigned int *)__get_dynamic_array(lengths))[i] =
- sg_dma_len(sg + i);
+ sg_dma_len(sg);
}
__entry->dir = dir;
__entry->attrs = attrs;
@@ -222,9 +222,9 @@ TRACE_EVENT(dma_map_sg,
);
TRACE_EVENT(dma_unmap_sg,
- TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs),
- TP_ARGS(dev, sg, nents, dir, attrs),
+ TP_ARGS(dev, sgl, nents, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
@@ -234,12 +234,12 @@ TRACE_EVENT(dma_unmap_sg,
),
TP_fast_assign(
+ struct scatterlist *sg;
int i;
__assign_str(device);
- for (i = 0; i < nents; i++)
- ((u64 *)__get_dynamic_array(addrs))[i] =
- sg_phys(sg + i);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
__entry->dir = dir;
__entry->attrs = attrs;
),
@@ -290,9 +290,9 @@ DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device,
TP_ARGS(dev, dma_addr, size, dir));
DECLARE_EVENT_CLASS(dma_sync_sg,
- TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir),
- TP_ARGS(dev, sg, nents, dir),
+ TP_ARGS(dev, sgl, nents, dir),
TP_STRUCT__entry(
__string(device, dev_name(dev))
@@ -302,14 +302,15 @@ DECLARE_EVENT_CLASS(dma_sync_sg,
),
TP_fast_assign(
+ struct scatterlist *sg;
int i;
__assign_str(device);
- for (i = 0; i < nents; i++) {
+ for_each_sg(sgl, sg, nents, i) {
((u64 *)__get_dynamic_array(dma_addrs))[i] =
- sg_dma_address(sg + i);
+ sg_dma_address(sg);
((unsigned int *)__get_dynamic_array(lengths))[i] =
- sg_dma_len(sg + i);
+ sg_dma_len(sg);
}
__entry->dir = dir;
),
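The sg + i indexing removed above is only valid for a flat scatterlist array; chained tables have to be walked with for_each_sg(), as the trace events now do. A small sketch of the same iteration pattern (illustrative, not from the patch):

#include <linux/scatterlist.h>

/* Sum the DMA lengths of a possibly chained scatterlist. */
static u64 total_dma_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	u64 total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg_dma_len(sg);
	return total;
}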
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ed794b5fefbe..2851c823095b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 76bd42a96815..1d7c52821e55 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -448,7 +448,8 @@ TRACE_EVENT(netfs_folio,
),
TP_fast_assign(
- __entry->ino = folio->mapping->host->i_ino;
+ struct address_space *__m = READ_ONCE(folio->mapping);
+ __entry->ino = __m ? __m->host->i_ino : 0;
__entry->why = why;
__entry->index = folio_index(folio);
__entry->nr = folio_nr_pages(folio);
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index d44a8118b2ed..1fd92021a573 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -236,6 +236,12 @@ struct binder_frozen_status_info {
__u32 async_recv;
};
+struct binder_frozen_state_info {
+ binder_uintptr_t cookie;
+ __u32 is_frozen;
+ __u32 reserved;
+};
+
/* struct binder_extended_error - extended error information
* @id: identifier for the failed operation
* @command: command as defined by binder_driver_return_protocol
@@ -467,6 +473,17 @@ enum binder_driver_return_protocol {
/*
* The target of the last async transaction is frozen. No parameters.
*/
+
+ BR_FROZEN_BINDER = _IOR('r', 21, struct binder_frozen_state_info),
+ /*
+ * The cookie and a boolean (is_frozen) that indicates whether the process
+ * transitioned into a frozen or an unfrozen state.
+ */
+
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE = _IOR('r', 22, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
enum binder_driver_command_protocol {
@@ -550,6 +567,25 @@ enum binder_driver_command_protocol {
/*
* binder_transaction_data_sg: the sent command.
*/
+
+ BC_REQUEST_FREEZE_NOTIFICATION =
+ _IOW('c', 19, struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_FREEZE_NOTIFICATION = _IOW('c', 20,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_FREEZE_NOTIFICATION_DONE = _IOW('c', 21, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
#endif /* _UAPI_LINUX_BINDER_H */
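A userspace sketch (illustrative only, error handling omitted) of requesting freeze notifications with the new commands; the helper name and the write-buffer layout are assumptions based on the usual BINDER_WRITE_READ protocol:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int request_freeze_notification(int binder_fd, uint32_t handle,
				       binder_uintptr_t cookie)
{
	struct {
		uint32_t cmd;
		struct binder_handle_cookie payload;
	} __attribute__((packed)) cmd_buf = {
		.cmd = BC_REQUEST_FREEZE_NOTIFICATION,
		.payload = { .handle = handle, .cookie = cookie },
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(cmd_buf),
		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd_buf,
	};

	/* The driver later reports BR_FROZEN_BINDER with a
	 * struct binder_frozen_state_info carrying this cookie.
	 */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}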
diff --git a/include/uapi/linux/bits.h b/include/uapi/linux/bits.h
index 3c2a101986a3..5ee30f882736 100644
--- a/include/uapi/linux/bits.h
+++ b/include/uapi/linux/bits.h
@@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
+
#endif /* _UAPI_LINUX_BITS_H */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index a429381e7ca5..e16be0d37746 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __BIT128() would not work in the asm code, as it shifts an
+ * 'unsigned __int128' data type; direct representation of
+ * 128 bit constants is not supported in the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Please revisit this implementation when gcc compiler
+ * starts representing 128 bit constants directly like long
+ * and unsigned long etc. Subsequently drop the comment for
+ * GENMASK_U128() which would then start supporting asm code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
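As a quick arithmetic check (not part of the patch), __GENMASK_U128(h, l) evaluates to (_BIT128(h) << 1) - _BIT128(l), i.e. a contiguous run of set bits from bit l through bit h; for the full-width case the left shift drops the top bit and the unsigned subtraction yields all ones:

/* Illustrative only; assumes a compiler with unsigned __int128 support
 * and the two uapi headers above.
 */
_Static_assert(__GENMASK_U128(3, 0) == 0xf, "bits 3..0 set");
_Static_assert(__GENMASK_U128(127, 0) == ~(unsigned __int128)0,
	       "all 128 bits set");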
diff --git a/include/uapi/linux/exfat.h b/include/uapi/linux/exfat.h
new file mode 100644
index 000000000000..46d95b16fc4b
--- /dev/null
+++ b/include/uapi/linux/exfat.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2024 Unisoc Technologies Co., Ltd.
+ */
+
+#ifndef _UAPI_LINUX_EXFAT_H
+#define _UAPI_LINUX_EXFAT_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * exfat-specific ioctl commands
+ */
+
+#define EXFAT_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * Flags used by EXFAT_IOC_SHUTDOWN
+ */
+
+#define EXFAT_GOING_DOWN_DEFAULT 0x0 /* default with full sync */
+#define EXFAT_GOING_DOWN_FULLSYNC 0x1 /* going down with full sync */
+#define EXFAT_GOING_DOWN_NOSYNC 0x2 /* going down */
+
+#endif /* _UAPI_LINUX_EXFAT_H */
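A short userspace sketch (illustrative, not from the patch) of issuing the new shutdown ioctl against a mounted exFAT filesystem:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/exfat.h>

int exfat_emergency_shutdown(const char *mountpoint)
{
	__u32 flags = EXFAT_GOING_DOWN_FULLSYNC;
	int fd = open(mountpoint, O_RDONLY | O_DIRECTORY);
	int ret;

	if (fd < 0)
		return -1;
	/* After this call the filesystem refuses further modifications. */
	ret = ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags);
	close(fd);
	return ret;
}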
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index d08b99d60f6f..f1e99458e29e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -217,6 +217,9 @@
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ *
+ * 7.41
+ * - add FUSE_ALLOW_IDMAP
*/
#ifndef _LINUX_FUSE_H
@@ -252,7 +255,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 40
+#define FUSE_KERNEL_MINOR_VERSION 41
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -421,6 +424,7 @@ struct fuse_file_lock {
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
+ * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -466,6 +470,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+#define FUSE_ALLOW_IDMAP (1ULL << 40)
/**
* CUSE INIT request/reply flags
@@ -984,6 +989,21 @@ struct fuse_fallocate_in {
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
+/**
+ * This value will be set by the kernel to
+ * (struct fuse_in_header).{uid,gid} fields in
+ * case when:
+ * - fuse daemon enabled FUSE_ALLOW_IDMAP
+ * - idmapping information is not available and uid/gid
+ * cannot be mapped in accordance with an idmapping.
+ *
+ * Note: idmapping information is always available
+ * for inode creation operations like:
+ * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE,
+ * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT).
+ */
+#define FUSE_INVALID_UIDGID ((uint32_t)(-1))
+
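A minimal sketch (an assumption, not from the patch) of how a daemon that negotiated FUSE_ALLOW_IDMAP might treat the sentinel:

#include <stdbool.h>
#include <linux/fuse.h>

/* Returns false when the kernel could not map the caller's ids. */
static bool fuse_req_has_mapped_ids(const struct fuse_in_header *in)
{
	return in->uid != FUSE_INVALID_UIDGID &&
	       in->gid != FUSE_INVALID_UIDGID;
}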
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 44d73ba8788d..91f0f7e214a5 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -254,6 +254,9 @@ struct usb_ctrlrequest {
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
#define USB_DT_WIRE_ADAPTER 0x21
+/* From USB Device Firmware Upgrade Specification, Revision 1.1 */
+#define USB_DT_DFU_FUNCTIONAL 0x21
+/* these are from the Wireless USB spec */
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
/* From the T10 UAS specification */
@@ -329,9 +332,10 @@ struct usb_device_descriptor {
#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
-#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_DFU 0x01
-#define USB_SUBCLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_VENDOR_SPEC 0xff
/*-------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 9f88de9c3d66..2ebdba111a8f 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -3,6 +3,7 @@
#define _UAPI__LINUX_FUNCTIONFS_H__
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -37,6 +38,31 @@ struct usb_endpoint_descriptor_no_audio {
__u8 bInterval;
} __attribute__((packed));
+/**
+ * struct usb_dfu_functional_descriptor - DFU Functional descriptor
+ * @bLength: Size of the descriptor (bytes)
+ * @bDescriptorType: USB_DT_DFU_FUNCTIONAL
+ * @bmAttributes: DFU attributes
+ * @wDetachTimeOut: Maximum time to wait after DFU_DETACH (ms, le16)
+ * @wTransferSize: Maximum number of bytes per control-write (le16)
+ * @bcdDFUVersion: DFU Spec version (BCD, le16)
+ */
+struct usb_dfu_functional_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bmAttributes;
+ __le16 wDetachTimeOut;
+ __le16 wTransferSize;
+ __le16 bcdDFUVersion;
+} __attribute__ ((packed));
+
+/* from DFU functional descriptor bmAttributes */
+#define DFU_FUNC_ATT_CAN_DOWNLOAD _BITUL(0)
+#define DFU_FUNC_ATT_CAN_UPLOAD _BITUL(1)
+#define DFU_FUNC_ATT_MANIFEST_TOLERANT _BITUL(2)
+#define DFU_FUNC_ATT_WILL_DETACH _BITUL(3)
+
+
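For illustration (not part of the patch), a userspace sketch of checking a parsed DFU functional descriptor; le16toh() is the glibc byte-order helper:

#include <endian.h>
#include <stdbool.h>
#include <linux/usb/ch9.h>
#include <linux/usb/functionfs.h>

static bool dfu_can_download(const struct usb_dfu_functional_descriptor *d)
{
	return d->bDescriptorType == USB_DT_DFU_FUNCTIONAL &&
	       (d->bmAttributes & DFU_FUNC_ATT_CAN_DOWNLOAD);
}

/* wTransferSize is little-endian, per the descriptor layout above. */
static unsigned int dfu_transfer_size(const struct usb_dfu_functional_descriptor *d)
{
	return le16toh(d->wTransferSize);
}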
struct usb_functionfs_descs_head_v2 {
__le32 magic;
__le32 length;
@@ -104,23 +130,38 @@ struct usb_ffs_dmabuf_transfer_req {
#ifndef __KERNEL__
-/*
+/**
+ * DOC: descriptors
+ *
* Descriptors format:
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | flags | LE32 | combination of functionfs_flags |
+ * +-----+-----------+--------------+--------------------------------------+
* | | eventfd | LE32 | eventfd file descriptor |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_count | LE32 | number of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_count | LE32 | number of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_descrs | Descriptor[] | list of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_descrs | OSDesc[] | list of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* Depending on which flags are set, various fields may be missing in the
* structure. Any flags that are not recognised cause the whole block to be
@@ -128,71 +169,111 @@ struct usb_ffs_dmabuf_transfer_req {
*
* Legacy descriptors format (deprecated as of 3.14):
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 12 | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 16 | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* All numbers must be in little endian order.
*
* Descriptor[] is an array of valid USB descriptors which have the following
* format:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
* | 0 | bLength | U8 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 1 | bDescriptorType | U8 | descriptor type |
+ * +-----+-----------------+------+--------------------------+
* | 2 | payload | | descriptor's payload |
+ * +-----+-----------------+------+--------------------------+
*
* OSDesc[] is an array of valid MS OS Feature Descriptors which have one of
* the following formats:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
 * | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 4 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | bCount | U8 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 10 | Reserved | U8 | 0 |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtCompat[] | | list of ext. compat. d. |
+ * +-----+-----------------+------+--------------------------+
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
 * | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 5 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | wCount | U16 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtProp[] | | list of ext. prop. d. |
+ * +-----+-----------------+------+--------------------------+
*
 * ExtCompat[] is an array of valid Extended Compatibility descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | bFirstInterfaceNumber | U8 | index of the interface or of the 1st|
+ * +-----+-----------------------+------+-------------------------------------+
* | | | | interface in an IAD group |
+ * +-----+-----------------------+------+-------------------------------------+
* | 1 | Reserved | U8 | 1 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 2 | CompatibleID | U8[8]| compatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | SubCompatibleID | U8[8]| subcompatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 18 | Reserved | U8[6]| 0 |
+ * +-----+-----------------------+------+-------------------------------------+
*
* ExtProp[] is an array of valid Extended Properties descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | dwSize | U32 | length of the descriptor |
+ * +-----+-----------------------+------+-------------------------------------+
* | 4 | dwPropertyDataType | U32 | 1..7 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 8 | wPropertyNameLength | U16 | bPropertyName length (NL) |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | bPropertyName |U8[NL]| name of this property |
+ * +-----+-----------------------+------+-------------------------------------+
* |10+NL| dwPropertyDataLength | U32 | bPropertyData length (DL) |
+ * +-----+-----------------------+------+-------------------------------------+
* |14+NL| bProperty |U8[DL]| payload of this property |
+ * +-----+-----------------------+------+-------------------------------------+
*/
struct usb_functionfs_strings_head {
diff --git a/include/uapi/linux/usb/g_hid.h b/include/uapi/linux/usb/g_hid.h
new file mode 100644
index 000000000000..b965092db476
--- /dev/null
+++ b/include/uapi/linux/usb/g_hid.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef __UAPI_LINUX_USB_G_HID_H
+#define __UAPI_LINUX_USB_G_HID_H
+
+#include <linux/types.h>
+
+/* Maximum HID report length for High-Speed USB (i.e. USB 2.0) */
+#define MAX_REPORT_LENGTH 64
+
+/**
+ * struct usb_hidg_report - response to GET_REPORT
+ * @report_id: report ID that this is a response for
+ * @userspace_req:
+ * !0 this report is used for any pending GET_REPORT request
+ * but wait on userspace to issue a new report on future requests
+ * 0 this report is to be used for any future GET_REPORT requests
+ * @length: length of the report response
+ * @data: report response
+ * @padding: padding for 32/64 bit compatibility
+ *
+ * Structure used by GADGET_HID_WRITE_GET_REPORT ioctl on /dev/hidg*.
+ */
+struct usb_hidg_report {
+ __u8 report_id;
+ __u8 userspace_req;
+ __u16 length;
+ __u8 data[MAX_REPORT_LENGTH];
+ __u8 padding[4];
+};
+
+/* The 'g' code is used by gadgetfs and hid gadget ioctl requests.
+ * Don't add any colliding codes to either driver, and keep
+ * them in unique ranges.
+ */
+
+#define GADGET_HID_READ_GET_REPORT_ID _IOR('g', 0x41, __u8)
+#define GADGET_HID_WRITE_GET_REPORT _IOW('g', 0x42, struct usb_hidg_report)
+
+#endif /* __UAPI_LINUX_USB_G_HID_H */
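A userspace sketch (illustrative, error handling trimmed) of answering a pending GET_REPORT request through the new ioctl on /dev/hidg*:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/g_hid.h>

static int answer_get_report(int hidg_fd, __u8 report_id,
			     const void *buf, __u16 len)
{
	struct usb_hidg_report rep = {
		.report_id = report_id,
		.userspace_req = 0,	/* reuse this report for future requests */
		.length = len,
	};

	if (len > MAX_REPORT_LENGTH)
		return -1;
	memcpy(rep.data, buf, len);
	return ioctl(hidg_fd, GADGET_HID_WRITE_GET_REPORT, &rep);
}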
diff --git a/include/uapi/linux/usb/gadgetfs.h b/include/uapi/linux/usb/gadgetfs.h
index 835473910a49..9754822b2a40 100644
--- a/include/uapi/linux/usb/gadgetfs.h
+++ b/include/uapi/linux/usb/gadgetfs.h
@@ -62,7 +62,7 @@ struct usb_gadgetfs_event {
};
-/* The 'g' code is also used by printer gadget ioctl requests.
+/* The 'g' code is also used by printer and hid gadget ioctl requests.
* Don't add any colliding codes to either driver, and keep
* them in unique ranges (size 0x20 for now).
*/
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 842bf1201ac4..71edf2c70cc3 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -19,6 +19,7 @@ enum vdpa_command {
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
VDPA_CMD_DEV_VSTATS_GET,
+ VDPA_CMD_DEV_ATTR_SET,
};
enum vdpa_attr {
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index ddaa45e723c4..ee35a372805d 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -71,7 +71,13 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
-#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */
+#define VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocation */
+#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */
+#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */
+#define VIRTIO_BALLOON_S_NR 16
#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
@@ -83,7 +89,13 @@ struct virtio_balloon_config {
VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
- VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \
+ VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \
+ VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \
}
#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index 8b8c5d1420fe..8e2c8fd44764 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -126,6 +126,11 @@ struct privcmd_ioeventfd {
__u8 pad[2];
};
+struct privcmd_pcidev_get_gsi {
+ __u32 sbdf;
+ __u32 gsi;
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -157,5 +162,7 @@ struct privcmd_ioeventfd {
_IOW('P', 8, struct privcmd_irqfd)
#define IOCTL_PRIVCMD_IOEVENTFD \
_IOW('P', 9, struct privcmd_ioeventfd)
+#define IOCTL_PRIVCMD_PCIDEV_GET_GSI \
+ _IOC(_IOC_NONE, 'P', 10, sizeof(struct privcmd_pcidev_get_gsi))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
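A userspace sketch of the new privcmd ioctl; note the sbdf packing shown here (segment << 16 | bus << 8 | devfn) is an assumption for illustration, not something this header spells out:

#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int pcidev_gsi(int privcmd_fd, unsigned int seg,
		      unsigned int bus, unsigned int devfn)
{
	struct privcmd_pcidev_get_gsi arg = {
		.sbdf = (seg << 16) | (bus << 8) | devfn,
	};

	if (ioctl(privcmd_fd, IOCTL_PRIVCMD_PCIDEV_GET_GSI, &arg) < 0)
		return -1;
	return arg.gsi;	/* filled in by the hypervisor lookup */
}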
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index b1e11863144d..daa96a22d257 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -67,10 +67,37 @@ static inline void xen_acpi_sleep_register(void)
acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
}
}
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity);
+int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out);
#else
static inline void xen_acpi_sleep_register(void)
{
}
+
+static inline int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ return -1;
+}
+
+static inline int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_XEN_PCI_STUB
+int pcistub_get_gsi_from_sbdf(unsigned int sbdf);
+#else
+static inline int pcistub_get_gsi_from_sbdf(unsigned int sbdf)
+{
+ return -1;
+}
#endif
#endif /* _XEN_ACPI_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 38deb1214613..918f47d87d7a 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -11,7 +11,9 @@
#define __XEN_PUBLIC_ELFNOTE_H__
/*
- * The notes should live in a SHT_NOTE segment and have "Xen" in the
+ * `incontents 200 elfnotes ELF notes
+ *
+ * The notes should live in a PT_NOTE segment and have "Xen" in the
* name field.
*
* Numeric types are either 4 or 8 bytes depending on the content of
@@ -22,6 +24,8 @@
*
* String values (for non-legacy) are NULL terminated ASCII, also known
* as ASCIZ type.
+ *
+ * Xen only uses ELF Notes contained in x86 binaries.
*/
/*
@@ -52,7 +56,7 @@
#define XEN_ELFNOTE_VIRT_BASE 3
/*
- * The offset of the ELF paddr field from the acutal required
+ * The offset of the ELF paddr field from the actual required
* pseudo-physical address (numeric).
*
* This is used to maintain backwards compatibility with older kernels
@@ -92,7 +96,12 @@
#define XEN_ELFNOTE_LOADER 8
/*
- * The kernel supports PAE (x86/32 only, string = "yes" or "no").
+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
+ * "bimodal").
+ *
+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
+ * may be given as "yes,bimodal" which will cause older Xen to treat
+ * this kernel as PAE.
*
* LEGACY: PAE (n.b. The legacy interface included a provision to
* indicate 'extended-cr3' support allowing L3 page tables to be
@@ -149,7 +158,9 @@
* The (non-default) location the initial phys-to-machine map should be
* placed at by the hypervisor (Dom0) or the tools (DomU).
* The kernel must be prepared for this mapping to be established using
- * large pages, despite such otherwise not being available to guests.
+ * large pages, despite such otherwise not being available to guests. Note
+ * that these large pages may be misaligned in PFN space (they'll obviously
+ * be aligned in MFN and virtual address spaces).
* The kernel must also be able to handle the page table pages used for
* this mapping not being accessible through the initial mapping.
* (Only x86-64 supports this at present.)
@@ -186,8 +197,80 @@
#define XEN_ELFNOTE_PHYS32_ENTRY 18
/*
+ * Physical loading constraints for PVH kernels
+ *
+ * The presence of this note indicates the kernel supports relocating itself.
+ *
+ * The note may include up to three 32bit values to place constraints on the
+ * guest physical loading addresses and alignment for a PVH kernel. Values
+ * are read in the following order:
+ * - a required start alignment (default 0x200000)
+ * - a minimum address for the start of the image (default 0; see below)
+ * - a maximum address for the last byte of the image (default 0xffffffff)
+ *
+ * When this note specifies an alignment value, it is used. Otherwise the
+ * maximum p_align value from loadable ELF Program Headers is used, if it is
+ * greater than or equal to 4k (0x1000). Otherwise, the default is used.
+ */
+#define XEN_ELFNOTE_PHYS32_RELOC 19
+
+/*
* The number of the highest elfnote defined.
*/
-#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_RELOC
+
+/*
+ * System information exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
+ * note in case of a system crash. This note will contain various
+ * information about the system, see xen/include/xen/elfcore.h.
+ */
+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
+
+/*
+ * System registers exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
+ * note per cpu in case of a system crash. This note is architecture
+ * specific and will contain registers not saved in the "CORE" note.
+ * See xen/include/xen/elfcore.h for more information.
+ */
+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
+
+
+/*
+ * xen dump-core none note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
+ * in its dump file to indicate that the file is xen dump-core
+ * file. This note doesn't have any other information.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
+
+/*
+ * xen dump-core header note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
+ * in its dump file.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
+
+/*
+ * xen dump-core xen version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
+ * in its dump file. It contains the xen version obtained via the
+ * XENVER hypercall.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
+
+/*
+ * xen dump-core format version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
+ * in its dump file. It contains a format version identifier.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index a237af867873..df74e65a884b 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -256,6 +256,13 @@ struct physdev_pci_device_add {
*/
#define PHYSDEVOP_prepare_msix 30
#define PHYSDEVOP_release_msix 31
+/*
+ * Notify the hypervisor that a PCI device has been reset, so that any
+ * internally cached state is regenerated. Should be called after any
+ * device reset performed by the hardware domain.
+ */
+#define PHYSDEVOP_pci_device_reset 32
+
struct physdev_pci_device {
/* IN */
uint16_t seg;
@@ -263,6 +270,16 @@ struct physdev_pci_device {
uint8_t devfn;
};
+struct pci_device_reset {
+ struct physdev_pci_device dev;
+#define PCI_DEVICE_RESET_COLD 0x0
+#define PCI_DEVICE_RESET_WARM 0x1
+#define PCI_DEVICE_RESET_HOT 0x2
+#define PCI_DEVICE_RESET_FLR 0x3
+#define PCI_DEVICE_RESET_MASK 0x3
+ uint32_t flags;
+};
+
#define PHYSDEVOP_DBGP_RESET_PREPARE 1
#define PHYSDEVOP_DBGP_RESET_DONE 2
diff --git a/include/xen/pci.h b/include/xen/pci.h
index b8337cf85fd1..424b8ea89ca8 100644
--- a/include/xen/pci.h
+++ b/include/xen/pci.h
@@ -4,10 +4,16 @@
#define __XEN_PCI_H__
#if defined(CONFIG_XEN_DOM0)
+int xen_reset_device(const struct pci_dev *dev);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
+static inline int xen_reset_device(const struct pci_dev *dev)
+{
+ return -1;
+}
+
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
diff --git a/init/Kconfig b/init/Kconfig
index b05467014041..fbd0cb06a50a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,6 +60,13 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
+config RUSTC_VERSION
+ int
+ default $(shell,$(srctree)/scripts/rustc-version.sh $(RUSTC))
+ help
+ It does not depend on `RUST` since `RUST` itself may need to use this version
+ in a `depends on`.
+
config RUST_IS_AVAILABLE
def_bool $(success,$(srctree)/scripts/rust_is_available.sh)
help
@@ -1935,12 +1942,14 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
- depends on !CFI_CLANG
depends on !MODVERSIONS
- depends on !GCC_PLUGINS
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on !RANDSTRUCT
- depends on !SHADOW_CALL_STACK
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ depends on !CFI_CLANG || RUSTC_VERSION >= 107900 && $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG
+ depends on !CALL_PADDING || RUSTC_VERSION >= 108000
+ depends on !KASAN_SW_TAGS
help
Enables Rust support in the kernel.
@@ -1957,7 +1966,9 @@ config RUST
config RUSTC_VERSION_TEXT
string
depends on RUST
- default "$(shell,$(RUSTC) --version 2>/dev/null)"
+ default "$(RUSTC_VERSION_TEXT)"
+ help
+ See `CC_VERSION_TEXT`.
config BINDGEN_VERSION_TEXT
string
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 0a79aee6523d..29da6d3838f6 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -78,13 +78,12 @@ void bpf_inode_storage_free(struct inode *inode)
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true);
- fdput(f);
return sdata ? sdata->data : NULL;
}
@@ -92,19 +91,16 @@ static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!fd_file(f))
+ if (fd_empty(f))
return -EBADF;
- if (!inode_storage_ptr(file_inode(fd_file(f)))) {
- fdput(f);
+ if (!inode_storage_ptr(file_inode(fd_file(f))))
return -EBADF;
- }
sdata = bpf_local_storage_update(file_inode(fd_file(f)),
(struct bpf_local_storage_map *)map,
value, map_flags, GFP_ATOMIC);
- fdput(f);
return PTR_ERR_OR_ZERO(sdata);
}
@@ -123,15 +119,11 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
{
- struct fd f = fdget_raw(*(int *)key);
- int err;
+ CLASS(fd_raw, f)(*(int *)key);
- if (!fd_file(f))
+ if (fd_empty(f))
return -EBADF;
-
- err = inode_storage_delete(file_inode(fd_file(f)), map);
- fdput(f);
- return err;
+ return inode_storage_delete(file_inode(fd_file(f)), map);
}
/* *gfp_flags* is a hidden argument provided by the verifier */
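The fdget()/fdput() pairs above are converted to the scope-based CLASS(fd, ...) guard; a minimal sketch of the pattern these conversions rely on (illustrative, and do_something() is a placeholder):

static int example_with_fd(int ufd)
{
	CLASS(fd_raw, f)(ufd);	/* fdput() runs automatically at scope exit */

	if (fd_empty(f))
		return -EBADF;	/* early returns need no explicit cleanup */
	return do_something(file_inode(fd_file(f)));
}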
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 112581cf97e7..106735145948 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -283,7 +283,6 @@ static int iter_release(struct inode *inode, struct file *file)
const struct file_operations bpf_iter_fops = {
.open = iter_open,
- .llseek = no_llseek,
.read = bpf_seq_read,
.release = iter_release,
};
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 83bbf935c562..75e4fe83c509 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -7711,21 +7711,16 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
struct btf *btf_get_by_fd(int fd)
{
struct btf *btf;
- struct fd f;
+ CLASS(fd, f)(fd);
- f = fdget(fd);
-
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &btf_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &btf_fops)
return ERR_PTR(-EINVAL);
- }
btf = fd_file(f)->private_data;
refcount_inc(&btf->refcnt);
- fdput(f);
return btf;
}
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index b4f18c85d7bc..645bd30bc9a9 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -11,24 +11,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
u32 inner_map_meta_size;
- struct fd f;
- int ret;
+ CLASS(fd, f)(inner_map_ufd);
- f = fdget(inner_map_ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
/* Does not support >1 level map-in-map */
- if (inner_map->inner_map_meta) {
- ret = -EINVAL;
- goto put;
- }
+ if (inner_map->inner_map_meta)
+ return ERR_PTR(-EINVAL);
- if (!inner_map->ops->map_meta_equal) {
- ret = -ENOTSUPP;
- goto put;
- }
+ if (!inner_map->ops->map_meta_equal)
+ return ERR_PTR(-ENOTSUPP);
inner_map_meta_size = sizeof(*inner_map_meta);
/* In some cases verifier needs to access beyond just base map. */
@@ -36,10 +30,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta_size = sizeof(struct bpf_array);
inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
- if (!inner_map_meta) {
- ret = -ENOMEM;
- goto put;
- }
+ if (!inner_map_meta)
+ return ERR_PTR(-ENOMEM);
inner_map_meta->map_type = inner_map->map_type;
inner_map_meta->key_size = inner_map->key_size;
@@ -53,8 +45,9 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* invalid/empty/valid, but ERR_PTR in case of errors. During
* equality NULL or IS_ERR is equivalent.
*/
- ret = PTR_ERR(inner_map_meta->record);
- goto free;
+ struct bpf_map *ret = ERR_CAST(inner_map_meta->record);
+ kfree(inner_map_meta);
+ return ret;
}
/* Note: We must use the same BTF, as we also used btf_record_dup above
* which relies on BTF being same for both maps, as some members like
@@ -77,14 +70,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_array_meta->elem_size = inner_array->elem_size;
inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
}
-
- fdput(f);
return inner_map_meta;
-free:
- kfree(inner_map_meta);
-put:
- fdput(f);
- return ERR_PTR(ret);
}
void bpf_map_meta_free(struct bpf_map *map_meta)
@@ -110,9 +96,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
int ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
- struct fd f;
+ CLASS(fd, f)(ufd);
- f = fdget(ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
@@ -123,7 +108,6 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
else
inner_map = ERR_PTR(-EINVAL);
- fdput(f);
return inner_map;
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8386f25bc532..a8f1808a1ca5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1425,21 +1425,6 @@ put_token:
return err;
}
-/* if error is returned, fd is released.
- * On success caller should complete fd access with matching fdput()
- */
-struct bpf_map *__bpf_map_get(struct fd f)
-{
- if (!fd_file(f))
- return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &bpf_map_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return fd_file(f)->private_data;
-}
-
void bpf_map_inc(struct bpf_map *map)
{
atomic64_inc(&map->refcnt);
@@ -1455,15 +1440,11 @@ EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
struct bpf_map *bpf_map_get(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
-
- bpf_map_inc(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc(map);
return map;
}
@@ -1471,15 +1452,11 @@ EXPORT_SYMBOL(bpf_map_get);
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
-
- bpf_map_inc_with_uref(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc_with_uref(map);
return map;
}
@@ -1544,11 +1521,9 @@ static int map_lookup_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -1557,26 +1532,20 @@ static int map_lookup_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if ((attr->flags & BPF_F_LOCK) &&
- !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
- err = -EINVAL;
- goto err_put;
- }
+ !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
value_size = bpf_map_value_size(map);
@@ -1607,8 +1576,6 @@ free_value:
kvfree(value);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -1619,17 +1586,15 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1667,7 +1632,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1676,16 +1640,14 @@ err_put:
static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
- struct fd f;
void *key;
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1722,7 +1684,6 @@ out:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1733,30 +1694,24 @@ static int map_get_next_key(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *unext_key = u64_to_user_ptr(attr->next_key);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *next_key;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if (ukey) {
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
} else {
key = NULL;
}
@@ -1788,8 +1743,6 @@ free_next_key:
kvfree(next_key);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -2018,11 +1971,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
@@ -2031,7 +1982,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -2101,7 +2052,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -2109,27 +2059,22 @@ err_put:
static int map_freeze(const union bpf_attr *attr)
{
- int err = 0, ufd = attr->map_fd;
+ int err = 0;
struct bpf_map *map;
- struct fd f;
if (CHECK_ATTR(BPF_MAP_FREEZE))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
- fdput(f);
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
return -ENOTSUPP;
- }
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
- fdput(f);
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
return -EPERM;
- }
mutex_lock(&map->freeze_mutex);
if (bpf_map_write_active(map)) {
@@ -2144,7 +2089,6 @@ static int map_freeze(const union bpf_attr *attr)
WRITE_ONCE(map->frozen, true);
err_put:
mutex_unlock(&map->freeze_mutex);
- fdput(f);
return err;
}
@@ -2414,18 +2358,6 @@ int bpf_prog_new_fd(struct bpf_prog *prog)
O_RDWR | O_CLOEXEC);
}
-static struct bpf_prog *____bpf_prog_get(struct fd f)
-{
- if (!fd_file(f))
- return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &bpf_prog_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return fd_file(f)->private_data;
-}
-
void bpf_prog_add(struct bpf_prog *prog, int i)
{
atomic64_add(i, &prog->aux->refcnt);
@@ -2481,20 +2413,19 @@ bool bpf_prog_get_ok(struct bpf_prog *prog,
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
bool attach_drv)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_prog *prog;
- prog = ____bpf_prog_get(f);
- if (IS_ERR(prog))
- return prog;
- if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
- prog = ERR_PTR(-EINVAL);
- goto out;
- }
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (fd_file(f)->f_op != &bpf_prog_fops)
+ return ERR_PTR(-EINVAL);
+
+ prog = fd_file(f)->private_data;
+ if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
+ return ERR_PTR(-EINVAL);
bpf_prog_inc(prog);
-out:
- fdput(f);
return prog;
}
@@ -3263,20 +3194,16 @@ int bpf_link_new_fd(struct bpf_link *link)
struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_link *link;
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
return ERR_PTR(-EINVAL);
- }
link = fd_file(f)->private_data;
bpf_link_inc(link);
- fdput(f);
-
return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);
@@ -4981,33 +4908,25 @@ static int bpf_link_get_info_by_fd(struct file *file,
static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- int ufd = attr->info.bpf_fd;
- struct fd f;
- int err;
-
if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
return -EINVAL;
- f = fdget(ufd);
- if (!fd_file(f))
+ CLASS(fd, f)(attr->info.bpf_fd);
+ if (fd_empty(f))
return -EBADFD;
if (fd_file(f)->f_op == &bpf_prog_fops)
- err = bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
+ return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
else if (fd_file(f)->f_op == &bpf_map_fops)
- err = bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
+ return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
else if (fd_file(f)->f_op == &btf_fops)
- err = bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
+ return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
- err = bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
+ return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
attr, uattr);
- else
- err = -EINVAL;
-
- fdput(f);
- return err;
+ return -EINVAL;
}
#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
@@ -5195,14 +5114,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
struct bpf_map *map;
- int err, ufd;
- struct fd f;
+ int err;
if (CHECK_ATTR(BPF_MAP_BATCH))
return -EINVAL;
- ufd = attr->batch.map_fd;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->batch.map_fd);
+
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -5230,7 +5148,6 @@ err_put:
maybe_wait_bpf_programs(map);
bpf_map_write_active_dec(map);
}
- fdput(f);
return err;
}
diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c
index 9a1d356e79ed..dcbec1a0dfb3 100644
--- a/kernel/bpf/token.c
+++ b/kernel/bpf/token.c
@@ -116,67 +116,52 @@ int bpf_token_create(union bpf_attr *attr)
struct user_namespace *userns;
struct inode *inode;
struct file *file;
+ CLASS(fd, f)(attr->token_create.bpffs_fd);
struct path path;
- struct fd f;
+ struct super_block *sb;
umode_t mode;
int err, fd;
- f = fdget(attr->token_create.bpffs_fd);
- if (!fd_file(f))
+ if (fd_empty(f))
return -EBADF;
path = fd_file(f)->f_path;
- path_get(&path);
- fdput(f);
+ sb = path.dentry->d_sb;
- if (path.dentry != path.mnt->mnt_sb->s_root) {
- err = -EINVAL;
- goto out_path;
- }
- if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
- err = -EINVAL;
- goto out_path;
- }
+ if (path.dentry != sb->s_root)
+ return -EINVAL;
+ if (sb->s_op != &bpf_super_ops)
+ return -EINVAL;
err = path_permission(&path, MAY_ACCESS);
if (err)
- goto out_path;
+ return err;
- userns = path.dentry->d_sb->s_user_ns;
+ userns = sb->s_user_ns;
/*
* Enforce that creators of BPF tokens are in the same user
* namespace as the BPF FS instance. This makes reasoning about
* permissions a lot easier and we can always relax this later.
*/
- if (current_user_ns() != userns) {
- err = -EPERM;
- goto out_path;
- }
- if (!ns_capable(userns, CAP_BPF)) {
- err = -EPERM;
- goto out_path;
- }
+ if (current_user_ns() != userns)
+ return -EPERM;
+ if (!ns_capable(userns, CAP_BPF))
+ return -EPERM;
/* Creating BPF token in init_user_ns doesn't make much sense. */
- if (current_user_ns() == &init_user_ns) {
- err = -EOPNOTSUPP;
- goto out_path;
- }
+ if (current_user_ns() == &init_user_ns)
+ return -EOPNOTSUPP;
- mnt_opts = path.dentry->d_sb->s_fs_info;
+ mnt_opts = sb->s_fs_info;
if (mnt_opts->delegate_cmds == 0 &&
mnt_opts->delegate_maps == 0 &&
mnt_opts->delegate_progs == 0 &&
- mnt_opts->delegate_attachs == 0) {
- err = -ENOENT; /* no BPF token delegation is set up */
- goto out_path;
- }
+ mnt_opts->delegate_attachs == 0)
+ return -ENOENT; /* no BPF token delegation is set up */
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out_path;
- }
+ inode = bpf_get_inode(sb, NULL, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
inode->i_op = &bpf_token_iops;
inode->i_fop = &bpf_token_fops;
@@ -185,8 +170,7 @@ int bpf_token_create(union bpf_attr *attr)
file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
if (IS_ERR(file)) {
iput(inode);
- err = PTR_ERR(file);
- goto out_path;
+ return PTR_ERR(file);
}
token = kzalloc(sizeof(*token), GFP_USER);
@@ -218,33 +202,27 @@ int bpf_token_create(union bpf_attr *attr)
file->private_data = token;
fd_install(fd, file);
- path_put(&path);
return fd;
out_token:
bpf_token_free(token);
out_file:
fput(file);
-out_path:
- path_put(&path);
return err;
}
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_token *token;
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (fd_file(f)->f_op != &bpf_token_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_token_fops)
return ERR_PTR(-EINVAL);
- }
token = fd_file(f)->private_data;
bpf_token_inc(token);
- fdput(f);
return token;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dd86282ccaa4..9a7ed527e47e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18920,6 +18920,53 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
+/* Add map behind fd to used maps list, if it's not already there, and return
+ * its index. Also set *reused to true if this map was already in the list of
+ * used maps.
+ * Returns <0 on error, or the >= 0 index on success.
+ */
+static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
+{
+ CLASS(fd, f)(fd);
+ struct bpf_map *map;
+ int i;
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map)) {
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ return PTR_ERR(map);
+ }
+
+ /* check whether we recorded this map already */
+ for (i = 0; i < env->used_map_cnt; i++) {
+ if (env->used_maps[i] == map) {
+ *reused = true;
+ return i;
+ }
+ }
+
+ if (env->used_map_cnt >= MAX_USED_MAPS) {
+ verbose(env, "The total number of maps per program has reached the limit of %u\n",
+ MAX_USED_MAPS);
+ return -E2BIG;
+ }
+
+ if (env->prog->sleepable)
+ atomic64_inc(&map->sleepable_refcnt);
+
+ /* hold the map. If the program is rejected by verifier,
+ * the map will be released by release_maps() or it
+ * will be used by the valid program until it's unloaded
+ * and all maps are released in bpf_free_used_maps()
+ */
+ bpf_map_inc(map);
+
+ *reused = false;
+ env->used_maps[env->used_map_cnt++] = map;
+
+ return env->used_map_cnt - 1;
+}
+
/* find and rewrite pseudo imm in ld_imm64 instructions:
*
* 1. if it accesses map FD, replace it with actual map pointer.
@@ -18931,7 +18978,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
- int i, j, err;
+ int i, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
@@ -18948,9 +18995,10 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_insn_aux_data *aux;
struct bpf_map *map;
- struct fd f;
+ int map_idx;
u64 addr;
u32 fd;
+ bool reused;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -19011,20 +19059,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
break;
}
- f = fdget(fd);
- map = __bpf_map_get(f);
- if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
- return PTR_ERR(map);
- }
+ map_idx = add_used_map_from_fd(env, fd, &reused);
+ if (map_idx < 0)
+ return map_idx;
+ map = env->used_maps[map_idx];
+
+ aux = &env->insn_aux_data[i];
+ aux->map_index = map_idx;
err = check_map_prog_compatibility(env, map, env->prog);
- if (err) {
- fdput(f);
+ if (err)
return err;
- }
- aux = &env->insn_aux_data[i];
if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
@@ -19033,13 +19079,11 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (off >= BPF_MAX_VAR_OFF) {
verbose(env, "direct value offset of %u is not allowed\n", off);
- fdput(f);
return -EINVAL;
}
if (!map->ops->map_direct_value_addr) {
verbose(env, "no direct value access support for this map type\n");
- fdput(f);
return -EINVAL;
}
@@ -19047,7 +19091,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (err) {
verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
map->value_size, off);
- fdput(f);
return err;
}
@@ -19058,70 +19101,39 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
- /* check whether we recorded this map already */
- for (j = 0; j < env->used_map_cnt; j++) {
- if (env->used_maps[j] == map) {
- aux->map_index = j;
- fdput(f);
- goto next_insn;
- }
- }
-
- if (env->used_map_cnt >= MAX_USED_MAPS) {
- verbose(env, "The total number of maps per program has reached the limit of %u\n",
- MAX_USED_MAPS);
- fdput(f);
- return -E2BIG;
- }
-
- if (env->prog->sleepable)
- atomic64_inc(&map->sleepable_refcnt);
- /* hold the map. If the program is rejected by verifier,
- * the map will be released by release_maps() or it
- * will be used by the valid program until it's unloaded
- * and all maps are released in bpf_free_used_maps()
- */
- bpf_map_inc(map);
-
- aux->map_index = env->used_map_cnt;
- env->used_maps[env->used_map_cnt++] = map;
+ /* proceed with extra checks only if it's a newly added used map */
+ if (reused)
+ goto next_insn;
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
- fdput(f);
return -EBUSY;
}
if (map->map_type == BPF_MAP_TYPE_ARENA) {
if (env->prog->aux->arena) {
verbose(env, "Only one arena per program\n");
- fdput(f);
return -EBUSY;
}
if (!env->allow_ptr_leaks || !env->bpf_capable) {
verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
- fdput(f);
return -EPERM;
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
- fdput(f);
return -EINVAL;
}
}
- fdput(f);
next_insn:
insn++;
i++;
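For context on the pattern this verifier hunk switches to: CLASS(fd, ...) from <linux/cleanup.h>/<linux/file.h> ties fdget()/fdput() to scope, which is why the explicit fdput() calls above can be dropped from every early return. A minimal, illustrative sketch only (not part of the patch; fd_empty() and fd_file() are assumed to be the accessors available alongside the fd class in this tree):

static int example_with_scoped_fd(int fd)
{
	CLASS(fd, f)(fd);	/* fdget(fd) here, fdput() at every scope exit */

	if (fd_empty(f))	/* assumed helper: no struct file behind fd */
		return -EBADF;	/* no explicit fdput() needed on this path */

	/* ... use fd_file(f) ... */
	return 0;		/* fdput() runs automatically */
}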
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5a8071c45c80..e3589c4287cb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6821,7 +6821,6 @@ static int perf_fasync(int fd, struct file *filp, int on)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2ec796e2f055..4b52cb2ae6d6 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1545,7 +1545,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (!area->bitmap)
goto free_area;
- area->page = alloc_page(GFP_HIGHUSER);
+ area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
if (!area->page)
goto free_bitmap;
diff --git a/kernel/fork.c b/kernel/fork.c
index cbdaca45d0c1..60c0b4868fd4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -999,7 +999,7 @@ void __init __weak arch_task_cache_init(void) { }
static void __init set_max_threads(unsigned int max_threads_suggested)
{
u64 threads;
- unsigned long nr_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long nr_pages = memblock_estimated_nr_free_pages();
/*
* The number of threads shall be limited such that the thread
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6dc76b590703..93a822d3c468 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -168,7 +168,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
jump_label_update(key);
/*
* Ensure that when static_key_fast_inc_not_disabled() or
- * static_key_slow_try_dec() observe the positive value,
+ * static_key_dec_not_one() observe the positive value,
* they must also observe all the text changes.
*/
atomic_set_release(&key->enabled, 1);
@@ -250,7 +250,7 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-static bool static_key_slow_try_dec(struct static_key *key)
+static bool static_key_dec_not_one(struct static_key *key)
{
int v;
@@ -274,6 +274,14 @@ static bool static_key_slow_try_dec(struct static_key *key)
* enabled. This suggests an ordering problem on the user side.
*/
WARN_ON_ONCE(v < 0);
+
+ /*
+ * Warn about underflow, and lie about success in an attempt to
+ * not make things worse.
+ */
+ if (WARN_ON_ONCE(v == 0))
+ return true;
+
if (v <= 1)
return false;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
@@ -284,15 +292,27 @@ static bool static_key_slow_try_dec(struct static_key *key)
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
lockdep_assert_cpus_held();
+ int val;
- if (static_key_slow_try_dec(key))
+ if (static_key_dec_not_one(key))
return;
guard(mutex)(&jump_label_mutex);
- if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ val = atomic_read(&key->enabled);
+ /*
+ * It should be impossible to observe -1 with jump_label_mutex held,
+ * see static_key_slow_inc_cpuslocked().
+ */
+ if (WARN_ON_ONCE(val == -1))
+ return;
+ /*
+ * Cannot already be 0, something went sideways.
+ */
+ if (WARN_ON_ONCE(val == 0))
+ return;
+
+ if (atomic_dec_and_test(&key->enabled))
jump_label_update(key);
- else
- WARN_ON_ONCE(!static_key_slow_try_dec(key));
}
static void __static_key_slow_dec(struct static_key *key)
@@ -329,7 +349,7 @@ void __static_key_slow_dec_deferred(struct static_key *key,
{
STATIC_KEY_CHECK_USE(key);
- if (static_key_slow_try_dec(key))
+ if (static_key_dec_not_one(key))
return;
schedule_delayed_work(work, timeout);
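For readers less familiar with static keys, the enabled count that static_key_dec_not_one() and the new WARNs guard is driven by inc/dec pairs such as the following minimal sketch (illustrative only; example_key and the callers are made up, the API is the standard <linux/jump_label.h> one):

static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_fast_path(void)
{
	/* compiles to a patched nop/jump rather than a load and test */
	if (static_branch_unlikely(&example_key))
		pr_debug("slow path enabled\n");
}

static void example_set_slow_path(bool on)
{
	if (on)
		static_branch_inc(&example_key);	/* 0 -> 1 patches text under jump_label_mutex */
	else
		static_branch_dec(&example_key);	/* an extra dec at 0 is the underflow the WARN catches */
}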
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7963deac33c3..536bd471557f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -788,7 +788,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
else
printk("%d lock%s held by %s/%d:\n", depth,
- depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+ str_plural(depth), p->comm, task_pid_nr(p));
/*
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
@@ -2084,6 +2084,9 @@ static noinline void print_bfs_bug(int ret)
/*
* Breadth-first-search failed, graph got corrupted?
*/
+ if (ret == BFS_EQUEUEFULL)
+ pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
WARN(1, "lockdep bfs error:%d\n", ret);
}
@@ -6263,25 +6266,27 @@ static struct pending_free *get_pending_free(void)
static void free_zapped_rcu(struct rcu_head *cb);
/*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback; must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
{
WARN_ON_ONCE(inside_selftest());
if (list_empty(&pf->zapped))
- return;
+ return false;
if (delayed_free.scheduled)
- return;
+ return false;
delayed_free.scheduled = true;
WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
delayed_free.index ^= 1;
- call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ return true;
}
/* The caller must hold the graph lock. May be called from RCU context. */
@@ -6307,6 +6312,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
{
struct pending_free *pf;
unsigned long flags;
+ bool need_callback;
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
return;
@@ -6318,14 +6324,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
pf = delayed_free.pf + (delayed_free.index ^ 1);
__free_zapped_classes(pf);
delayed_free.scheduled = false;
+ need_callback =
+ prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
/*
- * If there's anything on the open list, close and start a new callback.
- */
- call_rcu_zapped(delayed_free.pf + delayed_free.index);
+ * If there's a pending free whose callback has not been scheduled yet,
+ * queue an RCU callback.
+ */
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
- lockdep_unlock();
- raw_local_irq_restore(flags);
}
/*
@@ -6365,6 +6375,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
struct pending_free *pf;
unsigned long flags;
+ bool need_callback;
init_data_structures_once();
@@ -6372,10 +6383,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
lockdep_unlock();
raw_local_irq_restore(flags);
-
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
/*
* Wait for any possible iterators from look_up_lock_class() to pass
* before continuing to free the memory they refer to.
@@ -6469,6 +6481,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
struct pending_free *pf;
unsigned long flags;
int locked;
+ bool need_callback = false;
raw_local_irq_save(flags);
locked = graph_lock();
@@ -6477,11 +6490,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
pf = get_pending_free();
__lockdep_reset_lock(pf, lock);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
graph_unlock();
out_irq:
raw_local_irq_restore(flags);
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}
/*
@@ -6525,6 +6540,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
struct pending_free *pf;
unsigned long flags;
bool found = false;
+ bool need_callback = false;
might_sleep();
@@ -6545,11 +6561,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
if (found) {
pf = get_pending_free();
__lockdep_free_key_range(pf, key, 1);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
}
lockdep_unlock();
raw_local_irq_restore(flags);
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
synchronize_rcu();
}
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index e2bfb1db589d..6db0f43fc4df 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -424,7 +424,7 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
for (i = 0; i < offset; i++)
seq_puts(m, " ");
for (i = 0; i < length; i++)
- seq_printf(m, "%c", c);
+ seq_putc(m, c);
seq_puts(m, "\n");
}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 5ded7dff46ef..2bbb6eca5144 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -181,12 +181,21 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
__rwsem_set_reader_owned(sem, current);
}
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * Return just the real task structure pointer of the owner
+ */
+static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
+{
+ return (struct task_struct *)
+ (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
+}
+
/*
* Return true if the rwsem is owned by a reader.
*/
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
-#ifdef CONFIG_DEBUG_RWSEMS
/*
* Check the count to see if it is write-locked.
*/
@@ -194,11 +203,9 @@ static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
if (count & RWSEM_WRITER_MASK)
return false;
-#endif
return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}
-#ifdef CONFIG_DEBUG_RWSEMS
/*
* With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
* is a task pointer in owner of a reader-owned rwsem, it will be the
@@ -266,15 +273,6 @@ static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
}
/*
- * Return just the real task structure pointer of the owner
- */
-static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
-{
- return (struct task_struct *)
- (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
-}
-
-/*
* Return the real task structure pointer of the owner and the embedded
* flags in the owner. pflags must be non-NULL.
*/
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 4047b6d48255..7c6588148d42 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -160,6 +160,7 @@ config MODULE_UNLOAD_TAINT_TRACKING
config MODVERSIONS
bool "Module versioning support"
+ depends on !COMPILE_TEST
help
Usually, you have to use modules compiled with your kernel.
Saying Y here makes it sometimes possible to use modules
@@ -228,7 +229,7 @@ comment "Do not forget to sign required modules with scripts/sign-file"
depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL
choice
- prompt "Which hash algorithm should modules be signed with?"
+ prompt "Hash algorithm to sign modules"
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
help
This determines which sort of hashing algorithm will be used during
@@ -238,31 +239,31 @@ choice
the signature on that module.
config MODULE_SIG_SHA1
- bool "Sign modules with SHA-1"
+ bool "SHA-1"
select CRYPTO_SHA1
config MODULE_SIG_SHA256
- bool "Sign modules with SHA-256"
+ bool "SHA-256"
select CRYPTO_SHA256
config MODULE_SIG_SHA384
- bool "Sign modules with SHA-384"
+ bool "SHA-384"
select CRYPTO_SHA512
config MODULE_SIG_SHA512
- bool "Sign modules with SHA-512"
+ bool "SHA-512"
select CRYPTO_SHA512
config MODULE_SIG_SHA3_256
- bool "Sign modules with SHA3-256"
+ bool "SHA3-256"
select CRYPTO_SHA3
config MODULE_SIG_SHA3_384
- bool "Sign modules with SHA3-384"
+ bool "SHA3-384"
select CRYPTO_SHA3
config MODULE_SIG_SHA3_512
- bool "Sign modules with SHA3-512"
+ bool "SHA3-512"
select CRYPTO_SHA3
endchoice
@@ -278,64 +279,65 @@ config MODULE_SIG_HASH
default "sha3-384" if MODULE_SIG_SHA3_384
default "sha3-512" if MODULE_SIG_SHA3_512
-choice
- prompt "Module compression mode"
+config MODULE_COMPRESS
+ bool "Module compression"
help
- This option allows you to choose the algorithm which will be used to
- compress modules when 'make modules_install' is run. (or, you can
- choose to not compress modules at all.)
-
- External modules will also be compressed in the same way during the
- installation.
-
- For modules inside an initrd or initramfs, it's more efficient to
- compress the whole initrd or initramfs instead.
-
+ Enable module compression to reduce the on-disk size of module binaries.
This is fully compatible with signed modules.
- Please note that the tool used to load modules needs to support the
- corresponding algorithm. module-init-tools MAY support gzip, and kmod
- MAY support gzip, xz and zstd.
+ The tool used to work with modules needs to support the selected
+ compression type. kmod MAY support gzip, xz and zstd. Other tools
+ might have a limited selection of the supported types.
- Your build system needs to provide the appropriate compression tool
- to compress the modules.
+ Note that for modules inside an initrd or initramfs, it's more
+ efficient to compress the whole ramdisk instead.
- If in doubt, select 'None'.
+ If unsure, say N.
-config MODULE_COMPRESS_NONE
- bool "None"
+choice
+ prompt "Module compression type"
+ depends on MODULE_COMPRESS
help
- Do not compress modules. The installed modules are suffixed
- with .ko.
+ Choose the algorithm to be used for module compression.
config MODULE_COMPRESS_GZIP
bool "GZIP"
help
- Compress modules with GZIP. The installed modules are suffixed
- with .ko.gz.
+ Support modules compressed with GZIP. The installed modules are
+ suffixed with .ko.gz.
config MODULE_COMPRESS_XZ
bool "XZ"
help
- Compress modules with XZ. The installed modules are suffixed
- with .ko.xz.
+ Support modules compressed with XZ. The installed modules are
+ suffixed with .ko.xz.
config MODULE_COMPRESS_ZSTD
bool "ZSTD"
help
- Compress modules with ZSTD. The installed modules are suffixed
- with .ko.zst.
+ Support modules compressed with ZSTD. The installed modules are
+ suffixed with .ko.zst.
endchoice
+config MODULE_COMPRESS_ALL
+ bool "Automatically compress all modules"
+ default y
+ depends on MODULE_COMPRESS
+ help
+ Compress all modules during 'make modules_install'.
+
+ Your build system needs to provide the appropriate compression tool
+ for the selected compression type. External modules will also be
+ compressed in the same way during the installation.
+
config MODULE_DECOMPRESS
bool "Support in-kernel module decompression"
- depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ || MODULE_COMPRESS_ZSTD
+ depends on MODULE_COMPRESS
select ZLIB_INFLATE if MODULE_COMPRESS_GZIP
select XZ_DEC if MODULE_COMPRESS_XZ
select ZSTD_DECOMPRESS if MODULE_COMPRESS_ZSTD
help
-
Support for decompressing kernel modules by the kernel itself
instead of relying on userspace to perform this task. Useful when
load pinning security policy is enabled.
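Taken together, the restructured options above compose as in this purely illustrative .config fragment (compress every module with ZSTD at 'make modules_install' time and let the kernel decompress them at load time):

CONFIG_MODULE_COMPRESS=y
CONFIG_MODULE_COMPRESS_ZSTD=y
CONFIG_MODULE_COMPRESS_ALL=y
CONFIG_MODULE_DECOMPRESS=y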
diff --git a/kernel/module/debug_kmemleak.c b/kernel/module/debug_kmemleak.c
index 12a569d361e8..b4cc03842d70 100644
--- a/kernel/module/debug_kmemleak.c
+++ b/kernel/module/debug_kmemleak.c
@@ -12,19 +12,9 @@
void kmemleak_load_module(const struct module *mod,
const struct load_info *info)
{
- unsigned int i;
-
- /* only scan the sections containing data */
- kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
-
- for (i = 1; i < info->hdr->e_shnum; i++) {
- /* Scan all writable sections that's not executable */
- if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
- !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
- (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
- continue;
-
- kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
- info->sechdrs[i].sh_size, GFP_KERNEL);
+ /* only scan writable, non-executable sections */
+ for_each_mod_mem_type(type) {
+ if (type != MOD_DATA && type != MOD_INIT_DATA)
+ kmemleak_no_scan(mod->mem[type].base);
}
}
diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c
index 26efe1305c12..456358e1fdc4 100644
--- a/kernel/module/sysfs.c
+++ b/kernel/module/sysfs.c
@@ -69,12 +69,13 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
kfree(sect_attrs);
}
-static void add_sect_attrs(struct module *mod, const struct load_info *info)
+static int add_sect_attrs(struct module *mod, const struct load_info *info)
{
unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
struct bin_attribute **gattr;
+ int ret;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
@@ -85,7 +86,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
if (!sect_attrs)
- return;
+ return -ENOMEM;
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
@@ -103,8 +104,10 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
sattr->address = sec->sh_addr;
sattr->battr.attr.name =
kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
- if (!sattr->battr.attr.name)
+ if (!sattr->battr.attr.name) {
+ ret = -ENOMEM;
goto out;
+ }
sect_attrs->nsections++;
sattr->battr.read = module_sect_read;
sattr->battr.size = MODULE_SECT_READ_SIZE;
@@ -113,13 +116,15 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
}
*gattr = NULL;
- if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
+ ret = sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp);
+ if (ret)
goto out;
mod->sect_attrs = sect_attrs;
- return;
+ return 0;
out:
free_sect_attrs(sect_attrs);
+ return ret;
}
static void remove_sect_attrs(struct module *mod)
@@ -158,15 +163,12 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
kfree(notes_attrs);
}
-static void add_notes_attrs(struct module *mod, const struct load_info *info)
+static int add_notes_attrs(struct module *mod, const struct load_info *info)
{
unsigned int notes, loaded, i;
struct module_notes_attrs *notes_attrs;
struct bin_attribute *nattr;
-
- /* failed to create section attributes, so can't create notes */
- if (!mod->sect_attrs)
- return;
+ int ret;
/* Count notes sections and allocate structures. */
notes = 0;
@@ -176,12 +178,12 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
++notes;
if (notes == 0)
- return;
+ return 0;
notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
GFP_KERNEL);
if (!notes_attrs)
- return;
+ return -ENOMEM;
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
@@ -201,19 +203,23 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
}
notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
- if (!notes_attrs->dir)
+ if (!notes_attrs->dir) {
+ ret = -ENOMEM;
goto out;
+ }
- for (i = 0; i < notes; ++i)
- if (sysfs_create_bin_file(notes_attrs->dir,
- &notes_attrs->attrs[i]))
+ for (i = 0; i < notes; ++i) {
+ ret = sysfs_create_bin_file(notes_attrs->dir, &notes_attrs->attrs[i]);
+ if (ret)
goto out;
+ }
mod->notes_attrs = notes_attrs;
- return;
+ return 0;
out:
free_notes_attrs(notes_attrs, i);
+ return ret;
}
static void remove_notes_attrs(struct module *mod)
@@ -223,9 +229,15 @@ static void remove_notes_attrs(struct module *mod)
}
#else /* !CONFIG_KALLSYMS */
-static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { }
+static inline int add_sect_attrs(struct module *mod, const struct load_info *info)
+{
+ return 0;
+}
static inline void remove_sect_attrs(struct module *mod) { }
-static inline void add_notes_attrs(struct module *mod, const struct load_info *info) { }
+static inline int add_notes_attrs(struct module *mod, const struct load_info *info)
+{
+ return 0;
+}
static inline void remove_notes_attrs(struct module *mod) { }
#endif /* CONFIG_KALLSYMS */
@@ -385,11 +397,20 @@ int mod_sysfs_setup(struct module *mod,
if (err)
goto out_unreg_modinfo_attrs;
- add_sect_attrs(mod, info);
- add_notes_attrs(mod, info);
+ err = add_sect_attrs(mod, info);
+ if (err)
+ goto out_del_usage_links;
+
+ err = add_notes_attrs(mod, info);
+ if (err)
+ goto out_unreg_sect_attrs;
return 0;
+out_unreg_sect_attrs:
+ remove_sect_attrs(mod);
+out_del_usage_links:
+ del_usage_links(mod);
out_unreg_modinfo_attrs:
module_remove_modinfo_attrs(mod, -1);
out_unreg_param:
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 3aa41ba22129..3f9e3efb9f6e 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -447,7 +447,6 @@ static const struct file_operations snapshot_fops = {
.release = snapshot_release,
.read = snapshot_read,
.write = snapshot_write,
- .llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = snapshot_compat_ioctl,
diff --git a/kernel/relay.c b/kernel/relay.c
index a8e90e98bf2c..a8ae436dc77e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1079,7 +1079,6 @@ const struct file_operations relay_file_operations = {
.poll = relay_file_poll,
.mmap = relay_file_mmap,
.read = relay_file_read,
- .llseek = no_llseek,
.release = relay_file_release,
};
EXPORT_SYMBOL_GPL(relay_file_operations);
diff --git a/kernel/signal.c b/kernel/signal.c
index 6e57036f947f..4344860ffcac 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2888,8 +2888,6 @@ relock:
current->flags |= PF_SIGNALED;
if (sig_kernel_coredump(signr)) {
- int ret;
-
if (print_fatal_signals)
print_fatal_signal(signr);
proc_coredump_connector(current);
@@ -2901,24 +2899,7 @@ relock:
* first and our do_group_exit call below will use
* that value and ignore the one we pass it.
*/
- ret = do_coredump(&ksig->info);
- if (ret)
- coredump_report_failure("coredump has not been created, error %d",
- ret);
- else if (!IS_ENABLED(CONFIG_COREDUMP)) {
- /*
- * Coredumps are not available, can't fail collecting
- * the coredump.
- *
- * Leave a note though that the coredump is going to be
- * not created. This is not an error or a warning as disabling
- * support in the kernel for coredumps isn't commonplace, and
- * the user must've built the kernel with the custom config so
- * let them know all works as desired.
- */
- coredump_report("no coredump collected as "
- "that is disabled in the kernel configuration");
- }
+ do_coredump(&ksig->info);
}
/*
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
index 639397b5491c..5259cda486d0 100644
--- a/kernel/static_call_inline.c
+++ b/kernel/static_call_inline.c
@@ -411,6 +411,17 @@ static void static_call_del_module(struct module *mod)
for (site = start; site < stop; site++) {
key = static_call_key(site);
+
+ /*
+ * If the key was not updated due to a memory allocation
+ * failure in __static_call_init() then treating key::sites
+ * as key::mods in the code below would cause random memory
+ * access and #GP. In that case all subsequent sites have
+ * not been touched either, so stop iterating.
+ */
+ if (!static_call_key_has_mods(key))
+ break;
+
if (key == prev_key)
continue;
@@ -442,7 +453,7 @@ static int static_call_module_notify(struct notifier_block *nb,
case MODULE_STATE_COMING:
ret = static_call_add_module(mod);
if (ret) {
- WARN(1, "Failed to allocate memory for static calls");
+ pr_warn("Failed to allocate memory for static calls\n");
static_call_del_module(mod);
}
break;
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 4782edcbe7b9..c2f3d0c490d5 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -168,7 +168,6 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
static const struct file_operations posix_clock_file_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = posix_clock_read,
.poll = posix_clock_poll,
.unlocked_ioctl = posix_clock_ioctl,
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index df0745a42a3f..dc819aec43e8 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -306,7 +306,6 @@ static ssize_t monitor_enable_write_data(struct file *filp, const char __user *u
static const struct file_operations interface_enable_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = monitor_enable_write_data,
.read = monitor_enable_read_data,
};
@@ -329,7 +328,6 @@ static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf,
static const struct file_operations interface_desc_fops = {
.open = simple_open,
- .llseek = no_llseek,
.read = monitor_desc_read_data,
};
@@ -674,7 +672,6 @@ static ssize_t monitoring_on_write_data(struct file *filp, const char __user *us
static const struct file_operations monitoring_on_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = monitoring_on_write_data,
.read = monitoring_on_read_data,
};
diff --git a/kernel/trace/rv/rv_reactors.c b/kernel/trace/rv/rv_reactors.c
index 6aae106695b6..7b49cbe388d4 100644
--- a/kernel/trace/rv/rv_reactors.c
+++ b/kernel/trace/rv/rv_reactors.c
@@ -426,7 +426,6 @@ static ssize_t reacting_on_write_data(struct file *filp, const char __user *user
static const struct file_operations reacting_on_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = reacting_on_write_data,
.read = reacting_on_read_data,
};
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b4f348b4653f..c01375adc471 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7557,7 +7557,6 @@ static const struct file_operations tracing_pipe_fops = {
.read = tracing_read_pipe,
.splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
- .llseek = no_llseek,
};
static const struct file_operations tracing_entries_fops = {
@@ -7636,7 +7635,6 @@ static const struct file_operations snapshot_raw_fops = {
.read = tracing_buffers_read,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
- .llseek = no_llseek,
};
#endif /* CONFIG_TRACER_SNAPSHOT */
@@ -8466,7 +8464,6 @@ static const struct file_operations tracing_buffers_fops = {
.flush = tracing_buffers_flush,
.splice_read = tracing_buffers_splice_read,
.unlocked_ioctl = tracing_buffers_ioctl,
- .llseek = no_llseek,
.mmap = tracing_buffers_mmap,
};
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index 62e6a8f4aae9..a079abd8955b 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -21,6 +21,7 @@
#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
+#define TRACEPOINT_STUB ERR_PTR(-ENOENT)
static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
@@ -385,6 +386,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
const char *event,
const char *symbol,
struct tracepoint *tpoint,
+ struct module *mod,
int maxactive,
int nargs, bool is_return)
{
@@ -405,6 +407,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
tf->fp.entry_handler = fentry_dispatcher;
tf->tpoint = tpoint;
+ tf->mod = mod;
tf->fp.nr_maxactive = maxactive;
ret = trace_probe_init(&tf->tp, event, group, false, nargs);
@@ -672,6 +675,24 @@ static int unregister_fprobe_event(struct trace_fprobe *tf)
return trace_probe_unregister_event_call(&tf->tp);
}
+static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
+{
+ struct tracepoint *tpoint = tf->tpoint;
+ unsigned long ip = (unsigned long)tpoint->probestub;
+ int ret;
+
+ /*
+ * Here, we take two steps to enable an fprobe on a tracepoint:
+ * first, put the __probestub_##TP function on the tracepoint,
+ * then put an fprobe on that stub function.
+ */
+ ret = tracepoint_probe_register_prio_may_exist(tpoint,
+ tpoint->probestub, NULL, 0);
+ if (ret < 0)
+ return ret;
+ return register_fprobe_ips(&tf->fp, &ip, 1);
+}
+
/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
@@ -698,18 +719,12 @@ static int __register_trace_fprobe(struct trace_fprobe *tf)
tf->fp.flags |= FPROBE_FL_DISABLED;
if (trace_fprobe_is_tracepoint(tf)) {
- struct tracepoint *tpoint = tf->tpoint;
- unsigned long ip = (unsigned long)tpoint->probestub;
- /*
- * Here, we do 2 steps to enable fprobe on a tracepoint.
- * At first, put __probestub_##TP function on the tracepoint
- * and put a fprobe on the stub function.
- */
- ret = tracepoint_probe_register_prio_may_exist(tpoint,
- tpoint->probestub, NULL, 0);
- if (ret < 0)
- return ret;
- return register_fprobe_ips(&tf->fp, &ip, 1);
+
+ /* This tracepoint is not loaded yet */
+ if (tf->tpoint == TRACEPOINT_STUB)
+ return 0;
+
+ return __register_tracepoint_fprobe(tf);
}
/* TODO: handle filter, nofilter or symbol list */
@@ -862,20 +877,106 @@ end:
return ret;
}
+struct __find_tracepoint_cb_data {
+ const char *tp_name;
+ struct tracepoint *tpoint;
+ struct module *mod;
+};
+
+static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
+ data->tpoint = tp;
+ if (!data->mod) {
+ data->mod = mod;
+ if (!try_module_get(data->mod)) {
+ data->tpoint = NULL;
+ data->mod = NULL;
+ }
+ }
+ }
+}
+
+static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name))
+ data->tpoint = tp;
+}
+
+/*
+ * Find a tracepoint in the kernel or in a module. If the tracepoint is found
+ * in a module, this increments the module refcount to prevent it from being
+ * unloaded until the trace_fprobe is registered on the trace_fprobe list.
+ * Once it is on that list, the module refcount is dropped again, because
+ * __tracepoint_probe_module_cb() takes over handling the module from there.
+ */
+static struct tracepoint *find_tracepoint(const char *tp_name,
+ struct module **tp_mod)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = NULL,
+ };
+
+ for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
+
+ if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
+ for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
+ *tp_mod = data.mod;
+ }
+
+ return data.tpoint;
+}
+
#ifdef CONFIG_MODULES
+static void reenable_trace_fprobe(struct trace_fprobe *tf)
+{
+ struct trace_probe *tp = &tf->tp;
+
+ list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
+ __enable_trace_fprobe(tf);
+ }
+}
+
+static struct tracepoint *find_tracepoint_in_module(struct module *mod,
+ const char *tp_name)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = mod,
+ };
+
+ for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
+ return data.tpoint;
+}
+
static int __tracepoint_probe_module_cb(struct notifier_block *self,
unsigned long val, void *data)
{
struct tp_module *tp_mod = data;
+ struct tracepoint *tpoint;
struct trace_fprobe *tf;
struct dyn_event *pos;
- if (val != MODULE_STATE_GOING)
+ if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
return NOTIFY_DONE;
mutex_lock(&event_mutex);
for_each_trace_fprobe(tf, pos) {
- if (tp_mod->mod == tf->mod) {
+ if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
+ tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
+ if (tpoint) {
+ tf->tpoint = tpoint;
+ tf->mod = tp_mod->mod;
+ if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
+ trace_probe_is_enabled(&tf->tp))
+ reenable_trace_fprobe(tf);
+ }
+ } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
tracepoint_probe_unregister(tf->tpoint,
tf->tpoint->probestub, NULL);
tf->tpoint = NULL;
@@ -892,30 +993,6 @@ static struct notifier_block tracepoint_module_nb = {
};
#endif /* CONFIG_MODULES */
-struct __find_tracepoint_cb_data {
- const char *tp_name;
- struct tracepoint *tpoint;
-};
-
-static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
-{
- struct __find_tracepoint_cb_data *data = priv;
-
- if (!data->tpoint && !strcmp(data->tp_name, tp->name))
- data->tpoint = tp;
-}
-
-static struct tracepoint *find_tracepoint(const char *tp_name)
-{
- struct __find_tracepoint_cb_data data = {
- .tp_name = tp_name,
- };
-
- for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
-
- return data.tpoint;
-}
-
static int parse_symbol_and_return(int argc, const char *argv[],
char **symbol, bool *is_return,
bool is_tracepoint)
@@ -996,6 +1073,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
char abuf[MAX_BTF_ARGS_LEN];
char *dbuf = NULL;
bool is_tracepoint = false;
+ struct module *tp_mod = NULL;
struct tracepoint *tpoint = NULL;
struct traceprobe_parse_context ctx = {
.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
@@ -1080,15 +1158,20 @@ static int __trace_fprobe_create(int argc, const char *argv[])
if (is_tracepoint) {
ctx.flags |= TPARG_FL_TPOINT;
- tpoint = find_tracepoint(symbol);
- if (!tpoint) {
+ tpoint = find_tracepoint(symbol, &tp_mod);
+ if (tpoint) {
+ ctx.funcname = kallsyms_lookup(
+ (unsigned long)tpoint->probestub,
+ NULL, NULL, NULL, sbuf);
+ } else if (IS_ENABLED(CONFIG_MODULES)) {
+ /* This *may* be loaded afterwards */
+ tpoint = TRACEPOINT_STUB;
+ ctx.funcname = symbol;
+ } else {
trace_probe_log_set_index(1);
trace_probe_log_err(0, NO_TRACEPOINT);
goto parse_error;
}
- ctx.funcname = kallsyms_lookup(
- (unsigned long)tpoint->probestub,
- NULL, NULL, NULL, sbuf);
} else
ctx.funcname = symbol;
@@ -1110,8 +1193,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out;
/* setup a probe */
- tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
- argc, is_return);
+ tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
+ maxactive, argc, is_return);
if (IS_ERR(tf)) {
ret = PTR_ERR(tf);
/* This must return -ENOMEM, else there is a bug */
@@ -1119,10 +1202,6 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out; /* We know tf is not allocated */
}
- if (is_tracepoint)
- tf->mod = __module_text_address(
- (unsigned long)tf->tpoint->probestub);
-
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
trace_probe_log_set_index(i + 2);
@@ -1155,6 +1234,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
}
out:
+ if (tp_mod)
+ module_put(tp_mod);
traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
kfree(new_argv);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f7443e996b1b..c40531d2cbad 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>
+#include <linux/percpu.h>
#include "trace_dynevent.h"
#include "trace_probe.h"
@@ -62,7 +63,7 @@ struct trace_uprobe {
struct uprobe *uprobe;
unsigned long offset;
unsigned long ref_ctr_offset;
- unsigned long nhit;
+ unsigned long __percpu *nhits;
struct trace_probe tp;
};
@@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
if (!tu)
return ERR_PTR(-ENOMEM);
+ tu->nhits = alloc_percpu(unsigned long);
+ if (!tu->nhits) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
ret = trace_probe_init(&tu->tp, event, group, true, nargs);
if (ret < 0)
goto error;
@@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
return tu;
error:
+ free_percpu(tu->nhits);
kfree(tu);
return ERR_PTR(ret);
@@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
path_put(&tu->path);
trace_probe_cleanup(&tu->tp);
kfree(tu->filename);
+ free_percpu(tu->nhits);
kfree(tu);
}
@@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
{
struct dyn_event *ev = v;
struct trace_uprobe *tu;
+ unsigned long nhits;
+ int cpu;
if (!is_trace_uprobe(ev))
return 0;
tu = to_trace_uprobe(ev);
+
+ nhits = 0;
+ for_each_possible_cpu(cpu) {
+ nhits += per_cpu(*tu->nhits, cpu);
+ }
+
seq_printf(m, " %s %-44s %15lu\n", tu->filename,
- trace_probe_name(&tu->tp), tu->nhit);
+ trace_probe_name(&tu->tp), nhits);
return 0;
}
@@ -1508,7 +1525,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
int ret = 0;
tu = container_of(con, struct trace_uprobe, consumer);
- tu->nhit++;
+
+ this_cpu_inc(*tu->nhits);
udd.tu = tu;
udd.bp_addr = instruction_pointer(regs);
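The hunk above swaps a shared hit counter for a per-CPU one. The general pattern, sketched below for an assumed example struct (not the patch itself), keeps the probe hot path to a single this_cpu_inc() with no shared cacheline bouncing between CPUs, and pays the cross-CPU summation cost only on the rare read side:

struct example_stats {
	unsigned long __percpu *hits;
};

static int example_stats_init(struct example_stats *s)
{
	s->hits = alloc_percpu(unsigned long);
	return s->hits ? 0 : -ENOMEM;
}

static void example_stats_hit(struct example_stats *s)
{
	this_cpu_inc(*s->hits);			/* hot path: local counter only */
}

static unsigned long example_stats_read(struct example_stats *s)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(*s->hits, cpu);	/* counters only grow, a racy sum is fine */
	return sum;
}

static void example_stats_destroy(struct example_stats *s)
{
	free_percpu(s->hits);
}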
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 8d1507dd0724..8879da16ef4d 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -735,6 +735,48 @@ static __init int init_tracepoints(void)
return ret;
}
__initcall(init_tracepoints);
+
+/**
+ * for_each_tracepoint_in_module - iterate over all tracepoints in a module
+ * @mod: module
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ tracepoint_ptr_t *begin, *end, *iter;
+
+ lockdep_assert_held(&tracepoint_module_list_mutex);
+
+ if (!mod)
+ return;
+
+ begin = mod->tracepoints_ptrs;
+ end = mod->tracepoints_ptrs + mod->num_tracepoints;
+
+ for (iter = begin; iter < end; iter++)
+ fct(tracepoint_ptr_deref(iter), mod, priv);
+}
+
+/**
+ * for_each_module_tracepoint - iterate over all tracepoints in all modules
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ struct tp_module *tp_mod;
+
+ mutex_lock(&tracepoint_module_list_mutex);
+ list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+ for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
+ mutex_unlock(&tracepoint_module_list_mutex);
+}
#endif /* CONFIG_MODULES */
/**
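A hypothetical consumer of the two iterators added above (the callback name and the counting logic are made up for illustration; the real consumer in this series is trace_fprobe.c):

static void example_tp_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	unsigned int *nr = priv;

	pr_debug("%s: tracepoint %s\n", mod ? mod->name : "(unknown)", tp->name);
	(*nr)++;
}

static unsigned int example_count_module_tracepoints(void)
{
	unsigned int nr = 0;

	/* walks tracepoint_module_list under tracepoint_module_list_mutex */
	for_each_module_tracepoint(example_tp_cb, &nr);
	return nr;
}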
diff --git a/lib/list-test.c b/lib/list-test.c
index 4f3dc75baec1..e207c4c98d70 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -408,13 +408,10 @@ static void list_test_list_cut_position(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, 2);
- i = 0;
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
-
- KUNIT_EXPECT_EQ(test, i, 1);
}
static void list_test_list_cut_before(struct kunit *test)
@@ -439,13 +436,10 @@ static void list_test_list_cut_before(struct kunit *test)
KUNIT_EXPECT_EQ(test, i, 1);
- i = 0;
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
-
- KUNIT_EXPECT_EQ(test, i, 2);
}
static void list_test_list_splice(struct kunit *test)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5e2e93307f0d..d3412984170c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
{
unsigned long mask, word_mask;
- guard(spinlock_irqsave)(&map->swap_lock);
+ guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) {
if (depth == 0)
@@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
}
for (i = 0; i < sb->map_nr; i++)
- spin_lock_init(&sb->map[i].swap_lock);
+ raw_spin_lock_init(&sb->map[i].swap_lock);
return 0;
}
diff --git a/lib/test_bits.c b/lib/test_bits.c
index 01313980f175..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/test_bits.c
@@ -39,6 +39,36 @@ static void genmask_ull_test(struct kunit *test)
#endif
}
+static void genmask_u128_test(struct kunit *test)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+ /* Below 64 bit masks */
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(0, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000006ull, GENMASK_U128(2, 1));
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(31, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_U128(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(63, 0));
+
+ /* Above 64 bit masks - only 64 bit portion can be validated once */
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(64, 0) >> 1);
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(81, 50) >> 50);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ffffffull, GENMASK_U128(87, 64) >> 64);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ff0000ull, GENMASK_U128(87, 80) >> 64);
+
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(127, 0) >> 64);
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, (u64)GENMASK_U128(127, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(127, 126) >> 126);
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(127, 127) >> 127);
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_U128(0, 1);
+ GENMASK_U128(0, 10);
+ GENMASK_U128(9, 10);
+#endif /* TEST_GENMASK_FAILURES */
+#endif /* CONFIG_ARCH_SUPPORTS_INT128 */
+}
+
static void genmask_input_check_test(struct kunit *test)
{
unsigned int x, y;
@@ -56,12 +86,16 @@ static void genmask_input_check_test(struct kunit *test)
/* Valid input */
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(100, 80));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(110, 65));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(127, 0));
}
static struct kunit_case bits_test_cases[] = {
KUNIT_CASE(genmask_test),
KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_u128_test),
KUNIT_CASE(genmask_input_check_test),
{}
};
diff --git a/mm/Kconfig b/mm/Kconfig
index 09aebca1cae3..4c9f5ea13271 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -595,6 +595,7 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
config SPLIT_PTE_PTLOCKS
def_bool y
depends on MMU
+ depends on SMP
depends on NR_CPUS >= 4
depends on !ARM || CPU_CACHE_VIPT
depends on !PARISC || PA20
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index fecb8172410c..35b72f88983a 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -9,7 +9,7 @@ config DAMON
access frequency of each memory region. The information can be useful
for performance-centric DRAM level memory management.
- See https://damonitor.github.io/doc/html/latest-damon/index.html for
+ See https://www.kernel.org/doc/html/latest/mm/damon/index.html for
more information.
config DAMON_KUNIT_TEST
diff --git a/mm/filemap.c b/mm/filemap.c
index bbaed3dd5049..36d22968be9a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2196,6 +2196,10 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;
+ /* If we landed in the middle of a THP, continue at its end. */
+ if (xa_is_sibling(folio))
+ goto update_start;
+
if (!folio_try_get(folio))
goto retry;
diff --git a/mm/gup.c b/mm/gup.c
index 8232c8c9c372..a82890b46a36 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3700,6 +3700,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
ret = PTR_ERR(folio);
if (ret != -EEXIST)
goto err;
+ folio = NULL;
}
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0580ac9e47b9..3ca89e0279a7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4115,7 +4115,6 @@ out:
static const struct file_operations split_huge_pages_fops = {
.owner = THIS_MODULE,
.write = split_huge_pages_write,
- .llseek = no_llseek,
};
static int __init split_huge_pages_debugfs(void)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index def84d8bcf2d..190fa05635f4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2390,6 +2390,23 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
return folio;
}
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ struct folio *folio;
+
+ spin_lock_irq(&hugetlb_lock);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
+ nmask);
+ if (folio) {
+ VM_BUG_ON(!h->resv_huge_pages);
+ h->resv_huge_pages--;
+ }
+
+ spin_unlock_irq(&hugetlb_lock);
+ return folio;
+}
+
/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 7634dd2a6128..b88543e5c0cc 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -44,7 +44,8 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
CFLAGS_KASAN_TEST += -fno-builtin
endif
-CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST)
+RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)
CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
obj-y := common.o report.o
@@ -52,5 +53,10 @@ obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quaran
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
+kasan_test-objs := kasan_test_c.o
+ifdef CONFIG_RUST
+ kasan_test-objs += kasan_test_rust.o
+endif
+
obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb2b9ac0659a..f438a6cdc964 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);
+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
#else /* CONFIG_KASAN_KUNIT_TEST */
static inline void kasan_kunit_test_suite_start(void) { }
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c
index 567d33b493e2..a181e4780d9d 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1944,6 +1944,16 @@ static void match_all_mem_tag(struct kunit *test)
kfree(ptr);
}
+/*
+ * Check that Rust performing a use-after-free using `unsafe` is detected.
+ * This is a smoke test to make sure that Rust is being sanitized properly.
+ */
+static void rust_uaf(struct kunit *test)
+{
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -2017,6 +2027,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
+ KUNIT_CASE(rust_uaf),
{}
};
diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
new file mode 100644
index 000000000000..caa7175964ef
--- /dev/null
+++ b/mm/kasan/kasan_test_rust.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Helper crate for KASAN testing.
+//!
+//! Provides behavior to check the sanitization of Rust code.
+
+use core::ptr::addr_of_mut;
+use kernel::prelude::*;
+
+/// Trivial UAF - allocate a big vector, grab a pointer partway through,
+/// drop the vector, and touch it.
+#[no_mangle]
+pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+ let mut v: Vec<u8> = Vec::new();
+ for _ in 0..4096 {
+ v.push(0x42, GFP_KERNEL).unwrap();
+ }
+ let ptr: *mut u8 = addr_of_mut!(v[2048]);
+ drop(v);
+ unsafe { *ptr }
+}
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 451991a3a8f2..6370c5207d1a 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -109,7 +109,7 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
- u64 interval_nsec = local_clock() - meta->alloc_track.ts_nsec;
+ u64 interval_nsec = local_clock() - track->ts_nsec;
unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
/* Timestamp matches printk timestamp format. */
diff --git a/mm/memblock.c b/mm/memblock.c
index 0a77a748a8eb..0389ce5cd281 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1731,6 +1731,23 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
return memblock.reserved.total_size;
}
+/**
+ * memblock_estimated_nr_free_pages - return estimated number of free pages
+ * from memblock point of view
+ *
+ * During bootup, subsystems might need a rough estimate of the number of free
+ * pages in the whole system, before precise numbers are available from the
+ * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
+ * obtained from the buddy might be very imprecise during bootup.
+ *
+ * Return:
+ * An estimated number of free pages from memblock point of view.
+ */
+unsigned long __init memblock_estimated_nr_free_pages(void)
+{
+ return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+}
+
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
diff --git a/mm/memfd.c b/mm/memfd.c
index e7b7c5294d59..c17c3ea701a1 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -79,23 +79,25 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
* alloc from. Also, the folio will be pinned for an indefinite
* amount of time, so it is not expected to be migrated away.
*/
- gfp_mask = htlb_alloc_mask(hstate_file(memfd));
+ struct hstate *h = hstate_file(memfd);
+
+ gfp_mask = htlb_alloc_mask(h);
gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
+ idx >>= huge_page_order(h);
- folio = alloc_hugetlb_folio_nodemask(hstate_file(memfd),
- numa_node_id(),
- NULL,
- gfp_mask,
- false);
- if (folio && folio_try_get(folio)) {
+ folio = alloc_hugetlb_folio_reserve(h,
+ numa_node_id(),
+ NULL,
+ gfp_mask);
+ if (folio) {
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
if (err) {
folio_put(folio);
- free_huge_folio(folio);
return ERR_PTR(err);
}
+ folio_unlock(folio);
return folio;
}
return ERR_PTR(-ENOMEM);
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 9842acebd05e..fc14fe53e9b7 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -768,10 +768,10 @@ int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
pr_info(
"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
- pr_info(" performance of reference DRAM node %d:\n",
- default_dram_perf_ref_nid);
+ pr_info(" performance of reference DRAM node %d from %s:\n",
+ default_dram_perf_ref_nid, default_dram_perf_ref_source);
dump_hmem_attrs(&default_dram_perf, " ");
- pr_info(" performance of DRAM node %d:\n", nid);
+ pr_info(" performance of DRAM node %d from %s:\n", nid, source);
dump_hmem_attrs(perf, " ");
pr_info(
" disable default DRAM node performance based abstract distance algorithm.\n");
diff --git a/mm/migrate.c b/mm/migrate.c
index dfdb3a136bf8..df91248755e4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1196,7 +1196,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
int rc = -EAGAIN;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__folio_test_movable(src);
+ bool is_lru = data_race(!__folio_test_movable(src));
bool locked = false;
bool dst_locked = false;
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index bcdab9c23b40..63f988f0c9e8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -40,6 +40,12 @@ config NET_9P_XEN
This builds support for a transport for 9pfs between
two Xen domains.
+config NET_9P_USBG
+ bool "9P USB Gadget Transport"
+ depends on USB_GADGET=y || USB_GADGET=NET_9P
+ help
+ This builds support for a 9pfs transport over a
+ USB gadget.
config NET_9P_RDMA
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 1df9b344c30b..22794a451c3f 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o
obj-$(CONFIG_NET_9P_XEN) += 9pnet_xen.o
obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
+obj-$(CONFIG_NET_9P_USBG) += 9pnet_usbg.o
9pnet-objs := \
mod.o \
@@ -23,3 +24,6 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
9pnet_rdma-objs := \
trans_rdma.o \
+
+9pnet_usbg-objs := \
+ trans_usbg.o \
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
new file mode 100644
index 000000000000..975b76839dca
--- /dev/null
+++ b/net/9p/trans_usbg.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * trans_usbg.c - USB peripheral usb9pfs configuration driver and transport.
+ *
+ * Copyright (C) 2024 Michael Grzeschik <m.grzeschik@pengutronix.de>
+ */
+
+/* The usb9pfs gadget function only needs two bulk endpoints and uses the
+ * usb9pfs transport to mount a host-exported filesystem via the USB gadget.
+ */
+
+/* +--------------------------+ | +--------------------------+
+ * | 9PFS mounting client | | | 9PFS exporting server |
+ * SW | | | | |
+ * | (this:trans_usbg) | | |(e.g. diod or nfs-ganesha)|
+ * +-------------^------------+ | +-------------^------------+
+ * | | |
+ * ------------------|------------------------------------|-------------
+ * | | |
+ * +-------------v------------+ | +-------------v------------+
+ * | | | | |
+ * HW | USB Device Controller <---------> USB Host Controller |
+ * | | | | |
+ * +--------------------------+ | +--------------------------+
+ */
+
+#include <linux/cleanup.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
+
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+
+#define DEFAULT_BUFLEN 16384
+
+struct f_usb9pfs {
+ struct p9_client *client;
+
+ /* 9p request lock for en/dequeue */
+ spinlock_t lock;
+
+ struct usb_request *in_req;
+ struct usb_request *out_req;
+
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ struct completion send;
+ struct completion received;
+
+ unsigned int buflen;
+
+ struct usb_function function;
+};
+
+static inline struct f_usb9pfs *func_to_usb9pfs(struct usb_function *f)
+{
+ return container_of(f, struct f_usb9pfs, function);
+}
+
+struct f_usb9pfs_opts {
+ struct usb_function_instance func_inst;
+ unsigned int buflen;
+
+ struct f_usb9pfs_dev *dev;
+
+ /* Read/write access to configfs attributes is handled by configfs.
+ *
+ * This is to protect the data from concurrent access by read/write
+ * and create symlink/remove symlink.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+struct f_usb9pfs_dev {
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_opts *opts;
+ char tag[41];
+ bool inuse;
+
+ struct list_head usb9pfs_instance;
+};
+
+static DEFINE_MUTEX(usb9pfs_lock);
+static struct list_head usbg_instance_list;
+
+static int usb9pfs_queue_tx(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_tx_req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct usb_request *req = usb9pfs->in_req;
+ int ret;
+
+ if (!(p9_tx_req->tc.size % usb9pfs->in_ep->maxpacket))
+ req->zero = 1;
+
+ req->buf = p9_tx_req->tc.sdata;
+ req->length = p9_tx_req->tc.size;
+ req->context = p9_tx_req;
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs send --> %d/%d, zero: %d\n",
+ usb9pfs->in_ep->name, req->actual, req->length, req->zero);
+
+ ret = usb_ep_queue(usb9pfs->in_ep, req, gfp_flags);
+ if (ret)
+ req->context = NULL;
+
+ dev_dbg(&cdev->gadget->dev, "tx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_queue_rx(struct f_usb9pfs *usb9pfs, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ int ret;
+
+ ret = usb_ep_queue(usb9pfs->out_ep, req, gfp_flags);
+
+ dev_dbg(&cdev->gadget->dev, "rx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_transmit(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_req)
+{
+ int ret = 0;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ ret = usb9pfs_queue_tx(usb9pfs, p9_req, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ list_del(&p9_req->req_list);
+
+ p9_req_get(p9_req);
+
+ return ret;
+}
+
+static void usb9pfs_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_tx_req = req->context;
+ unsigned long flags;
+
+ /* reset the zero-length-packet flag */
+ req->zero = 0;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+
+ spin_lock_irqsave(&usb9pfs->lock, flags);
+ WRITE_ONCE(p9_tx_req->status, REQ_STATUS_SENT);
+
+ p9_req_put(usb9pfs->client, p9_tx_req);
+
+ req->context = NULL;
+
+ spin_unlock_irqrestore(&usb9pfs->lock, flags);
+
+ complete(&usb9pfs->send);
+}
+
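+/* Parse the 9P header at the start of the receive buffer, map it back to the
+ * pending request via its tag and sanity-check size and receive fcall;
+ * returns NULL (dropping the lookup reference) on any mismatch.
+ */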
+static struct p9_req_t *usb9pfs_rx_header(struct f_usb9pfs *usb9pfs, void *buf)
+{
+ struct p9_req_t *p9_rx_req;
+ struct p9_fcall rc;
+ int ret;
+
+ /* start by reading header */
+ rc.sdata = buf;
+ rc.offset = 0;
+ rc.capacity = P9_HDRSZ;
+ rc.size = P9_HDRSZ;
+
+ p9_debug(P9_DEBUG_TRANS, "mux %p got %zu bytes\n", usb9pfs,
+ rc.capacity - rc.offset);
+
+ ret = p9_parse_header(&rc, &rc.size, NULL, NULL, 0);
+ if (ret) {
+ p9_debug(P9_DEBUG_ERROR,
+ "error parsing header: %d\n", ret);
+ return NULL;
+ }
+
+ p9_debug(P9_DEBUG_TRANS,
+ "mux %p pkt: size: %d bytes tag: %d\n",
+ usb9pfs, rc.size, rc.tag);
+
+ p9_rx_req = p9_tag_lookup(usb9pfs->client, rc.tag);
+ if (!p9_rx_req || p9_rx_req->status != REQ_STATUS_SENT) {
+ p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", rc.tag);
+ return NULL;
+ }
+
+ if (rc.size > p9_rx_req->rc.capacity) {
+ p9_debug(P9_DEBUG_ERROR,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ rc.size, rc.tag, p9_rx_req->rc.capacity);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ if (!p9_rx_req->rc.sdata) {
+ p9_debug(P9_DEBUG_ERROR,
+ "No recv fcall for tag %d (req %p), disconnecting!\n",
+ rc.tag, p9_rx_req);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ return p9_rx_req;
+}
+
+static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_rx_req;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ p9_rx_req = usb9pfs_rx_header(usb9pfs, req->buf);
+ if (!p9_rx_req)
+ return;
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+
+ p9_rx_req->rc.size = req->actual;
+
+ p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+
+ complete(&usb9pfs->received);
+}
+
+static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+{
+ int value;
+
+ value = usb_ep_disable(ep);
+ if (value < 0)
+ dev_info(&cdev->gadget->dev,
+ "disable %s --> %d\n", ep->name, value);
+}
+
+static void disable_usb9pfs(struct f_usb9pfs *usb9pfs)
+{
+ struct usb_composite_dev *cdev =
+ usb9pfs->function.config->cdev;
+
+ if (usb9pfs->in_req) {
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+ usb9pfs->in_req = NULL;
+ }
+
+ if (usb9pfs->out_req) {
+ usb_ep_free_request(usb9pfs->out_ep, usb9pfs->out_req);
+ usb9pfs->out_req = NULL;
+ }
+
+ disable_ep(cdev, usb9pfs->in_ep);
+ disable_ep(cdev, usb9pfs->out_ep);
+ dev_dbg(&cdev->gadget->dev, "%s disabled\n",
+ usb9pfs->function.name);
+}
+
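+/* The IN request is allocated without a buffer of its own; usb9pfs_queue_tx()
+ * points it at the 9P request data before queueing. The OUT request owns a
+ * buflen-sized buffer, allocated with alloc_ep_req(), that replies are
+ * received into.
+ */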
+static int alloc_requests(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs)
+{
+ int ret;
+
+ usb9pfs->in_req = usb_ep_alloc_request(usb9pfs->in_ep, GFP_ATOMIC);
+ if (!usb9pfs->in_req) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ usb9pfs->out_req = alloc_ep_req(usb9pfs->out_ep, usb9pfs->buflen);
+ if (!usb9pfs->out_req) {
+ ret = -ENOENT;
+ goto fail_in;
+ }
+
+ usb9pfs->in_req->complete = usb9pfs_tx_complete;
+ usb9pfs->out_req->complete = usb9pfs_rx_complete;
+
+ /* buf and length of in_req are assigned at queue time in usb9pfs_queue_tx() */
+ usb9pfs->in_req->context = usb9pfs;
+ usb9pfs->out_req->context = usb9pfs;
+
+ return 0;
+
+fail_in:
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+fail:
+ return ret;
+}
+
+static int enable_endpoint(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs, struct usb_ep *ep)
+{
+ int ret;
+
+ ret = config_ep_by_speed(cdev->gadget, &usb9pfs->function, ep);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(ep);
+ if (ret < 0)
+ return ret;
+
+ ep->driver_data = usb9pfs;
+
+ return 0;
+}
+
+static int
+enable_usb9pfs(struct usb_composite_dev *cdev, struct f_usb9pfs *usb9pfs)
+{
+ struct p9_client *client;
+ int ret = 0;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->in_ep);
+ if (ret)
+ goto out;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->out_ep);
+ if (ret)
+ goto disable_in;
+
+ ret = alloc_requests(cdev, usb9pfs);
+ if (ret)
+ goto disable_out;
+
+ client = usb9pfs->client;
+ if (client)
+ client->status = Connected;
+
+ dev_dbg(&cdev->gadget->dev, "%s enabled\n", usb9pfs->function.name);
+ return 0;
+
+disable_out:
+ usb_ep_disable(usb9pfs->out_ep);
+disable_in:
+ usb_ep_disable(usb9pfs->in_ep);
+out:
+ return ret;
+}
+
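+/* 9P transport "create" hook: look up a registered usb9pfs function instance
+ * whose configfs tag matches the mount device name, mark it as in use and
+ * attach the client to it.
+ */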
+static int p9_usbg_create(struct p9_client *client, const char *devname, char *args)
+{
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs *usb9pfs;
+ int ret = -ENOENT;
+ int found = 0;
+
+ if (!devname)
+ return -EINVAL;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ list_for_each_entry(dev, &usbg_instance_list, usb9pfs_instance) {
+ if (!strncmp(devname, dev->tag, strlen(devname))) {
+ if (!dev->inuse) {
+ dev->inuse = true;
+ found = 1;
+ break;
+ }
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("no channels available for device %s\n", devname);
+ return ret;
+ }
+
+ usb9pfs = dev->usb9pfs;
+ if (!usb9pfs)
+ return -EINVAL;
+
+ client->trans = (void *)usb9pfs;
+ if (!usb9pfs->in_req)
+ client->status = Disconnected;
+ else
+ client->status = Connected;
+ usb9pfs->client = client;
+
+ client->trans_mod->maxsize = usb9pfs->buflen;
+
+ complete(&usb9pfs->received);
+
+ return 0;
+}
+
+static void usb9pfs_clear_tx(struct f_usb9pfs *usb9pfs)
+{
+ struct p9_req_t *req;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ req = usb9pfs->in_req->context;
+ if (!req)
+ return;
+
+ if (!req->t_err)
+ req->t_err = -ECONNRESET;
+
+ p9_client_cb(usb9pfs->client, req, REQ_STATUS_ERROR);
+}
+
+static void p9_usbg_close(struct p9_client *client)
+{
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs_opts *opts;
+
+ if (!client)
+ return;
+
+ usb9pfs = client->trans;
+ if (!usb9pfs)
+ return;
+
+ client->status = Disconnected;
+
+ usb9pfs_clear_tx(usb9pfs);
+
+ opts = container_of(usb9pfs->function.fi,
+ struct f_usb9pfs_opts, func_inst);
+
+ dev = opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ dev->inuse = false;
+ mutex_unlock(&usb9pfs_lock);
+}
+
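+/* 9P transport "request" hook, one request at a time: wait until the previous
+ * reply has been consumed (the "received" completion is also signalled once at
+ * create time), queue the TX request on the IN endpoint, wait for its
+ * completion, then queue the RX request on the OUT endpoint for the reply.
+ */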
+static int p9_usbg_request(struct p9_client *client, struct p9_req_t *p9_req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret;
+
+ if (client->status != Connected)
+ return -EBUSY;
+
+ ret = wait_for_completion_killable(&usb9pfs->received);
+ if (ret)
+ return ret;
+
+ ret = usb9pfs_transmit(usb9pfs, p9_req);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_killable(&usb9pfs->send);
+ if (ret)
+ return ret;
+
+ return usb9pfs_queue_rx(usb9pfs, usb9pfs->out_req, GFP_ATOMIC);
+}
+
+static int p9_usbg_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret = 1;
+
+ p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ if (req->status == REQ_STATUS_UNSENT) {
+ list_del(&req->req_list);
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
+ p9_req_put(client, req);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static struct p9_trans_module p9_usbg_trans = {
+ .name = "usbg",
+ .create = p9_usbg_create,
+ .close = p9_usbg_close,
+ .request = p9_usbg_request,
+ .cancel = p9_usbg_cancel,
+ .owner = THIS_MODULE,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_PROTOCOL_9PFS 0x09
+
+static struct usb_interface_descriptor usb9pfs_intf = {
+ .bLength = sizeof(usb9pfs_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_PROTOCOL_9PFS,
+
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&fs_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&fs_usb9pfs_source_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&hs_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&hs_usb9pfs_sink_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_source_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_sink_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *ss_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_comp_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_comp_desc,
+ NULL,
+};
+
+/* function-specific strings: */
+static struct usb_string strings_usb9pfs[] = {
+ [0].s = "usb9pfs input to output",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_usb9pfs = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_usb9pfs,
+};
+
+static struct usb_gadget_strings *usb9pfs_strings[] = {
+ &stringtab_usb9pfs,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int usb9pfs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret;
+ int id;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ usb9pfs_intf.bInterfaceNumber = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_usb9pfs[0].id = id;
+ usb9pfs_intf.iInterface = id;
+
+ /* allocate endpoints */
+ usb9pfs->in_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_source_desc);
+ if (!usb9pfs->in_ep)
+ goto autoconf_fail;
+
+ usb9pfs->out_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_sink_desc);
+ if (!usb9pfs->out_ep)
+ goto autoconf_fail;
+
+ /* support high speed hardware */
+ hs_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ hs_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ /* support super speed hardware */
+ ss_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ ss_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, fs_usb9pfs_descs, hs_usb9pfs_descs,
+ ss_usb9pfs_descs, ss_usb9pfs_descs);
+ if (ret)
+ return ret;
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+ opts->dev->usb9pfs = usb9pfs;
+
+ dev_dbg(&cdev->gadget->dev, "%s speed %s: IN/%s, OUT/%s\n",
+ (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+ f->name, usb9pfs->in_ep->name, usb9pfs->out_ep->name);
+
+ return 0;
+
+autoconf_fail:
+ ERROR(cdev, "%s: can't autoconfigure on %s\n",
+ f->name, cdev->gadget->name);
+ return -ENODEV;
+}
+
+static void usb9pfs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ disable_usb9pfs(usb9pfs);
+}
+
+static void usb9pfs_free_func(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+
+ kfree(usb9pfs);
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
+ usb_free_all_descriptors(f);
+}
+
+static int usb9pfs_set_alt(struct usb_function *f,
+ unsigned int intf, unsigned int alt)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ return enable_usb9pfs(cdev, usb9pfs);
+}
+
+static void usb9pfs_disable(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ usb9pfs_clear_tx(usb9pfs);
+}
+
+static struct usb_function *usb9pfs_alloc(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs *usb9pfs;
+
+ usb9pfs = kzalloc(sizeof(*usb9pfs), GFP_KERNEL);
+ if (!usb9pfs)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&usb9pfs->lock);
+
+ init_completion(&usb9pfs->send);
+ init_completion(&usb9pfs->received);
+
+ usb9pfs_opts = container_of(fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&usb9pfs_opts->lock);
+ usb9pfs_opts->refcnt++;
+ mutex_unlock(&usb9pfs_opts->lock);
+
+ usb9pfs->buflen = usb9pfs_opts->buflen;
+
+ usb9pfs->function.name = "usb9pfs";
+ usb9pfs->function.bind = usb9pfs_func_bind;
+ usb9pfs->function.unbind = usb9pfs_func_unbind;
+ usb9pfs->function.set_alt = usb9pfs_set_alt;
+ usb9pfs->function.disable = usb9pfs_disable;
+ usb9pfs->function.strings = usb9pfs_strings;
+
+ usb9pfs->function.free_func = usb9pfs_free_func;
+
+ return &usb9pfs->function;
+}
+
+static inline struct f_usb9pfs_opts *to_f_usb9pfs_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_usb9pfs_opts,
+ func_inst.group);
+}
+
+static inline struct f_usb9pfs_opts *fi_to_f_usb9pfs_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_usb9pfs_opts, func_inst);
+}
+
+static void usb9pfs_attr_release(struct config_item *item)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts = to_f_usb9pfs_opts(item);
+
+ usb_put_function_instance(&usb9pfs_opts->func_inst);
+}
+
+static struct configfs_item_operations usb9pfs_item_ops = {
+ .release = usb9pfs_attr_release,
+};
+
+static ssize_t f_usb9pfs_opts_buflen_show(struct config_item *item, char *page)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+
+ mutex_lock(&opts->lock);
+ ret = sysfs_emit(page, "%d\n", opts->buflen);
+ mutex_unlock(&opts->lock);
+
+ return ret;
+}
+
+static ssize_t f_usb9pfs_opts_buflen_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+ u32 num;
+
+ guard(mutex)(&opts->lock);
+
+ if (opts->refcnt)
+ return -EBUSY;
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ return ret;
+
+ opts->buflen = num;
+
+ return len;
+}
+
+CONFIGFS_ATTR(f_usb9pfs_opts_, buflen);
+
+static struct configfs_attribute *usb9pfs_attrs[] = {
+ &f_usb9pfs_opts_attr_buflen,
+ NULL,
+};
+
+static const struct config_item_type usb9pfs_func_type = {
+ .ct_item_ops = &usb9pfs_item_ops,
+ .ct_attrs = usb9pfs_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct f_usb9pfs_dev *_usb9pfs_do_find_dev(const char *tag)
+{
+ struct f_usb9pfs_dev *usb9pfs_dev;
+
+ if (!tag)
+ return NULL;
+
+ list_for_each_entry(usb9pfs_dev, &usbg_instance_list, usb9pfs_instance) {
+ if (strcmp(usb9pfs_dev->tag, tag) == 0)
+ return usb9pfs_dev;
+ }
+
+ return NULL;
+}
+
+static int usb9pfs_tag_instance(struct f_usb9pfs_dev *dev, const char *tag)
+{
+ struct f_usb9pfs_dev *existing;
+ int ret = 0;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ existing = _usb9pfs_do_find_dev(tag);
+ if (!existing)
+ strscpy(dev->tag, tag, ARRAY_SIZE(dev->tag));
+ else if (existing != dev)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static int usb9pfs_set_inst_tag(struct usb_function_instance *fi, const char *tag)
+{
+ if (strlen(tag) >= sizeof_field(struct f_usb9pfs_dev, tag))
+ return -ENAMETOOLONG;
+ return usb9pfs_tag_instance(fi_to_f_usb9pfs_opts(fi)->dev, tag);
+}
+
+static void usb9pfs_free_instance(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts =
+ container_of(fi, struct f_usb9pfs_opts, func_inst);
+ struct f_usb9pfs_dev *dev = usb9pfs_opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ list_del(&dev->usb9pfs_instance);
+ mutex_unlock(&usb9pfs_lock);
+
+ kfree(usb9pfs_opts);
+}
+
+static struct usb_function_instance *usb9pfs_alloc_instance(void)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs_dev *dev;
+
+ usb9pfs_opts = kzalloc(sizeof(*usb9pfs_opts), GFP_KERNEL);
+ if (!usb9pfs_opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&usb9pfs_opts->lock);
+
+ usb9pfs_opts->func_inst.set_inst_name = usb9pfs_set_inst_tag;
+ usb9pfs_opts->func_inst.free_func_inst = usb9pfs_free_instance;
+
+ usb9pfs_opts->buflen = DEFAULT_BUFLEN;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ kfree(usb9pfs_opts);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ usb9pfs_opts->dev = dev;
+ dev->opts = usb9pfs_opts;
+
+ config_group_init_type_name(&usb9pfs_opts->func_inst.group, "",
+ &usb9pfs_func_type);
+
+ mutex_lock(&usb9pfs_lock);
+ list_add_tail(&dev->usb9pfs_instance, &usbg_instance_list);
+ mutex_unlock(&usb9pfs_lock);
+
+ return &usb9pfs_opts->func_inst;
+}
+DECLARE_USB_FUNCTION(usb9pfs, usb9pfs_alloc_instance, usb9pfs_alloc);
+
+static int __init usb9pfs_modinit(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&usbg_instance_list);
+
+ ret = usb_function_register(&usb9pfsusb_func);
+ if (!ret)
+ v9fs_register_trans(&p9_usbg_trans);
+
+ return ret;
+}
+
+static void __exit usb9pfs_modexit(void)
+{
+ usb_function_unregister(&usb9pfsusb_func);
+ v9fs_unregister_trans(&p9_usbg_trans);
+}
+
+module_init(usb9pfs_modinit);
+module_exit(usb9pfs_modexit);
+
+MODULE_ALIAS_9P("usbg");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget 9pfs transport");
+MODULE_AUTHOR("Michael Grzeschik");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3c8b78d9c4d1..d1b5705dc0c6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1254,7 +1254,7 @@ static int ceph_dns_resolve_name(const char *name, size_t namelen,
colon_p = memchr(name, ':', namelen);
if (delim_p && colon_p)
- end = delim_p < colon_p ? delim_p : colon_p;
+ end = min(delim_p, colon_p);
else if (!delim_p && colon_p)
end = colon_p;
else {
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 724b6856fcc3..242c91a6e3d3 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -67,46 +67,39 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
- u32 ufd = attr->target_fd;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
mutex_lock(&sockmap_mutex);
ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
mutex_unlock(&sockmap_mutex);
- fdput(f);
return ret;
}
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
- u32 ufd = attr->target_fd;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
prog = bpf_prog_get(attr->attach_bpf_fd);
- if (IS_ERR(prog)) {
- ret = PTR_ERR(prog);
- goto put_map;
- }
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
if (prog->type != ptype) {
ret = -EINVAL;
@@ -118,8 +111,6 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
mutex_unlock(&sockmap_mutex);
put_prog:
bpf_prog_put(prog);
-put_map:
- fdput(f);
return ret;
}
@@ -1551,18 +1542,17 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
+ u32 prog_cnt = 0, flags = 0;
struct bpf_prog **pprog;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
u32 id = 0;
int ret;
if (attr->query.query_flags)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1594,7 +1584,6 @@ end:
copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
ret = -EFAULT;
- fdput(f);
return ret;
}
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 04504b2b51df..87fd945a0d27 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -239,9 +239,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct sk_buff *nskb;
- struct iphdr *niph;
const struct tcphdr *oth;
+ struct sk_buff *nskb;
struct tcphdr _oth;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
@@ -266,14 +265,12 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
- ip4_dst_hoplimit(skb_dst(nskb)));
+ nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
goto free_nskb;
- niph = ip_hdr(nskb);
-
/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
goto free_nskb;
@@ -290,6 +287,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct iphdr *niph = ip_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 08d4b7132d4c..1c9c686d9522 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL
bool "IPv6: RPL Source Routing Header support"
depends on IPV6
select LWTUNNEL
+ select DST_CACHE
help
Support for RFC6554 RPL Source Routing Header using the lightweight
tunnels mechanism.
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index dedee264b8f6..7db0437140bf 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct tcphdr *oth, unsigned int otcplen)
{
struct tcphdr *tcph;
- int needs_ack;
skb_reset_transport_header(nskb);
- tcph = skb_put(nskb, sizeof(struct tcphdr));
+ tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;
tcph->source = oth->dest;
tcph->dest = oth->source;
if (oth->ack) {
- needs_ack = 0;
tcph->seq = oth->ack_seq;
- tcph->ack_seq = 0;
} else {
- needs_ack = 1;
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
otcplen - (oth->doff<<2));
- tcph->seq = 0;
+ tcph->ack = 1;
}
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
/* Adjust TCP checksum */
tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
@@ -283,7 +273,6 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
const struct tcphdr *otcph;
unsigned int otcplen, hh_len;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
@@ -339,8 +328,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = fl6.flowi6_mark;
skb_reserve(nskb, hh_len + dst->header_len);
- ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- ip6_dst_hoplimit(dst));
+ nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst));
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
@@ -355,6 +343,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct ipv6hdr *ip6h = ipv6_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 25b8a67a63a4..85149c774505 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -187,7 +187,6 @@ static const struct file_operations minstrel_ht_stat_fops = {
.open = minstrel_ht_stats_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
static char *
@@ -323,7 +322,6 @@ static const struct file_operations minstrel_ht_stat_csv_fops = {
.open = minstrel_ht_stats_csv_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
void
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d3cb53b008f5..9db3e2b0b1c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -988,6 +988,56 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
tstamp->start = ktime_get_real_ns();
}
+/**
+ * nf_ct_match_reverse - check if ct1 and ct2 refer to identical flow
+ * @ct1: conntrack in hash table to check against
+ * @ct2: merge candidate
+ *
+ * returns true if ct1 and ct2 happen to refer to the same flow, but
+ * in opposing directions, i.e.
+ * ct1: a:b -> c:d
+ * ct2: c:d -> a:b
+ * for both directions. If so, @ct2 should not have been created,
+ * as the skb should have been picked up as an ESTABLISHED flow.
+ * But ct1 had not yet been committed to the hash table when the skb
+ * that created ct2 arrived.
+ *
+ * Note we don't compare netns because ct entries in different net
+ * namespaces cannot clash to begin with.
+ *
+ * @return: true if ct1 and ct2 are identical when swapping origin/reply.
+ */
+static bool
+nf_ct_match_reverse(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+ u16 id1, id2;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct2->tuplehash[IP_CT_DIR_REPLY].tuple))
+ return false;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_ORIGINAL);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_REPLY);
+ if (id1 != id2)
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_REPLY);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL);
+
+ return id1 == id2;
+}
+
+static int nf_ct_can_merge(const struct nf_conn *ct,
+ const struct nf_conn *loser_ct)
+{
+ return nf_ct_match(ct, loser_ct) ||
+ nf_ct_match_reverse(ct, loser_ct);
+}
+
/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
@@ -999,11 +1049,7 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
loser_ct = nf_ct_get(skb, &ctinfo);
- if (nf_ct_is_dying(ct))
- return NF_DROP;
-
- if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
- nf_ct_match(ct, loser_ct)) {
+ if (nf_ct_can_merge(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
nf_conntrack_get(&ct->ct_general);
@@ -2151,80 +2197,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
nf_conntrack_get(skb_nfct(nskb));
}
-static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- const struct nf_nat_hook *nat_hook;
- struct nf_conntrack_tuple_hash *h;
- struct nf_conntrack_tuple tuple;
- unsigned int status;
- int dataoff;
- u16 l3num;
- u8 l4num;
-
- l3num = nf_ct_l3num(ct);
-
- dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
- if (dataoff <= 0)
- return NF_DROP;
-
- if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple))
- return NF_DROP;
-
- if (ct->status & IPS_SRC_NAT) {
- memcpy(tuple.src.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
- sizeof(tuple.src.u3.all));
- tuple.src.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
- }
-
- if (ct->status & IPS_DST_NAT) {
- memcpy(tuple.dst.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
- sizeof(tuple.dst.u3.all));
- tuple.dst.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
- }
-
- h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
- if (!h)
- return NF_ACCEPT;
-
- /* Store status bits of the conntrack that is clashing to re-do NAT
- * mangling according to what it has been done already to this packet.
- */
- status = ct->status;
-
- nf_ct_put(ct);
- ct = nf_ct_tuplehash_to_ctrack(h);
- nf_ct_set(skb, ct, ctinfo);
-
- nat_hook = rcu_dereference(nf_nat_hook);
- if (!nat_hook)
- return NF_ACCEPT;
-
- if (status & IPS_SRC_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_SRC,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- if (status & IPS_DST_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_DST,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- return NF_ACCEPT;
-}
-
/* This packet is coming from userspace via nf_queue, complete the packet
* processing after the helper invocation in nf_confirm().
*/
@@ -2288,17 +2260,6 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (!ct)
return NF_ACCEPT;
- if (!nf_ct_is_confirmed(ct)) {
- int ret = __nf_conntrack_update(net, skb, ct, ctinfo);
-
- if (ret != NF_ACCEPT)
- return ret;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct)
- return NF_ACCEPT;
- }
-
return nf_confirm_cthelper(skb, ct, ctinfo);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 123e2e933e9b..6a1239433830 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -382,7 +382,7 @@ nla_put_failure:
#define ctnetlink_dump_secctx(a, b) (0)
#endif
-#ifdef CONFIG_NF_CONNTRACK_LABELS
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
@@ -391,6 +391,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct)
return 0;
return nla_total_size(sizeof(labels->bits));
}
+#endif
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
@@ -411,10 +412,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
return 0;
}
-#else
-#define ctnetlink_dump_labels(a, b) (0)
-#define ctnetlink_label_size(a) (0)
-#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -652,7 +649,6 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
return len + len4;
}
-#endif
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
@@ -690,6 +686,7 @@ static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
return 0;
#endif
}
+#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 6d8da6dddf99..4085c436e306 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -183,7 +183,35 @@ hash_by_src(const struct net *net,
return reciprocal_scale(hash, nf_nat_htable_size);
}
-/* Is this tuple already taken? (not by us) */
+/**
+ * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_conntrack: our (unconfirmed) conntrack entry
+ *
+ * A conntrack entry can be inserted into the connection tracking table
+ * if there is no existing entry with an identical tuple in either direction.
+ *
+ * Example:
+ * INITIATOR -> NAT/PAT -> RESPONDER
+ *
+ * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite).
+ * Then, later, NAT/PAT itself also connects to RESPONDER.
+ *
+ * This will not work if the SNAT done earlier used the same IP:PORT source pair.
+ *
+ * Conntrack table has:
+ * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * and new locally originating connection wants:
+ * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * ... which would mean incoming packets cannot be distinguished between
+ * the existing and the newly added entry (identical IP_CT_DIR_REPLY tuple).
+ *
+ * @return: true if the proposed NAT mapping collides with an existing entry.
+ */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
@@ -200,6 +228,94 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
+static bool nf_nat_allow_clash(const struct nf_conn *ct)
+{
+ return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash;
+}
+
+/**
+ * nf_nat_used_tuple_new - check if to-be-inserted conntrack collides with existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_ct: our (unconfirmed) conntrack entry
+ *
+ * Same as nf_nat_used_tuple(), but also checks for a rare clash in the
+ * reverse direction. Should be called only when @tuple has not been
+ * altered, i.e. @ignored_ct will not be subject to NAT.
+ *
+ * @return: true if the proposed NAT mapping collides with an existing entry.
+ */
+static noinline bool
+nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_ct)
+{
+ static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
+ const struct nf_conntrack_tuple_hash *thash;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conn *ct;
+ bool taken = true;
+ struct net *net;
+
+ if (!nf_nat_used_tuple(tuple, ignored_ct))
+ return false;
+
+ if (!nf_nat_allow_clash(ignored_ct))
+ return true;
+
+ /* Initial choice clashes with existing conntrack.
+ * Check for (rare) reverse collision.
+ *
+ * This can happen when new packets are received in both directions
+ * at the exact same time on different CPUs.
+ *
+ * Without SMP, first packet creates new conntrack entry and second
+ * packet is resolved as established reply packet.
+ *
+ * With parallel processing, both packets could be picked up as
+ * new and both get their own ct entry allocated.
+ *
+ * If ignored_ct and the colliding ct are not subject to NAT then
+ * pretend the tuple is available and let later clash resolution
+ * handle this at insertion time.
+ *
+ * Without it, the 'reply' packet has its source port rewritten
+ * by nat engine.
+ */
+ if (READ_ONCE(ignored_ct->status) & uses_nat)
+ return true;
+
+ net = nf_ct_net(ignored_ct);
+ zone = nf_ct_zone(ignored_ct);
+
+ thash = nf_conntrack_find_get(net, zone, tuple);
+ if (unlikely(!thash)) /* clashing entry went away */
+ return false;
+
+ ct = nf_ct_tuplehash_to_ctrack(thash);
+
+ /* NB: IP_CT_DIR_ORIGINAL should be impossible because
+ * nf_nat_used_tuple() handles origin collisions.
+ *
+ * Handle remote chance other CPU confirmed its ct right after.
+ */
+ if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
+ goto out;
+
+ /* clashing connection subject to NAT? Retry with new tuple. */
+ if (READ_ONCE(ct->status) & uses_nat)
+ goto out;
+
+ if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+ nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
+ taken = false;
+ goto out;
+ }
+out:
+ nf_ct_put(ct);
+ return taken;
+}
+
static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{
static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
@@ -611,7 +727,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (nf_in_range(orig_tuple, range)) {
- if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ if (!nf_nat_used_tuple_new(orig_tuple, ct)) {
*tuple = *orig_tuple;
return;
}
@@ -1208,7 +1324,6 @@ static const struct nf_nat_hook nat_hook = {
#ifdef CONFIG_XFRM
.decode_session = __nf_nat_decode_session,
#endif
- .manip_pkt = nf_nat_manip_pkt,
.remove_nat_bysrc = nf_nat_cleanup_conntrack,
};
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 57259b5f3ef5..a24fe62650a7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1849,7 +1849,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
if (!hook_list)
hook_list = &basechain->hook_list;
- list_for_each_entry(hook, hook_list, list) {
+ list_for_each_entry_rcu(hook, hook_list, list) {
if (!first)
first = hook;
@@ -6684,7 +6684,7 @@ static int nft_setelem_catchall_insert(const struct net *net,
}
}
- catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
+ catchall = kmalloc(sizeof(*catchall), GFP_KERNEL_ACCOUNT);
if (!catchall)
return -ENOMEM;
@@ -9207,7 +9207,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
- kfree(hook);
+ kfree_rcu(hook, rcu);
}
kfree(flowtable->name);
module_put(flowtable->data.type->owner);
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 52cdfee17f73..7ca4f0d21fe2 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -535,7 +535,7 @@ nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_match *m = expr->ops->data;
int ret;
- priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
+ priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL_ACCOUNT);
if (!priv->info)
return -ENOMEM;
@@ -808,7 +808,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
@@ -898,7 +898,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5defe6e4fd98..e35588137995 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -163,7 +163,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
- priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+ priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL_ACCOUNT);
if (priv->prefix == NULL)
return -ENOMEM;
nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8c8eb14d647b..05cd1e6e6a2f 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -952,7 +952,7 @@ static int nft_secmark_obj_init(const struct nft_ctx *ctx,
if (tb[NFTA_SECMARK_CTX] == NULL)
return -EINVAL;
- priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
+ priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL_ACCOUNT);
if (!priv->ctx)
return -ENOMEM;
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 7d29db7c2ac0..bd058babfc82 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -66,7 +66,7 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
+ priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL_ACCOUNT);
if (!priv->counter)
return -ENOMEM;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index eb4c4a4ac7ac..7be342b495f5 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -663,7 +663,7 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
check_add_overflow(rules, extra, &rules_alloc))
return -EOVERFLOW;
- new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL);
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
if (!new_mt)
return -ENOMEM;
@@ -936,7 +936,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
return;
}
- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
+ new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
if (!new_lt)
return;
@@ -1212,7 +1212,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
scratch = kzalloc_node(struct_size(scratch, map,
bsize_max * 2) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL, cpu_to_node(i));
+ GFP_KERNEL_ACCOUNT, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
* allocations: this means that some scratch maps have
@@ -1427,7 +1427,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
struct nft_pipapo_match *new;
int i;
- new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL);
+ new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT);
if (!new)
return NULL;
@@ -1457,7 +1457,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
src->bsize * sizeof(*dst->lt) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!new_lt)
goto out_lt;
@@ -1470,7 +1470,8 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (src->rules > 0) {
dst->mt = kvmalloc_array(src->rules_alloc,
- sizeof(*src->mt), GFP_KERNEL);
+ sizeof(*src->mt),
+ GFP_KERNEL_ACCOUNT);
if (!dst->mt)
goto out_mt;
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 60a76e6e348e..5c6ed68cc6e0 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -509,13 +509,14 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return err;
}
- md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
+ md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
+ GFP_KERNEL_ACCOUNT);
if (!md)
return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
- err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
+ err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
if (err < 0) {
metadata_dst_free(md);
return err;
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index 41ece61eb57a..00c51cf693f3 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
mutex_lock(&qrtr_node_lock);
list_for_each_entry(node, &qrtr_all_nodes, item) {
- skbn = skb_clone(skb, GFP_KERNEL);
+ skbn = pskb_copy(skb, GFP_KERNEL);
if (!skbn)
break;
skb_set_owner_w(skbn, skb->sk);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 13a5126bc36e..7d3e82e4c2fc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1394,7 +1394,6 @@ static const struct file_operations rfkill_fops = {
.release = rfkill_fop_release,
.unlocked_ioctl = rfkill_fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
#define RFKILL_NAME "rfkill"
diff --git a/net/socket.c b/net/socket.c
index 7b046dd3e9a7..601ad74930ef 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -153,7 +153,6 @@ static void sock_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
.poll = sock_poll,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 95ff74706104..1bd3e531b0e0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -731,11 +731,10 @@ static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
- struct list_head pending;
struct hlist_node *tmp;
int hash = DFR_HASH(item);
+ LIST_HEAD(pending);
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
@@ -756,10 +755,8 @@ static void cache_revisit_request(struct cache_head *item)
void cache_clean_deferred(void *owner)
{
struct cache_deferred_req *dreq, *tmp;
- struct list_head pending;
+ LIST_HEAD(pending);
-
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
@@ -1085,9 +1082,8 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
struct cache_queue *cq, *tmp;
struct cache_request *cr;
- struct list_head dequeued;
+ LIST_HEAD(dequeued);
- INIT_LIST_HEAD(&dequeued);
spin_lock(&queue_lock);
list_for_each_entry_safe(cq, tmp, &detail->queue, list)
if (!cq->reader) {
@@ -1596,7 +1592,6 @@ static int cache_release_procfs(struct inode *inode, struct file *filp)
}
static const struct proc_ops cache_channel_proc_ops = {
- .proc_lseek = no_llseek,
.proc_read = cache_read_procfs,
.proc_write = cache_write_procfs,
.proc_poll = cache_poll_procfs,
@@ -1662,7 +1657,6 @@ static const struct proc_ops cache_flush_proc_ops = {
.proc_read = read_flush_procfs,
.proc_write = write_flush_procfs,
.proc_release = release_flush_procfs,
- .proc_lseek = no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
@@ -1815,7 +1809,6 @@ static int cache_release_pipefs(struct inode *inode, struct file *filp)
const struct file_operations cache_file_operations_pipefs = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = cache_read_pipefs,
.write = cache_write_pipefs,
.poll = cache_poll_pipefs,
@@ -1881,7 +1874,6 @@ const struct file_operations cache_flush_operations_pipefs = {
.read = read_flush_pipefs,
.write = write_flush_pipefs,
.release = release_flush_pipefs,
- .llseek = no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 09f29a95f2bc..0090162ee8c3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -48,13 +48,8 @@
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
-/*
- * All RPC clients are linked into this list
- */
-
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
-
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
@@ -546,7 +541,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.connect_timeout = args->connect_timeout,
.reconnect_timeout = args->reconnect_timeout,
};
- char servername[48];
+ char servername[RPC_MAXNETNAMELEN];
struct rpc_clnt *clnt;
int i;
@@ -1893,12 +1888,6 @@ call_allocate(struct rpc_task *task)
if (req->rq_buffer)
return;
- if (proc->p_proc != 0) {
- BUG_ON(proc->p_arglen == 0);
- if (proc->p_decode != NULL)
- BUG_ON(proc->p_replen == 0);
- }
-
/*
* Calculate the size (in quads) of the RPC call
* and reply headers, and convert both values
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 910a5d850d04..7ce3721c06ca 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -385,7 +385,6 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations rpc_pipe_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = rpc_pipe_read,
.write = rpc_pipe_write,
.poll = rpc_pipe_poll,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9aff845196ce..7e7f4e0390c7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -440,10 +440,11 @@ EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
+
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -480,7 +481,7 @@ __svc_init_bc(struct svc_serv *serv)
* Create an RPC service
*/
static struct svc_serv *
-__svc_create(struct svc_program *prog, struct svc_stat *stats,
+__svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats,
unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
struct svc_serv *serv;
@@ -491,7 +492,8 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
serv->sv_name = prog->pg_name;
- serv->sv_program = prog;
+ serv->sv_programs = prog;
+ serv->sv_nprogs = nprogs;
serv->sv_stats = stats;
if (bufsize > RPCSVC_MAXPAYLOAD)
bufsize = RPCSVC_MAXPAYLOAD;
@@ -499,17 +501,18 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
serv->sv_threadfn = threadfn;
xdrsize = 0;
- while (prog) {
- prog->pg_lovers = prog->pg_nvers-1;
- for (vers=0; vers<prog->pg_nvers ; vers++)
- if (prog->pg_vers[vers]) {
- prog->pg_hivers = vers;
- if (prog->pg_lovers > vers)
- prog->pg_lovers = vers;
- if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
- xdrsize = prog->pg_vers[vers]->vs_xdrsize;
+ for (i = 0; i < nprogs; i++) {
+ struct svc_program *progp = &prog[i];
+
+ progp->pg_lovers = progp->pg_nvers-1;
+ for (vers = 0; vers < progp->pg_nvers ; vers++)
+ if (progp->pg_vers[vers]) {
+ progp->pg_hivers = vers;
+ if (progp->pg_lovers > vers)
+ progp->pg_lovers = vers;
+ if (progp->pg_vers[vers]->vs_xdrsize > xdrsize)
+ xdrsize = progp->pg_vers[vers]->vs_xdrsize;
}
- prog = prog->pg_next;
}
serv->sv_xdrsize = xdrsize;
INIT_LIST_HEAD(&serv->sv_tempsocks);
@@ -558,13 +561,14 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
int (*threadfn)(void *data))
{
- return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ return __svc_create(prog, 1, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);
/**
* svc_create_pooled - Create an RPC service with pooled threads
- * @prog: the RPC program the new service will handle
+ * @prog: Array of RPC programs the new service will handle
+ * @nprogs: Number of programs in the array
* @stats: the stats struct if desired
* @bufsize: maximum message size for @prog
* @threadfn: a function to service RPC requests for @prog
@@ -572,6 +576,7 @@ EXPORT_SYMBOL_GPL(svc_create);
* Returns an instantiated struct svc_serv object or NULL.
*/
struct svc_serv *svc_create_pooled(struct svc_program *prog,
+ unsigned int nprogs,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data))
@@ -579,7 +584,7 @@ struct svc_serv *svc_create_pooled(struct svc_program *prog,
struct svc_serv *serv;
unsigned int npools = svc_pool_map_get();
- serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn);
if (!serv)
goto out_err;
serv->sv_is_pooled = true;
@@ -602,16 +607,16 @@ svc_destroy(struct svc_serv **servp)
*servp = NULL;
- dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
+ dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name);
timer_shutdown_sync(&serv->sv_temptimer);
/*
* Remaining transports at this point are not expected.
*/
WARN_ONCE(!list_empty(&serv->sv_permsocks),
- "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name);
WARN_ONCE(!list_empty(&serv->sv_tempsocks),
- "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name);
cache_clean_deferred(serv);
@@ -1148,15 +1153,16 @@ int svc_register(const struct svc_serv *serv, struct net *net,
const int family, const unsigned short proto,
const unsigned short port)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
int error = 0;
WARN_ON_ONCE(proto == 0 && port == 0);
if (proto == 0 && port == 0)
return -EINVAL;
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
error = progp->pg_rpcbind_set(net, progp, i,
@@ -1208,13 +1214,14 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
struct sighand_struct *sighand;
- struct svc_program *progp;
unsigned long flags;
- unsigned int i;
+ unsigned int p, i;
clear_thread_flag(TIF_SIGPENDING);
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -1320,7 +1327,7 @@ svc_process_common(struct svc_rqst *rqstp)
struct svc_process_info process;
enum svc_auth_status auth_res;
unsigned int aoffset;
- int rc;
+ int pr, rc;
__be32 *p;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1344,9 +1351,12 @@ svc_process_common(struct svc_rqst *rqstp)
rqstp->rq_vers = be32_to_cpup(p++);
rqstp->rq_proc = be32_to_cpup(p);
- for (progp = serv->sv_program; progp; progp = progp->pg_next)
+ for (pr = 0; pr < serv->sv_nprogs; pr++) {
+ progp = &serv->sv_programs[pr];
+
if (rqstp->rq_prog == progp->pg_prog)
break;
+ }
/*
* Decode auth data, and add verifier to reply buffer.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 53ebc719ff5a..43c57124de52 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -268,7 +268,7 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
spin_unlock(&svc_xprt_class_lock);
newxprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
if (IS_ERR(newxprt)) {
- trace_svc_xprt_create_err(serv->sv_program->pg_name,
+ trace_svc_xprt_create_err(serv->sv_programs->pg_name,
xcl->xcl_name, sap, len,
newxprt);
module_put(xcl->xcl_owner);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 93d9e949e265..55b4d2874188 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/err.h>
#include <linux/hash.h>
+#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>
@@ -175,6 +176,33 @@ rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_auth_flavor);
+/**
+ * svcauth_map_clnt_to_svc_cred_local - maps a generic cred
+ * to a svc_cred suitable for use in nfsd.
+ * @clnt: rpc_clnt associated with nfs client
+ * @cred: generic cred associated with nfs client
+ * @svc: returned svc_cred that is suitable for use in nfsd
+ */
+void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *cred,
+ struct svc_cred *svc)
+{
+ struct user_namespace *userns = clnt->cl_cred ?
+ clnt->cl_cred->user_ns : &init_user_ns;
+
+ memset(svc, 0, sizeof(struct svc_cred));
+
+ svc->cr_uid = KUIDT_INIT(from_kuid_munged(userns, cred->fsuid));
+ svc->cr_gid = KGIDT_INIT(from_kgid_munged(userns, cred->fsgid));
+ svc->cr_flavor = clnt->cl_auth->au_flavor;
+ if (cred->group_info)
+ svc->cr_group_info = get_group_info(cred->group_info);
+ /* These aren't relevant for local (network is bypassed) */
+ svc->cr_principal = NULL;
+ svc->cr_gss_mech = NULL;
+}
+EXPORT_SYMBOL_GPL(svcauth_map_clnt_to_svc_cred_local);
+
/**************************************************
* 'auth_domains' are stored in a hash table indexed by name.
* When the last reference to an 'auth_domain' is dropped,
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 04b45588ae6f..8ca98b146ec8 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -697,7 +697,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
rqstp->rq_auth_stat = rpc_autherr_badcred;
ipm = ip_map_cached_get(xprt);
if (ipm == NULL)
- ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
+ ipm = __ip_map_lookup(sn->ip_map_cache,
+ rqstp->rq_server->sv_programs->pg_class,
&sin6->sin6_addr);
if (ipm == NULL)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e0160da4ef43..85e423921734 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -94,6 +94,63 @@ out_rcu:
return ret;
}
+/* Caller needs to hold vsock->tx_lock on the vq. */
+static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
+ struct virtio_vsock *vsock)
+{
+ int ret, in_sg = 0, out_sg = 0;
+ struct scatterlist **sgs;
+
+ sgs = vsock->out_sgs;
+ sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+ sizeof(*virtio_vsock_hdr(skb)));
+ out_sg++;
+
+ if (!skb_is_nonlinear(skb)) {
+ if (skb->len > 0) {
+ sg_init_one(sgs[out_sg], skb->data, skb->len);
+ out_sg++;
+ }
+ } else {
+ struct skb_shared_info *si;
+ int i;
+
+ /* If the skb is nonlinear, then its buffer must contain
+ * only the header and nothing more. The data is stored in
+ * the fragmented part.
+ */
+ WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+ si = skb_shinfo(skb);
+
+ for (i = 0; i < si->nr_frags; i++) {
+ skb_frag_t *skb_frag = &si->frags[i];
+ void *va;
+
+ /* We will use 'page_to_virt()' for the userspace page
+ * here, because virtio or dma-mapping layers will call
+ * 'virt_to_phys()' later to fill the buffer descriptor.
+ * We don't touch memory at the "virtual" address of this page.
+ */
+ va = page_to_virt(skb_frag_page(skb_frag));
+ sg_init_one(sgs[out_sg],
+ va + skb_frag_off(skb_frag),
+ skb_frag_size(skb_frag));
+ out_sg++;
+ }
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
+ if (ret < 0)
+ return ret;
+
+ virtio_transport_deliver_tap_pkt(skb);
+ return 0;
+}
+
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
@@ -111,66 +168,22 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
- int ret, in_sg = 0, out_sg = 0;
- struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;
+ int ret;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
if (!skb)
break;
reply = virtio_vsock_skb_reply(skb);
- sgs = vsock->out_sgs;
- sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
- sizeof(*virtio_vsock_hdr(skb)));
- out_sg++;
-
- if (!skb_is_nonlinear(skb)) {
- if (skb->len > 0) {
- sg_init_one(sgs[out_sg], skb->data, skb->len);
- out_sg++;
- }
- } else {
- struct skb_shared_info *si;
- int i;
-
- /* If skb is nonlinear, then its buffer must contain
- * only header and nothing more. Data is stored in
- * the fragged part.
- */
- WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
-
- si = skb_shinfo(skb);
-
- for (i = 0; i < si->nr_frags; i++) {
- skb_frag_t *skb_frag = &si->frags[i];
- void *va;
- /* We will use 'page_to_virt()' for the userspace page
- * here, because virtio or dma-mapping layers will call
- * 'virt_to_phys()' later to fill the buffer descriptor.
- * We don't touch memory at "virtual" address of this page.
- */
- va = page_to_virt(skb_frag_page(skb_frag));
- sg_init_one(sgs[out_sg],
- va + skb_frag_off(skb_frag),
- skb_frag_size(skb_frag));
- out_sg++;
- }
- }
-
- ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
- /* Usually this means that there is no more space available in
- * the vq
- */
+ ret = virtio_transport_send_skb(skb, vq, vsock);
if (ret < 0) {
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
- virtio_transport_deliver_tap_pkt(skb);
-
if (reply) {
struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
int val;
@@ -195,6 +208,28 @@ out:
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
+/* Caller needs to hold the RCU read lock for vsock.
+ * Returns 0 if the packet is successfully put on the vq.
+ */
+static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
+{
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
+ int ret;
+
+ /* Inside RCU, can't sleep! */
+ ret = mutex_trylock(&vsock->tx_lock);
+ if (unlikely(ret == 0))
+ return -EBUSY;
+
+ ret = virtio_transport_send_skb(skb, vq, vsock);
+ if (ret == 0)
+ virtqueue_kick(vq);
+
+ mutex_unlock(&vsock->tx_lock);
+
+ return ret;
+}
+
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
@@ -218,11 +253,20 @@ virtio_transport_send_pkt(struct sk_buff *skb)
goto out_rcu;
}
- if (virtio_vsock_skb_reply(skb))
- atomic_inc(&vsock->queued_replies);
+ /* If send_pkt_queue is empty, packet ordering is preserved even if we
+ * bypass it, so try to put the packet directly on the virtqueue via
+ * virtio_transport_send_skb_fast_path. If that fails, simply put the
+ * packet on the intermediate queue and schedule the worker.
+ */
+ if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
+ virtio_transport_send_skb_fast_path(vsock, skb)) {
+ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
- virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
- queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
out_rcu:
rcu_read_unlock();
diff --git a/rust/Makefile b/rust/Makefile
index f168d2c98a15..b5e0a73b78f3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -8,16 +8,16 @@ always-$(CONFIG_RUST) += exports_core_generated.h
# Missing prototypes are expected in the helpers since these are exported
# for Rust only, thus there is no header nor prototypes.
-obj-$(CONFIG_RUST) += helpers.o
-CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
+obj-$(CONFIG_RUST) += helpers/helpers.o
+CFLAGS_REMOVE_helpers/helpers.o = -Wmissing-prototypes -Wmissing-declarations
always-$(CONFIG_RUST) += libmacros.so
no-clean-files += libmacros.so
always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
-always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
- exports_kernel_generated.h
+always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \
+ exports_bindings_generated.h exports_kernel_generated.h
always-$(CONFIG_RUST) += uapi/uapi_generated.rs
obj-$(CONFIG_RUST) += uapi.o
@@ -63,6 +63,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
+ -Zunstable-options --generate-link-to-definition \
--output $(rustdoc_output) \
--crate-name $(subst rustdoc-,,$@) \
$(if $(rustdoc_host),,--sysroot=/dev/null) \
@@ -270,7 +271,7 @@ quiet_cmd_bindgen = BINDGEN $@
cmd_bindgen = \
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
- --no-debug '.*' \
+ --no-debug '.*' --enable-function-attribute-detection \
-o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
@@ -299,13 +300,13 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@
-$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
- | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
@@ -313,6 +314,18 @@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
$(call if_changed,exports)
+# Even though Rust kernel modules should never use the bindings directly,
+# symbols from the `bindings` crate and the C helpers need to be exported
+# because Rust generics and inlined functions may not get their code generated
+# in the crate where they are defined. Other helpers, called from non-inline
+# functions, may not be exported, in principle. However, in general, the Rust
+# compiler does not guarantee codegen will be performed for a non-inline
+# function either. Therefore, we export all symbols from helpers and bindings.
+# In the future, this may be revisited to reduce the number of exports after
+# the compiler is informed about the places codegen is required.
+$(obj)/exports_helpers_generated.h: $(obj)/helpers/helpers.o FORCE
+ $(call if_changed,exports)
+
$(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE
$(call if_changed,exports)
@@ -329,9 +342,7 @@ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
# Procedural macros can only be used with the `rustc` that compiled it.
-# Therefore, to get `libmacros.so` automatically recompiled when the compiler
-# version changes, we add `core.o` as a dependency (even if it is not needed).
-$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
+$(obj)/libmacros.so: $(src)/macros/lib.rs FORCE
+$(call if_changed_dep,rustc_procmacro)
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
@@ -344,7 +355,8 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
--crate-type rlib -L$(objtree)/$(obj) \
--crate-name $(patsubst %.o,%,$(notdir $@)) $< \
--sysroot=/dev/null \
- $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \
+ $(cmd_objtool)
rust-analyzer:
$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
@@ -366,44 +378,50 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
__ashlti3 __lshrti3
endif
+define rule_rustc_library
+ $(call cmd_and_fixdep,rustc_library)
+ $(call cmd,gen_objtooldep)
+endef
+
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
-$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
- +$(call if_changed_dep,rustc_library)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \
+ $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE
+ +$(call if_changed_rule,rustc_library)
ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/bindings.o: $(src)/bindings/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/bindings/bindings_generated.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/uapi.o: $(src)/uapi/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/uapi/uapi_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
$(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
endif # CONFIG_RUST
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index b940a5777330..ae82e9c941af 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -7,8 +7,8 @@
*/
#include <kunit/test.h>
-#include <linux/blk_types.h>
#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
diff --git a/rust/exports.c b/rust/exports.c
index 3803c21d1403..e5695f3b45b7 100644
--- a/rust/exports.c
+++ b/rust/exports.c
@@ -17,6 +17,7 @@
#include "exports_core_generated.h"
#include "exports_alloc_generated.h"
+#include "exports_helpers_generated.h"
#include "exports_bindings_generated.h"
#include "exports_kernel_generated.h"
diff --git a/rust/helpers.c b/rust/helpers.c
deleted file mode 100644
index 92d3c03ae1bd..000000000000
--- a/rust/helpers.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
- * cannot be called either. This file explicitly creates functions ("helpers")
- * that wrap those so that they can be called from Rust.
- *
- * Even though Rust kernel modules should never use the bindings directly, some
- * of these helpers need to be exported because Rust generics and inlined
- * functions may not get their code generated in the crate where they are
- * defined. Other helpers, called from non-inline functions, may not be
- * exported, in principle. However, in general, the Rust compiler does not
- * guarantee codegen will be performed for a non-inline function either.
- * Therefore, this file exports all the helpers. In the future, this may be
- * revisited to reduce the number of exports after the compiler is informed
- * about the places codegen is required.
- *
- * All symbols are exported as GPL-only to guarantee no GPL-only feature is
- * accidentally exposed.
- *
- * Sorted alphabetically.
- */
-
-#include <kunit/test-bug.h>
-#include <linux/bug.h>
-#include <linux/build_bug.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/errname.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-__noreturn void rust_helper_BUG(void)
-{
- BUG();
-}
-EXPORT_SYMBOL_GPL(rust_helper_BUG);
-
-unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
- unsigned long n)
-{
- return copy_from_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
-
-unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
- unsigned long n)
-{
- return copy_to_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
-
-void rust_helper_mutex_lock(struct mutex *lock)
-{
- mutex_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
-
-void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
-#else
- spin_lock_init(lock);
-#endif
-}
-EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
-
-void rust_helper_spin_lock(spinlock_t *lock)
-{
- spin_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
-
-void rust_helper_spin_unlock(spinlock_t *lock)
-{
- spin_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
-
-void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
-{
- init_wait(wq_entry);
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_wait);
-
-int rust_helper_signal_pending(struct task_struct *t)
-{
- return signal_pending(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
-
-struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
- return alloc_pages(gfp_mask, order);
-}
-EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
-
-void *rust_helper_kmap_local_page(struct page *page)
-{
- return kmap_local_page(page);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
-
-void rust_helper_kunmap_local(const void *addr)
-{
- kunmap_local(addr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
-
-refcount_t rust_helper_REFCOUNT_INIT(int n)
-{
- return (refcount_t)REFCOUNT_INIT(n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
-
-void rust_helper_refcount_inc(refcount_t *r)
-{
- refcount_inc(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
-
-bool rust_helper_refcount_dec_and_test(refcount_t *r)
-{
- return refcount_dec_and_test(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
-
-__force void *rust_helper_ERR_PTR(long err)
-{
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
-
-bool rust_helper_IS_ERR(__force const void *ptr)
-{
- return IS_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
-
-long rust_helper_PTR_ERR(__force const void *ptr)
-{
- return PTR_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
-
-const char *rust_helper_errname(int err)
-{
- return errname(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_errname);
-
-struct task_struct *rust_helper_get_current(void)
-{
- return current;
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_current);
-
-void rust_helper_get_task_struct(struct task_struct *t)
-{
- get_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
-
-void rust_helper_put_task_struct(struct task_struct *t)
-{
- put_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
-
-struct kunit *rust_helper_kunit_get_current_test(void)
-{
- return kunit_get_current_test();
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
-
-void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
- bool onstack, const char *name,
- struct lock_class_key *key)
-{
- __init_work(work, onstack);
- work->data = (atomic_long_t)WORK_DATA_INIT();
- lockdep_init_map(&work->lockdep_map, name, key, 0);
- INIT_LIST_HEAD(&work->entry);
- work->func = func;
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key);
-
-void * __must_check __realloc_size(2)
-rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
-{
- return krealloc(objp, new_size, flags);
-}
-EXPORT_SYMBOL_GPL(rust_helper_krealloc);
-
-/*
- * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
- * use it in contexts where Rust expects a `usize` like slice (array) indices.
- * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
- * pointer) but not necessarily the same as `size_t` (can hold the size of any
- * single object). Most modern platforms use the same concrete integer type for
- * both of them, but in case we find ourselves on a platform where
- * that's not true, fail early instead of risking ABI or
- * integer-overflow issues.
- *
- * If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to add
- * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
- * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
- * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
- */
-static_assert(
- sizeof(size_t) == sizeof(uintptr_t) &&
- __alignof__(size_t) == __alignof__(uintptr_t),
- "Rust code expects C `size_t` to match Rust `usize`"
-);
-
-// This will soon be moved to a separate file, so no need to merge with above.
-#include <linux/blk-mq.h>
-#include <linux/blkdev.h>
-
-void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_to_pdu);
-
-struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
-{
- return blk_mq_rq_from_pdu(pdu);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_from_pdu);
diff --git a/rust/helpers/blk.c b/rust/helpers/blk.c
new file mode 100644
index 000000000000..cc9f4e6a2d23
--- /dev/null
+++ b/rust/helpers/blk.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
+
+void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
+{
+ return blk_mq_rq_from_pdu(pdu);
+}
diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c
new file mode 100644
index 000000000000..e2d13babc737
--- /dev/null
+++ b/rust/helpers/bug.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+
+__noreturn void rust_helper_BUG(void)
+{
+ BUG();
+}
diff --git a/rust/helpers/build_assert.c b/rust/helpers/build_assert.c
new file mode 100644
index 000000000000..6a54b2680b14
--- /dev/null
+++ b/rust/helpers/build_assert.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/build_bug.h>
+
+/*
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
+ * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C `size_t` to match Rust `usize`"
+);
diff --git a/rust/helpers/build_bug.c b/rust/helpers/build_bug.c
new file mode 100644
index 000000000000..e994f7b5928c
--- /dev/null
+++ b/rust/helpers/build_bug.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/errname.h>
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
diff --git a/rust/helpers/err.c b/rust/helpers/err.c
new file mode 100644
index 000000000000..be3d45ef78a2
--- /dev/null
+++ b/rust/helpers/err.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/err.h>
+#include <linux/export.h>
+
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
new file mode 100644
index 000000000000..30f40149f3a9
--- /dev/null
+++ b/rust/helpers/helpers.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
+ * cannot be called either. This file explicitly creates functions ("helpers")
+ * that wrap those so that they can be called from Rust.
+ *
+ * Sorted alphabetically.
+ */
+
+#include "blk.c"
+#include "bug.c"
+#include "build_assert.c"
+#include "build_bug.c"
+#include "err.c"
+#include "kunit.c"
+#include "mutex.c"
+#include "page.c"
+#include "rbtree.c"
+#include "refcount.c"
+#include "signal.c"
+#include "slab.c"
+#include "spinlock.c"
+#include "task.c"
+#include "uaccess.c"
+#include "wait.c"
+#include "workqueue.c"
diff --git a/rust/helpers/kunit.c b/rust/helpers/kunit.c
new file mode 100644
index 000000000000..9d725067eb3b
--- /dev/null
+++ b/rust/helpers/kunit.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test-bug.h>
+#include <linux/export.h>
+
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
new file mode 100644
index 000000000000..200db7e6279f
--- /dev/null
+++ b/rust/helpers/mutex.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
new file mode 100644
index 000000000000..b3f2b8fbf87f
--- /dev/null
+++ b/rust/helpers/page.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
diff --git a/rust/helpers/rbtree.c b/rust/helpers/rbtree.c
new file mode 100644
index 000000000000..6d404b84a9b5
--- /dev/null
+++ b/rust/helpers/rbtree.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rbtree.h>
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c
new file mode 100644
index 000000000000..f47afc148ec3
--- /dev/null
+++ b/rust/helpers/refcount.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/refcount.h>
+
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
diff --git a/rust/helpers/signal.c b/rust/helpers/signal.c
new file mode 100644
index 000000000000..63c407f80c26
--- /dev/null
+++ b/rust/helpers/signal.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/signal.h>
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
new file mode 100644
index 000000000000..f043e087f9d6
--- /dev/null
+++ b/rust/helpers/slab.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+void * __must_check __realloc_size(2)
+rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+{
+ return krealloc(objp, new_size, flags);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
new file mode 100644
index 000000000000..acc1376b833c
--- /dev/null
+++ b/rust/helpers/spinlock.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/spinlock.h>
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+#else
+ spin_lock_init(lock);
+#endif
+}
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
diff --git a/rust/helpers/task.c b/rust/helpers/task.c
new file mode 100644
index 000000000000..7ac789232d11
--- /dev/null
+++ b/rust/helpers/task.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/task.h>
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
new file mode 100644
index 000000000000..f49076f813cd
--- /dev/null
+++ b/rust/helpers/uaccess.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/uaccess.h>
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
diff --git a/rust/helpers/wait.c b/rust/helpers/wait.c
new file mode 100644
index 000000000000..c7336bbf2750
--- /dev/null
+++ b/rust/helpers/wait.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/wait.h>
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
diff --git a/rust/helpers/workqueue.c b/rust/helpers/workqueue.c
new file mode 100644
index 000000000000..f59427acc323
--- /dev/null
+++ b/rust/helpers/workqueue.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/workqueue.h>
+
+void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
+ bool onstack, const char *name,
+ struct lock_class_key *key)
+{
+ __init_work(work, onstack);
+ work->data = (atomic_long_t)WORK_DATA_INIT();
+ lockdep_init_map(&work->lockdep_map, name, key, 0);
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs
index 9f1c1c489189..7009ad78d4e0 100644
--- a/rust/kernel/alloc/box_ext.rs
+++ b/rust/kernel/alloc/box_ext.rs
@@ -4,7 +4,7 @@
use super::{AllocError, Flags};
use alloc::boxed::Box;
-use core::mem::MaybeUninit;
+use core::{mem::MaybeUninit, ptr, result::Result};
/// Extensions to [`Box`].
pub trait BoxExt<T>: Sized {
@@ -17,6 +17,24 @@ pub trait BoxExt<T>: Sized {
///
/// The allocation may fail, in which case an error is returned.
fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>;
+
+ /// Drops the contents, but keeps the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{flags, box_ext::BoxExt};
+ /// let value = Box::new([0; 32], flags::GFP_KERNEL)?;
+ /// assert_eq!(*value, [0; 32]);
+ /// let mut value = Box::drop_contents(value);
+ /// // Now we can re-use `value`:
+ /// value.write([1; 32]);
+ /// // SAFETY: We just wrote to it.
+ /// let value = unsafe { value.assume_init() };
+ /// assert_eq!(*value, [1; 32]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>>;
}
impl<T> BoxExt<T> for Box<T> {
@@ -55,4 +73,17 @@ impl<T> BoxExt<T> for Box<T> {
// zero-sized types, we use `NonNull::dangling`.
Ok(unsafe { Box::from_raw(ptr) })
}
+
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>> {
+ let ptr = Box::into_raw(this);
+ // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
+ unsafe { ptr::drop_in_place(ptr) };
+
+ // CAST: `MaybeUninit<T>` is a transparent wrapper of `T`.
+ let ptr = ptr.cast::<MaybeUninit<T>>();
+
+ // SAFETY: `ptr` came from `Box::into_raw`, so it is valid for reads and writes; after the
+ // `drop_in_place` above, the pointee is exactly an uninitialized `MaybeUninit<T>`.
+ unsafe { Box::from_raw(ptr) }
+ }
}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 145f5c397009..6f1587a2524e 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -135,8 +135,11 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
+ #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ unsafe {
+ bindings::ERR_PTR(self.0.into()) as *mut _
+ }
}
/// Returns a string representing the error, if one exists.
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 495c09ebe3a3..a17ac8762d8f 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -213,6 +213,7 @@
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
error::{self, Error},
+ sync::Arc,
sync::UniqueArc,
types::{Opaque, ScopeGuard},
};
@@ -742,6 +743,74 @@ macro_rules! try_init {
};
}
+/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]`, i.e. that it is
+/// structurally pinned.
+///
+/// # Example
+///
+/// This will succeed:
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// #[pin]
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// This will fail:
+// TODO: replace with `compile_fail` when supported.
+/// ```ignore
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// Some uses of the macro may trigger the `can't use generic parameters from outer item` error. To
+/// work around this, you may pass the `inline` parameter to the macro. The `inline` parameter can
+/// only be used when the macro is invoked from a function body.
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct Foo<T> {
+/// #[pin]
+/// elem: T,
+/// }
+///
+/// impl<T> Foo<T> {
+/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> {
+/// assert_pinned!(Foo<T>, elem, T, inline);
+///
+/// // SAFETY: The field is structurally pinned.
+/// unsafe { self.map_unchecked_mut(|me| &mut me.elem) }
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! assert_pinned {
+ ($ty:ty, $field:ident, $field_ty:ty, inline) => {
+ let _ = move |ptr: *mut $field_ty| {
+ // SAFETY: This code is unreachable.
+ let data = unsafe { <$ty as $crate::init::__internal::HasPinData>::__pin_data() };
+ let init = $crate::init::__internal::AlwaysFail::<$field_ty>::new();
+ // SAFETY: This code is unreachable.
+ unsafe { data.$field(ptr, init) }.ok();
+ };
+ };
+
+ ($ty:ty, $field:ident, $field_ty:ty) => {
+ const _: () = {
+ $crate::assert_pinned!($ty, $field, $field_ty, inline);
+ };
+ };
+}
+
/// A pin-initializer for the type `T`.
///
/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
@@ -1107,11 +1176,17 @@ unsafe impl<T, E> PinInit<T, E> for T {
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
+ /// Pinned version of `Self`.
+ ///
+ /// If a type already implicitly pins its pointee, `Pin<Self>` is unnecessary. In this case use
+ /// `Self`, otherwise just use `Pin<Self>`.
+ type PinnedSelf;
+
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>;
@@ -1119,7 +1194,7 @@ pub trait InPlaceInit<T>: Sized {
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Pin<Self>>
+ fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self::PinnedSelf>
where
Error: From<E>,
{
@@ -1148,19 +1223,35 @@ pub trait InPlaceInit<T>: Sized {
}
}
+impl<T> InPlaceInit<T> for Arc<T> {
+ type PinnedSelf = Self;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_pin_init(init, flags).map(|u| u.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_init(init, flags).map(|u| u.into())
+ }
+}
+
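As a rough illustration of what the new PinnedSelf associated type changes at call sites (the make_* helpers and import paths below are illustrative, not part of this patch): Box::pin_init still yields Pin<Box<T>>, while Arc already pins its pointee, so Arc::pin_init can hand the Arc back directly.

    use core::pin::Pin;
    use kernel::alloc::flags;
    use kernel::error::{Error, Result};
    use kernel::init::{InPlaceInit, PinInit};
    use kernel::prelude::Box;
    use kernel::sync::Arc;

    fn make_boxed<T>(init: impl PinInit<T, Error>) -> Result<Pin<Box<T>>> {
        // Box::PinnedSelf = Pin<Box<T>>, so the pin wrapper stays visible.
        Box::pin_init(init, flags::GFP_KERNEL)
    }

    fn make_shared<T>(init: impl PinInit<T, Error>) -> Result<Arc<T>> {
        // Arc::PinnedSelf = Arc<T>: the Arc itself guarantees the pinning.
        Arc::pin_init(init, flags::GFP_KERNEL)
    }
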
impl<T> InPlaceInit<T> for Box<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1168,29 +1259,19 @@ impl<T> InPlaceInit<T> for Box<T> {
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid.
- unsafe { init.__init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_init(init)
}
}
impl<T> InPlaceInit<T> for UniqueArc<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ UniqueArc::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1198,13 +1279,67 @@ impl<T> InPlaceInit<T> for UniqueArc<T> {
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
+ UniqueArc::new_uninit(flags)?.write_init(init)
+ }
+}
+
+/// Smart pointer containing uninitialized memory and that can write a value.
+pub trait InPlaceWrite<T> {
+ /// The type `Self` turns into when the contents are initialized.
+ type Initialized;
+
+ /// Use the given initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_init<E>(self, init: impl Init<T, E>) -> Result<Self::Initialized, E>;
+
+ /// Use the given pin-initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;
+}
+
+impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> {
+ type Initialized = Box<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
// SAFETY: When init errors/panics, slot will get deallocated but not dropped,
// slot is valid.
unsafe { init.__init(slot)? };
// SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
+ }
+}
+
+impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {
+ type Initialized = UniqueArc<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
}
}
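A minimal usage sketch of the new InPlaceWrite trait (the Stats type and alloc_stats helper are made up for illustration; the usual kernel::init! macro and prelude imports are assumed): allocation and initialization become two separate, individually fallible steps, with the value written straight into the heap slot.

    use kernel::alloc::{box_ext::BoxExt, flags};
    use kernel::init::InPlaceWrite;
    use kernel::prelude::*;

    struct Stats {
        hits: u64,
        misses: u64,
    }

    fn alloc_stats() -> Result<Box<Stats>> {
        // Step 1: allocate an uninitialized Box<MaybeUninit<Stats>>.
        let slot = <Box<Stats> as BoxExt<Stats>>::new_uninit(flags::GFP_KERNEL)?;
        // Step 2: run the initializer directly on the slot, obtaining Box<Stats>.
        Ok(slot.write_init(kernel::init!(Stats { hits: 0, misses: 0 }))?)
    }

Compared with building the value on the stack and moving it into a fresh allocation, the value never exists outside the heap slot, which is what makes the pattern usable for large or pinned types.
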
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index db3372619ecd..13cefd37512f 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -228,3 +228,32 @@ impl OnlyCallFromDrop {
Self(())
}
}
+
+/// Initializer that always fails.
+///
+/// Used by [`assert_pinned!`].
+///
+/// [`assert_pinned!`]: crate::assert_pinned
+pub struct AlwaysFail<T: ?Sized> {
+ _t: PhantomData<T>,
+}
+
+impl<T: ?Sized> AlwaysFail<T> {
+ /// Creates a new initializer that always fails.
+ pub fn new() -> Self {
+ Self { _t: PhantomData }
+ }
+}
+
+impl<T: ?Sized> Default for AlwaysFail<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// SAFETY: `__pinned_init` always fails, which is always okay.
+unsafe impl<T: ?Sized> PinInit<T, ()> for AlwaysFail<T> {
+ unsafe fn __pinned_init(self, _slot: *mut T) -> Result<(), ()> {
+ Err(())
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 58ed400198bf..22a3bfa5a9e9 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -38,12 +38,14 @@ pub mod init;
pub mod ioctl;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
+pub mod list;
#[cfg(CONFIG_NET)]
pub mod net;
pub mod page;
pub mod prelude;
pub mod print;
pub mod sizes;
+pub mod rbtree;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
new file mode 100644
index 000000000000..5b4aec29eb67
--- /dev/null
+++ b/rust/kernel/list.rs
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A linked list implementation.
+
+use crate::init::PinInit;
+use crate::sync::ArcBorrow;
+use crate::types::Opaque;
+use core::iter::{DoubleEndedIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::ptr;
+
+mod impl_list_item_mod;
+pub use self::impl_list_item_mod::{
+ impl_has_list_links, impl_has_list_links_self_ptr, impl_list_item, HasListLinks, HasSelfPtr,
+};
+
+mod arc;
+pub use self::arc::{impl_list_arc_safe, AtomicTracker, ListArc, ListArcSafe, TryNewListArc};
+
+mod arc_field;
+pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
+
+/// A linked list.
+///
+/// All elements in this linked list will be [`ListArc`] references to the value. Since a value can
+/// only have one `ListArc` (for each pair of prev/next pointers), this ensures that the same
+/// prev/next pointers are not used for several linked lists.
+///
+/// # Invariants
+///
+/// * If the list is empty, then `first` is null. Otherwise, `first` points at the `ListLinks`
+/// field of the first element in the list.
+/// * All prev/next pointers in `ListLinks` fields of items in the list are valid and form a cycle.
+/// * For every item in the list, the list owns the associated [`ListArc`] reference and has
+/// exclusive access to the `ListLinks` field.
+pub struct List<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ first: *mut ListLinksFields,
+ _ty: PhantomData<ListArc<T, ID>>,
+}
+
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Send for List<T, ID>
+where
+ ListArc<T, ID>: Send,
+ T: ?Sized + ListItem<ID>,
+{
+}
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Sync for List<T, ID>
+where
+ ListArc<T, ID>: Sync,
+ T: ?Sized + ListItem<ID>,
+{
+}
+
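For orientation, a minimal sketch of how the list is used generically (any concrete item type is assumed to have been wired up as a ListItem, typically through the impl_list_item! family of macros re-exported above): the list takes ownership of the ListArc on insertion and returns it on removal.

    use kernel::list::{List, ListArc, ListItem};

    /// Push one element and pop the oldest one. While an element is linked in,
    /// the list owns its `ListArc`; `pop_front` hands that ownership back.
    fn fifo_roundtrip<T: ListItem>(queue: &mut List<T>, item: ListArc<T>) -> Option<ListArc<T>> {
        queue.push_back(item);
        queue.pop_front()
    }
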
+/// Implemented by types where a [`ListArc<Self>`] can be inserted into a [`List`].
+///
+/// # Safety
+///
+/// Implementers must ensure that they provide the guarantees documented on methods provided by
+/// this trait.
+///
+/// [`ListArc<Self>`]: ListArc
+pub unsafe trait ListItem<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Views the [`ListLinks`] for this value.
+ ///
+ /// # Guarantees
+ ///
+ /// If there is a previous call to `prepare_to_insert` and there is no call to `post_remove`
+ /// since the most recent such call, then this returns the same pointer as the one returned by
+ /// the most recent call to `prepare_to_insert`.
+ ///
+ /// Otherwise, the returned pointer points at a read-only [`ListLinks`] with two null pointers.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid value. (It need not be in an `Arc`.)
+ unsafe fn view_links(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// View the full value given its [`ListLinks`] field.
+ ///
+ /// Can only be used when the value is in a list.
+ ///
+ /// # Guarantees
+ ///
+ /// * Returns the same pointer as the one passed to the most recent call to `prepare_to_insert`.
+ /// * The returned pointer is valid until the next call to `post_remove`.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from the most recent call to `prepare_to_insert`, or
+ /// from a call to `view_links` that happened after the most recent call to
+ /// `prepare_to_insert`.
+ /// * Since the most recent call to `prepare_to_insert`, the `post_remove` method must not have
+ /// been called.
+ unsafe fn view_value(me: *mut ListLinks<ID>) -> *const Self;
+
+ /// This is called when an item is inserted into a [`List`].
+ ///
+ /// # Guarantees
+ ///
+ /// The caller is granted exclusive access to the returned [`ListLinks`] until `post_remove` is
+ /// called.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must point at a valid value in an [`Arc`].
+ /// * Calls to `prepare_to_insert` and `post_remove` on the same value must alternate.
+ /// * The caller must own the [`ListArc`] for this value.
+ /// * The caller must not give up ownership of the [`ListArc`] unless `post_remove` has been
+ /// called after this call to `prepare_to_insert`.
+ ///
+ /// [`Arc`]: crate::sync::Arc
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// This undoes a previous call to `prepare_to_insert`.
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is the pointer that was originally passed to `prepare_to_insert`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must be the pointer returned by the most recent call to
+ /// `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut ListLinks<ID>) -> *const Self;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct ListLinksFields {
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+}
+
+/// The prev/next pointers for an item in a linked list.
+///
+/// # Invariants
+///
+/// The fields are null if and only if this item is not in a list.
+#[repr(transparent)]
+pub struct ListLinks<const ID: u64 = 0> {
+ // This type is `!Unpin` for aliasing reasons as the pointers are part of an intrusive linked
+ // list.
+ inner: Opaque<ListLinksFields>,
+}
+
+// SAFETY: The only way to access/modify the pointers inside of `ListLinks<ID>` is via holding the
+// associated `ListArc<T, ID>`. Since that type correctly implements `Send`, it is impossible to
+// move an instance of this type to a different thread if the pointees are `!Send`.
+unsafe impl<const ID: u64> Send for ListLinks<ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinks are useless. Therefore, it's
+// okay to have immutable access to a ListLinks from several threads at once.
+unsafe impl<const ID: u64> Sync for ListLinks<ID> {}
+
+impl<const ID: u64> ListLinks<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn from_fields(me: *mut ListLinksFields) -> *mut Self {
+ me.cast()
+ }
+}
+
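Since ListLinks::new() is a pin-initializer, the links are normally embedded as a #[pin] field and initialized in place. A short sketch with a made-up Request type, assuming the kernel crate's #[pin_data]/pin_init! machinery:

    use kernel::init::PinInit;
    use kernel::list::ListLinks;
    use kernel::prelude::*;

    #[pin_data]
    struct Request {
        cmd: u32,
        #[pin]
        links: ListLinks,
    }

    impl Request {
        fn new(cmd: u32) -> impl PinInit<Self> {
            kernel::pin_init!(Self {
                cmd: cmd,
                links <- ListLinks::new(),
            })
        }
    }
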
+/// Similar to [`ListLinks`], but also contains a pointer to the full value.
+///
+/// This type can be used instead of [`ListLinks`] to support lists with trait objects.
+#[repr(C)]
+pub struct ListLinksSelfPtr<T: ?Sized, const ID: u64 = 0> {
+ /// The `ListLinks` field inside this value.
+ ///
+ /// This is public so that it can be used with `impl_has_list_links!`.
+ pub inner: ListLinks<ID>,
+ // UnsafeCell is not enough here because we use `Opaque::uninit` as a dummy value, and
+ // `ptr::null()` doesn't work for `T: ?Sized`.
+ self_ptr: Opaque<*const T>,
+}
+
+// SAFETY: The fields of a ListLinksSelfPtr can be moved across thread boundaries.
+unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinksSelfPtr are useless. Therefore,
+// it's okay to have immutable access to a ListLinks from several threads at once.
+//
+// Note that `inner` being a public field does not prevent this type from being opaque, since
+// `inner` is an opaque type.
+unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
+ /// The offset from the [`ListLinks`] to the self pointer field.
+ pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
+
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ },
+ self_ptr: Opaque::uninit(),
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
+ /// Creates a new empty list.
+ pub const fn new() -> Self {
+ Self {
+ first: ptr::null_mut(),
+ _ty: PhantomData,
+ }
+ }
+
+ /// Returns whether this list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.first.is_null()
+ }
+
+ /// Add the provided item to the back of the list.
+ pub fn push_back(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+ // the most recent call to `prepare_to_insert`, if any.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ self.first = item;
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: By the type invariant, this pointer is valid or null. We just checked that
+ // it's not null, so it must be valid.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ }
+
+ /// Add the provided item to the front of the list.
+ pub fn push_front(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * If this requirement is violated, then the previous caller of `prepare_to_insert`
+ // violated the safety requirement that they can't give up ownership of the `ListArc`
+ // until they call `post_remove`.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: We just checked that `next` is non-null.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ self.first = item;
+ }
+
+ /// Removes the last item from this list.
+ pub fn pop_back(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: We just checked that the list is not empty.
+ let last = unsafe { (*self.first).prev };
+ // SAFETY: The last item of this list is in this list.
+ Some(unsafe { self.remove_internal(last) })
+ }
+
+ /// Removes the first item from this list.
+ pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: The first item of this list is in this list.
+ Some(unsafe { self.remove_internal(self.first) })
+ }
+
+ /// Removes the provided item from this list and returns it.
+ ///
+ /// This returns `None` if the item is not in the list. (Note that by the safety requirements,
+ /// this means that the item is not in any list.)
+ ///
+ /// # Safety
+ ///
+ /// `item` must not be in a different linked list (with the same id).
+ pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
+ let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+ // SAFETY: The user provided a reference, and references are never dangling.
+ //
+ // As for why this is not a data race, there are two cases:
+ //
+ // * If `item` is not in any list, then these fields are read-only and null.
+ // * If `item` is in this list, then we have exclusive access to these fields since we
+ // have a mutable reference to the list.
+ //
+ // In either case, there's no race.
+ let ListLinksFields { next, prev } = unsafe { *item };
+
+ debug_assert_eq!(next.is_null(), prev.is_null());
+ if !next.is_null() {
+ // This is really a no-op, but this ensures that `item` is a raw pointer that was
+ // obtained without going through a pointer->reference->pointer conversion roundtrip.
+ // This ensures that the list is valid under the more restrictive strict provenance
+ // ruleset.
+ //
+ // SAFETY: We just checked that `next` is not null, and it's not dangling by the
+ // list invariants.
+ unsafe {
+ debug_assert_eq!(item, (*next).prev);
+ item = (*next).prev;
+ }
+
+ // SAFETY: We just checked that `item` is in a list, so the caller guarantees that it
+ // is in this list. The pointers are in the right order.
+ Some(unsafe { self.remove_internal_inner(item, next, prev) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// `item` must point at an item in this list.
+ unsafe fn remove_internal(&mut self, item: *mut ListLinksFields) -> ListArc<T, ID> {
+ // SAFETY: The caller promises that this pointer is not dangling, and there's no data race
+ // since we have a mutable reference to the list containing `item`.
+ let ListLinksFields { next, prev } = unsafe { *item };
+ // SAFETY: The pointers are ok and in the right order.
+ unsafe { self.remove_internal_inner(item, next, prev) }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// The `item` pointer must point at an item in this list, and we must have `(*item).next ==
+ /// next` and `(*item).prev == prev`.
+ unsafe fn remove_internal_inner(
+ &mut self,
+ item: *mut ListLinksFields,
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+ ) -> ListArc<T, ID> {
+ // SAFETY: We have exclusive access to the pointers of items in the list, and the prev/next
+ // pointers are always valid for items in a list.
+ //
+ // INVARIANT: There are three cases:
+ // * If the list has at least three items, then after removing the item, `prev` and `next`
+ // will be next to each other.
+ // * If the list has two items, then the remaining item will point at itself.
+ // * If the list has one item, then `next == prev == item`, so these writes have no
+ // effect. The list remains unchanged and `item` is still in the list for now.
+ unsafe {
+ (*next).prev = prev;
+ (*prev).next = next;
+ }
+ // SAFETY: We have exclusive access to items in the list.
+ // INVARIANT: `item` is being removed, so the pointers should be null.
+ unsafe {
+ (*item).prev = ptr::null_mut();
+ (*item).next = ptr::null_mut();
+ }
+ // INVARIANT: There are three cases:
+ // * If `item` was not the first item, then `self.first` should remain unchanged.
+ // * If `item` was the first item and there is another item, then we just updated
+ // `prev->next` to `next`, which is the new first item, and setting `item->next` to null
+ // did not modify `prev->next`.
+ // * If `item` was the only item in the list, then `prev == item`, and we just set
+ // `item->next` to null, so this correctly sets `first` to null now that the list is
+ // empty.
+ if self.first == item {
+ // SAFETY: The `prev` pointer is the value that `item->prev` had when it was in this
+ // list, so it must be valid. There is no race since `prev` is still in the list and we
+ // still have exclusive access to the list.
+ self.first = unsafe { (*prev).next };
+ }
+
+ // SAFETY: `item` used to be in the list, so it is dereferenceable by the type invariants
+ // of `List`.
+ let list_links = unsafe { ListLinks::from_fields(item) };
+ // SAFETY: Any pointer in the list originates from a `prepare_to_insert` call.
+ let raw_item = unsafe { T::post_remove(list_links) };
+ // SAFETY: The above call to `post_remove` guarantees that we can recreate the `ListArc`.
+ unsafe { ListArc::from_raw(raw_item) }
+ }
+
+ /// Moves all items from `other` into `self`.
+ ///
+ /// The items of `other` are added to the back of `self`, so the last item of `other` becomes
+ /// the last item of `self`.
+ pub fn push_all_back(&mut self, other: &mut List<T, ID>) {
+ // First, we insert the elements into `self`. At the end, we make `other` empty.
+ if self.is_empty() {
+ // INVARIANT: All of the elements in `other` become elements of `self`.
+ self.first = other.first;
+ } else if !other.is_empty() {
+ let other_first = other.first;
+ // SAFETY: The other list is not empty, so this pointer is valid.
+ let other_last = unsafe { (*other_first).prev };
+ let self_first = self.first;
+ // SAFETY: The self list is not empty, so this pointer is valid.
+ let self_last = unsafe { (*self_first).prev };
+
+ // SAFETY: We have exclusive access to both lists, so we can update the pointers.
+ // INVARIANT: This correctly sets the pointers to merge both lists. We do not need to
+ // update `self.first` because the first element of `self` does not change.
+ unsafe {
+ (*self_first).prev = other_last;
+ (*other_last).next = self_first;
+ (*self_last).next = other_first;
+ (*other_first).prev = self_last;
+ }
+ }
+
+ // INVARIANT: The other list is now empty, so update its pointer.
+ other.first = ptr::null_mut();
+ }
+
+ /// Returns a cursor to the first element of the list.
+ ///
+ /// If the list is empty, this returns `None`.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, T, ID>> {
+ if self.first.is_null() {
+ None
+ } else {
+ Some(Cursor {
+ current: self.first,
+ list: self,
+ })
+ }
+ }
+
+ /// Creates an iterator over the list.
+ pub fn iter(&self) -> Iter<'_, T, ID> {
+ // INVARIANT: If the list is empty, both pointers are null. Otherwise, both pointers point
+ // at the first element of the same list.
+ Iter {
+ current: self.first,
+ stop: self.first,
+ _ty: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Default for List<T, ID> {
+ fn default() -> Self {
+ List::new()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Drop for List<T, ID> {
+ fn drop(&mut self) {
+ while let Some(item) = self.pop_front() {
+ drop(item);
+ }
+ }
+}
+
+/// An iterator over a [`List`].
+///
+/// # Invariants
+///
+/// * There must be a [`List`] that is immutably borrowed for the duration of `'a`.
+/// * The `current` pointer is null or points at a value in that [`List`].
+/// * The `stop` pointer is equal to the `first` field of that [`List`].
+#[derive(Clone)]
+pub struct Iter<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ stop: *mut ListLinksFields,
+ _ty: PhantomData<&'a ListArc<T, ID>>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
+ type Item = ArcBorrow<'a, T>;
+
+ fn next(&mut self) -> Option<ArcBorrow<'a, T>> {
+ if self.current.is_null() {
+ return None;
+ }
+
+ let current = self.current;
+
+ // SAFETY: We just checked that `current` is not null, so it is in a list, and hence not
+ // dangling. There's no race because the iterator holds an immutable borrow to the list.
+ let next = unsafe { (*current).next };
+ // INVARIANT: If `current` was the last element of the list, then this updates it to null.
+ // Otherwise, we update it to the next element.
+ self.current = if next != self.stop {
+ next
+ } else {
+ ptr::null_mut()
+ };
+
+ // SAFETY: The `current` pointer points at a value in the list.
+ let item = unsafe { T::view_value(ListLinks::from_fields(current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the list. However, the `ArcBorrow` is annotated with the iterator's
+ // lifetime, and the list is immutably borrowed for that lifetime.
+ // * Values in a list never have a `UniqueArc` reference.
+ Some(unsafe { ArcBorrow::from_raw(item) })
+ }
+}
+
+/// A cursor into a [`List`].
+///
+/// # Invariants
+///
+/// The `current` pointer points at a value in `list`.
+pub struct Cursor<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ list: &'a mut List<T, ID>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Cursor<'a, T, ID> {
+ /// Access the current element of this cursor.
+ pub fn current(&self) -> ArcBorrow<'_, T> {
+ // SAFETY: The `current` pointer points at a value in the list.
+ let me = unsafe { T::view_value(ListLinks::from_fields(self.current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the cursor or the list. However, the `ArcBorrow` holds an immutable borrow
+ // on the cursor, which in turn holds a mutable borrow on the list, so any such
+ // mutable access requires first releasing the immutable borrow on the cursor.
+ // * Values in a list never have a `UniqueArc` reference, because the list has a `ListArc`
+ // reference, and `UniqueArc` references must be unique.
+ unsafe { ArcBorrow::from_raw(me) }
+ }
+
+ /// Move the cursor to the next element.
+ pub fn next(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let next = unsafe { (*self.current).next };
+
+ if next == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `next` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: next,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Move the cursor to the previous element.
+ pub fn prev(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let prev = unsafe { (*self.current).prev };
+
+ if self.current == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `prev` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: prev,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Remove the current element from the list.
+ pub fn remove(self) -> ListArc<T, ID> {
+ // SAFETY: The `current` pointer always points at a member of the list.
+ unsafe { self.list.remove_internal(self.current) }
+ }
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for Iter<'a, T, ID> {}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for &'a List<T, ID> {
+ type IntoIter = Iter<'a, T, ID>;
+ type Item = ArcBorrow<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T, ID> {
+ self.iter()
+ }
+}
+
+/// An owning iterator into a [`List`].
+pub struct IntoIter<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ list: List<T, ID>,
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Iterator for IntoIter<T, ID> {
+ type Item = ListArc<T, ID>;
+
+ fn next(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_front()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for IntoIter<T, ID> {}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> DoubleEndedIterator for IntoIter<T, ID> {
+ fn next_back(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_back()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for List<T, ID> {
+ type IntoIter = IntoIter<T, ID>;
+ type Item = ListArc<T, ID>;
+
+ fn into_iter(self) -> IntoIter<T, ID> {
+ IntoIter { list: self }
+ }
+}
diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
new file mode 100644
index 000000000000..d801b9dc6291
--- /dev/null
+++ b/rust/kernel/list/arc.rs
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A wrapper around `Arc` for linked lists.
+
+use crate::alloc::{AllocError, Flags};
+use crate::prelude::*;
+use crate::sync::{Arc, ArcBorrow, UniqueArc};
+use core::marker::{PhantomPinned, Unsize};
+use core::ops::Deref;
+use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for
+/// this id.
+///
+/// Types that implement this trait should include some kind of logic for keeping track of whether
+/// a [`ListArc`] exists or not. We refer to this logic as "the tracking inside `T`".
+///
+/// We allow the case where the tracking inside `T` thinks that a [`ListArc`] exists, but actually,
+/// there isn't a [`ListArc`]. However, we do not allow the opposite situation where a [`ListArc`]
+/// exists, but the tracking thinks it doesn't. This is because the former can at most result in us
+/// failing to create a [`ListArc`] when the operation could succeed, whereas the latter can result
+/// in the creation of two [`ListArc`] references. Only the latter situation can lead to memory
+/// safety issues.
+///
+/// A consequence of the above is that you may implement the tracking inside `T` by not actually
+/// keeping track of anything. To do this, you always claim that a [`ListArc`] exists, even if
+/// there isn't one. This implementation is allowed by the above rule, but it means that
+/// [`ListArc`] references can only be created if you have ownership of *all* references to the
+/// refcounted object, as you otherwise have no way of knowing whether a [`ListArc`] exists.
+pub trait ListArcSafe<const ID: u64 = 0> {
+ /// Informs the tracking inside this type that it now has a [`ListArc`] reference.
+ ///
+ /// This method may be called even if the tracking inside this type thinks that a `ListArc`
+ /// reference exists. (But only if that's not actually the case.)
+ ///
+ /// # Safety
+ ///
+ /// Must not be called if a [`ListArc`] already exists for this value.
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>);
+
+ /// Informs the tracking inside this type that there is no [`ListArc`] reference anymore.
+ ///
+ /// # Safety
+ ///
+ /// Must only be called if there is no [`ListArc`] reference, but the tracking thinks there is.
+ unsafe fn on_drop_list_arc(&self);
+}
+
+/// Declares that this type is able to safely attempt to create `ListArc`s at any time.
+///
+/// # Safety
+///
+/// The guarantees of `try_new_list_arc` must be upheld.
+pub unsafe trait TryNewListArc<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Attempts to convert an `Arc<Self>` into a `ListArc<Self>`. Returns `true` if the
+ /// conversion was successful.
+ ///
+ /// This method should not be called directly. Use [`ListArc::try_from_arc`] instead.
+ ///
+ /// # Guarantees
+ ///
+ /// If this call returns `true`, then there is no [`ListArc`] pointing to this value.
+ /// Additionally, this call will have transitioned the tracking inside `Self` from not thinking
+ /// that a [`ListArc`] exists, to thinking that a [`ListArc`] exists.
+ fn try_new_list_arc(&self) -> bool;
+}
+
+/// Declares that this type supports [`ListArc`].
+///
+/// This macro supports a few different strategies for implementing the tracking inside the type:
+///
+/// * The `untracked` strategy does not actually keep track of whether a [`ListArc`] exists. When
+/// using this strategy, the only way to create a [`ListArc`] is using a [`UniqueArc`].
+/// * The `tracked_by` strategy defers the tracking to a field of the struct. The user must specify
+/// which field to defer the tracking to. The field must implement [`ListArcSafe`]. If the field
+/// implements [`TryNewListArc`], then the type will also implement [`TryNewListArc`].
+///
+/// The `tracked_by` strategy is usually used by deferring to a field of type
+/// [`AtomicTracker`]. However, it is also possible to defer the tracking to another struct
+/// that also uses this macro.
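+///
+/// # Examples
+///
+/// A minimal sketch of the `untracked` strategy for an illustrative type (the struct below is
+/// not part of the kernel tree); the [`ListArc`] is then created from a [`UniqueArc`]:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_list_arc_safe, ListArc};
+/// use kernel::sync::UniqueArc;
+///
+/// struct Untracked {
+///     value: u32,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Untracked { untracked; }
+/// }
+///
+/// // With `untracked`, the only way to obtain a `ListArc` is by starting from a `UniqueArc`.
+/// let unique = UniqueArc::new(Untracked { value: 1 }, flags::GFP_KERNEL)?;
+/// let list_arc: ListArc<Untracked> = ListArc::from(unique);
+/// assert_eq!(list_arc.value, 1);
+/// # Ok::<(), Error>(())
+/// ```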
+#[macro_export]
+macro_rules! impl_list_arc_safe {
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { untracked; } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {}
+ unsafe fn on_drop_list_arc(&self) {}
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty {
+ tracked_by $field:ident : $fty:ty;
+ } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {
+ $crate::assert_pinned!($t, $field, $fty, inline);
+
+ // SAFETY: This field is structurally pinned as per the above assertion.
+ let field = unsafe {
+ ::core::pin::Pin::map_unchecked_mut(self, |me| &mut me.$field)
+ };
+ // SAFETY: The caller promises that there is no `ListArc`.
+ unsafe {
+ <$fty as $crate::list::ListArcSafe<$num>>::on_create_list_arc_from_unique(field)
+ };
+ }
+ unsafe fn on_drop_list_arc(&self) {
+ // SAFETY: The caller promises that there is no `ListArc` reference, and also
+ // promises that the tracking thinks there is a `ListArc` reference.
+ unsafe { <$fty as $crate::list::ListArcSafe<$num>>::on_drop_list_arc(&self.$field) };
+ }
+ }
+ unsafe impl$(<$($generics)*>)? $crate::list::TryNewListArc<$num> for $t
+ where
+ $fty: TryNewListArc<$num>,
+ {
+ fn try_new_list_arc(&self) -> bool {
+ <$fty as $crate::list::TryNewListArc<$num>>::try_new_list_arc(&self.$field)
+ }
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ () => {};
+}
+pub use impl_list_arc_safe;
+
+/// A wrapper around [`Arc`] that's guaranteed unique for the given id.
+///
+/// The `ListArc` type can be thought of as a special reference to a refcounted object that owns the
+/// permission to manipulate the `next`/`prev` pointers stored in the refcounted object. By ensuring
+/// that each object has only one `ListArc` reference, the owner of that reference is assured
+/// exclusive access to the `next`/`prev` pointers. When a `ListArc` is inserted into a [`List`],
+/// the [`List`] takes ownership of the `ListArc` reference.
+///
+/// There are various strategies to ensuring that a value has only one `ListArc` reference. The
+/// simplest is to convert a [`UniqueArc`] into a `ListArc`. However, the refcounted object could
+/// also keep track of whether a `ListArc` exists using a boolean, which could allow for the
+/// creation of new `ListArc` references from an [`Arc`] reference. Whatever strategy is used, the
+/// relevant tracking is referred to as "the tracking inside `T`", and the [`ListArcSafe`] trait
+/// (and its subtraits) are used to update the tracking when a `ListArc` is created or destroyed.
+///
+/// Note that we allow the case where the tracking inside `T` thinks that a `ListArc` exists, but
+/// actually, there isn't a `ListArc`. However, we do not allow the opposite situation where a
+/// `ListArc` exists, but the tracking thinks it doesn't. This is because the former can at most
+/// result in us failing to create a `ListArc` when the operation could succeed, whereas the latter
+/// can result in the creation of two `ListArc` references.
+///
+/// While this `ListArc` is unique for the given id, there still might exist normal `Arc`
+/// references to the object.
+///
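+/// # Examples
+///
+/// A minimal sketch of creating and converting a `ListArc` for an illustrative type that uses
+/// the `untracked` strategy (the struct below is not part of the kernel tree):
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_list_arc_safe, ListArc};
+///
+/// struct Foo {
+///     value: u32,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Foo { untracked; }
+/// }
+///
+/// // Allocate a new refcounted `Foo` and obtain the unique `ListArc` for it.
+/// let foo: ListArc<Foo> = ListArc::new(Foo { value: 42 }, flags::GFP_KERNEL)?;
+/// assert_eq!(foo.value, 42);
+///
+/// // Convert it into a plain `Arc`. With the `untracked` strategy, a new `ListArc` can only be
+/// // obtained by proving uniqueness again, e.g. via `Arc::into_unique_or_drop`.
+/// let arc = foo.into_arc();
+/// assert_eq!(arc.value, 42);
+/// # Ok::<(), Error>(())
+/// ```
+///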
+/// # Invariants
+///
+/// * Each reference counted object has at most one `ListArc` for each value of `ID`.
+/// * The tracking inside `T` is aware that a `ListArc` reference exists.
+///
+/// [`List`]: crate::list::List
+#[repr(transparent)]
+pub struct ListArc<T, const ID: u64 = 0>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ arc: Arc<T>,
+}
+
+impl<T: ListArcSafe<ID>, const ID: u64> ListArc<T, ID> {
+ /// Constructs a new reference counted instance of `T`.
+ #[inline]
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self::from(UniqueArc::new(contents, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ // We don't implement `InPlaceInit` because `ListArc` is implicitly pinned. This is similar to
+ // what we do for `Arc`.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_pin_init(init, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`ListArc<T>::pin_init`], since a [`ListArc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_init(init, flags)?))
+ }
+}
+
+impl<T, const ID: u64> From<UniqueArc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(unique: UniqueArc<T>) -> Self {
+ Self::from(Pin::from(unique))
+ }
+}
+
+impl<T, const ID: u64> From<Pin<UniqueArc<T>>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a pinned [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(mut unique: Pin<UniqueArc<T>>) -> Self {
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { T::on_create_list_arc_from_unique(unique.as_mut()) };
+ let arc = Arc::from(unique);
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`,
+ // so we can create a `ListArc`.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+}
+
+impl<T, const ID: u64> ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Creates two `ListArc`s from a [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_unique<const ID2: u64>(unique: UniqueArc<T>) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ Self::pair_from_pin_unique(Pin::from(unique))
+ }
+
+ /// Creates two `ListArc`s from a pinned [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_pin_unique<const ID2: u64>(
+ mut unique: Pin<UniqueArc<T>>,
+ ) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ build_assert!(ID != ID2);
+
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID>>::on_create_list_arc_from_unique(unique.as_mut()) };
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID2>>::on_create_list_arc_from_unique(unique.as_mut()) };
+
+ let arc1 = Arc::from(unique);
+ let arc2 = Arc::clone(&arc1);
+
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`
+ // for both IDs (which are different), so we can create two `ListArc`s.
+ unsafe {
+ (
+ Self::transmute_from_arc(arc1),
+ ListArc::transmute_from_arc(arc2),
+ )
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc(arc: Arc<T>) -> Result<Self, Arc<T>>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Ok(unsafe { Self::transmute_from_arc(arc) })
+ } else {
+ Err(arc)
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc_borrow(arc: ArcBorrow<'_, T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Some(unsafe { Self::transmute_from_arc(Arc::from(arc)) })
+ } else {
+ None
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// If it's not possible to create a new `ListArc`, then the `Arc` is dropped. This will never
+ /// run the destructor of the value.
+ pub fn try_from_arc_or_drop(arc: Arc<T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ match Self::try_from_arc(arc) {
+ Ok(list_arc) => Some(list_arc),
+ Err(arc) => Arc::into_unique_or_drop(arc).map(Self::from),
+ }
+ }
+
+ /// Transmutes an [`Arc`] into a `ListArc` without updating the tracking inside `T`.
+ ///
+ /// # Safety
+ ///
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ unsafe fn transmute_from_arc(arc: Arc<T>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants on `ListArc` are satisfied.
+ Self { arc }
+ }
+
+ /// Transmutes a `ListArc` into an [`Arc`] without updating the tracking inside `T`.
+ ///
+ /// After this call, the tracking inside `T` will still think that there is a `ListArc`
+ /// reference.
+ #[inline]
+ fn transmute_to_arc(self) -> Arc<T> {
+ // Use a transmute to skip destructor.
+ //
+ // SAFETY: ListArc is repr(transparent).
+ unsafe { core::mem::transmute(self) }
+ }
+
+ /// Convert ownership of this `ListArc` into a raw pointer.
+ ///
+ /// The returned pointer is indistinguishable from pointers returned by [`Arc::into_raw`]. The
+ /// tracking inside `T` will still think that a `ListArc` exists after this call.
+ #[inline]
+ pub fn into_raw(self) -> *const T {
+ Arc::into_raw(Self::transmute_to_arc(self))
+ }
+
+ /// Take ownership of the `ListArc` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must satisfy the safety requirements of [`Arc::from_raw`].
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The pointer satisfies the safety requirements for `Arc::from_raw`.
+ let arc = unsafe { Arc::from_raw(ptr) };
+ // SAFETY: The value doesn't already have a `ListArc` reference, but the tracking thinks it
+ // does.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+
+ /// Converts the `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn into_arc(self) -> Arc<T> {
+ let arc = Self::transmute_to_arc(self);
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is.
+ unsafe { T::on_drop_list_arc(&arc) };
+ arc
+ }
+
+ /// Clone a `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn clone_arc(&self) -> Arc<T> {
+ self.arc.clone()
+ }
+
+ /// Returns a reference to an [`Arc`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`&Arc`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead.
+ ///
+ /// [`&Arc`]: Arc
+ #[inline]
+ pub fn as_arc(&self) -> &Arc<T> {
+ &self.arc
+ }
+
+ /// Returns an [`ArcBorrow`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
+ /// receiver), but we have an [`Arc`] instead. Getting an [`ArcBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> {
+ self.arc.as_arc_borrow()
+ }
+
+ /// Compare whether two [`ListArc`] pointers reference the same underlying object.
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ Arc::ptr_eq(&this.arc, &other.arc)
+ }
+}
+
+impl<T, const ID: u64> Deref for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.arc.deref()
+ }
+}
+
+impl<T, const ID: u64> Drop for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is by the type
+ // invariants on `Self`.
+ unsafe { T::on_drop_list_arc(&self.arc) };
+ }
+}
+
+impl<T, const ID: u64> AsRef<Arc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn as_ref(&self) -> &Arc<T> {
+ self.as_arc()
+ }
+}
+
+// This is to allow [`ListArc`] (and variants) to be used as the type of `self`.
+impl<T, const ID: u64> core::ops::Receiver for ListArc<T, ID> where T: ListArcSafe<ID> + ?Sized {}
+
+// This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+// This is to allow `ListArc<U>` to be dispatched on when `ListArc<T>` can be coerced into
+// `ListArc<U>`.
+impl<T, U, const ID: u64> core::ops::DispatchFromDyn<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+/// A utility for tracking whether a [`ListArc`] exists using an atomic.
+///
+/// # Invariant
+///
+/// If the boolean is `false`, then there is no [`ListArc`] for this value.
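+///
+/// # Examples
+///
+/// A minimal sketch of deferring the tracking of an illustrative type to an `AtomicTracker`
+/// field; it assumes the usual pin-init machinery from the kernel prelude:
+///
+/// ```
+/// use kernel::list::{impl_list_arc_safe, AtomicTracker, TryNewListArc};
+/// use kernel::prelude::*;
+///
+/// #[pin_data]
+/// struct Tracked {
+///     value: u32,
+///     // The tracker is pin-initialized with `AtomicTracker::new()` when the value is created.
+///     #[pin]
+///     tracker: AtomicTracker,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Tracked { tracked_by tracker: AtomicTracker; }
+/// }
+/// ```
+///
+/// Since `AtomicTracker` implements [`TryNewListArc`], so does `Tracked`, and
+/// [`ListArc::try_from_arc`] can then be used to lazily create the [`ListArc`] from any
+/// `Arc<Tracked>` reference.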
+#[repr(transparent)]
+pub struct AtomicTracker<const ID: u64 = 0> {
+ inner: AtomicBool,
+ // This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`.
+ _pin: PhantomPinned,
+}
+
+impl<const ID: u64> AtomicTracker<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: AtomicBool::new(false),
+ _pin: PhantomPinned,
+ }
+ }
+
+ fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool {
+ // SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable
+ // reference to it even if we only have a pinned reference to `self`.
+ unsafe { &mut Pin::into_inner_unchecked(self).inner }
+ }
+}
+
+impl<const ID: u64> ListArcSafe<ID> for AtomicTracker<ID> {
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>) {
+ // INVARIANT: We just created a ListArc, so the boolean should be true.
+ *self.project_inner().get_mut() = true;
+ }
+
+ unsafe fn on_drop_list_arc(&self) {
+ // INVARIANT: We just dropped a ListArc, so the boolean should be false.
+ self.inner.store(false, Ordering::Release);
+ }
+}
+
+// SAFETY: If this method returns `true`, then by the type invariant there is no `ListArc` before
+// this call, so it is okay to create a new `ListArc`.
+//
+// The acquire ordering will synchronize with the release store from the destruction of any
+// previous `ListArc`, so if there was a previous `ListArc`, then the destruction of the previous
+// `ListArc` happens-before the creation of the new `ListArc`.
+unsafe impl<const ID: u64> TryNewListArc<ID> for AtomicTracker<ID> {
+ fn try_new_list_arc(&self) -> bool {
+ // INVARIANT: If this method returns true, then the boolean used to be false, and is no
+ // longer false, so it is okay for the caller to create a new [`ListArc`].
+ self.inner
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+}
diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs
new file mode 100644
index 000000000000..2330f673427a
--- /dev/null
+++ b/rust/kernel/list/arc_field.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A field that is exclusively owned by a [`ListArc`].
+//!
+//! This can be used to have a reference counted struct where one of the reference counted pointers
+//! has exclusive access to a field of the struct.
+//!
+//! [`ListArc`]: crate::list::ListArc
+
+use core::cell::UnsafeCell;
+
+/// A field owned by a specific [`ListArc`].
+///
+/// [`ListArc`]: crate::list::ListArc
+pub struct ListArcField<T, const ID: u64 = 0> {
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Send for ListArcField<T, ID> {}
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Sync for ListArcField<T, ID> {}
+
+impl<T, const ID: u64> ListArcField<T, ID> {
+ /// Creates a new `ListArcField`.
+ pub fn new(value: T) -> Self {
+ Self {
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Access the value when we have exclusive access to the `ListArcField`.
+ ///
+ /// This allows access to the field using a `UniqueArc` instead of a `ListArc`.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Unsafely assert that you have shared access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have shared access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ pub unsafe fn assert_ref(&self) -> &T {
+ // SAFETY: The caller has shared access to the `ListArc`, so they also have shared access
+ // to this field.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Unsafely assert that you have mutable access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ #[allow(clippy::mut_from_ref)]
+ pub unsafe fn assert_mut(&self) -> &mut T {
+ // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive
+ // access to this field.
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+/// Defines getters for a [`ListArcField`].
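+///
+/// # Examples
+///
+/// A minimal sketch for an illustrative type; it assumes a `ListArcSafe` implementation such as
+/// the `untracked` strategy, and the getters are only callable through the `ListArc`:
+///
+/// ```
+/// use kernel::list::{define_list_arc_field_getter, impl_list_arc_safe, ListArcField};
+///
+/// struct Stats {
+///     // Only the holder of the `ListArc` may access this field.
+///     failures: ListArcField<u64>,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Stats { untracked; }
+/// }
+///
+/// impl Stats {
+///     define_list_arc_field_getter! {
+///         pub fn failures(&self) -> &u64 { failures }
+///         pub fn failures_mut(&mut self) -> &mut u64 { failures }
+///     }
+/// }
+/// ```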
+#[macro_export]
+macro_rules! define_list_arc_field_getter {
+ ($pub:vis fn $name:ident(&self $(<$id:tt>)?) -> &$typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a $crate::list::ListArc<Self $(, $id)?>) -> &'a $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a shared reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_ref(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ ($pub:vis fn $name:ident(&mut self $(<$id:tt>)?) -> &mut $typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a mut $crate::list::ListArc<Self $(, $id)?>) -> &'a mut $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a mutable reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_mut(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ () => {};
+}
+pub use define_list_arc_field_getter;
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
new file mode 100644
index 000000000000..a0438537cee1
--- /dev/null
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Helpers for implementing list traits safely.
+
+use crate::list::ListLinks;
+
+/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+///
+/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
+/// this trait.
+///
+/// # Safety
+///
+/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+///
+/// The behavior of `raw_get_list_links` must not be changed.
+pub unsafe trait HasListLinks<const ID: u64 = 0> {
+ /// The offset of the `ListLinks` field.
+ const OFFSET: usize;
+
+ /// Returns a pointer to the [`ListLinks<ID>`] field.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ ///
+ /// [`ListLinks<ID>`]: ListLinks
+ // We don't really need this method, but it's necessary for the implementation of
+ // `impl_has_list_links!` to be correct.
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
+ // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
+ // `OFFSET` constant is correct.
+ unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
+ }
+}
+
+/// Implements the [`HasListLinks`] trait for the given type.
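+///
+/// # Examples
+///
+/// A minimal sketch for an illustrative struct with an embedded [`ListLinks`] field:
+///
+/// ```
+/// use kernel::list::{impl_has_list_links, ListLinks};
+///
+/// struct Entry {
+///     value: u32,
+///     links: ListLinks,
+/// }
+///
+/// impl_has_list_links! {
+///     impl HasListLinks<0> for Entry { self.links }
+/// }
+/// ```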
+#[macro_export]
+macro_rules! impl_has_list_links {
+ ($(impl$(<$($implarg:ident),*>)?
+ HasListLinks$(<$id:tt>)?
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self$(.$field:ident)* }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ //
+ // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
+ // equivalent to the pointer offset operation in the trait definition.
+ unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling. We know that this
+ // expression doesn't follow any pointers, as the `offset_of!` invocation above
+ // would otherwise not compile.
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) }
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links;
+
+/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+///
+/// # Safety
+///
+/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
+/// inside a `ListLinksSelfPtr<T, ID>`.
+pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
+where
+ Self: HasListLinks<ID>,
+{
+}
+
+/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
+#[macro_export]
+macro_rules! impl_has_list_links_self_ptr {
+ ($(impl$({$($implarg:tt)*})?
+ HasSelfPtr<$item_type:ty $(, $id:tt)?>
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
+ $self $(<$($selfarg),*>)?
+ {}
+
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
+ unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ ptr.cast()
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links_self_ptr;
+
+/// Implements the [`ListItem`] trait for the given type.
+///
+/// Requires that the type implements [`HasListLinks`]. Use the [`impl_has_list_links!`] macro to
+/// implement that trait.
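+///
+/// # Examples
+///
+/// A minimal sketch that wires up all the pieces for an illustrative `Entry` type; real users
+/// would additionally need a way to construct `Entry` values inside a `ListArc` (not shown
+/// here):
+///
+/// ```
+/// use kernel::list::*;
+///
+/// struct Entry {
+///     value: u32,
+///     links: ListLinks,
+/// }
+///
+/// impl_has_list_links! {
+///     impl HasListLinks<0> for Entry { self.links }
+/// }
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Entry { untracked; }
+/// }
+/// impl_list_item! {
+///     impl ListItem<0> for Entry { using ListLinks; }
+/// }
+/// ```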
+///
+/// [`ListItem`]: crate::list::ListItem
+#[macro_export]
+macro_rules! impl_list_item {
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinks;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // is implemented in terms of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller guarantees that `me` points at a valid value of type `Self`.
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+ }
+ )*};
+
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinksSelfPtr;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
+
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // Goes via the offset as the field is private.
+ //
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const $crate::types::Opaque<*const Self>;
+ let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+
+ // SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
+ // `post_remove`, or `view_value`. By the safety requirements of those methods,
+ // none of these three methods may be called in parallel with this call to
+ // `prepare_to_insert`, so this write will not race with any other access to the
+ // value.
+ unsafe { ::core::ptr::write(cell_inner, me) };
+
+ links_field
+ }
+
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // returns the return value of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ unsafe { <Self as HasListLinks<$num>>::raw_get_list_links(me.cast_mut()) }
+ }
+
+ // This function is also used as the implementation of `post_remove`, so the caller
+ // may choose to satisfy the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ //
+ // GUARANTEES: (always)
+ // * This returns the same pointer as the one passed to the most recent call to
+ // `prepare_to_insert` since that call wrote that pointer to this location. The value
+ // is only modified in `prepare_to_insert`, so it has not been modified since the
+ // most recent call.
+ //
+ // GUARANTEES: (only when using the `view_value` safety requirements)
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const ::core::cell::UnsafeCell<*const Self>;
+ let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ // SAFETY: This is not a data race, because the only function that writes to this
+ // value is `prepare_to_insert`, but by the safety requirements the
+ // `prepare_to_insert` method may not be called in parallel with `view_value` or
+ // `post_remove`.
+ unsafe { ::core::ptr::read(cell_inner) }
+ }
+
+ // GUARANTEES:
+ // The first guarantee of `view_value` is exactly what `post_remove` guarantees.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ // SAFETY: This specific implementation of `view_value` allows the caller to
+ // promise the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_value(me) }
+ }
+ }
+ )*};
+}
+pub use impl_list_item;
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index b37a0b3180fb..4571daec0961 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -37,6 +37,6 @@ pub use super::error::{code::*, Error, Result};
pub use super::{str::CStr, ThisModule};
-pub use super::init::{InPlaceInit, Init, PinInit};
+pub use super::init::{InPlaceInit, InPlaceWrite, Init, PinInit};
pub use super::current;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index a78aa3514a0a..508b0221256c 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h)
//!
-//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+//! Reference: <https://docs.kernel.org/core-api/printk-basics.html>
use core::{
ffi::{c_char, c_void},
@@ -197,7 +197,7 @@ macro_rules! print_macro (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`pr_emerg`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_emerg
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -221,7 +221,7 @@ macro_rules! pr_emerg (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`pr_alert`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_alert
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -245,7 +245,7 @@ macro_rules! pr_alert (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`pr_crit`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_crit
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -269,7 +269,7 @@ macro_rules! pr_crit (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`pr_err`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_err
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -293,7 +293,7 @@ macro_rules! pr_err (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`pr_warn`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_warn
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -317,7 +317,7 @@ macro_rules! pr_warn (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`pr_notice`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_notice
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -341,7 +341,7 @@ macro_rules! pr_notice (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`pr_info`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_info
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -367,7 +367,7 @@ macro_rules! pr_info (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug
+/// [`pr_debug`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_debug
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -395,7 +395,7 @@ macro_rules! pr_debug (
/// `alloc::format!` for information about the formatting syntax.
///
/// [`pr_info!`]: crate::pr_info!
-/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`pr_cont`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_cont
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..25eb36fd1cdc
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,1278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](srctree/include/linux/rbtree.h)
+//!
+//! Reference: <https://docs.kernel.org/core-api/rbtree.html>
+
+use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*};
+use alloc::boxed::Box;
+use core::{
+ cmp::{Ord, Ordering},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, from_mut, NonNull},
+};
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Examples
+///
+/// In the example below we do several operations on a tree. We note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// assert_eq!(tree.get(&10).unwrap(), &100);
+/// assert_eq!(tree.get(&20).unwrap(), &200);
+/// assert_eq!(tree.get(&30).unwrap(), &300);
+/// }
+///
+/// // Iterate over the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_create_and_insert(10, 1000, flags::GFP_KERNEL)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we reuse an existing node allocation from an element we removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Create a preallocated reservation that we can re-use later.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&15, &150));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and point to a field of our internal representation of a node.
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Send condition as would be used for a struct with K and V fields.
+unsafe impl<K: Send, V: Send> Send for RBTree<K, V> {}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Sync condition as would be used for a struct with K and V fields.
+unsafe impl<K: Sync, V: Sync> Sync for RBTree<K, V> {}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(&self.root) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the smallest key.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_first(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the largest key.
+ pub fn cursor_back(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_last(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> RBTree<K, V>
+where
+ K: Ord,
+{
+ /// Tries to insert a new value into the tree.
+ ///
+ /// If a node with the same key already exists, it is overwritten and the old node (containing
+ /// the old key/value pair) is returned. Returns [`None`] if no node with the same key existed.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_create_and_insert(
+ &mut self,
+ key: K,
+ value: V,
+ flags: Flags,
+ ) -> Result<Option<RBTreeNode<K, V>>> {
+ Ok(self.insert(RBTreeNode::new(key, value, flags)?))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// If a node with the same key already exists, it is overwritten and the old node (containing
+ /// the old key/value pair) is returned. Returns [`None`] if no node with the same key existed.
+ ///
+ /// This function always succeeds.
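+ ///
+ /// # Examples
+ ///
+ /// A brief sketch, following the doctest conventions of the type-level examples above, showing
+ /// that inserting a key that is already present hands back the displaced node:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}};
+ ///
+ /// let mut tree = RBTree::new();
+ ///
+ /// // The first insertion of a key returns [`None`].
+ /// assert!(tree.insert(RBTreeNode::new(10, 100, flags::GFP_KERNEL)?).is_none());
+ ///
+ /// // Inserting the same key again replaces the value and returns the old node.
+ /// let old = tree.insert(RBTreeNode::new(10, 500, flags::GFP_KERNEL)?).unwrap();
+ /// assert_eq!(old.to_key_value(), (10, 100));
+ /// assert_eq!(tree.get(&10), Some(&500));
+ /// # Ok::<(), Error>(())
+ /// ```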
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>> {
+ match self.raw_entry(&node.node.key) {
+ RawEntry::Occupied(entry) => Some(entry.replace(node)),
+ RawEntry::Vacant(entry) => {
+ entry.insert(node);
+ None
+ }
+ }
+ }
+
+ fn raw_entry(&mut self, key: &K) -> RawEntry<'_, K, V> {
+ let raw_self: *mut RBTree<K, V> = self;
+ // The returned `RawEntry` is used to call either `rb_link_node` or `rb_replace_node`.
+ // The parameters of `bindings::rb_link_node` are as follows:
+ // - `node`: A pointer to an uninitialized node being inserted.
+ // - `parent`: A pointer to an existing node in the tree. One of its child pointers must be
+ // null, and `node` will become a child of `parent` by replacing that child pointer
+ // with a pointer to `node`.
+ // - `rb_link`: A pointer to either the left-child or right-child field of `parent`. This
+ // specifies which child of `parent` should hold `node` after this call. The
+ // value of `*rb_link` must be null before the call to `rb_link_node`. If the
+ // red/black tree is empty, then it’s also possible for `parent` to be null. In
+ // this case, `rb_link` is a pointer to the `root` field of the red/black tree.
+ //
+ // We will traverse the tree looking for a node that has a null pointer as its child,
+ // representing an empty subtree where we can insert our new node. We need to make sure
+ // that we preserve the ordering of the nodes in the tree. In each iteration of the loop
+ // we store `parent` and `child_field_of_parent`, and the new `node` will go somewhere
+ // in the subtree of `parent` that `child_field_of_parent` points at. Once
+ // we find an empty subtree, we can insert the new node using `rb_link_node`.
+ let mut parent = core::ptr::null_mut();
+ let mut child_field_of_parent: &mut *mut bindings::rb_node =
+ // SAFETY: `raw_self` is a valid pointer to the `RBTree` (created from `self` above).
+ unsafe { &mut (*raw_self).root.rb_node };
+ while !(*child_field_of_parent).is_null() {
+ let curr = *child_field_of_parent;
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let node = unsafe { container_of!(curr, Node<K, V>, links) };
+
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ match key.cmp(unsafe { &(*node).key }) {
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left },
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right },
+ Ordering::Equal => {
+ return RawEntry::Occupied(OccupiedEntry {
+ rbtree: self,
+ node_links: curr,
+ })
+ }
+ }
+ parent = curr;
+ }
+
+ RawEntry::Vacant(RawVacantEntry {
+ rbtree: raw_self,
+ parent,
+ child_field_of_parent,
+ _phantom: PhantomData,
+ })
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
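+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the entry API, following the doctest conventions of the type-level
+ /// examples above; the reservation is only consumed if the key turns out to be vacant:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::{Entry, RBTree, RBTreeNodeReservation}};
+ ///
+ /// let mut tree = RBTree::new();
+ /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+ ///
+ /// // Pre-allocate memory in case the key is vacant.
+ /// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+ ///
+ /// match tree.entry(10) {
+ ///     // The key exists, so increment the value in place.
+ ///     Entry::Occupied(mut e) => *e.get_mut() += 1,
+ ///     // The key is missing, so insert it using the reservation, then increment it.
+ ///     Entry::Vacant(e) => *e.insert(100, reservation) += 1,
+ /// }
+ /// assert_eq!(tree.get(&10), Some(&101));
+ /// # Ok::<(), Error>(())
+ /// ```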
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ match self.raw_entry(&key) {
+ RawEntry::Occupied(entry) => Entry::Occupied(entry),
+ RawEntry::Vacant(entry) => Entry::Vacant(VacantEntry { raw: entry, key }),
+ }
+ }
+
+ /// Returns an [`OccupiedEntry`] for the node with the given key, if one exists, allowing the
+ /// node to be accessed, mutated or removed in place.
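+ ///
+ /// # Examples
+ ///
+ /// A short sketch, following the doctest conventions of the type-level examples above, that
+ /// updates a value in place and then removes the node via the returned [`OccupiedEntry`]:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::RBTree};
+ ///
+ /// let mut tree = RBTree::new();
+ /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+ ///
+ /// // Mutate the value in place.
+ /// if let Some(mut entry) = tree.find_mut(&10) {
+ ///     *entry.get_mut() += 1;
+ /// }
+ /// assert_eq!(tree.get(&10), Some(&101));
+ ///
+ /// // The entry can also be used to take the node out of the tree.
+ /// let node = tree.find_mut(&10).unwrap().remove_node();
+ /// assert_eq!(node.to_key_value(), (10, 101));
+ /// assert!(tree.get(&10).is_none());
+ /// # Ok::<(), Error>(())
+ /// ```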
+ pub fn find_mut(&mut self, key: &K) -> Option<OccupiedEntry<'_, K, V>> {
+ match self.raw_entry(key) {
+ RawEntry::Occupied(entry) => Some(entry),
+ RawEntry::Vacant(_entry) => None,
+ }
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V> {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) };
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Equal => return Some(unsafe { &(*this).value }),
+ }
+ }
+ None
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+ self.find_mut(key).map(|node| node.into_mut())
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>> {
+ self.find_mut(key).map(OccupiedEntry::remove_node)
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ self.find_mut(key).map(OccupiedEntry::remove)
+ }
+
+ /// Returns a cursor over the tree nodes based on the given key.
+ ///
+ /// If the given key exists, the cursor starts there.
+ /// Otherwise it starts with the first larger key in sort order.
+ /// If there is no larger key, it returns [`None`].
+ pub fn cursor_lower_bound(&mut self, key: &K) -> Option<Cursor<'_, K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ let mut best_match: Option<NonNull<Node<K, V>>> = None;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ let this_key = unsafe { &(*this).key };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let left_child = unsafe { (*node).rb_left };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let right_child = unsafe { (*node).rb_right };
+ match key.cmp(this_key) {
+ Ordering::Equal => {
+ best_match = NonNull::new(this);
+ break;
+ }
+ Ordering::Greater => {
+ node = right_child;
+ }
+ Ordering::Less => {
+ let is_better_match = match best_match {
+ None => true,
+ Some(best) => {
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let best_key = unsafe { &(*best.as_ptr()).key };
+ best_key > this_key
+ }
+ };
+ if is_better_match {
+ best_match = NonNull::new(this);
+ }
+ node = left_child;
+ }
+ };
+ }
+
+ let best = best_match?;
+
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let links = unsafe { addr_of_mut!((*best.as_ptr()).links) };
+
+ NonNull::new(links).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let this = unsafe { container_of!(next, Node<K, V>, links) };
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { drop(Box::from_raw(this.cast_mut())) };
+ }
+ }
+}
+
+/// A bidirectional cursor over the tree nodes, sorted by key.
+///
+/// # Examples
+///
+/// In the following example, we obtain a cursor to the first element in the tree.
+/// The cursor allows us to iterate bidirectionally over key/value pairs in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Move the cursor, updating it to the 2nd element.
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Peek at the next element without impacting the cursor.
+/// let next = cursor.peek_next().unwrap();
+/// assert_eq!(next, (&30, &300));
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Moving past the last element causes the cursor to return [`None`].
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+/// let cursor = cursor.move_next();
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// A cursor can also be obtained at the last element in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// let mut cursor = tree.cursor_back().unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Obtaining a cursor returns [`None`] if the tree is empty.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// let mut tree: RBTree<u16, u16> = RBTree::new();
+/// assert!(tree.cursor_front().is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// [`RBTree::cursor_lower_bound`] can be used to start at an arbitrary node in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert five elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(40, 400, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(50, 500, flags::GFP_KERNEL)?;
+///
+/// // If the provided key exists, a cursor to that key is returned.
+/// let cursor = tree.cursor_lower_bound(&20).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // If the provided key doesn't exist, a cursor to the first larger element in sort order is returned.
+/// let cursor = tree.cursor_lower_bound(&25).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // If there is no larger key, [`None`] is returned.
+/// let cursor = tree.cursor_lower_bound(&55);
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// The cursor allows mutation of values in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Retrieve a cursor.
+/// let mut cursor = tree.cursor_front().unwrap();
+///
+/// // Get a mutable reference to the current value.
+/// let (k, v) = cursor.current_mut();
+/// *v = 1000;
+///
+/// // The updated value is reflected in the tree.
+/// let updated = tree.get(&10).unwrap();
+/// assert_eq!(updated, &1000);
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// It also allows node removal. The following example demonstrates the behavior of removing the current node.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Remove the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+/// cursor = cursor.remove_current().0.unwrap();
+///
+/// // If a node exists after the current element, it is returned.
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Get a cursor to the last element, and remove it.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Since there is no next node, the previous node is returned.
+/// cursor = cursor.remove_current().0.unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Removing the last element in the tree returns [`None`].
+/// assert!(cursor.remove_current().0.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Nodes adjacent to the current node can also be removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_prev` from the first element returns [`None`].
+/// assert!(cursor.remove_prev().is_none());
+///
+/// // Get a cursor to the last element.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Calling `remove_prev` removes and returns the middle element.
+/// assert_eq!(cursor.remove_prev().unwrap().to_key_value(), (20, 200));
+///
+/// // Calling `remove_next` from the last element returns [`None`].
+/// assert!(cursor.remove_next().is_none());
+///
+/// // Move to the first element
+/// cursor = cursor.move_prev().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_next` removes and returns the last element.
+/// assert_eq!(cursor.remove_next().unwrap().to_key_value(), (30, 300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+/// - `current` points to a node that is in the same [`RBTree`] as `tree`.
+pub struct Cursor<'a, K, V> {
+ tree: &'a mut RBTree<K, V>,
+ current: NonNull<bindings::rb_node>,
+}
+
+// SAFETY: The [`Cursor`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The cursor only gives out immutable references to the keys, but since it has exclusive access to those same
+// keys, `Send` is sufficient. `Sync` would also be sound, but it is more restrictive on the user.
+unsafe impl<'a, K: Send, V: Send> Send for Cursor<'a, K, V> {}
+
+// SAFETY: The [`Cursor`] gives out immutable references to K and mutable references to V,
+// so it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Cursor<'a, K, V> {}
+
+impl<'a, K, V> Cursor<'a, K, V> {
+ /// Returns references to the key and value of the current node.
+ pub fn current(&self) -> (&K, &V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have an immutable reference by the function signature.
+ unsafe { Self::to_key_value(self.current) }
+ }
+
+ /// Returns a reference to the key and a mutable reference to the value of the current node.
+ pub fn current_mut(&mut self) -> (&K, &mut V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have a mutable reference by the function signature.
+ unsafe { Self::to_key_value_mut(self.current) }
+ }
+
+ /// Remove the current node from the tree.
+ ///
+ /// Returns a tuple where the first element is a cursor to the next node, if it exists,
+ /// else the previous node, else [`None`] (if the tree becomes empty). The second element
+ /// is the removed node.
+ pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+ let prev = self.get_neighbor_raw(Direction::Prev);
+ let next = self.get_neighbor_raw(Direction::Next);
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ let node = RBTreeNode { node };
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
+
+ let current = match (prev, next) {
+ (_, Some(next)) => next,
+ (Some(prev), None) => prev,
+ (None, None) => {
+ return (None, node);
+ }
+ };
+
+ (
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ Some(Self {
+ current,
+ tree: self.tree,
+ }),
+ node,
+ )
+ }
+
+ /// Remove the previous node, returning it if it exists.
+ pub fn remove_prev(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Prev)
+ }
+
+ /// Remove the next node, returning it if it exists.
+ pub fn remove_next(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Next)
+ }
+
+ fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>> {
+ if let Some(neighbor) = self.get_neighbor_raw(direction) {
+ let neighbor = neighbor.as_ptr();
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(neighbor, addr_of_mut!(self.tree.root)) };
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ return Some(RBTreeNode { node });
+ }
+ None
+ }
+
+ /// Move the cursor to the previous node, returning [`None`] if it doesn't exist.
+ pub fn move_prev(self) -> Option<Self> {
+ self.mv(Direction::Prev)
+ }
+
+ /// Move the cursor to the next node, returning [`None`] if it doesn't exist.
+ pub fn move_next(self) -> Option<Self> {
+ self.mv(Direction::Next)
+ }
+
+ fn mv(self, direction: Direction) -> Option<Self> {
+ // INVARIANT:
+ // - `neighbor` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ self.get_neighbor_raw(direction).map(|neighbor| Self {
+ tree: self.tree,
+ current: neighbor,
+ })
+ }
+
+ /// Access the previous node without moving the cursor.
+ pub fn peek_prev(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Prev)
+ }
+
+ /// Access the next node without moving the cursor.
+ pub fn peek_next(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Next)
+ }
+
+ fn peek(&self, direction: Direction) -> Option<(&K, &V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have an immutable reference to `self`.
+ unsafe { Self::to_key_value(neighbor) }
+ })
+ }
+
+ /// Access the previous node mutably without moving the cursor.
+ pub fn peek_prev_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Prev)
+ }
+
+ /// Access the next node mutably without moving the cursor.
+ pub fn peek_next_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Next)
+ }
+
+ fn peek_mut(&mut self, direction: Direction) -> Option<(&K, &mut V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have a mutable reference to `self`.
+ unsafe { Self::to_key_value_mut(neighbor) }
+ })
+ }
+
+ fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_node>> {
+ // SAFETY: `self.current` is valid by the type invariants.
+ let neighbor = unsafe {
+ match direction {
+ Direction::Prev => bindings::rb_prev(self.current.as_ptr()),
+ Direction::Next => bindings::rb_next(self.current.as_ptr()),
+ }
+ };
+
+ NonNull::new(neighbor)
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to `node` for the duration of 'b.
+ unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees immutable access to `node`.
+ (k, unsafe { &*v })
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has mutable access to `node` for the duration of 'b.
+ unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees mutable access to `node`.
+ (k, unsafe { &mut *v })
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to the key for the duration of 'b.
+ unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let k = unsafe { &(*this).key };
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let v = unsafe { addr_of_mut!((*this).value) };
+ (k, v)
+ }
+}
+
+/// Direction for [`Cursor`] operations.
+enum Direction {
+ /// the node immediately before, in sort order
+ Prev,
+ /// the node immediately after, in sort order
+ Next,
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
+pub struct Iter<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // SAFETY: Due to `self._tree`, `k` and `v` are valid for the lifetime of `'a`.
+ self.iter_raw.next().map(|(k, v)| unsafe { (&*k, &*v) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct IterMut<'a, K, V> {
+ _tree: PhantomData<&'a mut RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same
+// keys, `Send` is sufficient. `Sync` would also be sound, but it is more restrictive on the user.
+unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
+
+// SAFETY: The [`IterMut`] gives out immutable references to K and mutable references to V, so it has the same
+// thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter_raw.next().map(|(k, v)|
+ // SAFETY: Due to `&mut self`, we have exclusive access to `k` and `v`, for the lifetime of `'a`.
+ unsafe { (&*k, &mut *v) })
+ }
+}
+
+/// A raw iterator over the nodes of a [`RBTree`].
+///
+/// # Invariants
+/// - `self.next` is a valid pointer.
+/// - `self.next` points to a node stored inside of a valid `RBTree`.
+struct IterRaw<K, V> {
+ next: *mut bindings::rb_node,
+ _phantom: PhantomData<fn() -> (K, V)>,
+}
+
+impl<K, V> Iterator for IterRaw<K, V> {
+ type Item = (*mut K, *mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ // SAFETY: By the type invariant of `IterRaw`, `self.next` is a valid node in an `RBTree`,
+ // and by the type invariant of `RBTree`, all nodes point to the links field of `Node<K, V>` objects.
+ let cur = unsafe { container_of!(self.next, Node<K, V>, links) }.cast_mut();
+
+ // SAFETY: `self.next` is a valid tree node by the type invariants.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node.
+ Some(unsafe { (addr_of_mut!((*cur).key), addr_of_mut!((*cur).value)) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by allocating it directly ([`RBTreeNodeReservation::new`]) or by turning an
+/// existing node back into one ([`RBTreeNode::into_reservation`]).
+pub struct RBTreeNodeReservation<K, V> {
+ node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: <Box<_> as BoxExt<_>>::new_uninit(flags)?,
+ })
+ }
+}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation. Those can always
+// be moved across threads.
+unsafe impl<K, V> Send for RBTreeNodeReservation<K, V> {}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation.
+unsafe impl<K, V> Sync for RBTreeNodeReservation<K, V> {}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ self.node.write(Node {
+ key,
+ value,
+ links: bindings::rb_node::default(),
+ });
+ // SAFETY: We just wrote to it.
+ let node = unsafe { self.node.assume_init() };
+ RBTreeNode { node }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
+ pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+ Ok(RBTreeNodeReservation::new(flags)?.into_node(key, value))
+ }
+
+ /// Get the key and value from inside the node.
+ pub fn to_key_value(self) -> (K, V) {
+ (self.node.key, self.node.value)
+ }
+}
+
+// SAFETY: If K and V can be sent across threads, then it's also okay to send [`RBTreeNode`] across
+// threads.
+unsafe impl<K: Send, V: Send> Send for RBTreeNode<K, V> {}
+
+// SAFETY: If K and V can be accessed without synchronization, then it's also okay to access
+// [`RBTreeNode`] without synchronization.
+unsafe impl<K: Sync, V: Sync> Sync for RBTreeNode<K, V> {}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Drop the key and value, but keep the allocation.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation, that is, memory
+ /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
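+ ///
+ /// # Examples
+ ///
+ /// A small sketch, following the doctest conventions of the type-level examples above, that
+ /// recycles the allocation of a removed node for a node with a different key and value:
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::RBTree};
+ ///
+ /// let mut tree = RBTree::new();
+ /// tree.try_create_and_insert(1, 10, flags::GFP_KERNEL)?;
+ ///
+ /// // Remove the node, keeping only its allocation.
+ /// let reservation = tree.remove_node(&1).unwrap().into_reservation();
+ ///
+ /// // Re-initialise the allocation with a new key/value and insert it; this cannot fail.
+ /// tree.insert(reservation.into_node(2, 20));
+ /// assert_eq!(tree.get(&2), Some(&20));
+ /// # Ok::<(), Error>(())
+ /// ```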
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ RBTreeNodeReservation {
+ node: Box::drop_contents(self.node),
+ }
+ }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This enum is constructed from the [`entry`] method on [`RBTree`].
+///
+/// [`entry`]: fn@RBTree::entry
+pub enum Entry<'a, K, V> {
+ /// This [`RBTree`] does not have a node with this key.
+ Vacant(VacantEntry<'a, K, V>),
+ /// This [`RBTree`] already has a node with this key.
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// Like [`Entry`], except that it doesn't have ownership of the key.
+enum RawEntry<'a, K, V> {
+ Vacant(RawVacantEntry<'a, K, V>),
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// A view into a vacant entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+pub struct VacantEntry<'a, K, V> {
+ key: K,
+ raw: RawVacantEntry<'a, K, V>,
+}
+
+/// Like [`VacantEntry`], but doesn't hold on to the key.
+///
+/// # Invariants
+/// - `parent` may be null if the new node becomes the root.
+/// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+/// null, it is a pointer to the root of the [`RBTree`].
+struct RawVacantEntry<'a, K, V> {
+ rbtree: *mut RBTree<K, V>,
+ /// The node that will become the parent of the new node if we insert one.
+ parent: *mut bindings::rb_node,
+ /// This points to the left-child or right-child field of `parent`, or `root` if `parent` is
+ /// null.
+ child_field_of_parent: *mut *mut bindings::rb_node,
+ _phantom: PhantomData<&'a mut RBTree<K, V>>,
+}
+
+impl<'a, K, V> RawVacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ ///
+ /// The `node` must have a key such that inserting it here does not break the ordering of this
+ /// [`RBTree`].
+ fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `Box::into_raw`.
+ // SAFETY: The type invariants of `RawVacantEntry` are exactly the safety requirements of `rb_link_node`.
+ unsafe { bindings::rb_link_node(node_links, self.parent, self.child_field_of_parent) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, addr_of_mut!((*self.rbtree).root)) };
+
+ // SAFETY: The node is valid until we remove it from the tree.
+ unsafe { &mut (*node).value }
+ }
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ pub fn insert(self, value: V, reservation: RBTreeNodeReservation<K, V>) -> &'a mut V {
+ self.raw.insert(reservation.into_node(self.key, value))
+ }
+}
+
+/// A view into an occupied entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+///
+/// # Invariants
+/// - `node_links` is a valid, non-null pointer to a tree node in `self.rbtree`
+pub struct OccupiedEntry<'a, K, V> {
+ rbtree: &'a mut RBTree<K, V>,
+ /// The node that this entry corresponds to.
+ node_links: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the value in the entry.
+ pub fn get(&self) -> &V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have shared access to the underlying tree, and can thus give out a shared reference.
+ unsafe { &(*container_of!(self.node_links, Node<K, V>, links)).value }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ pub fn get_mut(&mut self) -> &mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have exclusive access to the underlying tree, and can thus give out a mutable reference.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`Self::get_mut`].
+ pub fn into_mut(self) -> &'a mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - This consumes the `&'a mut RBTree<K, V>`, therefore it can give out a mutable reference that lives for `'a`.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Remove this entry from the [`RBTree`].
+ pub fn remove_node(self) -> RBTreeNode<K, V> {
+ // SAFETY: The node is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(self.node_links, &mut self.rbtree.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however, it was
+ // removed from the tree. So the invariants still hold.
+ RBTreeNode {
+ // SAFETY: The node was a node in the tree, but we removed it, so we can convert it
+ // back into a box.
+ node: unsafe {
+ Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
+ },
+ }
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ pub fn remove(self) -> V {
+ self.remove_node().node.value
+ }
+
+ /// Swap the current node for the provided node.
+ ///
+ /// The key of both nodes must be equal.
+ fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let new_node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // SAFETY: This updates the pointers so that `new_node_links` is in the tree where
+ // `self.node_links` used to be.
+ unsafe {
+ bindings::rb_replace_node(self.node_links, new_node_links, &mut self.rbtree.root)
+ };
+
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - Now that we removed this entry from the tree, we can convert the node to a box.
+ let old_node =
+ unsafe { Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
+
+ RBTreeNode { node: old_node }
+ }
+}
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index 39679a960c1a..67bf9d37ddb5 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -136,7 +136,7 @@
///
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
-/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+/// [`printk`]: https://docs.kernel.org/core-api/printk-basics.html
/// [`pr_info`]: crate::pr_info!
/// [`pr_debug`]: crate::pr_debug!
#[macro_export]
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3673496c2363..3021f30fd822 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -12,12 +12,13 @@
//! 2. It does not support weak references, which allows it to be half the size.
//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//! 5. The object in [`Arc`] is pinned implicitly.
//!
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
- error::{self, Error},
+ bindings,
init::{self, InPlaceInit, Init, PinInit},
try_init,
types::{ForeignOwnable, Opaque},
@@ -209,28 +210,6 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
}
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- #[inline]
- pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::pin_init(init, flags).map(|u| u.into())
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
- #[inline]
- pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::init(init, flags).map(|u| u.into())
- }
}
impl<T: ?Sized> Arc<T> {
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index bd189d646adb..9e7ca066355c 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -7,8 +7,9 @@ use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::MaybeUninit,
+ mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
+ pin::Pin,
ptr::NonNull,
};
@@ -26,7 +27,10 @@ pub trait ForeignOwnable: Sized {
/// Converts a Rust-owned object to a foreign-owned one.
///
- /// The foreign representation is a pointer to void.
+ /// The foreign representation is a pointer to void. There are no guarantees for this pointer.
+ /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in
+ /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`], or
+ /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
fn into_foreign(self) -> *const core::ffi::c_void;
/// Borrows a foreign-owned object.
@@ -89,6 +93,32 @@ impl<T: 'static> ForeignOwnable for Box<T> {
}
}
+impl<T: 'static> ForeignOwnable for Pin<Box<T>> {
+ type Borrowed<'a> = Pin<&'a T>;
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ // SAFETY: We are still treating the box as pinned.
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ let r = unsafe { &*ptr.cast() };
+
+ // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+ unsafe { Pin::new_unchecked(r) }
+ }
+
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
+ }
+}
+
impl ForeignOwnable for () {
type Borrowed<'a> = ();
@@ -366,6 +396,35 @@ impl<T: AlwaysRefCounted> ARef<T> {
_p: PhantomData,
}
}
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap();
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
}
impl<T: AlwaysRefCounted> Clone for ARef<T> {
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 5be0cb9db3ee..a626b1145e5c 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -2,6 +2,10 @@
//! Crate for all kernel procedural macros.
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
#[macro_use]
mod quote;
mod concat_idents;
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 7a5b899e47b7..aef3b132f32b 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -262,6 +262,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
+ #[used]
+ #[link_section = \".init.data\"]
+ static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
#[no_mangle]
pub extern \"C\" fn cleanup_module() {{
// SAFETY:
@@ -273,6 +279,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
unsafe {{ __exit() }}
}}
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ #[link_section = \".exit.data\"]
+ static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
+
// Built-in modules are initialized through an initcall pointer
// and the identifiers need to be unique.
#[cfg(not(MODULE))]
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index b382c696c877..59eefe2fed10 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -927,7 +927,6 @@ static const struct file_operations mtty_save_fops = {
.unlocked_ioctl = mtty_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = mtty_release_migf,
- .llseek = no_llseek,
};
static void mtty_save_state(struct mdev_state *mdev_state)
@@ -1082,7 +1081,6 @@ static const struct file_operations mtty_resume_fops = {
.owner = THIS_MODULE,
.write = mtty_resume_write,
.release = mtty_release_migf,
- .llseek = no_llseek,
};
static struct mtty_migration_file *
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 3500a3d62f0d..785a491e5996 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -64,3 +64,11 @@ ld-version := $(shell,set -- $(ld-info) && echo $2)
cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1))
m32-flag := $(cc-option-bit,-m32)
m64-flag := $(cc-option-bit,-m64)
+
+# $(rustc-option,<flag>)
+# Return y if the Rust compiler supports <flag>, n otherwise
+# Calls to this should be guarded so that they are not evaluated if
+# CONFIG_RUST_IS_AVAILABLE is not set.
+# If you are testing for unstable features, consider testing RUSTC_VERSION
+# instead, as unstable features may be incomplete even when the flag is accepted.
+rustc-option = $(success,trap "rm -rf .tmp_$$" EXIT; mkdir .tmp_$$; $(RUSTC) $(1) --crate-type=rlib /dev/null --out-dir=.tmp_$$ -o .tmp_$$/tmp.rlib)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 8403eba15457..8f423a1faf50 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -273,10 +273,15 @@ rust_common_cmd = \
# would not match each other.
quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
- cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $<
+ cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $< $(cmd_objtool)
+
+define rule_rustc_o_rs
+ $(call cmd_and_fixdep,rustc_o_rs)
+ $(call cmd,gen_objtooldep)
+endef
$(obj)/%.o: $(obj)/%.rs FORCE
- +$(call if_changed_dep,rustc_o_rs)
+ +$(call if_changed_rule,rustc_o_rs)
quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
cmd_rustc_rsi_rs = \
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 92be0c9a13ee..057305eae85c 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -72,3 +72,18 @@ clang-min-version = $(call test-ge, $(CONFIG_CLANG_VERSION), $1)
# ld-option
# Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+
+# __rustc-option
+# Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
+__rustc-option = $(call try-run,\
+ $(1) $(2) $(3) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",$(3),$(4))
+
+# rustc-option
+# Usage: rustflags-y += $(call rustc-option,-Cinstrument-coverage,-Zinstrument-coverage)
+rustc-option = $(call __rustc-option, $(RUSTC),\
+ $(KBUILD_RUSTFLAGS),$(1),$(2))
+
+# rustc-option-yn
+# Usage: flag := $(call rustc-option-yn,-Cinstrument-coverage)
+rustc-option-yn = $(call try-run,\
+ $(RUSTC) $(KBUILD_RUSTFLAGS) $(1) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",y,n)
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index aab4154af00a..693dbbebebba 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -12,6 +12,11 @@ endif
KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+rustc-param = $(call rustc-option, -Cllvm-args=-$(1),)
+
+check-args = $(foreach arg,$(2),$(call $(1),$(arg)))
+
+kasan_params :=
ifdef CONFIG_KASAN_STACK
stack_enable := 1
@@ -41,39 +46,59 @@ CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
$(call cc-option, -fsanitize=kernel-address \
-mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
-# Now, add other parameters enabled similarly in both GCC and Clang.
-# As some of them are not supported by older compilers, use cc-param.
-CFLAGS_KASAN += $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=$(stack_enable)) \
- $(call cc-param,asan-instrument-allocas=1) \
- $(call cc-param,asan-globals=1)
+# The minimum supported `rustc` version has a minimum supported LLVM
+# version late enough that we can assume support for -asan-mapping-offset.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-address \
+ -Zsanitizer-recover=kernel-address \
+ -Cllvm-args=-asan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+
+# Now, add other parameters enabled similarly in GCC, Clang, and rustc.
+# As some of them are not supported by older compilers, these will be filtered
+# through `cc-param` or `rustc-param` as applicable.
+kasan_params += asan-instrumentation-with-call-threshold=$(call_threshold) \
+ asan-stack=$(stack_enable) \
+ asan-instrument-allocas=1 \
+ asan-globals=1
# Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
# instead. With compilers that don't support this option, compiler-inserted
# memintrinsics won't be checked by KASAN on GENERIC_ENTRY architectures.
-CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
+kasan_params += asan-kernel-mem-intrinsic-prefix=1
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
+CFLAGS_KASAN := -fsanitize=kernel-hwaddress
+
+# This sets flags that will enable SW_TAGS KASAN once it is supported in Rust. These
+# will not work today, and this is guarded against in the dependencies for CONFIG_RUST.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-hwaddress \
+ -Zsanitizer-recover=kernel-hwaddress
+
ifdef CONFIG_KASAN_INLINE
- instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+ kasan_params += hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
else
- instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+ kasan_params += hwasan-instrument-with-calls=1
endif
-CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
- $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
- $(call cc-param,hwasan-use-short-granules=0) \
- $(call cc-param,hwasan-inline-all-checks=0) \
- $(instrumentation_flags)
+kasan_params += hwasan-instrument-stack=$(stack_enable) \
+ hwasan-use-short-granules=0 \
+ hwasan-inline-all-checks=0
# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
- CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
+ kasan_params += hwasan-kernel-mem-intrinsic-prefix=1
endif
endif # CONFIG_KASAN_SW_TAGS
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
+# Add all as-supported KASAN LLVM parameters requested by the configuration.
+CFLAGS_KASAN += $(call check-args, cc-param, $(kasan_params))
+
+ifdef CONFIG_RUST
+ # Avoid calling `rustc-param` unless Rust is enabled.
+ RUSTFLAGS_KASAN += $(call check-args, rustc-param, $(kasan_params))
+endif # CONFIG_RUST
+
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE RUSTFLAGS_KASAN
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 29bfd6ed3e3f..01a9f567d5af 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -146,6 +146,9 @@ ifneq ($(CONFIG_KASAN_HW_TAGS),y)
_c_flags += $(if $(patsubst n%,, \
$(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
+_rust_flags += $(if $(patsubst n%,, \
+ $(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
+ $(RUSTFLAGS_KASAN))
endif
endif
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 4d81ed9af294..d97720943189 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -53,9 +53,11 @@ $(foreach x, % :, $(if $(findstring $x, $(dst)), \
$(error module installation path cannot contain '$x')))
suffix-y :=
+ifdef CONFIG_MODULE_COMPRESS_ALL
suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz
suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz
suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst
+endif
modules := $(patsubst $(extmod_prefix)%.o, $(dst)/%.ko$(suffix-y), $(modules))
install-$(CONFIG_MODULES) += $(modules)
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
index df00d6619b06..50ab60c81f13 100644
--- a/scripts/coccinelle/api/stream_open.cocci
+++ b/scripts/coccinelle/api/stream_open.cocci
@@ -131,7 +131,6 @@ identifier llseek_f;
identifier fops0.fops;
@@
struct file_operations fops = {
- .llseek = no_llseek,
};
@ has_noop_llseek @
diff --git a/scripts/coccinelle/api/string_choices.cocci b/scripts/coccinelle/api/string_choices.cocci
index 5e729f187f22..375045086912 100644
--- a/scripts/coccinelle/api/string_choices.cocci
+++ b/scripts/coccinelle/api/string_choices.cocci
@@ -14,23 +14,18 @@ expression E;
- ((E == 1) ? "" : "s")
+ str_plural(E)
|
-- ((E != 1) ? "s" : "")
-+ str_plural(E)
-|
- ((E > 1) ? "s" : "")
+ str_plural(E)
)
-@str_plural_r depends on !patch exists@
+@str_plural_r depends on !patch@
expression E;
position P;
@@
(
-* ((E@P == 1) ? "" : "s")
-|
-* ((E@P != 1) ? "s" : "")
+* (E@P == 1) ? "" : "s"
|
-* ((E@P > 1) ? "s" : "")
+* (E@P > 1) ? "s" : ""
)
@script:python depends on report@
@@ -40,21 +35,17 @@ e << str_plural_r.E;
coccilib.report.print_report(p[0], "opportunity for str_plural(%s)" % e)
-@str_up_down depends on patch@
+@str_up_down depends on patch disable neg_if_exp@
expression E;
@@
-(
- ((E) ? "up" : "down")
+ str_up_down(E)
-)
-@str_up_down_r depends on !patch exists@
+@str_up_down_r depends on !patch disable neg_if_exp@
expression E;
position P;
@@
-(
-* ((E@P) ? "up" : "down")
-)
+* E@P ? "up" : "down"
@script:python depends on report@
p << str_up_down_r.P;
@@ -63,21 +54,17 @@ e << str_up_down_r.E;
coccilib.report.print_report(p[0], "opportunity for str_up_down(%s)" % e)
-@str_down_up depends on patch@
+@str_down_up depends on patch disable neg_if_exp@
expression E;
@@
-(
- ((E) ? "down" : "up")
+ str_down_up(E)
-)
-@str_down_up_r depends on !patch exists@
+@str_down_up_r depends on !patch disable neg_if_exp@
expression E;
position P;
@@
-(
-* ((E@P) ? "down" : "up")
-)
+* E@P ? "down" : "up"
@script:python depends on report@
p << str_down_up_r.P;
@@ -85,3 +72,231 @@ e << str_down_up_r.E;
@@
coccilib.report.print_report(p[0], "opportunity for str_down_up(%s)" % e)
+
+@str_true_false depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "true" : "false")
++ str_true_false(E)
+
+@str_true_false_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "true" : "false"
+
+@script:python depends on report@
+p << str_true_false_r.P;
+e << str_true_false_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_true_false(%s)" % e)
+
+@str_false_true depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "false" : "true")
++ str_false_true(E)
+
+@str_false_true_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "false" : "true"
+
+@script:python depends on report@
+p << str_false_true_r.P;
+e << str_false_true_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_false_true(%s)" % e)
+
+@str_hi_lo depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "hi" : "lo")
++ str_hi_lo(E)
+
+@str_hi_lo_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "hi" : "lo"
+
+@script:python depends on report@
+p << str_hi_lo_r.P;
+e << str_hi_lo_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_hi_lo(%s)" % e)
+
+@str_high_low depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "high" : "low")
++ str_high_low(E)
+
+@str_high_low_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "high" : "low"
+
+@script:python depends on report@
+p << str_high_low_r.P;
+e << str_high_low_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_high_low(%s)" % e)
+
+@str_lo_hi depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "lo" : "hi")
++ str_lo_hi(E)
+
+@str_lo_hi_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "lo" : "hi"
+
+@script:python depends on report@
+p << str_lo_hi_r.P;
+e << str_lo_hi_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_lo_hi(%s)" % e)
+
+@str_low_high depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "low" : "high")
++ str_low_high(E)
+
+@str_low_high_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "low" : "high"
+
+@script:python depends on report@
+p << str_low_high_r.P;
+e << str_low_high_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_low_high(%s)" % e)
+
+@str_enable_disable depends on patch@
+expression E;
+@@
+- ((E) ? "enable" : "disable")
++ str_enable_disable(E)
+
+@str_enable_disable_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "enable" : "disable"
+
+@script:python depends on report@
+p << str_enable_disable_r.P;
+e << str_enable_disable_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_enable_disable(%s)" % e)
+
+@str_enabled_disabled depends on patch@
+expression E;
+@@
+- ((E) ? "enabled" : "disabled")
++ str_enabled_disabled(E)
+
+@str_enabled_disabled_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "enabled" : "disabled"
+
+@script:python depends on report@
+p << str_enabled_disabled_r.P;
+e << str_enabled_disabled_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_enabled_disabled(%s)" % e)
+
+@str_read_write depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "read" : "write")
++ str_read_write(E)
+
+@str_read_write_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "read" : "write"
+
+@script:python depends on report@
+p << str_read_write_r.P;
+e << str_read_write_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_read_write(%s)" % e)
+
+@str_write_read depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "write" : "read")
++ str_write_read(E)
+
+@str_write_read_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "write" : "read"
+
+@script:python depends on report@
+p << str_write_read_r.P;
+e << str_write_read_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_write_read(%s)" % e)
+
+@str_on_off depends on patch@
+expression E;
+@@
+- ((E) ? "on" : "off")
++ str_on_off(E)
+
+@str_on_off_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "on" : "off"
+
+@script:python depends on report@
+p << str_on_off_r.P;
+e << str_on_off_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_on_off(%s)" % e)
+
+@str_yes_no depends on patch@
+expression E;
+@@
+- ((E) ? "yes" : "no")
++ str_yes_no(E)
+
+@str_yes_no_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "yes" : "no"
+
+@script:python depends on report@
+p << str_yes_no_r.P;
+e << str_yes_no_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_yes_no(%s)" % e)
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 404edf7587e0..0d00ac3723b5 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -20,12 +20,28 @@ enum Value {
Boolean(bool),
Number(i32),
String(String),
+ Array(Vec<Value>),
Object(Object),
}
type Object = Vec<(String, Value)>;
-/// Minimal "almost JSON" generator (e.g. no `null`s, no arrays, no escaping),
+fn comma_sep<T>(
+ seq: &[T],
+ formatter: &mut Formatter<'_>,
+ f: impl Fn(&mut Formatter<'_>, &T) -> Result,
+) -> Result {
+ if let [ref rest @ .., ref last] = seq[..] {
+ for v in rest {
+ f(formatter, v)?;
+ formatter.write_str(",")?;
+ }
+ f(formatter, last)?;
+ }
+ Ok(())
+}
+
+/// Minimal "almost JSON" generator (e.g. no `null`s, no escaping),
/// enough for this purpose.
impl Display for Value {
fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
@@ -33,59 +49,67 @@ impl Display for Value {
Value::Boolean(boolean) => write!(formatter, "{}", boolean),
Value::Number(number) => write!(formatter, "{}", number),
Value::String(string) => write!(formatter, "\"{}\"", string),
+ Value::Array(values) => {
+ formatter.write_str("[")?;
+ comma_sep(&values[..], formatter, |formatter, v| v.fmt(formatter))?;
+ formatter.write_str("]")
+ }
Value::Object(object) => {
formatter.write_str("{")?;
- if let [ref rest @ .., ref last] = object[..] {
- for (key, value) in rest {
- write!(formatter, "\"{}\": {},", key, value)?;
- }
- write!(formatter, "\"{}\": {}", last.0, last.1)?;
- }
+ comma_sep(&object[..], formatter, |formatter, v| {
+ write!(formatter, "\"{}\": {}", v.0, v.1)
+ })?;
formatter.write_str("}")
}
}
}
}
-struct TargetSpec(Object);
-
-impl TargetSpec {
- fn new() -> TargetSpec {
- TargetSpec(Vec::new())
+impl From<bool> for Value {
+ fn from(value: bool) -> Self {
+ Self::Boolean(value)
}
}
-trait Push<T> {
- fn push(&mut self, key: &str, value: T);
+impl From<i32> for Value {
+ fn from(value: i32) -> Self {
+ Self::Number(value)
+ }
}
-impl Push<bool> for TargetSpec {
- fn push(&mut self, key: &str, value: bool) {
- self.0.push((key.to_string(), Value::Boolean(value)));
+impl From<String> for Value {
+ fn from(value: String) -> Self {
+ Self::String(value)
}
}
-impl Push<i32> for TargetSpec {
- fn push(&mut self, key: &str, value: i32) {
- self.0.push((key.to_string(), Value::Number(value)));
+impl From<&str> for Value {
+ fn from(value: &str) -> Self {
+ Self::String(value.to_string())
}
}
-impl Push<String> for TargetSpec {
- fn push(&mut self, key: &str, value: String) {
- self.0.push((key.to_string(), Value::String(value)));
+impl From<Object> for Value {
+ fn from(object: Object) -> Self {
+ Self::Object(object)
}
}
-impl Push<&str> for TargetSpec {
- fn push(&mut self, key: &str, value: &str) {
- self.push(key, value.to_string());
+impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
+ fn from(i: [T; N]) -> Self {
+ Self::Array(i.into_iter().map(|v| v.into()).collect())
}
}
-impl Push<Object> for TargetSpec {
- fn push(&mut self, key: &str, value: Object) {
- self.0.push((key.to_string(), Value::Object(value)));
+struct TargetSpec(Object);
+
+impl TargetSpec {
+ fn new() -> TargetSpec {
+ TargetSpec(Vec::new())
+ }
+
+ fn push(&mut self, key: &str, value: impl Into<Value>) {
+ self.0.push((key.to_string(), value.into()));
}
}
@@ -164,10 +188,26 @@ fn main() {
);
let mut features = "-mmx,+soft-float".to_string();
if cfg.has("MITIGATION_RETPOLINE") {
+ // The kernel uses `-mretpoline-external-thunk` (for Clang), which Clang maps to the
+ // target feature of the same name plus the other two target features in
+ // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+ // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+ // flag); see https://github.com/rust-lang/rust/issues/116852.
features += ",+retpoline-external-thunk";
+ features += ",+retpoline-indirect-branches";
+ features += ",+retpoline-indirect-calls";
+ }
+ if cfg.has("MITIGATION_SLS") {
+ // The kernel uses `-mharden-sls=all`, which Clang maps to both these target features in
+ // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+ // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+ // flag); see https://github.com/rust-lang/rust/issues/116851.
+ features += ",+harden-sls-ijmp";
+ features += ",+harden-sls-ret";
}
ts.push("features", features);
ts.push("llvm-target", "x86_64-linux-gnu");
+ ts.push("supported-sanitizers", ["kcfi", "kernel-address"]);
ts.push("target-pointer-width", "64");
} else if cfg.has("X86_32") {
// This only works on UML, as i386 otherwise needs regparm support in rustc
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 518200813d4e..9c7b404defbd 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -153,6 +153,10 @@ int main(void)
DEVID_FIELD(i3c_device_id, part_id);
DEVID_FIELD(i3c_device_id, extra_info);
+ DEVID(slim_device_id);
+ DEVID_FIELD(slim_device_id, manf_id);
+ DEVID_FIELD(slim_device_id, prod_code);
+
DEVID(spi_device_id);
DEVID_FIELD(spi_device_id, name);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5d1c61fa5a55..99dce93a4188 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -960,6 +960,16 @@ static int do_i3c_entry(const char *filename, void *symval,
return 1;
}
+static int do_slim_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD(symval, slim_device_id, manf_id);
+ DEF_FIELD(symval, slim_device_id, prod_code);
+
+ sprintf(alias, "slim:%x:%x:*", manf_id, prod_code);
+
+ return 1;
+}
+
/* Looks like: spi:S */
static int do_spi_entry(const char *filename, void *symval,
char *alias)
@@ -1555,6 +1565,7 @@ static const struct devtable devtable[] = {
{"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
{"i2c", SIZE_i2c_device_id, do_i2c_entry},
{"i3c", SIZE_i3c_device_id, do_i3c_entry},
+ {"slim", SIZE_slim_device_id, do_slim_entry},
{"spi", SIZE_spi_device_id, do_spi_entry},
{"dmi", SIZE_dmi_system_id, do_dmi_entry},
{"platform", SIZE_platform_device_id, do_platform_entry},
diff --git a/scripts/rustc-version.sh b/scripts/rustc-version.sh
new file mode 100755
index 000000000000..4e22593e2eab
--- /dev/null
+++ b/scripts/rustc-version.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Usage: $ ./rustc-version.sh rustc
+#
+# Print the Rust compiler version in a 6- or 7-digit form.
+
+# Convert the version string x.y.z to a canonical up-to-7-digit form.
+#
+# Note that this function uses one more digit (compared to other
+# instances in other version scripts) to give a bit more space to
+# `rustc` since it will reach 1.100.0 in late 2026.
+get_canonical_version()
+{
+ IFS=.
+ set -- $1
+ echo $((100000 * $1 + 100 * $2 + $3))
+}
+
+if output=$("$@" --version 2>/dev/null); then
+ set -- $output
+ get_canonical_version $2
+else
+ echo 0
+ exit 1
+fi
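For reference, the canonical form packs major, minor and patch as major*100000 + minor*100 + patch, so rustc 1.78.0 becomes 107800 and a hypothetical 1.100.1 becomes 110001. A minimal standalone C sketch of the same arithmetic:

#include <stdio.h>

/* Same packing as get_canonical_version() above. */
static long canonical_version(int major, int minor, int patch)
{
	return 100000L * major + 100L * minor + patch;
}

int main(void)
{
	printf("%ld\n", canonical_version(1, 78, 0));  /* prints 107800 */
	printf("%ld\n", canonical_version(1, 100, 1)); /* prints 110001 */
	return 0;
}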
diff --git a/security/security.c b/security/security.c
index 4564a0a1e4ef..6875eb4a59fc 100644
--- a/security/security.c
+++ b/security/security.c
@@ -5681,7 +5681,7 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return call_int_hook(bpf_token_create, token, attr, path);
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 94c523140125..fc926d3cac6e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6933,7 +6933,7 @@ static void selinux_bpf_prog_free(struct bpf_prog *prog)
}
static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
struct bpf_security_struct *bpfsec;
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 1e0dd1a6d0b0..90eccc6cd464 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -13,6 +13,21 @@ config SECURITY_TOMOYO
found at <https://tomoyo.sourceforge.net/>.
If you are unsure how to answer this question, answer N.
+config SECURITY_TOMOYO_LKM
+ bool "Cut out most of TOMOYO's code to a loadable kernel module"
+ default n
+ depends on SECURITY_TOMOYO
+ depends on MODULES
+ help
+	  Say Y here if you want to include TOMOYO without bloating the
+	  vmlinux file. If you say Y, most of the TOMOYO code is split out
+	  into a loadable kernel module named tomoyo.ko. This option is
+	  useful for kernels built by Linux distributors where TOMOYO is
+	  included but not enabled by default. Note that tomoyo.ko is
+	  normally loaded automatically, immediately before the userspace
+	  policy loader is called; if you want to activate TOMOYO without
+	  calling the userspace policy loader, be sure to load tomoyo.ko
+	  explicitly.
+
config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
int "Default maximal count for learning mode"
default 2048
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 55c67b9846a9..287a7d16fa15 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
+tomoyo-objs := audit.o common.o condition.o domain.o environ.o file.o gc.o group.o memory.o mount.o network.o proxy.o realpath.o securityfs_if.o util.o
+obj-y += init.o load_policy.o
+ifdef CONFIG_SECURITY_TOMOYO_LKM
+obj-m += tomoyo.o
+else
+obj-y += tomoyo.o
+endif
targets += builtin-policy.h
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 5c7b059a332a..c0ef014f8009 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -998,8 +998,13 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
- if (p)
+ if (p) {
domain = tomoyo_task(p)->domain_info;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ if (!domain)
+ domain = &tomoyo_kernel_domain;
+#endif
+ }
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -1710,8 +1715,13 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
- if (p)
+ if (p) {
domain = tomoyo_task(p)->domain_info;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ if (!domain)
+ domain = &tomoyo_kernel_domain;
+#endif
+ }
rcu_read_unlock();
if (!domain)
return;
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 0e8e2e959aef..4f6c52a9f478 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -978,6 +978,7 @@ int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
int tomoyo_init_request_info(struct tomoyo_request_info *r,
struct tomoyo_domain_info *domain,
const u8 index);
+int __init tomoyo_interface_init(void);
int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
const unsigned int mode, unsigned int dev);
int tomoyo_mount_permission(const char *dev_name, const struct path *path,
@@ -1214,10 +1215,14 @@ static inline void tomoyo_put_group(struct tomoyo_group *group)
*
* Returns pointer to "struct tomoyo_task" for specified thread.
*/
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+extern struct tomoyo_task *tomoyo_task(struct task_struct *task);
+#else
static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
{
return task->security + tomoyo_blob_sizes.lbs_task;
}
+#endif
/**
* tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry.
@@ -1284,4 +1289,71 @@ static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void)
pos = srcu_dereference((head)->next, &tomoyo_ss); \
for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss))
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) typedef RET (NAME##_t)(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+struct tomoyo_hooks {
+ cred_prepare_t *cred_prepare;
+ bprm_committed_creds_t *bprm_committed_creds;
+ task_alloc_t *task_alloc;
+ task_free_t *task_free;
+ bprm_check_security_t *bprm_check_security;
+ file_fcntl_t *file_fcntl;
+ file_open_t *file_open;
+ file_truncate_t *file_truncate;
+ path_truncate_t *path_truncate;
+ path_unlink_t *path_unlink;
+ path_mkdir_t *path_mkdir;
+ path_rmdir_t *path_rmdir;
+ path_symlink_t *path_symlink;
+ path_mknod_t *path_mknod;
+ path_link_t *path_link;
+ path_rename_t *path_rename;
+ inode_getattr_t *inode_getattr;
+ file_ioctl_t *file_ioctl;
+ file_ioctl_compat_t *file_ioctl_compat;
+ path_chmod_t *path_chmod;
+ path_chown_t *path_chown;
+ path_chroot_t *path_chroot;
+ sb_mount_t *sb_mount;
+ sb_umount_t *sb_umount;
+ sb_pivotroot_t *sb_pivotroot;
+ socket_bind_t *socket_bind;
+ socket_connect_t *socket_connect;
+ socket_listen_t *socket_listen;
+ socket_sendmsg_t *socket_sendmsg;
+};
+
+extern void tomoyo_register_hooks(const struct tomoyo_hooks *tomoyo_hooks);
+
+struct tomoyo_operations {
+ void (*check_profile)(void);
+ int enabled;
+};
+
+extern struct tomoyo_operations tomoyo_ops;
+
+/*
+ * Temporary hack: functions needed by tomoyo.ko. This will be removed
+ * after all functions are marked as EXPORT_SYMBOL_GPL().
+ */
+struct tomoyo_tmp_exports {
+ struct task_struct * (*find_task_by_vpid)(pid_t nr);
+ struct task_struct * (*find_task_by_pid_ns)(pid_t nr, struct pid_namespace *ns);
+ void (*put_filesystem)(struct file_system_type *fs);
+ struct file * (*get_mm_exe_file)(struct mm_struct *mm);
+ char * (*d_absolute_path)(const struct path *path, char *buf, int buflen);
+};
+extern const struct tomoyo_tmp_exports tomoyo_tmp_exports;
+#define find_task_by_vpid tomoyo_tmp_exports.find_task_by_vpid
+#define find_task_by_pid_ns tomoyo_tmp_exports.find_task_by_pid_ns
+#define put_filesystem tomoyo_tmp_exports.put_filesystem
+#define get_mm_exe_file tomoyo_tmp_exports.get_mm_exe_file
+#define d_absolute_path tomoyo_tmp_exports.d_absolute_path
+
+#endif /* defined(CONFIG_SECURITY_TOMOYO_LKM) */
+
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
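The LSM_HOOK block above, together with the include of linux/lsm_hook_defs.h, generates one function-pointer typedef per LSM hook so that struct tomoyo_hooks can mirror the hook signatures without repeating them. Roughly, expanding the macro for two representative hooks yields something like the following hand-written illustration (not literal preprocessor output):

/* What the LSM_HOOK() expansion effectively produces for two hooks
 * (signatures taken from linux/lsm_hook_defs.h): */
struct file;
struct path;
typedef unsigned short umode_t;	/* kernel type, repeated for this sketch */

typedef int (file_open_t)(struct file *file);
typedef int (path_chmod_t)(const struct path *path, umode_t mode);

/* ...which is what lets struct tomoyo_hooks hold matching pointers: */
struct tomoyo_hooks_excerpt {
	file_open_t *file_open;
	path_chmod_t *path_chmod;
};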
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 90b53500a236..aed9e3ef2c9e 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -723,10 +723,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
/* Get symlink's pathname of program. */
- retval = -ENOENT;
exename.name = tomoyo_realpath_nofollow(original_name);
- if (!exename.name)
- goto out;
+ if (!exename.name) {
+ /* Fallback to realpath if symlink's pathname does not exist. */
+ exename.name = tomoyo_realpath_from_path(&bprm->file->f_path);
+ if (!exename.name)
+ goto out;
+ }
tomoyo_fill_path_info(&exename);
retry:
/* Check 'aggregator' directive. */
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index 026e29ea3796..6eccca150839 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -9,6 +9,9 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+/* Lock for GC. */
+DEFINE_SRCU(tomoyo_ss);
+
/**
* tomoyo_memory_free - Free memory for elements.
*
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/hooks.h
index 04a92c3d65d4..58929bb71477 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/hooks.h
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * security/tomoyo/tomoyo.c
+ * security/tomoyo/hooks.h
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
-#include <linux/lsm_hooks.h>
-#include <uapi/linux/lsm.h>
#include "common.h"
/**
@@ -18,10 +16,6 @@ struct tomoyo_domain_info *tomoyo_domain(void)
{
struct tomoyo_task *s = tomoyo_task(current);
- if (s->old_domain_info && !current->in_execve) {
- atomic_dec(&s->old_domain_info->users);
- s->old_domain_info = NULL;
- }
return s->domain_info;
}
@@ -62,26 +56,6 @@ static void tomoyo_bprm_committed_creds(const struct linux_binprm *bprm)
s->old_domain_info = NULL;
}
-#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
-/**
- * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
- *
- * @bprm: Pointer to "struct linux_binprm".
- *
- * Returns 0.
- */
-static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
-{
- /*
- * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
- * for the first time.
- */
- if (!tomoyo_policy_loaded)
- tomoyo_load_policy(bprm->filename);
- return 0;
-}
-#endif
-
/**
* tomoyo_bprm_check_security - Target for security_bprm_check().
*
@@ -501,10 +475,6 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return tomoyo_socket_sendmsg_permission(sock, msg, size);
}
-struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = {
- .lbs_task = sizeof(struct tomoyo_task),
-};
-
/**
* tomoyo_task_alloc - Target for security_task_alloc().
*
@@ -543,81 +513,3 @@ static void tomoyo_task_free(struct task_struct *task)
s->old_domain_info = NULL;
}
}
-
-static const struct lsm_id tomoyo_lsmid = {
- .name = "tomoyo",
- .id = LSM_ID_TOMOYO,
-};
-
-/*
- * tomoyo_security_ops is a "struct security_operations" which is used for
- * registering TOMOYO.
- */
-static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
- LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
- LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
- LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
- LSM_HOOK_INIT(task_free, tomoyo_task_free),
-#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
- LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
-#endif
- LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
- LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
- LSM_HOOK_INIT(file_open, tomoyo_file_open),
- LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
- LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
- LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
- LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
- LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
- LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
- LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
- LSM_HOOK_INIT(path_link, tomoyo_path_link),
- LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
- LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
- LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
- LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
- LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
- LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
- LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
- LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
- LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
- LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
- LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
- LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
- LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
- LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
-};
-
-/* Lock for GC. */
-DEFINE_SRCU(tomoyo_ss);
-
-int tomoyo_enabled __ro_after_init = 1;
-
-/**
- * tomoyo_init - Register TOMOYO Linux as a LSM module.
- *
- * Returns 0.
- */
-static int __init tomoyo_init(void)
-{
- struct tomoyo_task *s = tomoyo_task(current);
-
- /* register ourselves with the security framework */
- security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
- &tomoyo_lsmid);
- pr_info("TOMOYO Linux initialized\n");
- s->domain_info = &tomoyo_kernel_domain;
- atomic_inc(&tomoyo_kernel_domain.users);
- s->old_domain_info = NULL;
- tomoyo_mm_init();
-
- return 0;
-}
-
-DEFINE_LSM(tomoyo) = {
- .name = "tomoyo",
- .enabled = &tomoyo_enabled,
- .flags = LSM_FLAG_LEGACY_MAJOR,
- .blobs = &tomoyo_blob_sizes,
- .init = tomoyo_init,
-};
diff --git a/security/tomoyo/init.c b/security/tomoyo/init.c
new file mode 100644
index 000000000000..034e7db22d4e
--- /dev/null
+++ b/security/tomoyo/init.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/init.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
+#include "common.h"
+
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
+
+#include "hooks.h"
+
+#else
+
+#define DEFINE_STATIC_CALL_PROXY(NAME) \
+ static NAME##_t tomoyo_##NAME; \
+ DEFINE_STATIC_CALL_RET0(tomoyo_##NAME, tomoyo_##NAME);
+DEFINE_STATIC_CALL_PROXY(cred_prepare)
+DEFINE_STATIC_CALL_PROXY(bprm_committed_creds)
+DEFINE_STATIC_CALL_PROXY(bprm_check_security)
+DEFINE_STATIC_CALL_PROXY(inode_getattr)
+DEFINE_STATIC_CALL_PROXY(path_truncate)
+DEFINE_STATIC_CALL_PROXY(file_truncate)
+DEFINE_STATIC_CALL_PROXY(path_unlink)
+DEFINE_STATIC_CALL_PROXY(path_mkdir)
+DEFINE_STATIC_CALL_PROXY(path_rmdir)
+DEFINE_STATIC_CALL_PROXY(path_symlink)
+DEFINE_STATIC_CALL_PROXY(path_mknod)
+DEFINE_STATIC_CALL_PROXY(path_link)
+DEFINE_STATIC_CALL_PROXY(path_rename)
+DEFINE_STATIC_CALL_PROXY(file_fcntl)
+DEFINE_STATIC_CALL_PROXY(file_open)
+DEFINE_STATIC_CALL_PROXY(file_ioctl)
+DEFINE_STATIC_CALL_PROXY(path_chmod)
+DEFINE_STATIC_CALL_PROXY(path_chown)
+DEFINE_STATIC_CALL_PROXY(path_chroot)
+DEFINE_STATIC_CALL_PROXY(sb_mount)
+DEFINE_STATIC_CALL_PROXY(sb_umount)
+DEFINE_STATIC_CALL_PROXY(sb_pivotroot)
+DEFINE_STATIC_CALL_PROXY(socket_listen)
+DEFINE_STATIC_CALL_PROXY(socket_connect)
+DEFINE_STATIC_CALL_PROXY(socket_bind)
+DEFINE_STATIC_CALL_PROXY(socket_sendmsg)
+DEFINE_STATIC_CALL_PROXY(task_alloc)
+DEFINE_STATIC_CALL_PROXY(task_free)
+#undef DEFINE_STATIC_CALL_PROXY
+
+static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp)
+{
+ return static_call(tomoyo_cred_prepare)(new, old, gfp);
+}
+
+static void tomoyo_bprm_committed_creds(const struct linux_binprm *bprm)
+{
+ static_call(tomoyo_bprm_committed_creds)(bprm);
+}
+
+static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
+{
+ return static_call(tomoyo_bprm_check_security)(bprm);
+}
+
+static int tomoyo_inode_getattr(const struct path *path)
+{
+ return static_call(tomoyo_inode_getattr)(path);
+}
+
+static int tomoyo_path_truncate(const struct path *path)
+{
+ return static_call(tomoyo_path_truncate)(path);
+}
+
+static int tomoyo_file_truncate(struct file *file)
+{
+ return static_call(tomoyo_file_truncate)(file);
+}
+
+static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
+{
+ return static_call(tomoyo_path_unlink)(parent, dentry);
+}
+
+static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry, umode_t mode)
+{
+ return static_call(tomoyo_path_mkdir)(parent, dentry, mode);
+}
+
+static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
+{
+ return static_call(tomoyo_path_rmdir)(parent, dentry);
+}
+
+static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
+ const char *old_name)
+{
+ return static_call(tomoyo_path_symlink)(parent, dentry, old_name);
+}
+
+static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ return static_call(tomoyo_path_mknod)(parent, dentry, mode, dev);
+}
+
+static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ return static_call(tomoyo_path_link)(old_dentry, new_dir, new_dentry);
+}
+
+static int tomoyo_path_rename(const struct path *old_parent, struct dentry *old_dentry,
+ const struct path *new_parent, struct dentry *new_dentry,
+ const unsigned int flags)
+{
+ return static_call(tomoyo_path_rename)(old_parent, old_dentry, new_parent, new_dentry, flags);
+}
+
+static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return static_call(tomoyo_file_fcntl)(file, cmd, arg);
+}
+
+static int tomoyo_file_open(struct file *f)
+{
+ return static_call(tomoyo_file_open)(f);
+}
+
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return static_call(tomoyo_file_ioctl)(file, cmd, arg);
+}
+
+static int tomoyo_path_chmod(const struct path *path, umode_t mode)
+{
+ return static_call(tomoyo_path_chmod)(path, mode);
+}
+
+static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
+{
+ return static_call(tomoyo_path_chown)(path, uid, gid);
+}
+
+static int tomoyo_path_chroot(const struct path *path)
+{
+ return static_call(tomoyo_path_chroot)(path);
+}
+
+static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ return static_call(tomoyo_sb_mount)(dev_name, path, type, flags, data);
+}
+
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ return static_call(tomoyo_sb_umount)(mnt, flags);
+}
+
+static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path)
+{
+ return static_call(tomoyo_sb_pivotroot)(old_path, new_path);
+}
+
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return static_call(tomoyo_socket_listen)(sock, backlog);
+}
+
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ return static_call(tomoyo_socket_connect)(sock, addr, addr_len);
+}
+
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ return static_call(tomoyo_socket_bind)(sock, addr, addr_len);
+}
+
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
+{
+ return static_call(tomoyo_socket_sendmsg)(sock, msg, size);
+}
+
+static int tomoyo_task_alloc(struct task_struct *task, unsigned long clone_flags)
+{
+ return static_call(tomoyo_task_alloc)(task, clone_flags);
+}
+
+static void tomoyo_task_free(struct task_struct *task)
+{
+ static_call(tomoyo_task_free)(task);
+}
+
+void tomoyo_register_hooks(const struct tomoyo_hooks *tomoyo_hooks)
+{
+ static void *registered;
+
+ if (cmpxchg(&registered, NULL, &registered))
+ panic("%s was called twice!\n", __func__);
+ static_call_update(tomoyo_task_free, tomoyo_hooks->task_free);
+ static_call_update(tomoyo_task_alloc, tomoyo_hooks->task_alloc);
+ static_call_update(tomoyo_cred_prepare, tomoyo_hooks->cred_prepare);
+ static_call_update(tomoyo_bprm_committed_creds, tomoyo_hooks->bprm_committed_creds);
+ static_call_update(tomoyo_bprm_check_security, tomoyo_hooks->bprm_check_security);
+ static_call_update(tomoyo_inode_getattr, tomoyo_hooks->inode_getattr);
+ static_call_update(tomoyo_path_truncate, tomoyo_hooks->path_truncate);
+ static_call_update(tomoyo_file_truncate, tomoyo_hooks->file_truncate);
+ static_call_update(tomoyo_path_unlink, tomoyo_hooks->path_unlink);
+ static_call_update(tomoyo_path_mkdir, tomoyo_hooks->path_mkdir);
+ static_call_update(tomoyo_path_rmdir, tomoyo_hooks->path_rmdir);
+ static_call_update(tomoyo_path_symlink, tomoyo_hooks->path_symlink);
+ static_call_update(tomoyo_path_mknod, tomoyo_hooks->path_mknod);
+ static_call_update(tomoyo_path_link, tomoyo_hooks->path_link);
+ static_call_update(tomoyo_path_rename, tomoyo_hooks->path_rename);
+ static_call_update(tomoyo_file_fcntl, tomoyo_hooks->file_fcntl);
+ static_call_update(tomoyo_file_open, tomoyo_hooks->file_open);
+ static_call_update(tomoyo_file_ioctl, tomoyo_hooks->file_ioctl);
+ static_call_update(tomoyo_path_chmod, tomoyo_hooks->path_chmod);
+ static_call_update(tomoyo_path_chown, tomoyo_hooks->path_chown);
+ static_call_update(tomoyo_path_chroot, tomoyo_hooks->path_chroot);
+ static_call_update(tomoyo_sb_mount, tomoyo_hooks->sb_mount);
+ static_call_update(tomoyo_sb_umount, tomoyo_hooks->sb_umount);
+ static_call_update(tomoyo_sb_pivotroot, tomoyo_hooks->sb_pivotroot);
+ static_call_update(tomoyo_socket_listen, tomoyo_hooks->socket_listen);
+ static_call_update(tomoyo_socket_connect, tomoyo_hooks->socket_connect);
+ static_call_update(tomoyo_socket_bind, tomoyo_hooks->socket_bind);
+ static_call_update(tomoyo_socket_sendmsg, tomoyo_hooks->socket_sendmsg);
+}
+EXPORT_SYMBOL_GPL(tomoyo_register_hooks);
+
+/*
+ * Temporary hack: functions needed by tomoyo.ko. This hack will be removed
+ * after all functions are marked as EXPORT_SYMBOL_GPL().
+ */
+#undef find_task_by_vpid
+#undef find_task_by_pid_ns
+#undef put_filesystem
+#undef get_mm_exe_file
+#undef d_absolute_path
+const struct tomoyo_tmp_exports tomoyo_tmp_exports = {
+ .find_task_by_vpid = find_task_by_vpid,
+ .find_task_by_pid_ns = find_task_by_pid_ns,
+ .put_filesystem = put_filesystem,
+ .get_mm_exe_file = get_mm_exe_file,
+ .d_absolute_path = d_absolute_path,
+};
+EXPORT_SYMBOL_GPL(tomoyo_tmp_exports);
+
+#endif
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
+ * for the first time.
+ */
+ if (!tomoyo_policy_loaded)
+ tomoyo_load_policy(bprm->filename);
+ return 0;
+}
+#endif
+
+struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = {
+ .lbs_task = sizeof(struct tomoyo_task),
+};
+
+static const struct lsm_id tomoyo_lsmid = {
+ .name = "tomoyo",
+ .id = LSM_ID_TOMOYO,
+};
+
+/* tomoyo_hooks is used for registering TOMOYO. */
+static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
+ LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
+ LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
+ LSM_HOOK_INIT(task_free, tomoyo_task_free),
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
+#endif
+ LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
+ LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
+ LSM_HOOK_INIT(file_open, tomoyo_file_open),
+ LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
+ LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
+ LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
+ LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
+ LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
+ LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
+ LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
+ LSM_HOOK_INIT(path_link, tomoyo_path_link),
+ LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+ LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
+ LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
+ LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
+ LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
+ LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
+ LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
+};
+
+int tomoyo_enabled __ro_after_init = 1;
+
+/* Has /sbin/init started? */
+bool tomoyo_policy_loaded;
+
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+EXPORT_SYMBOL_GPL(tomoyo_blob_sizes);
+EXPORT_SYMBOL_GPL(tomoyo_policy_loaded);
+
+struct tomoyo_operations tomoyo_ops;
+EXPORT_SYMBOL_GPL(tomoyo_ops);
+
+/**
+ * tomoyo_init - Reserve hooks for TOMOYO Linux.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+ /* register ourselves with the security framework */
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), &tomoyo_lsmid);
+ tomoyo_ops.enabled = tomoyo_enabled;
+ pr_info("Hooks for initializing TOMOYO Linux are ready\n");
+ return 0;
+}
+#else
+/**
+ * tomoyo_init - Register TOMOYO Linux as a LSM module.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ /* register ourselves with the security framework */
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
+ &tomoyo_lsmid);
+ pr_info("TOMOYO Linux initialized\n");
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ s->old_domain_info = NULL;
+ tomoyo_mm_init();
+
+ return 0;
+}
+#endif
+
+DEFINE_LSM(tomoyo) = {
+ .name = "tomoyo",
+ .enabled = &tomoyo_enabled,
+ .flags = LSM_FLAG_LEGACY_MAJOR,
+ .blobs = &tomoyo_blob_sizes,
+ .init = tomoyo_init,
+};
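The DEFINE_STATIC_CALL_PROXY() block above is the core trick that keeps the built-in stubs cheap: each hook is registered with the LSM framework as a thin wrapper around a static call that initially points at a return-0 stub, and tomoyo_register_hooks() retargets all of them once tomoyo.ko is loaded, avoiding indirect calls on the hot path. A compressed sketch of the same pattern for a single hook — the names here are illustrative, not the actual TOMOYO symbols:

/* Minimal sketch of the proxy pattern, for one hook only. */
#include <linux/static_call.h>

struct file;
typedef int (file_open_t)(struct file *file);

/* Static call that defaults to a "return 0" stub while tomoyo.ko is absent. */
static file_open_t demo_file_open;
DEFINE_STATIC_CALL_RET0(demo_file_open, demo_file_open);

/* This wrapper is what gets registered with the LSM framework at boot. */
static int demo_file_open(struct file *f)
{
	return static_call(demo_file_open)(f);
}

/* Called once from the module's init to retarget the static call. */
void demo_register_file_open(file_open_t *real_hook)
{
	static_call_update(demo_file_open, real_hook);
}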
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
index 363b65be87ab..6a2a72354a64 100644
--- a/security/tomoyo/load_policy.c
+++ b/security/tomoyo/load_policy.c
@@ -97,6 +97,14 @@ void tomoyo_load_policy(const char *filename)
if (!tomoyo_policy_loader_exists())
return;
done = true;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ /* Load tomoyo.ko if not yet loaded. */
+ if (!tomoyo_ops.check_profile)
+ request_module("tomoyo");
+ /* Check if tomoyo.ko was successfully loaded. */
+ if (!tomoyo_ops.check_profile)
+ panic("Failed to load tomoyo module.");
+#endif
pr_info("Calling %s to load policy. Please wait.\n", tomoyo_loader);
argv[0] = (char *) tomoyo_loader;
argv[1] = NULL;
@@ -104,7 +112,11 @@ void tomoyo_load_policy(const char *filename)
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ tomoyo_ops.check_profile();
+#else
tomoyo_check_profile();
+#endif
}
#endif
diff --git a/security/tomoyo/proxy.c b/security/tomoyo/proxy.c
new file mode 100644
index 000000000000..1618cc0f2af8
--- /dev/null
+++ b/security/tomoyo/proxy.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/proxy.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/security.h>
+#include "common.h"
+
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+
+struct tomoyo_task *tomoyo_task(struct task_struct *task)
+{
+ struct tomoyo_task *s = task->security + tomoyo_blob_sizes.lbs_task;
+
+ if (unlikely(!s->domain_info)) {
+ if (likely(task == current)) {
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ } else {
+ /* Caller handles s->domain_info == NULL case. */
+ }
+ }
+ return s;
+}
+
+#include "hooks.h"
+
+/**
+ * tomoyo_runtime_init - Register TOMOYO Linux as a loadable LSM module.
+ *
+ * Returns 0 if TOMOYO is enabled, -EINVAL otherwise.
+ */
+static int __init tomoyo_runtime_init(void)
+{
+ const struct tomoyo_hooks tomoyo_hooks = {
+ .cred_prepare = tomoyo_cred_prepare,
+ .bprm_committed_creds = tomoyo_bprm_committed_creds,
+ .task_alloc = tomoyo_task_alloc,
+ .task_free = tomoyo_task_free,
+ .bprm_check_security = tomoyo_bprm_check_security,
+ .file_fcntl = tomoyo_file_fcntl,
+ .file_open = tomoyo_file_open,
+ .file_truncate = tomoyo_file_truncate,
+ .path_truncate = tomoyo_path_truncate,
+ .path_unlink = tomoyo_path_unlink,
+ .path_mkdir = tomoyo_path_mkdir,
+ .path_rmdir = tomoyo_path_rmdir,
+ .path_symlink = tomoyo_path_symlink,
+ .path_mknod = tomoyo_path_mknod,
+ .path_link = tomoyo_path_link,
+ .path_rename = tomoyo_path_rename,
+ .inode_getattr = tomoyo_inode_getattr,
+ .file_ioctl = tomoyo_file_ioctl,
+ .file_ioctl_compat = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
+ };
+
+ if (!tomoyo_ops.enabled)
+ return -EINVAL;
+ tomoyo_ops.check_profile = tomoyo_check_profile;
+ pr_info("TOMOYO Linux initialized\n");
+ tomoyo_task(current);
+ tomoyo_mm_init();
+ tomoyo_interface_init();
+ tomoyo_register_hooks(&tomoyo_hooks);
+ return 0;
+}
+module_init(tomoyo_runtime_init);
+MODULE_LICENSE("GPL");
+
+#endif
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a2705798476f..a3b821b7f477 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -229,17 +229,19 @@ static void __init tomoyo_create_entry(const char *name, const umode_t mode,
}
/**
- * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ * tomoyo_interface_init - Initialize /sys/kernel/security/tomoyo/ interface.
*
* Returns 0.
*/
-static int __init tomoyo_initerface_init(void)
+int __init tomoyo_interface_init(void)
{
struct tomoyo_domain_info *domain;
struct dentry *tomoyo_dir;
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
if (!tomoyo_enabled)
return 0;
+#endif
domain = tomoyo_domain();
/* Don't create securityfs entries unless registered. */
if (domain != &tomoyo_kernel_domain)
@@ -270,4 +272,6 @@ static int __init tomoyo_initerface_init(void)
return 0;
}
-fs_initcall(tomoyo_initerface_init);
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
+fs_initcall(tomoyo_interface_init);
+#endif
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 6799b1122c9d..b851ff377382 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -13,9 +13,6 @@
/* Lock for protecting policy. */
DEFINE_MUTEX(tomoyo_policy_lock);
-/* Has /sbin/init started? */
-bool tomoyo_policy_loaded;
-
/*
* Mapping table from "enum tomoyo_mac_index" to
* "enum tomoyo_mac_category_index".
diff --git a/sound/core/control.c b/sound/core/control.c
index 4f55f64c42e1..2f790a7b1e90 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -2267,7 +2267,6 @@ static const struct file_operations snd_ctl_f_ops =
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
- .llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 33bf9a220ada..668604d0ec9d 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -412,7 +412,6 @@ static const struct file_operations snd_mixer_oss_f_ops =
.owner = THIS_MODULE,
.open = snd_mixer_oss_open,
.release = snd_mixer_oss_release,
- .llseek = no_llseek,
.unlocked_ioctl = snd_mixer_oss_ioctl,
.compat_ioctl = snd_mixer_oss_ioctl_compat,
};
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 7386982cf40e..4683b9139c56 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -3106,7 +3106,6 @@ static const struct file_operations snd_pcm_oss_f_reg =
.write = snd_pcm_oss_write,
.open = snd_pcm_oss_open,
.release = snd_pcm_oss_release,
- .llseek = no_llseek,
.poll = snd_pcm_oss_poll,
.unlocked_ioctl = snd_pcm_oss_ioctl,
.compat_ioctl = snd_pcm_oss_ioctl_compat,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 99e39b5359cc..5b9076829ade 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -4115,7 +4115,6 @@ const struct file_operations snd_pcm_f_ops[2] = {
.write_iter = snd_pcm_writev,
.open = snd_pcm_playback_open,
.release = snd_pcm_release,
- .llseek = no_llseek,
.poll = snd_pcm_poll,
.unlocked_ioctl = snd_pcm_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
@@ -4129,7 +4128,6 @@ const struct file_operations snd_pcm_f_ops[2] = {
.read_iter = snd_pcm_readv,
.open = snd_pcm_capture_open,
.release = snd_pcm_release,
- .llseek = no_llseek,
.poll = snd_pcm_poll,
.unlocked_ioctl = snd_pcm_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 7accf9a1ddf4..03306be5fa02 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1784,7 +1784,6 @@ static const struct file_operations snd_rawmidi_f_ops = {
.write = snd_rawmidi_write,
.open = snd_rawmidi_open,
.release = snd_rawmidi_release,
- .llseek = no_llseek,
.poll = snd_rawmidi_poll,
.unlocked_ioctl = snd_rawmidi_ioctl,
.compat_ioctl = snd_rawmidi_ioctl_compat,
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6437193e42bf..3930e2f9082f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -2722,7 +2722,6 @@ static const struct file_operations snd_seq_f_ops =
.write = snd_seq_write,
.open = snd_seq_open,
.release = snd_seq_release,
- .llseek = no_llseek,
.poll = snd_seq_poll,
.unlocked_ioctl = snd_seq_ioctl,
.compat_ioctl = snd_seq_ioctl_compat,
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 668c40bac318..fbada79380f9 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -2436,7 +2436,6 @@ static const struct file_operations snd_timer_f_ops =
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
- .llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 4b1baf4dd50e..dea2d9b18fc9 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -381,7 +381,6 @@ static long mixer_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
static const struct file_operations mixer_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mixer_unlocked_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mixer_open,
@@ -1155,7 +1154,6 @@ static long sq_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
static const struct file_operations sq_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sq_write,
.poll = sq_poll,
.unlocked_ioctl = sq_unlocked_ioctl,
@@ -1351,7 +1349,6 @@ static ssize_t state_read(struct file *file, char __user *buf, size_t count,
static const struct file_operations state_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = state_read,
.open = state_open,
.release = state_release,
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 38a83c4dcc2d..97def4e53fbc 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -31,12 +31,3 @@ config SND_EP93XX_SOC_I2S_WATCHDOG
endif # if SND_EP93XX_SOC_I2S
-config SND_EP93XX_SOC_EDB93XX
- tristate "SoC Audio support for Cirrus Logic EDB93xx boards"
- depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A)
- select SND_EP93XX_SOC_I2S
- select SND_SOC_CS4271_I2C if I2C
- select SND_SOC_CS4271_SPI if SPI_MASTER
- help
- Say Y or M here if you want to add support for I2S audio on the
- Cirrus Logic EDB93xx boards.
diff --git a/sound/soc/cirrus/Makefile b/sound/soc/cirrus/Makefile
index ad606b293715..61d8cf64e859 100644
--- a/sound/soc/cirrus/Makefile
+++ b/sound/soc/cirrus/Makefile
@@ -6,7 +6,3 @@ snd-soc-ep93xx-i2s-y := ep93xx-i2s.o
obj-$(CONFIG_SND_EP93XX_SOC) += snd-soc-ep93xx.o
obj-$(CONFIG_SND_EP93XX_SOC_I2S) += snd-soc-ep93xx-i2s.o
-# EP93XX Machine Support
-snd-soc-edb93xx-y := edb93xx.o
-
-obj-$(CONFIG_SND_EP93XX_SOC_EDB93XX) += snd-soc-edb93xx.o
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
deleted file mode 100644
index 8dac754ddb0d..000000000000
--- a/sound/soc/cirrus/edb93xx.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SoC audio for EDB93xx
- *
- * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
- *
- * This driver support CS4271 codec being master or slave, working
- * in control port mode, connected either via SPI or I2C.
- * The data format accepted is I2S or left-justified.
- * DAPM support not implemented.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/soc/cirrus/ep93xx.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-
-static int edb93xx_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
- struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
- int err;
- unsigned int mclk_rate;
- unsigned int rate = params_rate(params);
-
- /*
- * According to CS4271 datasheet we use MCLK/LRCK=256 for
- * rates below 50kHz and 128 for higher sample rates
- */
- if (rate < 50000)
- mclk_rate = rate * 64 * 4;
- else
- mclk_rate = rate * 64 * 2;
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate,
- SND_SOC_CLOCK_IN);
- if (err)
- return err;
-
- return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate,
- SND_SOC_CLOCK_OUT);
-}
-
-static const struct snd_soc_ops edb93xx_ops = {
- .hw_params = edb93xx_hw_params,
-};
-
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_CPU("ep93xx-i2s")),
- DAILINK_COMP_ARRAY(COMP_CODEC("spi0.0", "cs4271-hifi")),
- DAILINK_COMP_ARRAY(COMP_PLATFORM("ep93xx-i2s")));
-
-static struct snd_soc_dai_link edb93xx_dai = {
- .name = "CS4271",
- .stream_name = "CS4271 HiFi",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC,
- .ops = &edb93xx_ops,
- SND_SOC_DAILINK_REG(hifi),
-};
-
-static struct snd_soc_card snd_soc_edb93xx = {
- .name = "EDB93XX",
- .owner = THIS_MODULE,
- .dai_link = &edb93xx_dai,
- .num_links = 1,
-};
-
-static int edb93xx_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &snd_soc_edb93xx;
- int ret;
-
- ret = ep93xx_i2s_acquire();
- if (ret)
- return ret;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
- ret);
- ep93xx_i2s_release();
- }
-
- return ret;
-}
-
-static void edb93xx_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
- ep93xx_i2s_release();
-}
-
-static struct platform_driver edb93xx_driver = {
- .driver = {
- .name = "edb93xx-audio",
- },
- .probe = edb93xx_probe,
- .remove = edb93xx_remove,
-};
-
-module_platform_driver(edb93xx_driver);
-
-MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
-MODULE_DESCRIPTION("ALSA SoC EDB93xx");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:edb93xx-audio");
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index d45862ceb0c9..cca01c03f048 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -24,7 +24,6 @@
#include <sound/initval.h>
#include <sound/soc.h>
-#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#include "ep93xx-pcm.h"
@@ -80,19 +79,6 @@ struct ep93xx_i2s_info {
struct snd_dmaengine_dai_dma_data dma_params_tx;
};
-static struct ep93xx_dma_data ep93xx_i2s_dma_data[] = {
- [SNDRV_PCM_STREAM_PLAYBACK] = {
- .name = "i2s-pcm-out",
- .port = EP93XX_DMA_I2S1,
- .direction = DMA_MEM_TO_DEV,
- },
- [SNDRV_PCM_STREAM_CAPTURE] = {
- .name = "i2s-pcm-in",
- .port = EP93XX_DMA_I2S1,
- .direction = DMA_DEV_TO_MEM,
- },
-};
-
static inline void ep93xx_i2s_write_reg(struct ep93xx_i2s_info *info,
unsigned reg, unsigned val)
{
@@ -198,11 +184,6 @@ static int ep93xx_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
- info->dma_params_tx.filter_data =
- &ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_PLAYBACK];
- info->dma_params_rx.filter_data =
- &ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_CAPTURE];
-
snd_soc_dai_init_dma_data(dai, &info->dma_params_tx,
&info->dma_params_rx);
diff --git a/sound/soc/cirrus/ep93xx-pcm.c b/sound/soc/cirrus/ep93xx-pcm.c
index fa72acd8d334..5ecb4671cbba 100644
--- a/sound/soc/cirrus/ep93xx-pcm.c
+++ b/sound/soc/cirrus/ep93xx-pcm.c
@@ -18,8 +18,6 @@
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-#include <linux/platform_data/dma-ep93xx.h>
-
#include "ep93xx-pcm.h"
static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
@@ -35,30 +33,15 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
.fifo_size = 32,
};
-static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
-{
- struct ep93xx_dma_data *data = filter_param;
-
- if (data->direction == ep93xx_dma_chan_direction(chan)) {
- chan->private = data;
- return true;
- }
-
- return false;
-}
-
static const struct snd_dmaengine_pcm_config ep93xx_dmaengine_pcm_config = {
.pcm_hardware = &ep93xx_pcm_hardware,
- .compat_filter_fn = ep93xx_pcm_dma_filter,
.prealloc_buffer_size = 131072,
};
int devm_ep93xx_pcm_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev,
- &ep93xx_dmaengine_pcm_config,
- SND_DMAENGINE_PCM_FLAG_NO_DT |
- SND_DMAENGINE_PCM_FLAG_COMPAT);
+ &ep93xx_dmaengine_pcm_config, 0);
}
EXPORT_SYMBOL_GPL(devm_ep93xx_pcm_platform_register);
diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c
index 3fc2bbb63369..1767ded4d983 100644
--- a/sound/soc/intel/avs/debugfs.c
+++ b/sound/soc/intel/avs/debugfs.c
@@ -68,7 +68,6 @@ static ssize_t fw_regs_read(struct file *file, char __user *to, size_t count, lo
static const struct file_operations fw_regs_fops = {
.open = simple_open,
.read = fw_regs_read,
- .llseek = no_llseek,
};
static ssize_t debug_window_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
@@ -93,7 +92,6 @@ static ssize_t debug_window_read(struct file *file, char __user *to, size_t coun
static const struct file_operations debug_window_fops = {
.open = simple_open,
.read = debug_window_read,
- .llseek = no_llseek,
};
static ssize_t probe_points_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
@@ -170,7 +168,6 @@ static const struct file_operations probe_points_fops = {
.open = simple_open,
.read = probe_points_read,
.write = probe_points_write,
- .llseek = no_llseek,
};
static ssize_t probe_points_disconnect_write(struct file *file, const char __user *from,
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index fa720f062229..3bcce0b7d10f 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -58,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/linux/iio
- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 0d0a7a19d6f9..9ef5ee087eda 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -498,6 +498,10 @@ int main(int argc, char **argv)
return -ENOMEM;
}
trigger_name = malloc(IIO_MAX_NAME_LENGTH);
+ if (!trigger_name) {
+ ret = -ENOMEM;
+ goto error;
+ }
ret = read_sysfs_string("name", trig_dev_name, trigger_name);
free(trig_dev_name);
if (ret < 0) {
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 4366da278033..9c05a59f0184 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -128,10 +128,6 @@
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
-#ifndef __init
-# define __init
-#endif
-
#include <linux/types.h>
/*
diff --git a/tools/testing/memblock/linux/init.h b/tools/include/linux/init.h
index 828e0ee0bc6c..51b5cde28639 100644
--- a/tools/testing/memblock/linux/init.h
+++ b/tools/include/linux/init.h
@@ -1,10 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_INIT_H
-#define _LINUX_INIT_H
+#ifndef _TOOLS_LINUX_INIT_H_
+#define _TOOLS_LINUX_INIT_H_
#include <linux/compiler.h>
-#include <asm/export.h>
-#include <linux/memory_hotplug.h>
+
+#ifndef __init
+# define __init
+#endif
+
+#ifndef __exit
+# define __exit
+#endif
#define __section(section) __attribute__((__section__(section)))
@@ -28,7 +34,10 @@ struct obs_kernel_param {
__aligned(__alignof__(struct obs_kernel_param)) = \
{ __setup_str_##unique_id, fn, early }
+#define __setup(str, fn) \
+ __setup_param(str, fn, fn, 0)
+
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
-#endif
+#endif /* _TOOLS_LINUX_INIT_H_ */
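With __setup() now available in the tools copy of init.h, userspace test code can register kernel-parameter handlers the same way kernel code does; the entry simply lands in the .init.setup section for the test harness to walk. A hedged example — the parameter name and handler are invented:

#include <linux/init.h>

/* Hypothetical handler for a "demo_opt=" test parameter. */
static int __init parse_demo_opt(char *str)
{
	/* parse the value in str here; return 1 to mark it handled */
	return 1;
}
__setup("demo_opt=", parse_demo_opt);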
diff --git a/tools/include/linux/linkage.h b/tools/include/linux/linkage.h
index a48ff086899c..7baaa5898ca2 100644
--- a/tools/include/linux/linkage.h
+++ b/tools/include/linux/linkage.h
@@ -1,8 +1,12 @@
#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H
#define _TOOLS_INCLUDE_LINUX_LINKAGE_H
-#define SYM_FUNC_START(x) .globl x; x:
+#include <linux/export.h>
+#define SYM_FUNC_START(x) .globl x; x:
#define SYM_FUNC_END(x)
+#define SYM_DATA_START(x) .globl x; x:
+#define SYM_DATA_START_LOCAL(x) x:
+#define SYM_DATA_END(x)
#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index cad4f2927983..677c37e4a18c 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -25,6 +25,12 @@ static inline void *phys_to_virt(unsigned long address)
return __va(address);
}
+#define virt_to_phys virt_to_phys
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+ return (phys_addr_t)address;
+}
+
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
static inline void totalram_pages_inc(void)
diff --git a/tools/include/linux/pfn.h b/tools/include/linux/pfn.h
index 7512a58189eb..f77a30d70152 100644
--- a/tools/include/linux/pfn.h
+++ b/tools/include/linux/pfn.h
@@ -7,4 +7,5 @@
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#endif
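PHYS_PFN() is the inverse of PFN_PHYS(): it converts a physical address to a page frame number, and PFN_PHYS() converts back. With the usual 4 KiB pages (PAGE_SHIFT == 12) the round trip looks like this small standalone sketch, with the macros redefined locally for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages, as on most configs */
#define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT)
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long long phys = 0x12345678ULL;

	/* 0x12345678 >> 12 == 0x12345; shifting back gives 0x12345000 */
	printf("pfn=0x%lx phys=0x%llx\n",
	       PHYS_PFN(phys), PFN_PHYS(PHYS_PFN(phys)));
	return 0;
}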
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index 0acb1fc14e19..8499f509f03e 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -12,6 +12,8 @@ void argv_free(char **argv);
int strtobool(const char *s, bool *res);
+#define strscpy strcpy
+
/*
* glibc based builds needs the extern while uClibc doesn't.
* However uClibc headers also define __GLIBC__ hence the hack below
@@ -49,4 +51,5 @@ extern char *strim(char *);
extern void remove_spaces(char *s);
extern void *memchr_inv(const void *start, int c, size_t bytes);
+extern unsigned long long memparse(const char *ptr, char **retptr);
#endif /* _TOOLS_LINUX_STRING_H_ */
diff --git a/tools/lib/cmdline.c b/tools/lib/cmdline.c
new file mode 100644
index 000000000000..c85f00f43c5e
--- /dev/null
+++ b/tools/lib/cmdline.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * From lib/cmdline.c
+ */
+#include <stdlib.h>
+
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
+
+unsigned long long memparse(const char *ptr, char **retptr)
+{
+ char *endptr; /* local pointer to end of parsed string */
+
+ unsigned long long ret = strtoll(ptr, &endptr, 0);
+
+ switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ fallthrough;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ fallthrough;
+ case 'T':
+ case 't':
+ ret <<= 10;
+ fallthrough;
+ case 'G':
+ case 'g':
+ ret <<= 10;
+ fallthrough;
+ case 'M':
+ case 'm':
+ ret <<= 10;
+ fallthrough;
+ case 'K':
+ case 'k':
+ ret <<= 10;
+ endptr++;
+ fallthrough;
+ default:
+ break;
+ }
+
+ if (retptr)
+ *retptr = endptr;
+
+ return ret;
+}
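[Editor's note] The new tools/lib/cmdline.c copy behaves like the kernel's memparse(): it parses a number and scales it by 1024 for each of the K/M/G/T/P/E suffixes via the fallthrough chain above. A small host-side usage sketch (the values are illustrative):

/* Usage sketch for the tools memparse() copy; not part of the patch. */
#include <assert.h>

extern unsigned long long memparse(const char *ptr, char **retptr);

int main(void)
{
	char *end;

	assert(memparse("512", NULL) == 512);
	assert(memparse("64K", NULL) == 64ULL << 10);
	assert(memparse("2M", &end) == 2ULL << 20 && *end == '\0');
	assert(memparse("1G", NULL) == 1ULL << 30);
	return 0;
}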
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index aee479d2191c..69b66994f2a1 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -122,7 +122,7 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst,
switch (inst.reg2i12_format.opcode) {
case addid_op:
if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) {
- /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */
+ /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
@@ -132,6 +132,15 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst,
op->dest.reg = inst.reg2i12_format.rd;
}
}
+ if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) {
+ /* addi.d sp,fp,si12 */
+ struct symbol *func = find_func_containing(insn->sec, insn->offset);
+
+ if (!func)
+ return false;
+
+ func->frame_pointer = true;
+ }
break;
case ldd_op:
if (inst.reg2i12_format.rj == CFI_SP) {
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 01237d167223..6604f5d038aa 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -178,6 +178,52 @@ static bool is_sibling_call(struct instruction *insn)
}
/*
+ * Checks if a string ends with another.
+ */
+static bool str_ends_with(const char *s, const char *sub)
+{
+ const int slen = strlen(s);
+ const int sublen = strlen(sub);
+
+ if (sublen > slen)
+ return 0;
+
+ return !memcmp(s + slen - sublen, sub, sublen);
+}
+
+/*
+ * Checks if a function is a Rust "noreturn" one.
+ */
+static bool is_rust_noreturn(const struct symbol *func)
+{
+ /*
+ * If it does not start with "_R", then it is not a Rust symbol.
+ */
+ if (strncmp(func->name, "_R", 2))
+ return false;
+
+ /*
+ * These are just heuristics -- we do not control the precise symbol
+ * name, due to the crate disambiguators (which depend on the compiler)
+ * as well as changes to the source code itself between versions (since
+ * these come from the Rust standard library).
+ */
+ return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ str_ends_with(func->name, "_4core6option13unwrap_failed") ||
+ str_ends_with(func->name, "_4core6result13unwrap_failed") ||
+ str_ends_with(func->name, "_4core9panicking5panic") ||
+ str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
+ str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
+ str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
+ str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+ str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
+ str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
+ strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
+ (strstr(func->name, "_4core5slice5index24slice_") &&
+ str_ends_with(func->name, "_fail"));
+}
+
+/*
* This checks to see if the given function is a "noreturn" function.
*
* For global functions which are outside the scope of this object file, we
@@ -202,10 +248,14 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
if (!func)
return false;
- if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
+ if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
+ if (is_rust_noreturn(func))
+ return true;
+
for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
if (!strcmp(func->name, global_noreturns[i]))
return true;
+ }
if (func->bind == STB_WEAK)
return false;
@@ -2993,10 +3043,27 @@ static int update_cfi_state(struct instruction *insn,
break;
}
- if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+ if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
+ insn->sym->frame_pointer) {
+ /* addi.d fp,sp,imm on LoongArch */
+ if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
+ cfa->base = CFI_BP;
+ cfa->offset = 0;
+ }
+ break;
+ }
- /* lea disp(%rbp), %rsp */
- cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+ /* addi.d sp,fp,imm on LoongArch */
+ if (cfa->base == CFI_BP && cfa->offset == 0) {
+ if (insn->sym->frame_pointer) {
+ cfa->base = CFI_SP;
+ cfa->offset = -op->src.offset;
+ }
+ } else {
+ /* lea disp(%rbp), %rsp */
+ cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ }
break;
}
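[Editor's note] The Rust-noreturn detection added above is purely textual: a symbol must carry the "_R" v0-mangling prefix and end in one of the listed core-library paths, which sidesteps the compiler-dependent crate disambiguator in the middle of the name. A standalone sketch with a made-up mangled symbol:

/* Sketch of the suffix heuristic; the mangled name below is invented. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ends_with(const char *s, const char *sub)
{
	size_t slen = strlen(s), sublen = strlen(sub);

	return sublen <= slen && !memcmp(s + slen - sublen, sub, sublen);
}

int main(void)
{
	const char *sym = "_RNvNtCsabc123_4core9panicking9panic_fmt";

	if (!strncmp(sym, "_R", 2) &&
	    ends_with(sym, "_4core9panicking9panic_fmt"))
		printf("%s is treated as noreturn\n", sym);
	return 0;
}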
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 2b8a69de4db8..d7e815c2fd15 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -68,6 +68,7 @@ struct symbol {
u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
+ u8 frame_pointer : 1;
struct list_head pv_target;
struct reloc *relocs;
};
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 1e8141ef1b15..e7da92489167 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -39,6 +39,8 @@ NORETURN(panic)
NORETURN(panic_smp_self_stop)
NORETURN(rest_init)
NORETURN(rewind_stack_and_make_dead)
+NORETURN(rust_begin_unwind)
+NORETURN(rust_helper_BUG)
NORETURN(sev_es_terminate)
NORETURN(snp_abort)
NORETURN(start_kernel)
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 3d1ca9e38b1f..b1256fee3567 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -14,7 +14,7 @@ ldflags-y += --wrap=cxl_dvsec_rr_decode
ldflags-y += --wrap=devm_cxl_add_rch_dport
ldflags-y += --wrap=cxl_rcd_component_reg_phys
ldflags-y += --wrap=cxl_endpoint_parse_cdat
-ldflags-y += --wrap=cxl_setup_parent_dport
+ldflags-y += --wrap=cxl_dport_init_ras_reporting
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
diff --git a/tools/testing/cxl/mock_acpi.c b/tools/testing/cxl/mock_acpi.c
index 55813de26d46..8da94378ccec 100644
--- a/tools/testing/cxl/mock_acpi.c
+++ b/tools/testing/cxl/mock_acpi.c
@@ -18,7 +18,7 @@ struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev)
goto out;
}
- if (dev->bus == &platform_bus_type)
+ if (dev_is_platform(dev))
goto out;
adev = to_acpi_device(dev);
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 129f179b0ac5..ccdd6a504222 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/bits.h>
+#include <cxl/mailbox.h>
#include <asm/unaligned.h>
#include <crypto/sha2.h>
#include <cxlmem.h>
@@ -534,6 +535,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
u32 length = le32_to_cpu(gl->length);
@@ -542,7 +544,7 @@ static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
- if (length > mds->payload_size)
+ if (length > cxl_mbox->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
@@ -617,12 +619,13 @@ void cxl_mockmem_sanitize_work(struct work_struct *work)
{
struct cxl_memdev_state *mds =
container_of(work, typeof(*mds), security.poll_dwork.work);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
sysfs_notify_dirent(mds->security.sanitize_node);
mds->security.sanitize_active = false;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
@@ -631,6 +634,7 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
struct cxl_mbox_cmd *cmd)
{
struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
int rc = 0;
if (cmd->size_in != 0)
@@ -648,14 +652,14 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
return -ENXIO;
}
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (schedule_delayed_work(&mds->security.poll_dwork,
msecs_to_jiffies(mdata->sanitize_timeout))) {
mds->security.sanitize_active = true;
dev_dbg(mds->cxlds.dev, "sanitize issued\n");
} else
rc = -EBUSY;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
return rc;
}
@@ -1333,12 +1337,13 @@ static int mock_activate_fw(struct cxl_mockmem_data *mdata,
return -EINVAL;
}
-static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd)
{
- struct cxl_dev_state *cxlds = &mds->cxlds;
- struct device *dev = cxlds->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_dev_state *cxlds = &mds->cxlds;
int rc = -EIO;
switch (cmd->opcode) {
@@ -1453,6 +1458,17 @@ static ssize_t event_trigger_store(struct device *dev,
}
static DEVICE_ATTR_WO(event_trigger);
+static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1460,6 +1476,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
+ struct cxl_mailbox *cxl_mbox;
int rc;
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
@@ -1487,13 +1504,18 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (IS_ERR(mds))
return PTR_ERR(mds);
+ cxlds = &mds->cxlds;
+ rc = cxl_mock_mailbox_create(cxlds);
+ if (rc)
+ return rc;
+
+ cxl_mbox = &mds->cxlds.cxl_mbox;
mdata->mds = mds;
- mds->mbox_send = cxl_mock_mbox_send;
- mds->payload_size = SZ_4K;
+ cxl_mbox->mbox_send = cxl_mock_mbox_send;
+ cxl_mbox->payload_size = SZ_4K;
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds = &mds->cxlds;
cxlds->serial = pdev->id;
if (is_rcd(pdev))
cxlds->rcd = true;
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index d619672faa49..f4ce96cc11d4 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -228,7 +228,7 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL);
-int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
+int __wrap_cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
int rc = 0, index;
@@ -237,7 +237,7 @@ int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
if (ops && ops->is_mock_dev(dev))
rc = 0;
else
- rc = cxl_dvsec_rr_decode(dev, dvsec, info);
+ rc = cxl_dvsec_rr_decode(dev, port, info);
put_cxl_mock_ops(index);
return rc;
@@ -299,17 +299,17 @@ void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, CXL);
-void __wrap_cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
{
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (!ops || !ops->is_mock_port(dport->dport_dev))
- cxl_setup_parent_dport(host, dport);
+ cxl_dport_init_ras_reporting(dport, host);
put_cxl_mock_ops(index);
}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_setup_parent_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, CXL);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
index 7a1ca694a982..d80982ccdc20 100644
--- a/tools/testing/memblock/Makefile
+++ b/tools/testing/memblock/Makefile
@@ -8,7 +8,7 @@ LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
TEST_OFILES = tests/alloc_nid_api.o tests/alloc_helpers_api.o tests/alloc_api.o \
tests/basic_api.o tests/common.o tests/alloc_exact_nid_api.o
-DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o
+DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o
OFILES = main.o $(DEP_OFILES) $(TEST_OFILES)
EXTR_SRC = ../../../mm/memblock.c
diff --git a/tools/testing/memblock/linux/kernel.h b/tools/testing/memblock/linux/kernel.h
index d2f148bd8902..4d1012d5be6e 100644
--- a/tools/testing/memblock/linux/kernel.h
+++ b/tools/testing/memblock/linux/kernel.h
@@ -8,5 +8,7 @@
#include <linux/printk.h>
#include <linux/linkage.h>
#include <linux/kconfig.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
#endif
diff --git a/tools/testing/memblock/linux/mmzone.h b/tools/testing/memblock/linux/mmzone.h
index 71546e15bdd3..bb682659a12d 100644
--- a/tools/testing/memblock/linux/mmzone.h
+++ b/tools/testing/memblock/linux/mmzone.h
@@ -3,6 +3,7 @@
#define _TOOLS_MMZONE_H
#include <linux/atomic.h>
+#include <linux/memory_hotplug.h>
struct pglist_data *first_online_pgdat(void);
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index c5b00aca9def..1873ddbe16cc 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -14,7 +14,7 @@
#include "test.h"
#include <stdlib.h>
#include <time.h>
-#include "linux/init.h"
+#include <linux/init.h>
#define module_init(x)
#define module_exit(x)
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 5f362c0fd890..319567f0fae1 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -65,6 +65,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
static const char * const binder_features[] = {
"oneway_spam_detection",
"extended_error",
+ "freeze_notification",
};
change_mountns(_metadata);
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index 048a312abf40..544de0db5f58 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -20,6 +20,7 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_PROBE_EVENTS_BTF_ARGS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_TRACE_EVENTS=m
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc
new file mode 100644
index 000000000000..d319d5ed4226
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove tracepoint probe events on module
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+rmmod trace-events-sample ||:
+if ! modprobe trace-events-sample ; then
+ echo "No trace-events sample module - please make CONFIG_SAMPLE_TRACE_EVENTS=m"
+ exit_unresolved;
+fi
+trap "rmmod trace-events-sample" EXIT
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TRACEPOINT1=foo_bar
+TRACEPOINT2=foo_bar_with_cond
+
+echo "t:myevent1 $TRACEPOINT1" >> dynamic_events
+echo "t:myevent2 $TRACEPOINT2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/tracepoints/myevent1
+test -d events/tracepoints/myevent2
+
+echo "-:myevent2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+! grep -q myevent2 dynamic_events
+
+echo > dynamic_events
+
+clear_trace
+
+:;: "Try to put a probe on a tracepoint in non-loaded module" ;:
+rmmod trace-events-sample
+
+echo "t:myevent1 $TRACEPOINT1" >> dynamic_events
+echo "t:myevent2 $TRACEPOINT2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/tracepoints/myevent1
+test -d events/tracepoints/myevent2
+
+echo 1 > events/tracepoints/enable
+
+modprobe trace-events-sample
+
+sleep 2
+
+grep -q "myevent1" trace
+grep -q "myevent2" trace
+
+rmmod trace-events-sample
+trap "" EXIT
+
+echo 0 > events/tracepoints/enable
+echo > dynamic_events
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
index da117b8f1d12..ffe8ffef4027 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
@@ -9,7 +9,6 @@ check_error() { # command-with-error-pos-by-^
check_error 't^100 kfree' # BAD_MAXACT_TYPE
-check_error 't ^non_exist_tracepoint' # NO_TRACEPOINT
check_error 't:^/bar kfree' # NO_GROUP_NAME
check_error 't:^12345678901234567890123456789012345678901234567890123456789012345/bar kfree' # GROUP_TOO_LONG
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 6d9381d60172..7f57abf936e7 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -5,3 +5,7 @@
!*.h
!*.S
!*.sh
+!.gitignore
+!config
+!settings
+!Makefile
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0c4b254ab56b..960cf6a77198 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -130,6 +130,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
+TEST_GEN_PROGS_x86_64 += coalesced_io_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
@@ -167,6 +168,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
TEST_GEN_PROGS_aarch64 += aarch64/no-vgic-v3
TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += arch_timer
+TEST_GEN_PROGS_aarch64 += coalesced_io_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
@@ -188,6 +190,7 @@ TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
+TEST_GEN_PROGS_s390x += s390x/ucontrol_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
@@ -200,6 +203,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
TEST_GEN_PROGS_riscv += arch_timer
+TEST_GEN_PROGS_riscv += coalesced_io_test
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
diff --git a/tools/testing/selftests/kvm/coalesced_io_test.c b/tools/testing/selftests/kvm/coalesced_io_test.c
new file mode 100644
index 000000000000..60cb25454899
--- /dev/null
+++ b/tools/testing/selftests/kvm/coalesced_io_test.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/sizes.h>
+
+#include <kvm_util.h>
+#include <processor.h>
+
+#include "ucall_common.h"
+
+struct kvm_coalesced_io {
+ struct kvm_coalesced_mmio_ring *ring;
+ uint32_t ring_size;
+ uint64_t mmio_gpa;
+ uint64_t *mmio;
+
+ /*
+ * x86-only, but define pio_port for all architectures to minimize the
+ * amount of #ifdeffery and complexity, without having to sacrifice
+ * verbose error messages.
+ */
+ uint8_t pio_port;
+};
+
+static struct kvm_coalesced_io kvm_builtin_io_ring;
+
+#ifdef __x86_64__
+static const int has_pio = 1;
+#else
+static const int has_pio = 0;
+#endif
+
+static void guest_code(struct kvm_coalesced_io *io)
+{
+ int i, j;
+
+ for (;;) {
+ for (j = 0; j < 1 + has_pio; j++) {
+ /*
+ * KVM always leaves one free entry, i.e. exits to
+ * userspace before the last entry is filled.
+ */
+ for (i = 0; i < io->ring_size - 1; i++) {
+#ifdef __x86_64__
+ if (i & 1)
+ outl(io->pio_port, io->pio_port + i);
+ else
+#endif
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+ }
+#ifdef __x86_64__
+ if (j & 1)
+ outl(io->pio_port, io->pio_port + i);
+ else
+#endif
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+ }
+ GUEST_SYNC(0);
+
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+#ifdef __x86_64__
+ outl(io->pio_port, io->pio_port + i);
+#endif
+ }
+}
+
+static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io,
+ uint32_t ring_start,
+ uint32_t expected_exit)
+{
+ const bool want_pio = expected_exit == KVM_EXIT_IO;
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+ struct kvm_run *run = vcpu->run;
+ uint32_t pio_value;
+
+ WRITE_ONCE(ring->first, ring_start);
+ WRITE_ONCE(ring->last, ring_start);
+
+ vcpu_run(vcpu);
+
+ /*
+ * Annoyingly, reading PIO data is safe only for PIO exits, otherwise
+ * data_offset is garbage, e.g. an MMIO gpa.
+ */
+ if (run->exit_reason == KVM_EXIT_IO)
+ pio_value = *(uint32_t *)((void *)run + run->io.data_offset);
+ else
+ pio_value = 0;
+
+ TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write &&
+ run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 &&
+ *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
+ (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port &&
+ run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 &&
+ pio_value == io->pio_port + io->ring_size - 1)),
+ "For start = %u, expected exit on %u-byte %s write 0x%llx = %lx, got exit_reason = %u (%s)\n "
+ "(MMIO addr = 0x%llx, write = %u, len = %u, data = %lx)\n "
+ "(PIO port = 0x%x, write = %u, len = %u, count = %u, data = %x",
+ ring_start, want_pio ? 4 : 8, want_pio ? "PIO" : "MMIO",
+ want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa,
+ (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason,
+ run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other",
+ run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data,
+ run->io.port, run->io.direction, run->io.size, run->io.count, pio_value);
+}
+
+static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io,
+ uint32_t ring_start,
+ uint32_t expected_exit)
+{
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+ int i;
+
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, expected_exit);
+
+ TEST_ASSERT((ring->last + 1) % io->ring_size == ring->first,
+ "Expected ring to be full (minus 1), first = %u, last = %u, max = %u, start = %u",
+ ring->first, ring->last, io->ring_size, ring_start);
+
+ for (i = 0; i < io->ring_size - 1; i++) {
+ uint32_t idx = (ring->first + i) % io->ring_size;
+ struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx];
+
+#ifdef __x86_64__
+ if (i & 1)
+ TEST_ASSERT(entry->phys_addr == io->pio_port &&
+ entry->len == 4 && entry->pio &&
+ *(uint32_t *)entry->data == io->pio_port + i,
+ "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x",
+ io->pio_port, io->pio_port + i, i,
+ entry->len, entry->pio ? "PIO" : "MMIO",
+ entry->phys_addr, *(uint32_t *)entry->data);
+ else
+#endif
+ TEST_ASSERT(entry->phys_addr == io->mmio_gpa &&
+ entry->len == 8 && !entry->pio,
+ "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx",
+ io->mmio_gpa, io->mmio_gpa + i, i,
+ entry->len, entry->pio ? "PIO" : "MMIO",
+ entry->phys_addr, *(uint64_t *)entry->data);
+ }
+}
+
+static void test_coalesced_io(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io, uint32_t ring_start)
+{
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+
+ kvm_vm_register_coalesced_io(vcpu->vm, io->mmio_gpa, 8, false /* pio */);
+#ifdef __x86_64__
+ kvm_vm_register_coalesced_io(vcpu->vm, io->pio_port, 8, true /* pio */);
+#endif
+
+ vcpu_run_and_verify_coalesced_io(vcpu, io, ring_start, KVM_EXIT_MMIO);
+#ifdef __x86_64__
+ vcpu_run_and_verify_coalesced_io(vcpu, io, ring_start, KVM_EXIT_IO);
+#endif
+
+ /*
+ * Verify that ucall, which may use non-coalesced MMIO or PIO, generates an
+ * immediate exit.
+ */
+ WRITE_ONCE(ring->first, ring_start);
+ WRITE_ONCE(ring->last, ring_start);
+ vcpu_run(vcpu);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+ TEST_ASSERT_EQ(ring->first, ring_start);
+ TEST_ASSERT_EQ(ring->last, ring_start);
+
+ /* Verify that non-coalesced MMIO/PIO generates an exit to userspace. */
+ kvm_vm_unregister_coalesced_io(vcpu->vm, io->mmio_gpa, 8, false /* pio */);
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, KVM_EXIT_MMIO);
+
+#ifdef __x86_64__
+ kvm_vm_unregister_coalesced_io(vcpu->vm, io->pio_port, 8, true /* pio */);
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, KVM_EXIT_IO);
+#endif
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int i;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_COALESCED_MMIO));
+
+#ifdef __x86_64__
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_COALESCED_PIO));
+#endif
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ kvm_builtin_io_ring = (struct kvm_coalesced_io) {
+ /*
+ * The I/O ring is a kernel-allocated page whose address is
+ * relative to each vCPU's run page, with the page offset
+ * provided by KVM in the return of KVM_CAP_COALESCED_MMIO.
+ */
+ .ring = (void *)vcpu->run +
+ (kvm_check_cap(KVM_CAP_COALESCED_MMIO) * getpagesize()),
+
+ /*
+ * The size of the I/O ring is fixed, but KVM defines the size
+ * based on the kernel's PAGE_SIZE. Thus, userspace must query
+ * the host's page size at runtime to compute the ring size.
+ */
+ .ring_size = (getpagesize() - sizeof(struct kvm_coalesced_mmio_ring)) /
+ sizeof(struct kvm_coalesced_mmio),
+
+ /*
+ * Arbitrary address+port (MMIO mustn't overlap memslots), with
+ * the MMIO GPA identity mapped in the guest.
+ */
+ .mmio_gpa = 4ull * SZ_1G,
+ .mmio = (uint64_t *)(4ull * SZ_1G),
+ .pio_port = 0x80,
+ };
+
+ virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);
+
+ sync_global_to_guest(vm, kvm_builtin_io_ring);
+ vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring);
+
+ for (i = 0; i < kvm_builtin_io_ring.ring_size; i++)
+ test_coalesced_io(vcpu, &kvm_builtin_io_ring, i);
+
+ kvm_vm_free(vm);
+ return 0;
+}
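[Editor's note] The ring layout the test programs against is the standard KVM coalesced-I/O protocol: KVM appends entries between ring->first and ring->last and exits to userspace when only one slot remains free; userspace consumes entries and advances first. A rough sketch of how a VMM would normally drain the ring (field names from <linux/kvm.h>; the helper itself is hypothetical):

/* Hypothetical drain loop for the coalesced ring; sketch only. */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>

static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring,
				 uint32_t ring_size)
{
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];

		printf("%s 0x%llx, len %u\n", e->pio ? "PIO" : "MMIO",
		       (unsigned long long)e->phys_addr, e->len);
		ring->first = (ring->first + 1) % ring_size;
	}
}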
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c
index 8092c2d0f5d6..bcf582852db9 100644
--- a/tools/testing/selftests/kvm/guest_print_test.c
+++ b/tools/testing/selftests/kvm/guest_print_test.c
@@ -107,6 +107,21 @@ static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
expected_assert_msg, &assert_msg[offset]);
}
+/*
+ * Open code vcpu_run(), sans the UCALL_ABORT handling, so that intentional
+ * guest asserts can be verified instead of being reported as failures.
+ */
+static void do_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ do {
+ r = __vcpu_run(vcpu);
+ } while (r == -1 && errno == EINTR);
+
+ TEST_ASSERT(!r, KVM_IOCTL_ERROR(KVM_RUN, r));
+}
+
static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
const char *expected_assert)
{
@@ -114,7 +129,7 @@ static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
struct ucall uc;
while (1) {
- vcpu_run(vcpu);
+ do_vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),",
@@ -159,7 +174,7 @@ static void test_limits(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits);
run = vcpu->run;
- vcpu_run(vcpu);
+ do_vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),",
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 63c2aaae51f3..bc7c242480d6 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -428,8 +428,6 @@ const char *vm_guest_mode_string(uint32_t i);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
-int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
- size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);
@@ -460,6 +458,32 @@ static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
+static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
+ uint64_t address,
+ uint64_t size, bool pio)
+{
+ struct kvm_coalesced_mmio_zone zone = {
+ .addr = address,
+ .size = size,
+ .pio = pio,
+ };
+
+ vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
+}
+
+static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
+ uint64_t address,
+ uint64_t size, bool pio)
+{
+ struct kvm_coalesced_mmio_zone zone = {
+ .addr = address,
+ .size = size,
+ .pio = pio,
+ };
+
+ vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+}
+
static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
diff --git a/tools/testing/selftests/kvm/include/s390x/debug_print.h b/tools/testing/selftests/kvm/include/s390x/debug_print.h
new file mode 100644
index 000000000000..1bf275631cc6
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/debug_print.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definition for kernel virtual machines on s390x
+ *
+ * Copyright IBM Corp. 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ */
+
+#ifndef SELFTEST_KVM_DEBUG_PRINT_H
+#define SELFTEST_KVM_DEBUG_PRINT_H
+
+#include "asm/ptrace.h"
+#include "kvm_util.h"
+#include "sie.h"
+
+static inline void print_hex_bytes(const char *name, u64 addr, size_t len)
+{
+ u64 pos;
+
+ pr_debug("%s (%p)\n", name, (void *)addr);
+ pr_debug(" 0/0x00---------|");
+ if (len > 8)
+ pr_debug(" 8/0x08---------|");
+ if (len > 16)
+ pr_debug(" 16/0x10--------|");
+ if (len > 24)
+ pr_debug(" 24/0x18--------|");
+ for (pos = 0; pos < len; pos += 8) {
+ if ((pos % 32) == 0)
+ pr_debug("\n %3lu 0x%.3lx ", pos, pos);
+ pr_debug(" %16lx", *((u64 *)(addr + pos)));
+ }
+ pr_debug("\n");
+}
+
+static inline void print_hex(const char *name, u64 addr)
+{
+ print_hex_bytes(name, addr, 512);
+}
+
+static inline void print_psw(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
+{
+ pr_debug("flags:0x%x psw:0x%.16llx:0x%.16llx exit:%u %s\n",
+ run->flags,
+ run->psw_mask, run->psw_addr,
+ run->exit_reason, exit_reason_str(run->exit_reason));
+ pr_debug("sie_block psw:0x%.16llx:0x%.16llx\n",
+ sie_block->psw_mask, sie_block->psw_addr);
+}
+
+static inline void print_run(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
+{
+ print_hex_bytes("run", (u64)run, 0x150);
+ print_hex("sie_block", (u64)sie_block);
+ print_psw(run, sie_block);
+}
+
+static inline void print_regs(struct kvm_run *run)
+{
+ struct kvm_sync_regs *sync_regs = &run->s.regs;
+
+ print_hex_bytes("GPRS", (u64)sync_regs->gprs, 8 * NUM_GPRS);
+ print_hex_bytes("ACRS", (u64)sync_regs->acrs, 4 * NUM_ACRS);
+ print_hex_bytes("CRS", (u64)sync_regs->crs, 8 * NUM_CRS);
+}
+
+#endif /* SELFTEST_KVM_DEBUG_PRINT_H */
diff --git a/tools/testing/selftests/kvm/include/s390x/processor.h b/tools/testing/selftests/kvm/include/s390x/processor.h
index 255c9b990f4c..481bd2fd6a32 100644
--- a/tools/testing/selftests/kvm/include/s390x/processor.h
+++ b/tools/testing/selftests/kvm/include/s390x/processor.h
@@ -21,6 +21,11 @@
#define PAGE_PROTECT 0x200 /* HW read-only bit */
#define PAGE_NOEXEC 0x100 /* HW no-execute bit */
+/* Page size definitions */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE BIT_ULL(PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
/* Is there a portable way to do this? */
static inline void cpu_relax(void)
{
diff --git a/tools/testing/selftests/kvm/include/s390x/sie.h b/tools/testing/selftests/kvm/include/s390x/sie.h
new file mode 100644
index 000000000000..160acd4a1db9
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/sie.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definition for kernel virtual machines on s390.
+ *
+ * Adapted copy of struct definition kvm_s390_sie_block from
+ * arch/s390/include/asm/kvm_host.h for use in userspace selftest programs.
+ *
+ * Copyright IBM Corp. 2008, 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ * Carsten Otte <cotte@de.ibm.com>
+ */
+
+#ifndef SELFTEST_KVM_SIE_H
+#define SELFTEST_KVM_SIE_H
+
+#include <linux/types.h>
+
+struct kvm_s390_sie_block {
+#define CPUSTAT_STOPPED 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_IBS 0x00000040
+#define CPUSTAT_GED2 0x00000010
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_GED 0x00000004
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+ __u32 cpuflags; /* 0x0000 */
+ __u32: 1; /* 0x0004 */
+ __u32 prefix : 18;
+ __u32: 1;
+ __u32 ibc : 12;
+ __u8 reserved08[4]; /* 0x0008 */
+#define PROG_IN_SIE BIT(0)
+ __u32 prog0c; /* 0x000c */
+ union {
+ __u8 reserved10[16]; /* 0x0010 */
+ struct {
+ __u64 pv_handle_cpu;
+ __u64 pv_handle_config;
+ };
+ };
+#define PROG_BLOCK_SIE BIT(0)
+#define PROG_REQUEST BIT(1)
+ __u32 prog20; /* 0x0020 */
+ __u8 reserved24[4]; /* 0x0024 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u32 svcc; /* 0x0040 */
+#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR9 0x0040
+#define LCTL_CR10 0x0020
+#define LCTL_CR11 0x0010
+#define LCTL_CR14 0x0002
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+#define ICTL_OPEREXC 0x80000000
+#define ICTL_PINT 0x20000000
+#define ICTL_LPSW 0x00400000
+#define ICTL_STCTL 0x00040000
+#define ICTL_ISKE 0x00004000
+#define ICTL_SSKE 0x00002000
+#define ICTL_RRBE 0x00001000
+#define ICTL_TPROT 0x00000200
+ __u32 ictl; /* 0x0048 */
+#define ECA_CEI 0x80000000
+#define ECA_IB 0x40000000
+#define ECA_SIGPI 0x10000000
+#define ECA_MVPGI 0x01000000
+#define ECA_AIV 0x00200000
+#define ECA_VX 0x00020000
+#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
+#define ECA_SII 0x00000001
+ __u32 eca; /* 0x004c */
+#define ICPT_INST 0x04
+#define ICPT_PROGI 0x08
+#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ 0x10
+#define ICPT_EXTINT 0x14
+#define ICPT_IOREQ 0x18
+#define ICPT_WAIT 0x1c
+#define ICPT_VALIDITY 0x20
+#define ICPT_STOP 0x28
+#define ICPT_OPEREXC 0x2C
+#define ICPT_PARTEXEC 0x38
+#define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
+#define ICPT_MCHKREQ 0x60
+#define ICPT_INT_ENABLE 0x64
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_NOTIFY 0x6c
+#define ICPT_PV_PREF 0x70
+ __u8 icptcode; /* 0x0050 */
+ __u8 icptstatus; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54; /* 0x0054 */
+#define IICTL_CODE_NONE 0x00
+#define IICTL_CODE_MCHK 0x01
+#define IICTL_CODE_EXT 0x02
+#define IICTL_CODE_IO 0x03
+#define IICTL_CODE_RESTART 0x04
+#define IICTL_CODE_SPECIFICATION 0x10
+#define IICTL_CODE_OPERAND 0x11
+ __u8 iictl; /* 0x0055 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
+#define ECB_GS 0x40
+#define ECB_TE 0x10
+#define ECB_SPECI 0x08
+#define ECB_SRSI 0x04
+#define ECB_HOSTPROTINT 0x02
+#define ECB_PTF 0x01
+ __u8 ecb; /* 0x0061 */
+#define ECB2_CMMA 0x80
+#define ECB2_IEP 0x20
+#define ECB2_PFMFI 0x08
+#define ECB2_ESCA 0x04
+#define ECB2_ZPCI_LSI 0x02
+ __u8 ecb2; /* 0x0062 */
+#define ECB3_AISI 0x20
+#define ECB3_AISII 0x10
+#define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI 0x01
+ __u8 ecb3; /* 0x0063 */
+#define ESCA_SCAOL_MASK ~0x3fU
+ __u32 scaol; /* 0x0064 */
+ __u8 sdf; /* 0x0068 */
+ __u8 epdx; /* 0x0069 */
+ __u8 cpnc; /* 0x006a */
+ __u8 reserved6b; /* 0x006b */
+ __u32 todpr; /* 0x006c */
+#define GISA_FORMAT1 0x00000001
+ __u32 gd; /* 0x0070 */
+ __u8 reserved74[12]; /* 0x0074 */
+ __u64 mso; /* 0x0080 */
+ __u64 msl; /* 0x0088 */
+ __u64 psw_mask; /* 0x0090 */
+ __u64 psw_addr; /* 0x0098 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[7]; /* 0x00b9 */
+ union {
+ struct {
+ __u32 eiparams; /* 0x00c0 */
+ __u16 extcpuaddr; /* 0x00c4 */
+ __u16 eic; /* 0x00c6 */
+ };
+ __u64 mcic; /* 0x00c0 */
+ } __packed;
+ __u32 reservedc8; /* 0x00c8 */
+ union {
+ struct {
+ __u16 pgmilc; /* 0x00cc */
+ __u16 iprcc; /* 0x00ce */
+ };
+ __u32 edc; /* 0x00cc */
+ } __packed;
+ union {
+ struct {
+ __u32 dxc; /* 0x00d0 */
+ __u16 mcn; /* 0x00d4 */
+ __u8 perc; /* 0x00d6 */
+ __u8 peratmid; /* 0x00d7 */
+ };
+ __u64 faddr; /* 0x00d0 */
+ } __packed;
+ __u64 peraddr; /* 0x00d8 */
+ __u8 eai; /* 0x00e0 */
+ __u8 peraid; /* 0x00e1 */
+ __u8 oai; /* 0x00e2 */
+ __u8 armid; /* 0x00e3 */
+ __u8 reservede4[4]; /* 0x00e4 */
+ union {
+ __u64 tecmc; /* 0x00e8 */
+ struct {
+ __u16 subchannel_id; /* 0x00e8 */
+ __u16 subchannel_nr; /* 0x00ea */
+ __u32 io_int_parm; /* 0x00ec */
+ __u32 io_int_word; /* 0x00f0 */
+ };
+ } __packed;
+ __u8 reservedf4[8]; /* 0x00f4 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
+#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
+ __u32 crycbd; /* 0x00fc */
+ __u64 gcr[16]; /* 0x0100 */
+ union {
+ __u64 gbea; /* 0x0180 */
+ __u64 sidad;
+ };
+ __u8 reserved188[8]; /* 0x0188 */
+ __u64 sdnxo; /* 0x0190 */
+ __u8 reserved198[8]; /* 0x0198 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[20]; /* 0x01a4 */
+ __u64 cbrlo; /* 0x01b8 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+#define ECD_HOSTREGMGMT 0x20000000
+#define ECD_MEF 0x08000000
+#define ECD_ETOKENF 0x02000000
+#define ECD_ECC 0x00200000
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
+ __u64 pp; /* 0x01de */
+ __u8 reserved1e6[2]; /* 0x01e6 */
+ __u64 itdba; /* 0x01e8 */
+ __u64 riccbd; /* 0x01f0 */
+ __u64 gvrd; /* 0x01f8 */
+} __packed __aligned(512);
+
+#endif /* SELFTEST_KVM_SIE_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/apic.h b/tools/testing/selftests/kvm/include/x86_64/apic.h
index 0f268b55fa06..51990094effd 100644
--- a/tools/testing/selftests/kvm/include/x86_64/apic.h
+++ b/tools/testing/selftests/kvm/include/x86_64/apic.h
@@ -11,6 +11,7 @@
#include <stdint.h>
#include "processor.h"
+#include "ucall_common.h"
#define APIC_DEFAULT_GPA 0xfee00000ULL
@@ -93,9 +94,27 @@ static inline uint64_t x2apic_read_reg(unsigned int reg)
return rdmsr(APIC_BASE_MSR + (reg >> 4));
}
+static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+{
+ return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
+}
+
static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
{
- wrmsr(APIC_BASE_MSR + (reg >> 4), value);
+ uint8_t fault = x2apic_write_reg_safe(reg, value);
+
+ __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
+ fault, APIC_BASE_MSR + (reg >> 4), value);
}
+static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+{
+ uint8_t fault = x2apic_write_reg_safe(reg, value);
+
+ __GUEST_ASSERT(fault == GP_VECTOR,
+ "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
+ APIC_BASE_MSR + (reg >> 4), value, fault);
+}
+
+
#endif /* SELFTEST_KVM_APIC_H */
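[Editor's note] The _safe/_fault split mirrors the existing wrmsr_safe() pattern: x2apic_write_reg() now asserts the WRMSR did not fault, while x2apic_write_reg_fault() asserts it took a #GP. A hypothetical guest-side usage sketch (the register choice is illustrative only):

/* Sketch: expected-success vs. expected-#GP x2APIC writes in a guest. */
static void guest_x2apic_sketch(void)
{
	/* Normal write; any fault triggers a guest assert. */
	x2apic_write_reg(APIC_SPIV, APIC_SPIV_APIC_ENABLED | 0xff);

	/* APIC_ID is read-only in x2APIC mode, so this write must #GP. */
	x2apic_write_reg_fault(APIC_ID, 0);
}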
diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index fa65b908b13e..6849e2552f1b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -186,6 +186,18 @@
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED \
KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 14)
+/* HYPERV_CPUID_NESTED_FEATURES.EAX */
+#define HV_X64_NESTED_DIRECT_FLUSH \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 17)
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 18)
+#define HV_X64_NESTED_MSR_BITMAP \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 19)
+
+/* HYPERV_CPUID_NESTED_FEATURES.EBX */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EBX, 0)
+
/* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING \
KVM_X86_CPU_FEATURE(HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0, EAX, 1)
@@ -343,4 +355,10 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+
+bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature);
+
#endif /* !SELFTEST_KVM_HYPERV_H */
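[Editor's note] With kvm_hv_cpu_has() exported alongside the new nested-feature defines, a test can gate directly on a Hyper-V nested enlightenment. A short hypothetical sketch, assuming the usual selftest headers are included:

/* Sketch: skip the test unless the eVMCS MSR-bitmap enlightenment exists. */
int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_MSR_BITMAP));

	/* ... create the VM/vCPU and run the nested scenario as usual ... */
	return 0;
}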
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index a0c1440017bb..e247f99e0473 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -25,6 +25,10 @@ extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern uint64_t guest_tsc_khz;
+#ifndef MAX_NR_CPUID_ENTRIES
+#define MAX_NR_CPUID_ENTRIES 100
+#endif
+
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
@@ -908,8 +912,6 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
static inline uint32_t kvm_cpu_fms(void)
{
@@ -1009,7 +1011,6 @@ static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
}
void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
uint32_t function,
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 56b170b725b3..a2b7df5f1d39 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -712,16 +712,13 @@ void kvm_vm_release(struct kvm_vm *vmp)
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
- struct userspace_mem_region *region,
- bool unlink)
+ struct userspace_mem_region *region)
{
int ret;
- if (unlink) {
- rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
- rb_erase(&region->hva_node, &vm->regions.hva_tree);
- hash_del(&region->slot_node);
- }
+ rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+ rb_erase(&region->hva_node, &vm->regions.hva_tree);
+ hash_del(&region->slot_node);
region->region.memory_size = 0;
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
@@ -762,7 +759,7 @@ void kvm_vm_free(struct kvm_vm *vmp)
/* Free userspace_mem_regions. */
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
- __vm_mem_region_delete(vmp, region, false);
+ __vm_mem_region_delete(vmp, region);
/* Free sparsebit arrays. */
sparsebit_free(&vmp->vpages_valid);
@@ -794,76 +791,6 @@ int kvm_memfd_alloc(size_t size, bool hugepages)
return fd;
}
-/*
- * Memory Compare, host virtual to guest virtual
- *
- * Input Args:
- * hva - Starting host virtual address
- * vm - Virtual Machine
- * gva - Starting guest virtual address
- * len - number of bytes to compare
- *
- * Output Args: None
- *
- * Input/Output Args: None
- *
- * Return:
- * Returns 0 if the bytes starting at hva for a length of len
- * are equal the guest virtual bytes starting at gva. Returns
- * a value < 0, if bytes at hva are less than those at gva.
- * Otherwise a value > 0 is returned.
- *
- * Compares the bytes starting at the host virtual address hva, for
- * a length of len, to the guest bytes starting at the guest virtual
- * address given by gva.
- */
-int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
-{
- size_t amt;
-
- /*
- * Compare a batch of bytes until either a match is found
- * or all the bytes have been compared.
- */
- for (uintptr_t offset = 0; offset < len; offset += amt) {
- uintptr_t ptr1 = (uintptr_t)hva + offset;
-
- /*
- * Determine host address for guest virtual address
- * at offset.
- */
- uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
-
- /*
- * Determine amount to compare on this pass.
- * Don't allow the comparsion to cross a page boundary.
- */
- amt = len - offset;
- if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
- amt = vm->page_size - (ptr1 % vm->page_size);
- if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
- amt = vm->page_size - (ptr2 % vm->page_size);
-
- assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
- assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
-
- /*
- * Perform the comparison. If there is a difference
- * return that result to the caller, otherwise need
- * to continue on looking for a mismatch.
- */
- int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
- if (ret != 0)
- return ret;
- }
-
- /*
- * No mismatch found. Let the caller know the two memory
- * areas are equal.
- */
- return 0;
-}
-
static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
struct userspace_mem_region *region)
{
@@ -1270,7 +1197,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
- __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
+ __vm_mem_region_delete(vm, memslot2region(vm, slot));
}
void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 4ad4492eea1d..20cfe970e3e3 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -14,7 +14,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
if (vm->pgd_created)
@@ -79,7 +79,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
}
/* Fill in page table entry */
- idx = (gva >> 12) & 0x0ffu; /* page index */
+ idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
if (!(entry[idx] & PAGE_INVALID))
fprintf(stderr,
"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
@@ -91,7 +91,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
int ri, idx;
uint64_t *entry;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
entry = addr_gpa2hva(vm, vm->pgd);
@@ -103,7 +103,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
- idx = (gva >> 12) & 0x0ffu; /* page index */
+ idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
"No page mapping for vm virtual address 0x%lx", gva);
@@ -168,7 +168,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
diff --git a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
index efb7e7a1354d..15bc8cd583aa 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
@@ -8,6 +8,73 @@
#include "processor.h"
#include "hyperv.h"
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+{
+ static struct kvm_cpuid2 *cpuid;
+ int kvm_fd;
+
+ if (cpuid)
+ return cpuid;
+
+ cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+ kvm_fd = open_kvm_dev_path_or_exit();
+
+ kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ close(kvm_fd);
+ return cpuid;
+}
+
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+ static struct kvm_cpuid2 *cpuid_full;
+ const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+ int i, nent = 0;
+
+ if (!cpuid_full) {
+ cpuid_sys = kvm_get_supported_cpuid();
+ cpuid_hv = kvm_get_supported_hv_cpuid();
+
+ cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
+ if (!cpuid_full) {
+ perror("malloc");
+ abort();
+ }
+
+ /* Need to skip KVM CPUID leaves 0x400000xx */
+ for (i = 0; i < cpuid_sys->nent; i++) {
+ if (cpuid_sys->entries[i].function >= 0x40000000 &&
+ cpuid_sys->entries[i].function < 0x40000100)
+ continue;
+ cpuid_full->entries[nent] = cpuid_sys->entries[i];
+ nent++;
+ }
+
+ memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+ cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+ cpuid_full->nent = nent + cpuid_hv->nent;
+ }
+
+ vcpu_init_cpuid(vcpu, cpuid_full);
+}
+
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+
+ vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ return cpuid;
+}
+
+bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
+{
+ if (!kvm_has_cap(KVM_CAP_SYS_HYPERV_CPUID))
+ return false;
+
+ return kvm_cpuid_has(kvm_get_supported_hv_cpuid(), feature);
+}
+
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
vm_vaddr_t *p_hv_pages_gva)
{
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 153739f2e201..974bcd2df6d7 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -19,8 +19,6 @@
#define KERNEL_DS 0x10
#define KERNEL_TSS 0x18
-#define MAX_NR_CPUID_ENTRIES 100
-
vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
@@ -566,10 +564,8 @@ void route_exception(struct ex_regs *regs)
if (kvm_fixup_exception(regs))
return;
- ucall_assert(UCALL_UNHANDLED,
- "Unhandled exception in guest", __FILE__, __LINE__,
- "Unhandled exception '0x%lx' at guest RIP '0x%lx'",
- regs->vector, regs->rip);
+ GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
+ regs->vector, regs->rip);
}
static void vm_init_descriptor_tables(struct kvm_vm *vm)
@@ -611,7 +607,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED)
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT)
REPORT_GUEST_ASSERT(uc);
}
@@ -1195,65 +1191,6 @@ void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
-{
- static struct kvm_cpuid2 *cpuid;
- int kvm_fd;
-
- if (cpuid)
- return cpuid;
-
- cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
- kvm_fd = open_kvm_dev_path_or_exit();
-
- kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- close(kvm_fd);
- return cpuid;
-}
-
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
-{
- static struct kvm_cpuid2 *cpuid_full;
- const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
- int i, nent = 0;
-
- if (!cpuid_full) {
- cpuid_sys = kvm_get_supported_cpuid();
- cpuid_hv = kvm_get_supported_hv_cpuid();
-
- cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
- if (!cpuid_full) {
- perror("malloc");
- abort();
- }
-
- /* Need to skip KVM CPUID leaves 0x400000xx */
- for (i = 0; i < cpuid_sys->nent; i++) {
- if (cpuid_sys->entries[i].function >= 0x40000000 &&
- cpuid_sys->entries[i].function < 0x40000100)
- continue;
- cpuid_full->entries[nent] = cpuid_sys->entries[i];
- nent++;
- }
-
- memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
- cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
- cpuid_full->nent = nent + cpuid_hv->nent;
- }
-
- vcpu_init_cpuid(vcpu, cpuid_full);
-}
-
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
-
- vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- return cpuid;
-}
-
unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 49f162573126..e3343f0df9e1 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -79,6 +79,7 @@ struct test_params {
useconds_t delay;
uint64_t nr_iterations;
bool partition_vcpu_memory_access;
+ bool disable_slot_zap_quirk;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -89,6 +90,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
+#ifdef __x86_64__
+ if (p->disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
+ pr_info("Memslot zap quirk %s\n", p->disable_slot_zap_quirk ?
+ "disabled" : "enabled");
+#endif
pr_info("Finished creating vCPUs\n");
@@ -107,11 +115,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-m mode] [-d delay_usec]\n"
+ printf("usage: %s [-h] [-m mode] [-d delay_usec] [-q]\n"
" [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);
guest_modes_help();
printf(" -d: add a delay between each iteration of adding and\n"
" deleting a memslot in usec.\n");
+ printf(" -q: Disable memslot zap quirk.\n");
printf(" -b: specify the size of the memory region which should be\n"
" accessed by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
@@ -137,7 +146,7 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:d:qb:v:oi:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -160,6 +169,12 @@ int main(int argc, char *argv[])
case 'i':
p.nr_iterations = atoi_positive("Number of iterations", optarg);
break;
+ case 'q':
+ p.disable_slot_zap_quirk = true;
+
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
+ KVM_X86_QUIRK_SLOT_ZAP_ALL);
+ break;
case 'h':
default:
help(argv[0]);
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 579a64f97333..893366982f77 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -113,6 +113,7 @@ static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
static sem_t vcpu_ready;
static bool map_unmap_verify;
+static bool disable_slot_zap_quirk;
static bool verbose;
#define pr_info_v(...) \
@@ -578,6 +579,9 @@ static bool test_memslot_move_prepare(struct vm_data *data,
uint32_t guest_page_size = data->vm->page_size;
uint64_t movesrcgpa, movetestgpa;
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
if (isactive) {
@@ -896,6 +900,7 @@ static void help(char *name, struct test_args *targs)
pr_info(" -h: print this help screen.\n");
pr_info(" -v: enable verbose mode (not for benchmarking).\n");
pr_info(" -d: enable extra debug checks.\n");
+ pr_info(" -q: Disable memslot zap quirk during memslot move.\n");
pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
targs->nslots);
pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
@@ -954,7 +959,7 @@ static bool parse_args(int argc, char *argv[],
uint32_t max_mem_slots;
int opt;
- while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+ while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) {
switch (opt) {
case 'h':
default:
@@ -966,6 +971,11 @@ static bool parse_args(int argc, char *argv[],
case 'd':
map_unmap_verify = true;
break;
+ case 'q':
+ disable_slot_zap_quirk = true;
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
+ KVM_X86_QUIRK_SLOT_ZAP_ALL);
+ break;
case 's':
targs->nslots = atoi_paranoid(optarg);
if (targs->nslots <= 1 && targs->nslots != -1) {
diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c
index b39033844756..e32dd59703a0 100644
--- a/tools/testing/selftests/kvm/s390x/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390x/cmma_test.c
@@ -17,16 +17,17 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
#define MAIN_PAGE_COUNT 512
#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
-#define TEST_DATA_START_GFN 4096
+#define TEST_DATA_START_GFN PAGE_SIZE
#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
-#define TEST_DATA_TWO_START_GFN 8192
+#define TEST_DATA_TWO_START_GFN (2 * PAGE_SIZE)
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
@@ -66,7 +67,7 @@ static void guest_dirty_test_data(void)
" lghi 5,%[page_count]\n"
/* r5 += r1 */
"2: agfr 5,1\n"
- /* r2 = r1 << 12 */
+ /* r2 = r1 << PAGE_SHIFT */
"1: sllg 2,1,12(0)\n"
/* essa(r4, r2, SET_STABLE) */
" .insn rrf,0xb9ab0000,4,2,1,0\n"
diff --git a/tools/testing/selftests/kvm/s390x/config b/tools/testing/selftests/kvm/s390x/config
new file mode 100644
index 000000000000..23270f2d679f
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/config
@@ -0,0 +1,2 @@
+CONFIG_KVM=y
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/tools/testing/selftests/kvm/s390x/debug_test.c b/tools/testing/selftests/kvm/s390x/debug_test.c
index 84313fb27529..ad8095968601 100644
--- a/tools/testing/selftests/kvm/s390x/debug_test.c
+++ b/tools/testing/selftests/kvm/s390x/debug_test.c
@@ -2,12 +2,12 @@
/* Test KVM debugging features. */
#include "kvm_util.h"
#include "test_util.h"
+#include "sie.h"
#include <linux/kvm.h>
#define __LC_SVC_NEW_PSW 0x1c0
#define __LC_PGM_NEW_PSW 0x1d0
-#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
#define PGM_SPECIFICATION 0x06
@@ -85,7 +85,7 @@ static void test_step_pgm_diag(void)
vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
__LC_PGM_NEW_PSW, new_psw);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
- TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INST);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG);
vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq);
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index f2df7416be84..4374b4cd2a80 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -16,6 +16,7 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
enum mop_target {
LOGICAL,
@@ -226,9 +227,6 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1ULL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c
index 7a742a673b7c..12d5e1cb62e3 100644
--- a/tools/testing/selftests/kvm/s390x/tprot.c
+++ b/tools/testing/selftests/kvm/s390x/tprot.c
@@ -9,9 +9,8 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
@@ -151,7 +150,7 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
* instead.
* In order to skip these tests we detect this inside the guest
*/
- skip = tests[*i].addr < (void *)4096 &&
+ skip = tests[*i].addr < (void *)PAGE_SIZE &&
tests[*i].expected != TRANSL_UNAVAIL &&
!mapped_0;
if (!skip) {
diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
new file mode 100644
index 000000000000..f257beec1430
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test code for the s390x kvm ucontrol interface
+ *
+ * Copyright IBM Corp. 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ */
+#include "debug_print.h"
+#include "kselftest_harness.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "sie.h"
+
+#include <linux/capability.h>
+#include <linux/sizes.h>
+
+#define VM_MEM_SIZE (4 * SZ_1M)
+
+/* Declare capget() directly so capabilities can be checked without libcap */
+int capget(cap_user_header_t header, cap_user_data_t data);
+
+/**
+ * In order to create user-controlled virtual machines on S390,
+ * check KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL
+ * as a privileged user (CAP_SYS_ADMIN).
+ */
+void require_ucontrol_admin(void)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ int rc;
+
+ rc = capget(&hdr, data);
+ TEST_ASSERT_EQ(0, rc);
+ TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
+}
+
+/* Test program setting some registers and looping */
+extern char test_gprs_asm[];
+asm("test_gprs_asm:\n"
+ "xgr %r0, %r0\n"
+ "lgfi %r1,1\n"
+ "lgfi %r2,2\n"
+ "lgfi %r3,3\n"
+ "lgfi %r4,4\n"
+ "lgfi %r5,5\n"
+ "lgfi %r6,6\n"
+ "lgfi %r7,7\n"
+ "0:\n"
+ " diag 0,0,0x44\n"
+ " ahi %r0,1\n"
+ " j 0b\n"
+);
+
+FIXTURE(uc_kvm)
+{
+ struct kvm_s390_sie_block *sie_block;
+ struct kvm_run *run;
+ uintptr_t base_gpa;
+ uintptr_t code_gpa;
+ uintptr_t base_hva;
+ uintptr_t code_hva;
+ int kvm_run_size;
+ void *vm_mem;
+ int vcpu_fd;
+ int kvm_fd;
+ int vm_fd;
+};
+
+/**
+ * Create a VM with a single vCPU; map kvm_run and the SIE control block for easy access
+ */
+FIXTURE_SETUP(uc_kvm)
+{
+ struct kvm_s390_vm_cpu_processor info;
+ int rc;
+
+ require_ucontrol_admin();
+
+ self->kvm_fd = open_kvm_dev_path_or_exit();
+ self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
+ ASSERT_GE(self->vm_fd, 0);
+
+ kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR, &info);
+ TH_LOG("create VM 0x%llx", info.cpuid);
+
+ self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
+ ASSERT_GE(self->vcpu_fd, 0);
+
+ self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
+ TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
+ self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
+ ASSERT_NE(self->run, MAP_FAILED);
+ /**
+ * For virtual cpus that have been created with S390 user controlled
+ * virtual machines, the resulting vcpu fd can be memory mapped at page
+ * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
+ * the virtual cpu's hardware control block.
+ */
+ self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
+ ASSERT_NE(self->sie_block, MAP_FAILED);
+
+ TH_LOG("VM created %p %p", self->run, self->sie_block);
+
+ self->base_gpa = 0;
+ self->code_gpa = self->base_gpa + (3 * SZ_1M);
+
+ self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE);
+ ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
+ self->base_hva = (uintptr_t)self->vm_mem;
+ self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
+ struct kvm_s390_ucas_mapping map = {
+ .user_addr = self->base_hva,
+ .vcpu_addr = self->base_gpa,
+ .length = VM_MEM_SIZE,
+ };
+ TH_LOG("ucas map %p %p 0x%llx",
+ (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
+ rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
+ ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
+ rc, strerror(errno));
+
+ TH_LOG("page in %p", (void *)self->base_gpa);
+ rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
+ ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
+ (void *)self->base_hva, rc, strerror(errno));
+
+ self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
+}
+
+FIXTURE_TEARDOWN(uc_kvm)
+{
+ munmap(self->sie_block, PAGE_SIZE);
+ munmap(self->run, self->kvm_run_size);
+ close(self->vcpu_fd);
+ close(self->vm_fd);
+ close(self->kvm_fd);
+ free(self->vm_mem);
+}
+
+TEST_F(uc_kvm, uc_sie_assertions)
+{
+ /* assert interception of Code 08 (Program Interruption) is set */
+ EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
+}
+
+TEST_F(uc_kvm, uc_attr_mem_limit)
+{
+ u64 limit;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (unsigned long)&limit,
+ };
+ int rc;
+
+ rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(0, rc);
+ EXPECT_EQ(~0UL, limit);
+
+ /* assert set not supported */
+ rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+}
+
+TEST_F(uc_kvm, uc_no_dirty_log)
+{
+ struct kvm_dirty_log dlog;
+ int rc;
+
+ rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+}
+
+/**
+ * Assert HPAGE CAP cannot be enabled on UCONTROL VM
+ */
+TEST(uc_cap_hpage)
+{
+ int rc, kvm_fd, vm_fd, vcpu_fd;
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_S390_HPAGE_1M,
+ };
+
+ require_ucontrol_admin();
+
+ kvm_fd = open_kvm_dev_path_or_exit();
+ vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
+ ASSERT_GE(vm_fd, 0);
+
+ /* assert hpages are not supported on ucontrol vm */
+ rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
+ EXPECT_EQ(0, rc);
+
+ /* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
+ rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+
+ /* assert HPAGE CAP is rejected after vCPU creation */
+ vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
+ ASSERT_GE(vcpu_fd, 0);
+ rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EBUSY, errno);
+
+ close(vcpu_fd);
+ close(vm_fd);
+ close(kvm_fd);
+}
+
+/*
+ * Verify SIEIC exit; fail on codes not expected in the test cases.
+ */
+static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_s390_sie_block *sie_block = self->sie_block;
+ struct kvm_run *run = self->run;
+
+ /* check SIE interception code */
+ pr_info("sieic: 0x%.2x 0x%.4x 0x%.4x\n",
+ run->s390_sieic.icptcode,
+ run->s390_sieic.ipa,
+ run->s390_sieic.ipb);
+ switch (run->s390_sieic.icptcode) {
+ case ICPT_INST:
+ /* end execution in caller on intercepted instruction */
+ pr_info("sie instruction interception\n");
+ return false;
+ case ICPT_OPEREXC:
+ /* operation exception */
+ TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
+ default:
+ TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
+ }
+ return true;
+}
+
+/* verify VM state on exit */
+static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_run *run = self->run;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ return uc_handle_sieic(self);
+ default:
+ pr_info("exit_reason %2d not handled\n", run->exit_reason);
+ }
+ return true;
+}
+
+/* run the VM until interrupted */
+static int uc_run_once(FIXTURE_DATA(uc_kvm) * self)
+{
+ int rc;
+
+ rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
+ print_run(self->run, self->sie_block);
+ print_regs(self->run);
+ pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
+ return rc;
+}
+
+static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_s390_sie_block *sie_block = self->sie_block;
+
+ /* assert vm was interrupted by diag 0x0044 */
+ TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
+ TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
+ TEST_ASSERT_EQ(0x8300, sie_block->ipa);
+ TEST_ASSERT_EQ(0x440000, sie_block->ipb);
+}
+
+TEST_F(uc_kvm, uc_gprs)
+{
+ struct kvm_sync_regs *sync_regs = &self->run->s.regs;
+ struct kvm_run *run = self->run;
+ struct kvm_regs regs = {};
+
+ /* Set registers to values that are different from the ones that we expect below */
+ for (int i = 0; i < 8; i++)
+ sync_regs->gprs[i] = 8;
+ run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+
+ /* copy test_gprs_asm to code_hva / code_gpa */
+ TH_LOG("copy code %p to vm mapped memory %p / %p",
+ &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
+ memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
+
+ /* DAT disabled + 64 bit mode */
+ run->psw_mask = 0x0000000180000000ULL;
+ run->psw_addr = self->code_gpa;
+
+ /* run and expect interception of diag 44 */
+ ASSERT_EQ(0, uc_run_once(self));
+ ASSERT_EQ(false, uc_handle_exit(self));
+ uc_assert_diag44(self);
+
+ /* Retrieve and check guest register values */
+ ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
+ for (int i = 0; i < 8; i++) {
+ ASSERT_EQ(i, regs.gprs[i]);
+ ASSERT_EQ(i, sync_regs->gprs[i]);
+ }
+
+ /* run and expect interception of diag 44 again */
+ ASSERT_EQ(0, uc_run_once(self));
+ ASSERT_EQ(false, uc_handle_exit(self));
+ uc_assert_diag44(self);
+
+ /* check continued increment of register 0 value */
+ ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
+ ASSERT_EQ(1, regs.gprs[0]);
+ ASSERT_EQ(1, sync_regs->gprs[0]);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index bb8002084f52..a8267628e9ed 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -175,7 +175,7 @@ static void guest_code_move_memory_region(void)
GUEST_DONE();
}
-static void test_move_memory_region(void)
+static void test_move_memory_region(bool disable_slot_zap_quirk)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
@@ -184,6 +184,9 @@ static void test_move_memory_region(void)
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
/*
@@ -266,7 +269,7 @@ static void guest_code_delete_memory_region(void)
GUEST_ASSERT(0);
}
-static void test_delete_memory_region(void)
+static void test_delete_memory_region(bool disable_slot_zap_quirk)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
@@ -276,6 +279,9 @@ static void test_delete_memory_region(void)
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
/* Delete the memory region, the guest should not die. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
@@ -553,7 +559,10 @@ int main(int argc, char *argv[])
{
#ifdef __x86_64__
int i, loops;
+ int j, disable_slot_zap_quirk = 0;
+ if (kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_SLOT_ZAP_ALL)
+ disable_slot_zap_quirk = 1;
/*
* FIXME: the zero-memslot test fails on aarch64 and s390x because
* KVM_RUN fails with ENOEXEC or EFAULT.
@@ -579,13 +588,17 @@ int main(int argc, char *argv[])
else
loops = 10;
- pr_info("Testing MOVE of in-use region, %d loops\n", loops);
- for (i = 0; i < loops; i++)
- test_move_memory_region();
+ for (j = 0; j <= disable_slot_zap_quirk; j++) {
+ pr_info("Testing MOVE of in-use region, %d loops, slot zap quirk %s\n",
+ loops, j ? "disabled" : "enabled");
+ for (i = 0; i < loops; i++)
+ test_move_memory_region(!!j);
- pr_info("Testing DELETE of in-use region, %d loops\n", loops);
- for (i = 0; i < loops; i++)
- test_delete_memory_region();
+ pr_info("Testing DELETE of in-use region, %d loops, slot zap quirk %s\n",
+ loops, j ? "disabled" : "enabled");
+ for (i = 0; i < loops; i++)
+ test_delete_memory_region(!!j);
+ }
#endif
return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
index f6b295e0b2d2..76cc2df9238a 100644
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -47,15 +47,18 @@ static void guest_code(void)
/*
* Single step test, covers 2 basic instructions and 2 emulated
*
- * Enable interrupts during the single stepping to see that
- * pending interrupt we raised is not handled due to KVM_GUESTDBG_BLOCKIRQ
+ * Enable interrupts during the single stepping to see that the pending
+ * interrupt we raised is not handled due to KVM_GUESTDBG_BLOCKIRQ.
+ *
+ * Write MSR_IA32_TSC_DEADLINE to verify that KVM's fastpath handler
+ * exits to userspace due to single-step being enabled.
*/
asm volatile("ss_start: "
"sti\n\t"
"xor %%eax,%%eax\n\t"
"cpuid\n\t"
- "movl $0x1a0,%%ecx\n\t"
- "rdmsr\n\t"
+ "movl $" __stringify(MSR_IA32_TSC_DEADLINE) ", %%ecx\n\t"
+ "wrmsr\n\t"
"cli\n\t"
: : : "eax", "ebx", "ecx", "edx");
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
index e192720bfe14..74cf19661309 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
@@ -242,7 +242,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
+ TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_DIRECT_FLUSH));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index b987a3d79715..0ddb63229bcb 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -157,7 +157,7 @@ int main(int argc, char *argv[])
int stage;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
+ TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_DIRECT_FLUSH));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
index 7c70c0da4fb7..2e9197eb1652 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
@@ -160,6 +160,36 @@ static void test_sev(void *guest_code, uint64_t policy)
kvm_vm_free(vm);
}
+static void guest_shutdown_code(void)
+{
+ struct desc_ptr idt;
+
+ /* Clobber the IDT so that #UD is guaranteed to trigger SHUTDOWN. */
+ memset(&idt, 0, sizeof(idt));
+ __asm__ __volatile__("lidt %0" :: "m"(idt));
+
+ __asm__ __volatile__("ud2");
+}
+
+static void test_sev_es_shutdown(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ uint32_t type = KVM_X86_SEV_ES_VM;
+
+ vm = vm_sev_create_with_one_vcpu(type, guest_shutdown_code, &vcpu);
+
+ vm_sev_launch(vm, SEV_POLICY_ES, NULL);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Wanted SHUTDOWN, got %s",
+ exit_reason_str(vcpu->run->exit_reason));
+
+ kvm_vm_free(vm);
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
@@ -171,6 +201,8 @@ int main(int argc, char *argv[])
test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
test_sev(guest_sev_es_code, SEV_POLICY_ES);
+ test_sev_es_shutdown();
+
if (kvm_has_cap(KVM_CAP_XCRS) &&
(xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
test_sync_vmsa(0);
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
index 618cd2442390..88bcca188799 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
@@ -13,6 +13,7 @@
struct xapic_vcpu {
struct kvm_vcpu *vcpu;
bool is_x2apic;
+ bool has_xavic_errata;
};
static void xapic_guest_code(void)
@@ -31,6 +32,10 @@ static void xapic_guest_code(void)
}
}
+#define X2APIC_RSVD_BITS_MASK (GENMASK_ULL(31, 20) | \
+ GENMASK_ULL(17, 16) | \
+ GENMASK_ULL(13, 13))
+
static void x2apic_guest_code(void)
{
asm volatile("cli");
@@ -41,7 +46,12 @@ static void x2apic_guest_code(void)
uint64_t val = x2apic_read_reg(APIC_IRR) |
x2apic_read_reg(APIC_IRR + 0x10) << 32;
- x2apic_write_reg(APIC_ICR, val);
+ if (val & X2APIC_RSVD_BITS_MASK) {
+ x2apic_write_reg_fault(APIC_ICR, val);
+ } else {
+ x2apic_write_reg(APIC_ICR, val);
+ GUEST_ASSERT_EQ(x2apic_read_reg(APIC_ICR), val);
+ }
GUEST_SYNC(val);
} while (1);
}
@@ -71,27 +81,28 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) {
- val &= (-1u | (0xffull << (32 + 24)));
- TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
- } else {
- TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ if (!x->has_xavic_errata)
+ val &= (-1u | (0xffull << (32 + 24)));
+ } else if (val & X2APIC_RSVD_BITS_MASK) {
+ return;
}
-}
-#define X2APIC_RSVED_BITS_MASK (GENMASK_ULL(31,20) | \
- GENMASK_ULL(17,16) | \
- GENMASK_ULL(13,13))
+ if (x->has_xavic_errata)
+ TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ else
+ TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+}
static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
- if (x->is_x2apic) {
- /* Hardware writing vICR register requires reserved bits 31:20,
- * 17:16 and 13 kept as zero to avoid #GP exception. Data value
- * written to vICR should mask out those bits above.
- */
- val &= ~X2APIC_RSVED_BITS_MASK;
- }
- ____test_icr(x, val | APIC_ICR_BUSY);
+ /*
+ * The BUSY bit is reserved on both AMD and Intel, but only AMD treats
+ * it as _must_ be zero. Intel simply ignores the bit. Don't test
+ * the BUSY bit for x2APIC, as there is no single correct behavior.
+ */
+ if (!x->is_x2apic)
+ ____test_icr(x, val | APIC_ICR_BUSY);
+
____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}
@@ -231,6 +242,15 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false;
+ /*
+ * AMD's AVIC implementation is buggy (fails to clear the ICR BUSY bit),
+ * and also diverges from KVM with respect to ICR2[23:0] (KVM and Intel
+ * drop writes, AMD does not). Account for the errata when checking
+ * that KVM reads back what was written.
+ */
+ x.has_xavic_errata = host_cpu_is_amd &&
+ get_kvm_amd_param_bool("avic");
+
vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index e149d0574961..2585087cdf5c 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -10,6 +10,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "hyperv.h"
#define HCALL_REGION_GPA 0xc0000000ULL
#define HCALL_REGION_SLOT 10
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index fc90af2a97b8..bcc73b4e805c 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -15,7 +15,7 @@
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <math.h>
-#include <asm-generic/unistd.h>
+#include <asm/unistd.h>
#include <pthread.h>
#include <sys/resource.h>
#include <assert.h>
diff --git a/tools/testing/selftests/net/netfilter/Makefile b/tools/testing/selftests/net/netfilter/Makefile
index d13fb5ea3e89..e6c9e777fead 100644
--- a/tools/testing/selftests/net/netfilter/Makefile
+++ b/tools/testing/selftests/net/netfilter/Makefile
@@ -13,6 +13,7 @@ TEST_PROGS += conntrack_ipip_mtu.sh
TEST_PROGS += conntrack_tcp_unreplied.sh
TEST_PROGS += conntrack_sctp_collision.sh
TEST_PROGS += conntrack_vrf.sh
+TEST_PROGS += conntrack_reverse_clash.sh
TEST_PROGS += ipvs.sh
TEST_PROGS += nf_conntrack_packetdrill.sh
TEST_PROGS += nf_nat_edemux.sh
@@ -26,6 +27,8 @@ TEST_PROGS += nft_nat.sh
TEST_PROGS += nft_nat_zones.sh
TEST_PROGS += nft_queue.sh
TEST_PROGS += nft_synproxy.sh
+TEST_PROGS += nft_tproxy_tcp.sh
+TEST_PROGS += nft_tproxy_udp.sh
TEST_PROGS += nft_zones_many.sh
TEST_PROGS += rpath.sh
TEST_PROGS += xt_string.sh
@@ -36,6 +39,7 @@ TEST_GEN_PROGS = conntrack_dump_flush
TEST_GEN_FILES = audit_logread
TEST_GEN_FILES += connect_close nf_queue
+TEST_GEN_FILES += conntrack_reverse_clash
TEST_GEN_FILES += sctp_collision
include ../../lib.mk
diff --git a/tools/testing/selftests/net/netfilter/config b/tools/testing/selftests/net/netfilter/config
index b2dd4db45215..c5fe7b34eaf1 100644
--- a/tools/testing/selftests/net/netfilter/config
+++ b/tools/testing/selftests/net/netfilter/config
@@ -81,6 +81,7 @@ CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_TPROXY=m
CONFIG_VETH=m
CONFIG_VLAN_8021Q=m
CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c
new file mode 100644
index 000000000000..507930cee8cb
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Needs something like:
+ *
+ * iptables -t nat -A POSTROUTING -o nomatch -j MASQUERADE
+ *
+ * so that the NAT engine attaches a NAT null-binding to each connection.
+ *
+ * With unmodified kernels, the child or parent will exit with a
+ * "Port number changed" error, even though no port translation
+ * was requested.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+
+#define LEN 512
+#define PORT 56789
+#define TEST_TIME 5
+
+static void die(const char *e)
+{
+ perror(e);
+ exit(111);
+}
+
+static void die_port(uint16_t got, uint16_t want)
+{
+ fprintf(stderr, "Port number changed, wanted %d got %d\n", want, ntohs(got));
+ exit(1);
+}
+
+static int udp_socket(void)
+{
+ static const struct timeval tv = {
+ .tv_sec = 1,
+ };
+ int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+
+ if (fd < 0)
+ die("socket");
+
+ setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ return fd;
+}
+
+int main(int argc, char *argv[])
+{
+ struct sockaddr_in sa1 = {
+ .sin_family = AF_INET,
+ };
+ struct sockaddr_in sa2 = {
+ .sin_family = AF_INET,
+ };
+ int s1, s2, status;
+ time_t end, now;
+ socklen_t plen;
+ char buf[LEN];
+ bool child;
+
+ sa1.sin_port = htons(PORT);
+ sa2.sin_port = htons(PORT + 1);
+
+ s1 = udp_socket();
+ s2 = udp_socket();
+
+ inet_pton(AF_INET, "127.0.0.11", &sa1.sin_addr);
+ inet_pton(AF_INET, "127.0.0.12", &sa2.sin_addr);
+
+ if (bind(s1, (struct sockaddr *)&sa1, sizeof(sa1)) < 0)
+ die("bind 1");
+ if (bind(s2, (struct sockaddr *)&sa2, sizeof(sa2)) < 0)
+ die("bind 2");
+
+ child = fork() == 0;
+
+ now = time(NULL);
+ end = now + TEST_TIME;
+
+ while (now < end) {
+ struct sockaddr_in peer;
+ socklen_t plen = sizeof(peer);
+
+ now = time(NULL);
+
+ if (child) {
+ if (sendto(s1, buf, LEN, 0, (struct sockaddr *)&sa2, sizeof(sa2)) != LEN)
+ continue;
+
+ if (recvfrom(s2, buf, LEN, 0, (struct sockaddr *)&peer, &plen) < 0)
+ die("child recvfrom");
+
+ if (peer.sin_port != htons(PORT))
+ die_port(peer.sin_port, PORT);
+ } else {
+ if (sendto(s2, buf, LEN, 0, (struct sockaddr *)&sa1, sizeof(sa1)) != LEN)
+ continue;
+
+ if (recvfrom(s1, buf, LEN, 0, (struct sockaddr *)&peer, &plen) < 0)
+ die("parent recvfrom");
+
+ if (peer.sin_port != htons((PORT + 1)))
+ die_port(peer.sin_port, PORT + 1);
+ }
+ }
+
+ if (child)
+ return 0;
+
+ wait(&status);
+
+ if (WIFEXITED(status))
+ return WEXITSTATUS(status);
+
+ return 1;
+}
diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh
new file mode 100755
index 000000000000..a24c896347a8
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+
+cleanup()
+{
+ cleanup_all_ns
+}
+
+checktool "nft --version" "run test without nft"
+checktool "conntrack --version" "run test without conntrack"
+
+trap cleanup EXIT
+
+setup_ns ns0
+
+# make loopback connections get nat null bindings assigned
+ip netns exec "$ns0" nft -f - <<EOF
+table ip nat {
+ chain POSTROUTING {
+ type nat hook postrouting priority srcnat; policy accept;
+ oifname "nomatch" counter packets 0 bytes 0 masquerade
+ }
+}
+EOF
+
+do_flush()
+{
+ local end
+ local now
+
+ now=$(date +%s)
+ end=$((now + 5))
+
+ while [ $now -lt $end ];do
+ ip netns exec "$ns0" conntrack -F 2>/dev/null
+ now=$(date +%s)
+ done
+}
+
+do_flush &
+
+if ip netns exec "$ns0" ./conntrack_reverse_clash; then
+ echo "PASS: No SNAT performed for null bindings"
+else
+ echo "ERROR: SNAT performed without any matching snat rule"
+ exit 1
+fi
+
+exit 0
diff --git a/tools/testing/selftests/net/netfilter/ipvs.sh b/tools/testing/selftests/net/netfilter/ipvs.sh
index 4ceee9fb3949..d3edb16cd4b3 100755
--- a/tools/testing/selftests/net/netfilter/ipvs.sh
+++ b/tools/testing/selftests/net/netfilter/ipvs.sh
@@ -97,7 +97,7 @@ cleanup() {
}
server_listen() {
- ip netns exec "$ns2" socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
+ ip netns exec "$ns2" timeout 5 socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
server_pid=$!
sleep 0.2
}
diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
index d66e3c4dfec6..a9d109fcc15c 100755
--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
@@ -31,7 +31,7 @@ modprobe -q sctp
trap cleanup EXIT
-setup_ns ns1 ns2 nsrouter
+setup_ns ns1 ns2 ns3 nsrouter
TMPFILE0=$(mktemp)
TMPFILE1=$(mktemp)
@@ -48,6 +48,7 @@ if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" >
exit $ksft_skip
fi
ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
ip -net "$nsrouter" link set veth0 up
ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
@@ -57,8 +58,13 @@ ip -net "$nsrouter" link set veth1 up
ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
ip -net "$ns1" link set eth0 up
ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
@@ -70,6 +76,11 @@ ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
ip -net "$ns2" route add default via 10.0.2.1
ip -net "$ns2" route add default via dead:2::1
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
load_ruleset() {
local name=$1
local prio=$2
@@ -473,6 +484,83 @@ EOF
check_output_files "$TMPINPUT" "$TMPFILE1" "sctp output"
}
+udp_listener_ready()
+{
+ ss -S -N "$1" -uln -o "sport = :12345" | grep -q 12345
+}
+
+output_files_written()
+{
+ test -s "$1" && test -s "$2"
+}
+
+test_udp_ct_race()
+{
+ ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF
+flush ruleset
+table inet udpq {
+ chain prerouting {
+ type nat hook prerouting priority dstnat - 5; policy accept;
+ ip daddr 10.6.6.6 udp dport 12345 counter dnat to numgen inc mod 2 map { 0 : 10.0.2.99, 1 : 10.0.3.99 }
+ }
+ chain postrouting {
+ type filter hook postrouting priority srcnat - 5; policy accept;
+ udp dport 12345 counter queue num 12
+ }
+}
+EOF
+ :> "$TMPFILE1"
+ :> "$TMPFILE2"
+
+ timeout 10 ip netns exec "$ns2" socat UDP-LISTEN:12345,fork OPEN:"$TMPFILE1",trunc &
+ local rpid1=$!
+
+ timeout 10 ip netns exec "$ns3" socat UDP-LISTEN:12345,fork OPEN:"$TMPFILE2",trunc &
+ local rpid2=$!
+
+ ip netns exec "$nsrouter" ./nf_queue -q 12 -d 1000 &
+ local nfqpid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns2"
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns3"
+ busywait "$BUSYWAIT_TIMEOUT" nf_queue_wait "$nsrouter" 12
+
+ # Send two packets; one should end up in ns2, the other in ns3.
+ # This is because nfqueue delays each packet long enough that the
+ # second packet will not find an existing conntrack entry.
+ echo "Packet 1" | ip netns exec "$ns1" socat STDIN UDP-DATAGRAM:10.6.6.6:12345,bind=0.0.0.0:55221
+ echo "Packet 2" | ip netns exec "$ns1" socat STDIN UDP-DATAGRAM:10.6.6.6:12345,bind=0.0.0.0:55221
+
+ busywait 10000 output_files_written "$TMPFILE1" "$TMPFILE2"
+
+ kill "$nfqpid"
+
+ if ! ip netns exec "$nsrouter" bash -c 'conntrack -L -p udp --dport 12345 2>/dev/null | wc -l | grep -q "^1"'; then
+ echo "FAIL: Expected One udp conntrack entry"
+ ip netns exec "$nsrouter" conntrack -L -p udp --dport 12345
+ ret=1
+ fi
+
+ if ! ip netns exec "$nsrouter" nft delete table inet udpq; then
+ echo "FAIL: Could not delete udpq table"
+ ret=1
+ return
+ fi
+
+ NUMLINES1=$(wc -l < "$TMPFILE1")
+ NUMLINES2=$(wc -l < "$TMPFILE2")
+
+ if [ "$NUMLINES1" -ne 1 ] || [ "$NUMLINES2" -ne 1 ]; then
+ ret=1
+ echo "FAIL: uneven udp packet distribution: $NUMLINES1 $NUMLINES2"
+ echo -n "$TMPFILE1: ";cat "$TMPFILE1"
+ echo -n "$TMPFILE2: ";cat "$TMPFILE2"
+ return
+ fi
+
+ echo "PASS: both udp receivers got one packet each"
+}
+
test_queue_removal()
{
read tainted_then < /proc/sys/kernel/tainted
@@ -512,6 +600,7 @@ EOF
ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
load_ruleset "filter" 0
@@ -549,6 +638,7 @@ test_tcp_localhost_connectclose
test_tcp_localhost_requeue
test_sctp_forward
test_sctp_output
+test_udp_ct_race
# should be last, adds vrf device in ns1 and changes routes
test_icmp_vrf
diff --git a/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh b/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh
new file mode 100755
index 000000000000..e208fb03eeb7
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh
@@ -0,0 +1,358 @@
+#!/bin/bash
+#
+# This tests tproxy on the following scenario:
+#
+# +------------+
+# +-------+ | nsrouter | +-------+
+# |ns1 |.99 .1| |.1 .99| ns2|
+# | eth0|---------------|veth0 veth1|------------------|eth0 |
+# | | 10.0.1.0/24 | | 10.0.2.0/24 | |
+# +-------+ dead:1::/64 | veth2 | dead:2::/64 +-------+
+# +------------+
+# |.1
+# |
+# |
+# | +-------+
+# | .99| ns3|
+# +------------------------|eth0 |
+# 10.0.3.0/24 | |
+# dead:3::/64 +-------+
+#
+# The tproxy implementation acts as an echo server so the client
+# must receive the same message it sent if it has been proxied.
+# If it is not proxied, the servers return PONG_NS# with the number
+# of the namespace the server is running in.
+#
+# shellcheck disable=SC2162,SC2317
+
+source lib.sh
+ret=0
+timeout=5
+
+cleanup()
+{
+ ip netns pids "$ns1" | xargs kill 2>/dev/null
+ ip netns pids "$ns2" | xargs kill 2>/dev/null
+ ip netns pids "$ns3" | xargs kill 2>/dev/null
+ ip netns pids "$nsrouter" | xargs kill 2>/dev/null
+
+ cleanup_all_ns
+}
+
+checktool "nft --version" "test without nft tool"
+checktool "socat -h" "run test without socat"
+
+trap cleanup EXIT
+setup_ns ns1 ns2 ns3 nsrouter
+
+if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1; then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
+
+ip -net "$nsrouter" link set veth0 up
+ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
+ip -net "$nsrouter" addr add dead:1::1/64 dev veth0 nodad
+
+ip -net "$nsrouter" link set veth1 up
+ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
+ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
+ip -net "$ns1" link set eth0 up
+ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
+
+ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
+ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
+ip -net "$ns1" route add default via 10.0.1.1
+ip -net "$ns1" route add default via dead:1::1
+
+ip -net "$ns2" addr add 10.0.2.99/24 dev eth0
+ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
+ip -net "$ns2" route add default via 10.0.2.1
+ip -net "$ns2" route add default via dead:2::1
+
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
+ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
+
+test_ping() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::99 > /dev/null; then
+ return 2
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.3.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:3::99 > /dev/null; then
+ return 2
+ fi
+
+ return 0
+}
+
+test_ping_router() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.1 > /dev/null; then
+ return 3
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::1 > /dev/null; then
+ return 4
+ fi
+
+ return 0
+}
+
+
+listener_ready()
+{
+ local ns="$1"
+ local port="$2"
+ local proto="$3"
+ ss -N "$ns" -ln "$proto" -o "sport = :$port" | grep -q "$port"
+}
+
+test_tproxy()
+{
+ local traffic_origin="$1"
+ local ip_proto="$2"
+ local expect_ns1_ns2="$3"
+ local expect_ns1_ns3="$4"
+ local expect_nsrouter_ns2="$5"
+ local expect_nsrouter_ns3="$6"
+
+ # derived variables
+ local testname="test_${ip_proto}_tcp_${traffic_origin}"
+ local socat_ipproto
+ local ns1_ip
+ local ns2_ip
+ local ns3_ip
+ local ns2_target
+ local ns3_target
+ local nftables_subject
+ local ip_command
+
+ # socat 1.8.0 has a bug that requires specifying the IP family to bind to (fixed in 1.8.0.1)
+ case $ip_proto in
+ "ip")
+ socat_ipproto="-4"
+ ns1_ip=10.0.1.99
+ ns2_ip=10.0.2.99
+ ns3_ip=10.0.3.99
+ ns2_target="tcp:$ns2_ip:8080"
+ ns3_target="tcp:$ns3_ip:8080"
+ nftables_subject="ip daddr $ns2_ip tcp dport 8080"
+ ip_command="ip"
+ ;;
+ "ip6")
+ socat_ipproto="-6"
+ ns1_ip=dead:1::99
+ ns2_ip=dead:2::99
+ ns3_ip=dead:3::99
+ ns2_target="tcp:[$ns2_ip]:8080"
+ ns3_target="tcp:[$ns3_ip]:8080"
+ nftables_subject="ip6 daddr $ns2_ip tcp dport 8080"
+ ip_command="ip -6"
+ ;;
+ *)
+ echo "FAIL: unsupported protocol"
+ exit 255
+ ;;
+ esac
+
+ case $traffic_origin in
+ # to capture the local originated traffic we need to mark the outgoing
+ # traffic so the policy based routing rule redirects it and can be processed
+ # in the prerouting chain.
+ "local")
+ nftables_rules="
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $nftables_subject tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+ chain output {
+ type route hook output priority 0; policy accept;
+ $nftables_subject meta mark set 1 accept
+ }
+}"
+ ;;
+ "forward")
+ nftables_rules="
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $nftables_subject tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+}"
+ ;;
+ *)
+ echo "FAIL: unsupported parameter for traffic origin"
+ exit 255
+ ;;
+ esac
+
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule add fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route add local "${ns2_ip}" dev lo table 100
+ echo "$nftables_rules" | ip netns exec "$nsrouter" nft -f /dev/stdin
+
+ timeout "$timeout" ip netns exec "$nsrouter" socat "$socat_ipproto" tcp-listen:12345,fork,ip-transparent SYSTEM:"cat" 2>/dev/null &
+ local tproxy_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns2" socat "$socat_ipproto" tcp-listen:8080,fork SYSTEM:"echo PONG_NS2" 2>/dev/null &
+ local server2_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns3" socat "$socat_ipproto" tcp-listen:8080,fork SYSTEM:"echo PONG_NS3" 2>/dev/null &
+ local server3_pid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$nsrouter" 12345 "-t"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns2" 8080 "-t"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns3" 8080 "-t"
+
+ local result
+ # request from ns1 to ns2 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO "$ns2_target")
+ if [ "$result" == "$expect_ns1_ns2" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2, not \"${expect_ns1_ns2}\" as intended"
+ ret=1
+ fi
+
+ # request from ns1 to ns3 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO "$ns3_target")
+ if [ "$result" = "$expect_ns1_ns3" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3, not \"$expect_ns1_ns3\" as intended"
+ ret=1
+ fi
+
+ # request from nsrouter to ns2 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO "$ns2_target")
+ if [ "$result" == "$expect_nsrouter_ns2" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2, not \"$expect_nsrouter_ns2\" as intended"
+ ret=1
+ fi
+
+ # request from nsrouter to ns3 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO "$ns3_target")
+ if [ "$result" = "$expect_nsrouter_ns3" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3, not \"$expect_nsrouter_ns3\" as intended"
+ ret=1
+ fi
+
+ # cleanup
+ kill "$tproxy_pid" "$server2_pid" "$server3_pid" 2>/dev/null
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule del fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route flush table 100
+}
+
+
+test_ipv4_tcp_forward()
+{
+ local traffic_origin="forward"
+ local ip_proto="ip"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv4_tcp_local()
+{
+ local traffic_origin="local"
+ local ip_proto="ip"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="I_M_PROXIED"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv6_tcp_forward()
+{
+ local traffic_origin="forward"
+ local ip_proto="ip6"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv6_tcp_local()
+{
+ local traffic_origin="local"
+ local ip_proto="ip6"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="I_M_PROXIED"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+if test_ping; then
+ # basic connectivity works (no tproxy rules loaded yet)
+ echo "PASS: ${ns1} can reach ${ns2}"
+else
+ echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+ exit $ret
+fi
+
+test_ipv4_tcp_forward
+test_ipv4_tcp_local
+test_ipv6_tcp_forward
+test_ipv6_tcp_local
+
+exit $ret
diff --git a/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh b/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh
new file mode 100755
index 000000000000..d16de13fe5a7
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh
@@ -0,0 +1,262 @@
+#!/bin/bash
+#
+# This tests tproxy on the following scenario:
+#
+# +------------+
+# +-------+ | nsrouter | +-------+
+# |ns1 |.99 .1| |.1 .99| ns2|
+# | eth0|---------------|veth0 veth1|------------------|eth0 |
+# | | 10.0.1.0/24 | | 10.0.2.0/24 | |
+# +-------+ dead:1::/64 | veth2 | dead:2::/64 +-------+
+# +------------+
+# |.1
+# |
+# |
+# | +-------+
+# | .99| ns3|
+# +------------------------|eth0 |
+# 10.0.3.0/24 | |
+# dead:3::/64 +-------+
+#
+# The tproxy implementation acts as an echo server so the client
+# must receive the same message it sent if it has been proxied.
+# If it is not proxied, the servers return PONG_NS# with the number
+# of the namespace the server is running in.
+# shellcheck disable=SC2162,SC2317
+
+source lib.sh
+ret=0
+# UDP is slow
+timeout=15
+
+cleanup()
+{
+ ip netns pids "$ns1" | xargs kill 2>/dev/null
+ ip netns pids "$ns2" | xargs kill 2>/dev/null
+ ip netns pids "$ns3" | xargs kill 2>/dev/null
+ ip netns pids "$nsrouter" | xargs kill 2>/dev/null
+
+ cleanup_all_ns
+}
+
+checktool "nft --version" "test without nft tool"
+checktool "socat -h" "run test without socat"
+
+trap cleanup EXIT
+setup_ns ns1 ns2 ns3 nsrouter
+
+if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1; then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
+
+ip -net "$nsrouter" link set veth0 up
+ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
+ip -net "$nsrouter" addr add dead:1::1/64 dev veth0 nodad
+
+ip -net "$nsrouter" link set veth1 up
+ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
+ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
+ip -net "$ns1" link set eth0 up
+ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
+
+ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
+ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
+ip -net "$ns1" route add default via 10.0.1.1
+ip -net "$ns1" route add default via dead:1::1
+
+ip -net "$ns2" addr add 10.0.2.99/24 dev eth0
+ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
+ip -net "$ns2" route add default via 10.0.2.1
+ip -net "$ns2" route add default via dead:2::1
+
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
+ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
+
+test_ping() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::99 > /dev/null; then
+ return 2
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.3.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:3::99 > /dev/null; then
+ return 2
+ fi
+
+ return 0
+}
+
+test_ping_router() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.1 > /dev/null; then
+ return 3
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::1 > /dev/null; then
+ return 4
+ fi
+
+ return 0
+}
+
+
+listener_ready()
+{
+ local ns="$1"
+ local port="$2"
+ local proto="$3"
+ ss -N "$ns" -ln "$proto" -o "sport = :$port" | grep -q "$port"
+}
+
+test_tproxy_udp_forward()
+{
+ local ip_proto="$1"
+
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ # derived variables
+ local testname="test_${ip_proto}_udp_forward"
+ local socat_ipproto
+ local ns1_ip
+ local ns2_ip
+ local ns3_ip
+ local ns1_ip_port
+ local ns2_ip_port
+ local ns3_ip_port
+ local ip_command
+
+ # socat 1.8.0 has a bug that requires specifying the IP family to bind to (fixed in 1.8.0.1)
+ case $ip_proto in
+ "ip")
+ socat_ipproto="-4"
+ ns1_ip=10.0.1.99
+ ns2_ip=10.0.2.99
+ ns3_ip=10.0.3.99
+ ns1_ip_port="$ns1_ip:18888"
+ ns2_ip_port="$ns2_ip:8080"
+ ns3_ip_port="$ns3_ip:8080"
+ ip_command="ip"
+ ;;
+ "ip6")
+ socat_ipproto="-6"
+ ns1_ip=dead:1::99
+ ns2_ip=dead:2::99
+ ns3_ip=dead:3::99
+ ns1_ip_port="[$ns1_ip]:18888"
+ ns2_ip_port="[$ns2_ip]:8080"
+ ns3_ip_port="[$ns3_ip]:8080"
+ ip_command="ip -6"
+ ;;
+ *)
+ echo "FAIL: unsupported protocol"
+ exit 255
+ ;;
+ esac
+
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule add fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route add local "$ns2_ip" dev lo table 100
+ ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $ip_proto daddr $ns2_ip udp dport 8080 tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+}
+EOF
+
+ timeout "$timeout" ip netns exec "$nsrouter" socat -u "$socat_ipproto" udp-listen:12345,fork,ip-transparent,reuseport udp:"$ns1_ip_port",ip-transparent,reuseport,bind="$ns2_ip_port" 2>/dev/null &
+ local tproxy_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns2" socat "$socat_ipproto" udp-listen:8080,fork SYSTEM:"echo PONG_NS2" 2>/dev/null &
+ local server2_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns3" socat "$socat_ipproto" udp-listen:8080,fork SYSTEM:"echo PONG_NS3" 2>/dev/null &
+ local server3_pid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$nsrouter" 12345 "-u"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns2" 8080 "-u"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns3" 8080 "-u"
+
+ local result
+ # request from ns1 to ns2 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO udp:"$ns2_ip_port",sourceport=18888)
+ if [ "$result" == "$expect_ns1_ns2" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2, not \"${expect_ns1_ns2}\" as intended"
+ ret=1
+ fi
+
+ # request from ns1 to ns3 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO udp:"$ns3_ip_port")
+ if [ "$result" = "$expect_ns1_ns3" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3, not \"$expect_ns1_ns3\" as intended"
+ ret=1
+ fi
+
+ # request from nsrouter to ns2 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO udp:"$ns2_ip_port")
+ if [ "$result" == "$expect_nsrouter_ns2" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2, not \"$expect_nsrouter_ns2\" as intended"
+ ret=1
+ fi
+
+ # request from nsrouter to ns3 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO udp:"$ns3_ip_port")
+ if [ "$result" = "$expect_nsrouter_ns3" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3, not \"$expect_nsrouter_ns3\" as intended"
+ ret=1
+ fi
+
+ # cleanup
+ kill "$tproxy_pid" "$server2_pid" "$server3_pid" 2>/dev/null
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule del fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route flush table 100
+}
+
+
+if test_ping; then
+ # basic connectivity works (no tproxy rules loaded yet)
+ echo "PASS: ${ns1} can reach ${ns2}"
+else
+ echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+ exit $ret
+fi
+
+test_tproxy_udp_forward "ip"
+test_tproxy_udp_forward "ip6"
+
+exit $ret
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
index 7478c0c0c9aa..4071c133f29e 100755
--- a/tools/testing/selftests/net/packetdrill/ksft_runner.sh
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -30,12 +30,17 @@ if [ -z "$(which packetdrill)" ]; then
exit "$KSFT_SKIP"
fi
+declare -a optargs
+if [[ -n "${KSFT_MACHINE_SLOW}" ]]; then
+ optargs+=('--tolerance_usecs=14000')
+fi
+
ktap_print_header
ktap_set_plan 2
-unshare -n packetdrill ${ipv4_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv4_args[@]} ${optargs[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv4" || ktap_test_fail "ipv4"
-unshare -n packetdrill ${ipv6_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv6_args[@]} ${optargs[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv6" || ktap_test_fail "ipv6"
ktap_finished
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 27f6fdf11969..644915862af8 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -131,6 +131,8 @@ asm (
"_start:\n\t"
#ifdef __x86_64__
"mov %rsp,%rdi\n\t"
+ "and $-16,%rsp\n\t"
+ "sub $8,%rsp\n\t"
"jmp c_main"
#else
"push %esp\n\t"
diff --git a/tools/testing/shared/linux/init.h b/tools/testing/shared/linux/init.h
deleted file mode 100644
index 81563c3dfce7..000000000000
--- a/tools/testing/shared/linux/init.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define __init
-#define __exit
diff --git a/tools/testing/shared/maple-shared.h b/tools/testing/shared/maple-shared.h
index 3d847edd149d..dc4d30f3860b 100644
--- a/tools/testing/shared/maple-shared.h
+++ b/tools/testing/shared/maple-shared.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __MAPLE_SHARED_H__
+#define __MAPLE_SHARED_H__
#define CONFIG_DEBUG_MAPLE_TREE
#define CONFIG_MAPLE_SEARCH
@@ -7,3 +9,5 @@
#include <stdlib.h>
#include <time.h>
#include "linux/init.h"
+
+#endif /* __MAPLE_SHARED_H__ */
diff --git a/tools/testing/shared/shared.h b/tools/testing/shared/shared.h
index f08f683812ad..13fb4d39966b 100644
--- a/tools/testing/shared/shared.h
+++ b/tools/testing/shared/shared.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SHARED_H__
+#define __SHARED_H__
#include <linux/types.h>
#include <linux/bug.h>
@@ -31,3 +33,5 @@
#ifndef dump_stack
#define dump_stack() assert(0)
#endif
+
+#endif /* __SHARED_H__ */
diff --git a/tools/testing/shared/shared.mk b/tools/testing/shared/shared.mk
index a05f0588513a..a6bc51d0b0bf 100644
--- a/tools/testing/shared/shared.mk
+++ b/tools/testing/shared/shared.mk
@@ -15,7 +15,9 @@ SHARED_DEPS = Makefile ../shared/shared.mk ../shared/*.h generated/map-shift.h \
../../../include/linux/maple_tree.h \
../../../include/linux/radix-tree.h \
../../../lib/radix-tree.h \
- ../../../include/linux/idr.h
+ ../../../include/linux/idr.h \
+ ../../../lib/maple_tree.c \
+ ../../../lib/test_maple_tree.c
ifndef SHIFT
SHIFT=3
diff --git a/tools/testing/shared/xarray-shared.h b/tools/testing/shared/xarray-shared.h
index ac2d16ff53ae..d50de7884803 100644
--- a/tools/testing/shared/xarray-shared.h
+++ b/tools/testing/shared/xarray-shared.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __XARRAY_SHARED_H__
+#define __XARRAY_SHARED_H__
#define XA_DEBUG
#include "shared.h"
+
+#endif /* __XARRAY_SHARED_H__ */
diff --git a/tools/usb/p9_fwd.py b/tools/usb/p9_fwd.py
new file mode 100755
index 000000000000..12c76cbb046b
--- /dev/null
+++ b/tools/usb/p9_fwd.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import argparse
+import errno
+import logging
+import socket
+import struct
+import time
+
+import usb.core
+import usb.util
+
+
+def path_from_usb_dev(dev):
+ """Takes a pyUSB device as argument and returns a string.
+ The string is a Path representation of the position of the USB device on the USB bus tree.
+
+ This path is used to find a USB device on the bus or all devices connected to a HUB.
+ The path is made up of the number of the USB controller followed be the ports of the HUB tree."""
+ if dev.port_numbers:
+ dev_path = ".".join(str(i) for i in dev.port_numbers)
+ return f"{dev.bus}-{dev_path}"
+ return ""
+
+
+HEXDUMP_FILTER = "".join(chr(x).isprintable() and chr(x) or "." for x in range(128)) + "." * 128
+
+
+class Forwarder:
+ @staticmethod
+ def _log_hexdump(data):
+ if not logging.root.isEnabledFor(logging.TRACE):
+ return
+ L = 16
+ for c in range(0, len(data), L):
+ chars = data[c : c + L]
+ dump = " ".join(f"{x:02x}" for x in chars)
+ printable = "".join(HEXDUMP_FILTER[x] for x in chars)
+ line = f"{c:08x} {dump:{L*3}s} |{printable:{L}s}|"
+ logging.root.log(logging.TRACE, "%s", line)
+
+ def __init__(self, server, vid, pid, path):
+ self.stats = {
+ "c2s packets": 0,
+ "c2s bytes": 0,
+ "s2c packets": 0,
+ "s2c bytes": 0,
+ }
+ self.stats_logged = time.monotonic()
+
+ def find_filter(dev):
+ dev_path = path_from_usb_dev(dev)
+ if path is not None:
+ return dev_path == path
+ return True
+
+ dev = usb.core.find(idVendor=vid, idProduct=pid, custom_match=find_filter)
+ if dev is None:
+ raise ValueError("Device not found")
+
+ logging.info(f"found device: {dev.bus}/{dev.address} located at {path_from_usb_dev(dev)}")
+
+ # dev.set_configuration() is not necessary since g_multi has only one configuration
+ usb9pfs = None
+ # g_multi adds 9pfs as last interface
+ cfg = dev.get_active_configuration()
+ for intf in cfg:
+ # we have to detach the usb-storage driver from the multi gadget since
+ # its stall option could be set, which would lead to spontaneous port
+ # resets and leave our transfers stuck
+ if intf.bInterfaceClass == 0x08:
+ if dev.is_kernel_driver_active(intf.bInterfaceNumber):
+ dev.detach_kernel_driver(intf.bInterfaceNumber)
+
+ if intf.bInterfaceClass == 0xFF and intf.bInterfaceSubClass == 0xFF and intf.bInterfaceProtocol == 0x09:
+ usb9pfs = intf
+ if usb9pfs is None:
+ raise ValueError("Interface not found")
+
+ logging.info(f"claiming interface:\n{usb9pfs}")
+ usb.util.claim_interface(dev, usb9pfs.bInterfaceNumber)
+ ep_out = usb.util.find_descriptor(
+ usb9pfs,
+ custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT,
+ )
+ assert ep_out is not None
+ ep_in = usb.util.find_descriptor(
+ usb9pfs,
+ custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN,
+ )
+ assert ep_in is not None
+ logging.info("interface claimed")
+
+ self.ep_out = ep_out
+ self.ep_in = ep_in
+ self.dev = dev
+
+ # create and connect socket
+ self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.s.connect(server)
+
+ logging.info("connected to server")
+
+ def c2s(self):
+ """forward a request from the USB client to the TCP server"""
+ data = None
+ while data is None:
+ try:
+ logging.log(logging.TRACE, "c2s: reading")
+ data = self.ep_in.read(self.ep_in.wMaxPacketSize)
+ except usb.core.USBTimeoutError:
+ logging.log(logging.TRACE, "c2s: reading timed out")
+ continue
+ except usb.core.USBError as e:
+ if e.errno == errno.EIO:
+ logging.debug("c2s: reading failed with %s, retrying", repr(e))
+ time.sleep(0.5)
+ continue
+ logging.error("c2s: reading failed with %s, aborting", repr(e))
+ raise
+ size = struct.unpack("<I", data[:4])[0]
+ while len(data) < size:
+ data += self.ep_in.read(size - len(data))
+ logging.log(logging.TRACE, "c2s: writing")
+ self._log_hexdump(data)
+ self.s.send(data)
+ logging.debug("c2s: forwarded %i bytes", size)
+ self.stats["c2s packets"] += 1
+ self.stats["c2s bytes"] += size
+
+ def s2c(self):
+ """forward a response from the TCP server to the USB client"""
+ logging.log(logging.TRACE, "s2c: reading")
+ data = self.s.recv(4)
+ size = struct.unpack("<I", data[:4])[0]
+ while len(data) < size:
+ data += self.s.recv(size - len(data))
+ logging.log(logging.TRACE, "s2c: writing")
+ self._log_hexdump(data)
+ while data:
+ written = self.ep_out.write(data)
+ assert written > 0
+ data = data[written:]
+ if size % self.ep_out.wMaxPacketSize == 0:
+ logging.log(logging.TRACE, "sending zero length packet")
+ self.ep_out.write(b"")
+ logging.debug("s2c: forwarded %i bytes", size)
+ self.stats["s2c packets"] += 1
+ self.stats["s2c bytes"] += size
+
+ def log_stats(self):
+ logging.info("statistics:")
+ for k, v in self.stats.items():
+ logging.info(f" {k+':':14s} {v}")
+
+ def log_stats_interval(self, interval=5):
+ if (time.monotonic() - self.stats_logged) < interval:
+ return
+
+ self.log_stats()
+ self.stats_logged = time.monotonic()
+
+
+def try_get_usb_str(dev, name):
+ try:
+ with open(f"/sys/bus/usb/devices/{dev.bus}-{dev.address}/{name}") as f:
+ return f.read().strip()
+ except FileNotFoundError:
+ return None
+
+
+def list_usb(args):
+ vid, pid = [int(x, 16) for x in args.id.split(":", 1)]
+
+ print("Bus | Addr | Manufacturer | Product | ID | Path")
+ print("--- | ---- | ---------------- | ---------------- | --------- | ----")
+ for dev in usb.core.find(find_all=True, idVendor=vid, idProduct=pid):
+ path = path_from_usb_dev(dev) or ""
+ manufacturer = try_get_usb_str(dev, "manufacturer") or "unknown"
+ product = try_get_usb_str(dev, "product") or "unknown"
+ print(
+ f"{dev.bus:3} | {dev.address:4} | {manufacturer:16} | {product:16} | {dev.idVendor:04x}:{dev.idProduct:04x} | {path:18}"
+ )
+
+
+def connect(args):
+ vid, pid = [int(x, 16) for x in args.id.split(":", 1)]
+
+ f = Forwarder(server=(args.server, args.port), vid=vid, pid=pid, path=args.path)
+
+ try:
+ while True:
+ f.c2s()
+ f.s2c()
+ f.log_stats_interval()
+ finally:
+ f.log_stats()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Forward 9PFS requests from USB to TCP",
+ )
+
+ parser.add_argument("--id", type=str, default="1d6b:0109", help="vid:pid of target device")
+ parser.add_argument("--path", type=str, required=False, help="path of target device")
+ parser.add_argument("-v", "--verbose", action="count", default=0)
+
+ subparsers = parser.add_subparsers()
+ subparsers.required = True
+ subparsers.dest = "command"
+
+ parser_list = subparsers.add_parser("list", help="List all connected 9p gadgets")
+ parser_list.set_defaults(func=list_usb)
+
+ parser_connect = subparsers.add_parser(
+ "connect", help="Forward messages between the usb9pfs gadget and the 9p server"
+ )
+ parser_connect.set_defaults(func=connect)
+ connect_group = parser_connect.add_argument_group()
+ connect_group.required = True
+ parser_connect.add_argument("-s", "--server", type=str, default="127.0.0.1", help="server hostname")
+ parser_connect.add_argument("-p", "--port", type=int, default=564, help="server port")
+
+ args = parser.parse_args()
+
+ logging.TRACE = logging.DEBUG - 5
+ logging.addLevelName(logging.TRACE, "TRACE")
+
+ if args.verbose >= 2:
+ level = logging.TRACE
+ elif args.verbose:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+ logging.basicConfig(level=level, format="%(asctime)-15s %(levelname)-8s %(message)s")
+
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
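The c2s()/s2c() loops above depend on 9P's wire framing: every message starts with a 4-byte little-endian size field that covers the whole message, size field included, so the forwarder keeps reading until it has accumulated size bytes. A minimal sketch of the same receive logic in C, assuming a blocking TCP socket; recv_exact() and read_9p_msg() are hypothetical helpers used only for illustration:

/*
 * Read one complete 9P message whose first 4 bytes are a little-endian
 * length that includes the header itself, mirroring what s2c() does with
 * struct.unpack("<I", ...).
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t recv_exact(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = recv(fd, (char *)buf + done, len - done, 0);

		if (n <= 0)
			return -1;	/* error or peer closed the connection */
		done += n;
	}
	return done;
}

/* Returns a malloc()ed buffer holding the full message, or NULL on error. */
static unsigned char *read_9p_msg(int fd, uint32_t *size_out)
{
	unsigned char hdr[4];
	unsigned char *msg;
	uint32_t size;

	if (recv_exact(fd, hdr, sizeof(hdr)) < 0)
		return NULL;
	size = hdr[0] | hdr[1] << 8 | hdr[2] << 16 | ((uint32_t)hdr[3] << 24);
	if (size < sizeof(hdr))
		return NULL;	/* malformed: size must cover the header */

	msg = malloc(size);
	if (!msg)
		return NULL;
	memcpy(msg, hdr, sizeof(hdr));
	if (recv_exact(fd, msg + sizeof(hdr), size - sizeof(hdr)) < 0) {
		free(msg);
		return NULL;
	}
	*size_out = size;
	return msg;
}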
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 5a18b2301a63..e471d8e7cfaa 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -276,7 +276,7 @@ static void help(void)
fprintf(stderr, "Usage: <test> [--help]"
" [--host-affinity H]"
" [--guest-affinity G]"
- " [--ring-size R (default: %d)]"
+ " [--ring-size R (default: %u)]"
" [--run-cycles C (default: %d)]"
" [--batch b]"
" [--outstanding o]"
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 1b90acb6e3fe..375d6285475e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,27 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
return 1;
}
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
-{
- struct kvm_coalesced_mmio_ring *ring;
- unsigned avail;
-
- /* Are we able to batch it ? */
-
- /* last is the first free entry
- * check if we don't meet the first used entry
- * there is always one unused entry in the buffer
- */
- ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
- if (avail == 0) {
- /* full */
- return 0;
- }
-
- return 1;
-}
-
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this, gpa_t addr,
int len, const void *val)
@@ -74,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
spin_lock(&dev->kvm->ring_lock);
+ /*
+ * last is the index of the entry to fill. Verify userspace hasn't
+ * set last to be out of range, and that there is room in the ring.
+ * Leave one entry free in the ring so that userspace can differentiate
+ * between an empty ring and a full ring.
+ */
insert = READ_ONCE(ring->last);
- if (!coalesced_mmio_has_room(dev, insert) ||
- insert >= KVM_COALESCED_MMIO_MAX) {
+ if (insert >= KVM_COALESCED_MMIO_MAX ||
+ (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}
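The rewritten check folds the old coalesced_mmio_has_room() helper into the write path: the ring counts as full when advancing last by one slot (modulo KVM_COALESCED_MMIO_MAX) would land on first, which deliberately keeps one slot unused so that first == last can only mean an empty ring. A small self-contained sketch of that convention; RING_MAX and struct demo_ring are invented for the example and are not the KVM definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_MAX 8

struct demo_ring {
	uint32_t first;	/* oldest used entry, consumer side */
	uint32_t last;	/* next free entry, producer side   */
};

static bool ring_empty(const struct demo_ring *r)
{
	return r->first == r->last;
}

static bool ring_full(const struct demo_ring *r)
{
	/* same test as the new coalesced_mmio_write() check */
	return (r->last + 1) % RING_MAX == r->first;
}

int main(void)
{
	struct demo_ring r = { .first = 0, .last = 0 };
	int produced = 0;

	assert(ring_empty(&r));
	while (!ring_full(&r)) {
		r.last = (r.last + 1) % RING_MAX;
		produced++;
	}
	/* one slot stays unused, so usable capacity is RING_MAX - 1 */
	assert(produced == RING_MAX - 1);
	assert(!ring_empty(&r));
	return 0;
}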
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f416d5e3f9c0..05cbb2548d99 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -136,8 +136,8 @@ static int kvm_no_compat_open(struct inode *inode, struct file *file)
#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
.open = kvm_no_compat_open
#endif
-static int hardware_enable_all(void);
-static void hardware_disable_all(void);
+static int kvm_enable_virtualization(void);
+static void kvm_disable_virtualization(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
@@ -1220,7 +1220,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (r)
goto out_err_no_arch_destroy_vm;
- r = hardware_enable_all();
+ r = kvm_enable_virtualization();
if (r)
goto out_err_no_disable;
@@ -1263,7 +1263,7 @@ out_no_coalesced_mmio:
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
- hardware_disable_all();
+ kvm_disable_virtualization();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
@@ -1360,7 +1360,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
#endif
kvm_arch_free_vm(kvm);
preempt_notifier_dec();
- hardware_disable_all();
+ kvm_disable_virtualization();
mmdrop(mm);
}
@@ -3270,6 +3270,9 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3343,6 +3346,9 @@ static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3373,6 +3379,9 @@ static int __kvm_write_guest_page(struct kvm *kvm,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot(memslot, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3576,7 +3585,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
int ret;
while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
+ ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
if (ret < 0)
return ret;
offset = 0;
@@ -5566,137 +5575,67 @@ static struct miscdevice kvm_dev = {
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+static bool enable_virt_at_load = true;
+module_param(enable_virt_at_load, bool, 0444);
+
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
-static DEFINE_PER_CPU(bool, hardware_enabled);
+static DEFINE_PER_CPU(bool, virtualization_enabled);
+static DEFINE_MUTEX(kvm_usage_lock);
static int kvm_usage_count;
-static int __hardware_enable_nolock(void)
+__weak void kvm_arch_enable_virtualization(void)
+{
+
+}
+
+__weak void kvm_arch_disable_virtualization(void)
+{
+
+}
+
+static int kvm_enable_virtualization_cpu(void)
{
- if (__this_cpu_read(hardware_enabled))
+ if (__this_cpu_read(virtualization_enabled))
return 0;
- if (kvm_arch_hardware_enable()) {
+ if (kvm_arch_enable_virtualization_cpu()) {
pr_info("kvm: enabling virtualization on CPU%d failed\n",
raw_smp_processor_id());
return -EIO;
}
- __this_cpu_write(hardware_enabled, true);
+ __this_cpu_write(virtualization_enabled, true);
return 0;
}
-static void hardware_enable_nolock(void *failed)
-{
- if (__hardware_enable_nolock())
- atomic_inc(failed);
-}
-
static int kvm_online_cpu(unsigned int cpu)
{
- int ret = 0;
-
/*
* Abort the CPU online process if hardware virtualization cannot
* be enabled. Otherwise running VMs would encounter unrecoverable
* errors when scheduled to this CPU.
*/
- mutex_lock(&kvm_lock);
- if (kvm_usage_count)
- ret = __hardware_enable_nolock();
- mutex_unlock(&kvm_lock);
- return ret;
+ return kvm_enable_virtualization_cpu();
}
-static void hardware_disable_nolock(void *junk)
+static void kvm_disable_virtualization_cpu(void *ign)
{
- /*
- * Note, hardware_disable_all_nolock() tells all online CPUs to disable
- * hardware, not just CPUs that successfully enabled hardware!
- */
- if (!__this_cpu_read(hardware_enabled))
+ if (!__this_cpu_read(virtualization_enabled))
return;
- kvm_arch_hardware_disable();
+ kvm_arch_disable_virtualization_cpu();
- __this_cpu_write(hardware_enabled, false);
+ __this_cpu_write(virtualization_enabled, false);
}
static int kvm_offline_cpu(unsigned int cpu)
{
- mutex_lock(&kvm_lock);
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
- mutex_unlock(&kvm_lock);
+ kvm_disable_virtualization_cpu(NULL);
return 0;
}
-static void hardware_disable_all_nolock(void)
-{
- BUG_ON(!kvm_usage_count);
-
- kvm_usage_count--;
- if (!kvm_usage_count)
- on_each_cpu(hardware_disable_nolock, NULL, 1);
-}
-
-static void hardware_disable_all(void)
-{
- cpus_read_lock();
- mutex_lock(&kvm_lock);
- hardware_disable_all_nolock();
- mutex_unlock(&kvm_lock);
- cpus_read_unlock();
-}
-
-static int hardware_enable_all(void)
-{
- atomic_t failed = ATOMIC_INIT(0);
- int r;
-
- /*
- * Do not enable hardware virtualization if the system is going down.
- * If userspace initiated a forced reboot, e.g. reboot -f, then it's
- * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
- * after kvm_reboot() is called. Note, this relies on system_state
- * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
- * hook instead of registering a dedicated reboot notifier (the latter
- * runs before system_state is updated).
- */
- if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
- system_state == SYSTEM_RESTART)
- return -EBUSY;
-
- /*
- * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
- * is called, and so on_each_cpu() between them includes the CPU that
- * is being onlined. As a result, hardware_enable_nolock() may get
- * invoked before kvm_online_cpu(), which also enables hardware if the
- * usage count is non-zero. Disable CPU hotplug to avoid attempting to
- * enable hardware multiple times.
- */
- cpus_read_lock();
- mutex_lock(&kvm_lock);
-
- r = 0;
-
- kvm_usage_count++;
- if (kvm_usage_count == 1) {
- on_each_cpu(hardware_enable_nolock, &failed, 1);
-
- if (atomic_read(&failed)) {
- hardware_disable_all_nolock();
- r = -EBUSY;
- }
- }
-
- mutex_unlock(&kvm_lock);
- cpus_read_unlock();
-
- return r;
-}
-
static void kvm_shutdown(void)
{
/*
@@ -5712,34 +5651,32 @@ static void kvm_shutdown(void)
*/
pr_info("kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
}
static int kvm_suspend(void)
{
/*
* Secondary CPUs and CPU hotplug are disabled across the suspend/resume
- * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
- * is stable. Assert that kvm_lock is not held to ensure the system
- * isn't suspended while KVM is enabling hardware. Hardware enabling
- * can be preempted, but the task cannot be frozen until it has dropped
- * all locks (userspace tasks are frozen via a fake signal).
+ * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
+ * count is stable. Assert that kvm_usage_lock is not held to ensure
+ * the system isn't suspended while KVM is enabling hardware. Hardware
+ * enabling can be preempted, but the task cannot be frozen until it has
+ * dropped all locks (userspace tasks are frozen via a fake signal).
*/
- lockdep_assert_not_held(&kvm_lock);
+ lockdep_assert_not_held(&kvm_usage_lock);
lockdep_assert_irqs_disabled();
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
+ kvm_disable_virtualization_cpu(NULL);
return 0;
}
static void kvm_resume(void)
{
- lockdep_assert_not_held(&kvm_lock);
+ lockdep_assert_not_held(&kvm_usage_lock);
lockdep_assert_irqs_disabled();
- if (kvm_usage_count)
- WARN_ON_ONCE(__hardware_enable_nolock());
+ WARN_ON_ONCE(kvm_enable_virtualization_cpu());
}
static struct syscore_ops kvm_syscore_ops = {
@@ -5747,13 +5684,95 @@ static struct syscore_ops kvm_syscore_ops = {
.resume = kvm_resume,
.shutdown = kvm_shutdown,
};
+
+static int kvm_enable_virtualization(void)
+{
+ int r;
+
+ guard(mutex)(&kvm_usage_lock);
+
+ if (kvm_usage_count++)
+ return 0;
+
+ kvm_arch_enable_virtualization();
+
+ r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
+ kvm_online_cpu, kvm_offline_cpu);
+ if (r)
+ goto err_cpuhp;
+
+ register_syscore_ops(&kvm_syscore_ops);
+
+ /*
+ * Undo virtualization enabling and bail if the system is going down.
+ * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+ * possible for an in-flight operation to enable virtualization after
+ * syscore_shutdown() is called, i.e. without kvm_shutdown() being
+ * invoked. Note, this relies on system_state being set _before_
+ * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
+ * or this CPU observes the impending shutdown. Which is why KVM uses
+ * a syscore ops hook instead of registering a dedicated reboot
+ * notifier (the latter runs before system_state is updated).
+ */
+ if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+ system_state == SYSTEM_RESTART) {
+ r = -EBUSY;
+ goto err_rebooting;
+ }
+
+ return 0;
+
+err_rebooting:
+ unregister_syscore_ops(&kvm_syscore_ops);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+err_cpuhp:
+ kvm_arch_disable_virtualization();
+ --kvm_usage_count;
+ return r;
+}
+
+static void kvm_disable_virtualization(void)
+{
+ guard(mutex)(&kvm_usage_lock);
+
+ if (--kvm_usage_count)
+ return;
+
+ unregister_syscore_ops(&kvm_syscore_ops);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+ kvm_arch_disable_virtualization();
+}
+
+static int kvm_init_virtualization(void)
+{
+ if (enable_virt_at_load)
+ return kvm_enable_virtualization();
+
+ return 0;
+}
+
+static void kvm_uninit_virtualization(void)
+{
+ if (enable_virt_at_load)
+ kvm_disable_virtualization();
+}
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
-static int hardware_enable_all(void)
+static int kvm_enable_virtualization(void)
+{
+ return 0;
+}
+
+static int kvm_init_virtualization(void)
{
return 0;
}
-static void hardware_disable_all(void)
+static void kvm_disable_virtualization(void)
+{
+
+}
+
+static void kvm_uninit_virtualization(void)
{
}
@@ -6186,7 +6205,6 @@ static const struct file_operations stat_fops_per_vm = {
.release = kvm_debugfs_release,
.read = simple_attr_read,
.write = simple_attr_write,
- .llseek = no_llseek,
};
static int vm_stat_get(void *_offset, u64 *val)
@@ -6455,15 +6473,6 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
int r;
int cpu;
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
- kvm_online_cpu, kvm_offline_cpu);
- if (r)
- return r;
-
- register_syscore_ops(&kvm_syscore_ops);
-#endif
-
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
@@ -6474,10 +6483,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
offsetofend(struct kvm_vcpu, stats_id)
- offsetof(struct kvm_vcpu, arch),
NULL);
- if (!kvm_vcpu_cache) {
- r = -ENOMEM;
- goto err_vcpu_cache;
- }
+ if (!kvm_vcpu_cache)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
@@ -6511,6 +6518,10 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
kvm_gmem_init(module);
+ r = kvm_init_virtualization();
+ if (r)
+ goto err_virt;
+
/*
* Registration _must_ be the very last thing done, as this exposes
* /dev/kvm to userspace, i.e. all infrastructure must be setup!
@@ -6524,6 +6535,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
return 0;
err_register:
+ kvm_uninit_virtualization();
+err_virt:
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
@@ -6534,11 +6547,6 @@ err_cpu_kick_mask:
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
-err_vcpu_cache:
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- unregister_syscore_ops(&kvm_syscore_ops);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
-#endif
return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
@@ -6554,16 +6562,14 @@ void kvm_exit(void)
*/
misc_deregister(&kvm_dev);
+ kvm_uninit_virtualization();
+
debugfs_remove_recursive(kvm_debugfs_dir);
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- unregister_syscore_ops(&kvm_syscore_ops);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
-#endif
kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
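kvm_enable_virtualization() and kvm_disable_virtualization() above follow a first-user/last-user pattern: a usage count protected by kvm_usage_lock, where only the 0 to 1 transition performs the expensive setup (arch hook, CPU hotplug state, syscore ops) and only the 1 to 0 transition tears it down, with enable_virt_at_load taking one reference at module init. A compact sketch of that pattern; the pthread mutex, usage counter and do_setup()/do_teardown() hooks are illustrative stand-ins rather than the KVM symbols:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t usage_lock = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;

static int do_setup(void)     { puts("setup");    return 0; }
static void do_teardown(void) { puts("teardown"); }

static int resource_get(void)
{
	int r = 0;

	pthread_mutex_lock(&usage_lock);
	if (usage_count++ == 0) {
		r = do_setup();		/* only the first user pays the cost */
		if (r)
			usage_count--;	/* drop the reference again on failure */
	}
	pthread_mutex_unlock(&usage_lock);
	return r;
}

static void resource_put(void)
{
	pthread_mutex_lock(&usage_lock);
	if (--usage_count == 0)
		do_teardown();		/* only the last user tears down */
	pthread_mutex_unlock(&usage_lock);
}

int main(void)
{
	if (resource_get())
		return 1;
	resource_get();		/* second user: no extra setup */
	resource_put();
	resource_put();		/* last user: teardown happens here */
	return 0;
}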