Diffstat (limited to 'include')
126 files changed, 3673 insertions, 1822 deletions
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index 94a37cd7fbda..d4b72944ccda 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -15,6 +15,10 @@ extern int pxm_to_node(int); extern int node_to_pxm(int); extern int acpi_map_pxm_to_node(int); extern unsigned char acpi_srat_revision; +extern int acpi_numa __initdata; + +extern void bad_srat(void); +extern int srat_disabled(void); #endif /* CONFIG_ACPI_NUMA */ #endif /* __ACP_NUMA_H */ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index dad8af3ebeb5..284965cbc9af 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -15,10 +15,9 @@ #define _CPPC_ACPI_H #include <linux/acpi.h> -#include <linux/mailbox_controller.h> -#include <linux/mailbox_client.h> #include <linux/types.h> +#include <acpi/pcc.h> #include <acpi/processor.h> /* Only support CPPCv2 for now. */ @@ -130,8 +129,4 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); extern int acpi_get_psd_map(struct cpudata **); -/* Methods to interact with the PCC mailbox controller. */ -extern struct mbox_chan * - pcc_mbox_request_channel(struct mbox_client *, unsigned int); - #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h new file mode 100644 index 000000000000..17a940a14477 --- /dev/null +++ b/include/acpi/pcc.h @@ -0,0 +1,29 @@ +/* + * PCC (Platform Communications Channel) methods + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _PCC_H +#define _PCC_H + +#include <linux/mailbox_controller.h> +#include <linux/mailbox_client.h> + +#ifdef CONFIG_PCC +extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, + int subspace_id); +extern void pcc_mbox_free_channel(struct mbox_chan *chan); +#else +static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, + int subspace_id) +{ + return NULL; +} +static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { } +#endif + +#endif /* _PCC_H */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 45c2d6528829..93b61b1f2beb 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -73,6 +73,10 @@ #define ACPI_DEBUGGER #endif +#ifdef CONFIG_ACPI_DEBUG +#define ACPI_MUTEX_DEBUG +#endif + #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 6f1805dd5d3c..bfe6b2e10f3a 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -39,6 +39,7 @@ #define ACPI_CSTATE_SYSTEMIO 0 #define ACPI_CSTATE_FFH 1 #define ACPI_CSTATE_HALT 2 +#define ACPI_CSTATE_INTEGER 3 #define ACPI_CX_DESC_LEN 32 @@ -67,9 +68,25 @@ struct acpi_processor_cx { char desc[ACPI_CX_DESC_LEN]; }; +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; /* worst case */ + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 entry_method; + char desc[ACPI_CX_DESC_LEN]; +}; + struct acpi_processor_power { int count; - struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; + union { + struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; + struct acpi_lpi_state lpi_states[ACPI_PROCESSOR_MAX_POWER]; + }; int timer_broadcast_on_state; }; @@ -189,6 +206,7 
@@ struct acpi_processor_flags { u8 bm_control:1; u8 bm_check:1; u8 has_cst:1; + u8 has_lpi:1; u8 power_setup_done:1; u8 bm_rld_set:1; u8 need_hotplug_init:1; @@ -242,7 +260,7 @@ extern int acpi_processor_get_performance_info(struct acpi_processor *pr); DECLARE_PER_CPU(struct acpi_processor *, processors); extern struct acpi_processor_errata errata; -#ifdef ARCH_HAS_POWER_INIT +#if defined(ARCH_HAS_POWER_INIT) && defined(CONFIG_ACPI_PROCESSOR_CSTATE) void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, unsigned int cpu); int acpi_processor_ffh_cstate_probe(unsigned int cpu, @@ -309,6 +327,7 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) /* in processor_core.c */ phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +phys_cpuid_t acpi_map_madt_entry(u32 acpi_id); int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); @@ -371,7 +390,7 @@ extern struct cpuidle_driver acpi_idle_driver; #ifdef CONFIG_ACPI_PROCESSOR_IDLE int acpi_processor_power_init(struct acpi_processor *pr); int acpi_processor_power_exit(struct acpi_processor *pr); -int acpi_processor_cst_has_changed(struct acpi_processor *pr); +int acpi_processor_power_state_has_changed(struct acpi_processor *pr); int acpi_processor_hotplug(struct acpi_processor *pr); #else static inline int acpi_processor_power_init(struct acpi_processor *pr) @@ -384,7 +403,7 @@ static inline int acpi_processor_power_exit(struct acpi_processor *pr) return -ENODEV; } -static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr) +static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr) { return -ENODEV; } diff --git a/include/acpi/video.h b/include/acpi/video.h index 5731ccb42585..4536bd345ab4 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -54,7 +54,7 @@ extern int acpi_video_get_levels(struct acpi_device *device, struct acpi_video_device_brightness **dev_br, int *pmax_level); #else -static inline int acpi_video_register(void) { return 0; } +static inline int acpi_video_register(void) { return -ENODEV; } static inline void acpi_video_unregister(void) { return; } static inline int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, void **edid) diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 5e1f345b58dd..288cc9e96395 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -112,6 +112,62 @@ static __always_inline void atomic_long_dec(atomic_long_t *l) ATOMIC_LONG_PFX(_dec)(v); } +#define ATOMIC_LONG_FETCH_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ +} + +ATOMIC_LONG_FETCH_OP(add, ) +ATOMIC_LONG_FETCH_OP(add, _relaxed) +ATOMIC_LONG_FETCH_OP(add, _acquire) +ATOMIC_LONG_FETCH_OP(add, _release) +ATOMIC_LONG_FETCH_OP(sub, ) +ATOMIC_LONG_FETCH_OP(sub, _relaxed) +ATOMIC_LONG_FETCH_OP(sub, _acquire) +ATOMIC_LONG_FETCH_OP(sub, _release) +ATOMIC_LONG_FETCH_OP(and, ) +ATOMIC_LONG_FETCH_OP(and, _relaxed) +ATOMIC_LONG_FETCH_OP(and, _acquire) +ATOMIC_LONG_FETCH_OP(and, _release) +ATOMIC_LONG_FETCH_OP(andnot, ) +ATOMIC_LONG_FETCH_OP(andnot, _relaxed) +ATOMIC_LONG_FETCH_OP(andnot, _acquire) +ATOMIC_LONG_FETCH_OP(andnot, _release) +ATOMIC_LONG_FETCH_OP(or, ) +ATOMIC_LONG_FETCH_OP(or, _relaxed) +ATOMIC_LONG_FETCH_OP(or, _acquire) 
+ATOMIC_LONG_FETCH_OP(or, _release) +ATOMIC_LONG_FETCH_OP(xor, ) +ATOMIC_LONG_FETCH_OP(xor, _relaxed) +ATOMIC_LONG_FETCH_OP(xor, _acquire) +ATOMIC_LONG_FETCH_OP(xor, _release) + +#undef ATOMIC_LONG_FETCH_OP + +#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_fetch_##op##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \ +} + +ATOMIC_LONG_FETCH_INC_DEC_OP(inc,) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec,) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release) + +#undef ATOMIC_LONG_FETCH_INC_DEC_OP + #define ATOMIC_LONG_OP(op) \ static __always_inline void \ atomic_long_##op(long i, atomic_long_t *l) \ @@ -124,9 +180,9 @@ atomic_long_##op(long i, atomic_long_t *l) \ ATOMIC_LONG_OP(add) ATOMIC_LONG_OP(sub) ATOMIC_LONG_OP(and) +ATOMIC_LONG_OP(andnot) ATOMIC_LONG_OP(or) ATOMIC_LONG_OP(xor) -ATOMIC_LONG_OP(andnot) #undef ATOMIC_LONG_OP diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 74f1a3704d7a..9ed8b987185b 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ return c c_op i; \ } +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c; \ +} + #else #include <linux/irqflags.h> @@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ return ret; \ } +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = v->counter; \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + #endif /* CONFIG_SMP */ #ifndef atomic_add_return @@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +) ATOMIC_OP_RETURN(sub, -) #endif +#ifndef atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#endif + +#ifndef atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#endif + +#ifndef atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#endif + +#ifndef atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#endif + +#ifndef atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) +#endif + #ifndef atomic_and ATOMIC_OP(and, &) #endif @@ -110,6 +156,7 @@ ATOMIC_OP(or, |) ATOMIC_OP(xor, ^) #endif +#undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index d48e78ccad3d..dad68bf46c77 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ extern long long atomic64_##op##_return(long long a, atomic64_t *v); -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) +#define ATOMIC64_FETCH_OP(op) \ +extern long long atomic64_fetch_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) ATOMIC64_OPS(add) ATOMIC64_OPS(sub) -ATOMIC64_OP(and) -ATOMIC64_OP(or) -ATOMIC64_OP(xor) +#undef ATOMIC64_OPS +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) + +ATOMIC64_OPS(and) +ATOMIC64_OPS(or) 
+ATOMIC64_OPS(xor) #undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 1cceca146905..fe297b599b0a 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -194,7 +194,7 @@ do { \ }) #endif -#endif +#endif /* CONFIG_SMP */ /* Barriers for virtual machine guests when talking to an SMP host */ #define virt_mb() __smp_mb() @@ -207,5 +207,44 @@ do { \ #define virt_store_release(p, v) __smp_store_release(p, v) #define virt_load_acquire(p) __smp_load_acquire(p) +/** + * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency + * + * A control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. (load)-ACQUIRE. + * + * Architectures that do not do load speculation can have this be barrier(). + */ +#ifndef smp_acquire__after_ctrl_dep +#define smp_acquire__after_ctrl_dep() smp_rmb() +#endif + +/** + * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering + * @ptr: pointer to the variable to wait on + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + * + * Due to C lacking lambda expressions we load the value of *ptr into a + * pre-named variable @VAL to be used in @cond. + */ +#ifndef smp_cond_load_acquire +#define smp_cond_load_acquire(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + cpu_relax(); \ + } \ + smp_acquire__after_ctrl_dep(); \ + VAL; \ +}) +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h index 0f1c6f315cdc..a84e28e0c634 100644 --- a/include/asm-generic/cputime_nsecs.h +++ b/include/asm-generic/cputime_nsecs.h @@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t; (__force u64)(__ct) #define nsecs_to_cputime(__nsecs) \ (__force cputime_t)(__nsecs) +#define nsecs_to_cputime64(__nsecs) \ + (__force cputime64_t)(__nsecs) /* diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 002b81f6f2bc..7ef015eb3403 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -585,6 +585,16 @@ static inline u32 ioread32(const volatile void __iomem *addr) } #endif +#ifdef CONFIG_64BIT +#ifndef ioread64 +#define ioread64 ioread64 +static inline u64 ioread64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#endif +#endif /* CONFIG_64BIT */ + #ifndef iowrite8 #define iowrite8 iowrite8 static inline void iowrite8(u8 value, volatile void __iomem *addr) @@ -609,11 +619,21 @@ static inline void iowrite32(u32 value, volatile void __iomem *addr) } #endif +#ifdef CONFIG_64BIT +#ifndef iowrite64 +#define iowrite64 iowrite64 +static inline void iowrite64(u64 value, volatile void __iomem *addr) +{ + writeq(value, addr); +} +#endif +#endif /* CONFIG_64BIT */ + #ifndef ioread16be #define ioread16be ioread16be static inline u16 ioread16be(const volatile void __iomem *addr) { - return __be16_to_cpu(__raw_readw(addr)); + return swab16(readw(addr)); } #endif @@ -621,15 +641,25 @@ static inline u16 ioread16be(const volatile void __iomem *addr) #define ioread32be ioread32be static inline u32 ioread32be(const volatile void __iomem *addr) { - 
return __be32_to_cpu(__raw_readl(addr)); + return swab32(readl(addr)); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef ioread64be +#define ioread64be ioread64be +static inline u64 ioread64be(const volatile void __iomem *addr) +{ + return swab64(readq(addr)); } #endif +#endif /* CONFIG_64BIT */ #ifndef iowrite16be #define iowrite16be iowrite16be static inline void iowrite16be(u16 value, void volatile __iomem *addr) { - __raw_writew(__cpu_to_be16(value), addr); + writew(swab16(value), addr); } #endif @@ -637,10 +667,20 @@ static inline void iowrite16be(u16 value, void volatile __iomem *addr) #define iowrite32be iowrite32be static inline void iowrite32be(u32 value, volatile void __iomem *addr) { - __raw_writel(__cpu_to_be32(value), addr); + writel(swab32(value), addr); } #endif +#ifdef CONFIG_64BIT +#ifndef iowrite64be +#define iowrite64be iowrite64be +static inline void iowrite64be(u64 value, volatile void __iomem *addr) +{ + writeq(swab64(value), addr); +} +#endif +#endif /* CONFIG_64BIT */ + #ifndef ioread8_rep #define ioread8_rep ioread8_rep static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, @@ -668,6 +708,17 @@ static inline void ioread32_rep(const volatile void __iomem *addr, } #endif +#ifdef CONFIG_64BIT +#ifndef ioread64_rep +#define ioread64_rep ioread64_rep +static inline void ioread64_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ + #ifndef iowrite8_rep #define iowrite8_rep iowrite8_rep static inline void iowrite8_rep(volatile void __iomem *addr, @@ -697,6 +748,18 @@ static inline void iowrite32_rep(volatile void __iomem *addr, writesl(addr, buffer, count); } #endif + +#ifdef CONFIG_64BIT +#ifndef iowrite64_rep +#define iowrite64_rep iowrite64_rep +static inline void iowrite64_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesq(addr, buffer, count); +} +#endif +#endif /* CONFIG_64BIT */ #endif /* CONFIG_GENERIC_IOMAP */ #ifdef __KERNEL__ diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index d8f8622fa044..650fede33c25 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -30,12 +30,20 @@ extern unsigned int ioread16(void __iomem *); extern unsigned int ioread16be(void __iomem *); extern unsigned int ioread32(void __iomem *); extern unsigned int ioread32be(void __iomem *); +#ifdef CONFIG_64BIT +extern u64 ioread64(void __iomem *); +extern u64 ioread64be(void __iomem *); +#endif extern void iowrite8(u8, void __iomem *); extern void iowrite16(u16, void __iomem *); extern void iowrite16be(u16, void __iomem *); extern void iowrite32(u32, void __iomem *); extern void iowrite32be(u32, void __iomem *); +#ifdef CONFIG_64BIT +extern void iowrite64(u64, void __iomem *); +extern void iowrite64be(u64, void __iomem *); +#endif /* * "string" versions of the above. 
Note that they diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h index fd694cfd678a..c54829d3de37 100644 --- a/include/asm-generic/mutex-dec.h +++ b/include/asm-generic/mutex-dec.h @@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { - if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1)) + if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1)) return 1; return 0; } diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index a6b4a7bd6ac9..3269ec4e195f 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -91,8 +91,12 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { - int prev = atomic_xchg_acquire(count, 0); + int prev; + if (atomic_read(count) != 1) + return 0; + + prev = atomic_xchg_acquire(count, 0); if (unlikely(prev < 0)) { /* * The lock was marked contended so we must restore that diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 05f05f17a7c2..9f0681bf1e87 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -111,10 +111,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) static __always_inline void queued_spin_unlock(struct qspinlock *lock) { /* - * smp_mb__before_atomic() in order to guarantee release semantics + * unlock() needs release semantics: */ - smp_mb__before_atomic(); - atomic_sub(_Q_LOCKED_VAL, &lock->val); + (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val); } #endif diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h index 3fc94a046bf5..5be122e3d326 100644 --- a/include/asm-generic/rwsem.h +++ b/include/asm-generic/rwsem.h @@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) { long tmp; - while ((tmp = sem->count) >= 0) { - if (tmp == cmpxchg_acquire(&sem->count, tmp, + while ((tmp = atomic_long_read(&sem->count)) >= 0) { + if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp + RWSEM_ACTIVE_READ_BIAS)) { return 1; } @@ -79,7 +79,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) { long tmp; - tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, + tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); return tmp == RWSEM_UNLOCKED_VALUE; } @@ -107,14 +107,6 @@ static inline void __up_write(struct rw_semaphore *sem) } /* - * implement atomic add functionality - */ -static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) -{ - atomic_long_add(delta, (atomic_long_t *)&sem->count); -} - -/* * downgrade write lock to read lock */ static inline void __downgrade_write(struct rw_semaphore *sem) @@ -134,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem) rwsem_downgrade_wake(sem); } -/* - * implement exchange and add functionality - */ -static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) -{ - return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); -} - #endif /* __KERNEL__ */ #endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 081d0f258d4c..54643d1f5af4 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -250,6 +250,14 @@ 
VMLINUX_SYMBOL(__end_init_task) = .; /* + * Allow architectures to handle ro_after_init data on their + * own by defining an empty RO_AFTER_INIT_DATA. + */ +#ifndef RO_AFTER_INIT_DATA +#define RO_AFTER_INIT_DATA *(.data..ro_after_init) +#endif + +/* * Read only Data */ #define RO_DATA_SECTION(align) \ @@ -257,7 +265,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_rodata) = .; \ *(.rodata) *(.rodata.*) \ - *(.data..ro_after_init) /* Read only after init */ \ + RO_AFTER_INIT_DATA /* Read only after init */ \ *(__vermagic) /* Kernel version magic */ \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h index 1f8a1caa7cb4..7654d71243dd 100644 --- a/include/clocksource/timer-sp804.h +++ b/include/clocksource/timer-sp804.h @@ -3,10 +3,10 @@ struct clk; -void __sp804_clocksource_and_sched_clock_init(void __iomem *, - const char *, struct clk *, int); -void __sp804_clockevents_init(void __iomem *, unsigned int, - struct clk *, const char *); +int __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, struct clk *, int); +int __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); void sp804_timer_disable(void __iomem *); static inline void sp804_clocksource_init(void __iomem *base, const char *name) diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 75174f80a106..12f84327ca36 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -112,11 +112,12 @@ struct aead_request { * supplied during the decryption operation. This function is also * responsible for checking the authentication tag size for * validity. - * @setkey: see struct ablkcipher_alg - * @encrypt: see struct ablkcipher_alg - * @decrypt: see struct ablkcipher_alg - * @geniv: see struct ablkcipher_alg - * @ivsize: see struct ablkcipher_alg + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @geniv: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg * @init: Initialize the cryptographic transformation object. This function * is used to initialize the cryptographic transformation object. 
* This function is called only once at the instantiation time, right @@ -145,6 +146,7 @@ struct aead_alg { unsigned int ivsize; unsigned int maxauthsize; + unsigned int chunksize; struct crypto_alg base; }; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index eeafd21afb44..8637cdfe382a 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -244,6 +244,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, } int crypto_attr_u32(struct rtattr *rta, u32 *num); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, unsigned int head); struct crypto_instance *crypto_alloc_instance(const char *name, @@ -440,8 +442,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size) static inline void crypto_yield(u32 flags) { +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) cond_resched(); +#endif } #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 1547f540c920..bc792d5a9e88 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask); struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); +bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); struct cryptd_ahash { @@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask); struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. */ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); void cryptd_free_ahash(struct cryptd_ahash *tfm); struct cryptd_aead { @@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, u32 type, u32 mask); struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_aead_queued(struct cryptd_aead *tfm); void cryptd_free_aead(struct cryptd_aead *tfm); diff --git a/include/crypto/dh.h b/include/crypto/dh.h new file mode 100644 index 000000000000..5102a8f282e6 --- /dev/null +++ b/include/crypto/dh.h @@ -0,0 +1,29 @@ +/* + * Diffie-Hellman secret to be used with kpp API along with helper functions + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ *
+ */
+#ifndef _CRYPTO_DH_
+#define _CRYPTO_DH_
+
+struct dh {
+	void *key;
+	void *p;
+	void *g;
+	unsigned int key_size;
+	unsigned int p_size;
+	unsigned int g_size;
+};
+
+int crypto_dh_key_len(const struct dh *params);
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+
+#endif
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index d961b2b16f55..61580b19f9f6 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -43,6 +43,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -107,14 +108,25 @@ struct drbg_test_data {
 struct drbg_state {
 	struct mutex drbg_mutex;	/* lock around DRBG */
 	unsigned char *V;	/* internal state 10.1.1.1 1a) */
+	unsigned char *Vbuf;
 	/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
 	unsigned char *C;
+	unsigned char *Cbuf;
 	/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
 	size_t reseed_ctr;
 	size_t reseed_threshold;
 	/* some memory the DRBG can use for its operation */
 	unsigned char *scratchpad;
+	unsigned char *scratchpadbuf;
 	void *priv_data;	/* Cipher handle */
+
+	struct crypto_skcipher *ctr_handle;	/* CTR mode cipher handle */
+	struct skcipher_request *ctr_req;	/* CTR mode request handle */
+	__u8 *ctr_null_value_buf;		/* CTR mode unaligned buffer */
+	__u8 *ctr_null_value;			/* CTR mode aligned zero buf */
+	struct completion ctr_completion;	/* CTR mode async handler */
+	int ctr_async_err;			/* CTR mode async error */
+
 	bool seeded;		/* DRBG fully seeded? */
 	bool pr;		/* Prediction resistance enabled? */
 	struct work_struct seed_work;	/* asynchronous seeding support */
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
new file mode 100644
index 000000000000..84bad548d194
--- /dev/null
+++ b/include/crypto/ecdh.h
@@ -0,0 +1,30 @@
+/*
+ * ECDH params to be used with kpp API
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ECDH_
+#define _CRYPTO_ECDH_
+
+/* Curve IDs */
+#define ECC_CURVE_NIST_P192	0x0001
+#define ECC_CURVE_NIST_P256	0x0002
+
+struct ecdh {
+	unsigned short curve_id;
+	char *key;
+	unsigned short key_size;
+};
+
+int crypto_ecdh_key_len(const struct ecdh *params);
+int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
+int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
+
+#endif
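Together, crypto/dh.h and crypto/ecdh.h give a KPP consumer a way to serialize its parameters before handing them to a transform. What follows is a minimal, hypothetical sketch of that packing step for ECDH; the function name example_pack_ecdh, the caller-supplied private key, and the assumption that the encode helper returns 0 on success are illustrative, not part of this patch.

/*
 * Illustrative only: serialize ECDH parameters so the resulting blob can
 * later be handed to crypto_kpp_set_secret() (see crypto/kpp.h below).
 * Kernel context assumed; error handling abbreviated.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <crypto/ecdh.h>

static int example_pack_ecdh(const u8 *privkey, unsigned short privkey_len,
			     char **buf_out, unsigned int *len_out)
{
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key      = (char *)privkey,
		.key_size = privkey_len,
	};
	int len = crypto_ecdh_key_len(&p);	/* encoded size incl. header */
	char *buf;

	if (len < 0)
		return len;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (crypto_ecdh_encode_key(buf, len, &p)) {	/* assumed 0 on success */
		kfree(buf);
		return -EINVAL;
	}
	*buf_out = buf;
	*len_out = len;
	return 0;
}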
However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + int crypto_register_aead(struct aead_alg *alg); void crypto_unregister_aead(struct aead_alg *alg); int crypto_register_aeads(struct aead_alg *algs, int count); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 59333635e712..2bcfb931bc5b 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,7 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; - struct crypto_blkcipher *null; + struct crypto_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 49dae16f8929..1d4f365d8f03 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -114,14 +114,10 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); -int shash_ahash_mcryptd_update(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_final(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_finup(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_digest(struct ahash_request *req, - struct shash_desc *desc); +int ahash_mcryptd_update(struct ahash_request *desc); +int ahash_mcryptd_final(struct ahash_request *desc); +int ahash_mcryptd_finup(struct ahash_request *desc); +int ahash_mcryptd_digest(struct ahash_request *desc); int crypto_init_shash_ops_async(struct crypto_tfm *tfm); diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 000000000000..ad3acf3649be --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,64 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include <crypto/kpp.h> +#include <crypto/algapi.h> + +/* + * Transform internal helpers. 
+ */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +#endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index c7585bdecbc2..9e8f1590de98 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -12,12 +12,44 @@ */ #ifndef _RSA_HELPER_ #define _RSA_HELPER_ -#include <linux/mpi.h> +#include <linux/types.h> +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field + */ struct rsa_key { - MPI n; - MPI e; - MPI d; + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; }; int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, @@ -26,7 +58,5 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); -void rsa_free_key(struct rsa_key *rsa_key); - extern struct crypto_template rsa_pkcs1pad_tmpl; #endif diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2cf7a61ece59..a21a95e1a375 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -19,12 +19,46 @@ struct rtattr; +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; extern const struct crypto_type crypto_givcipher_type; +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance 
*skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + static inline void crypto_set_skcipher_spawn( struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) { @@ -34,6 +68,12 @@ static inline void crypto_set_skcipher_spawn( int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask); +static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + return crypto_grab_skcipher(spawn, name, type, mask); +} + struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) @@ -41,54 +81,42 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) crypto_drop_spawn(&spawn->base); } -static inline struct crypto_alg *crypto_skcipher_spawn_alg( +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( struct crypto_skcipher_spawn *spawn) { - return spawn->base.alg; + return container_of(spawn->base.alg, struct skcipher_alg, base); } -static inline struct crypto_ablkcipher *crypto_spawn_skcipher( +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( struct crypto_skcipher_spawn *spawn) { - return __crypto_ablkcipher_cast( - crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0), - crypto_skcipher_mask(0))); + return crypto_skcipher_spawn_alg(spawn); } -int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); -int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); -const char *crypto_default_geniv(const struct crypto_alg *alg); - -struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, - u32 mask); -void skcipher_geniv_free(struct crypto_instance *inst); -int skcipher_geniv_init(struct crypto_tfm *tfm); -void skcipher_geniv_exit(struct crypto_tfm *tfm); - -static inline struct crypto_ablkcipher *skcipher_geniv_cipher( - struct crypto_ablkcipher *geniv) +static inline struct crypto_skcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) { - return crypto_ablkcipher_crt(geniv)->base; + return crypto_spawn_tfm2(&spawn->base); } -static inline int skcipher_enqueue_givcrypt( - struct crypto_queue *queue, struct skcipher_givcrypt_request *request) +static inline struct crypto_skcipher *crypto_spawn_skcipher2( + struct crypto_skcipher_spawn *spawn) { - return ablkcipher_enqueue_request(queue, &request->creq); + return crypto_spawn_skcipher(spawn); } -static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( - struct crypto_queue *queue) +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) { - return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); + skcipher->reqsize = reqsize; } -static inline void *skcipher_givcrypt_reqctx( - struct skcipher_givcrypt_request *req) -{ - return ablkcipher_request_ctx(&req->creq); -} +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void 
crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
+int skcipher_register_instance(struct crypto_template *tmpl,
+			       struct skcipher_instance *inst);
 
 static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
					       int err)
@@ -96,12 +124,6 @@ static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
 	req->base.complete(&req->base, err);
 }
 
-static inline void skcipher_givcrypt_complete(
-	struct skcipher_givcrypt_request *req, int err)
-{
-	ablkcipher_request_complete(&req->creq, err);
-}
-
 static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
 {
 	return req->base.flags;
@@ -122,5 +144,31 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
 	return req->base.flags;
 }
 
+static inline unsigned int crypto_skcipher_alg_min_keysize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blkcipher.min_keysize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_ablkcipher.min_keysize;
+
+	return alg->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_max_keysize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blkcipher.max_keysize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_ablkcipher.max_keysize;
+
+	return alg->max_keysize;
+}
+
 #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
new file mode 100644
index 000000000000..30791f75c180
--- /dev/null
+++ b/include/crypto/kpp.h
@@ -0,0 +1,330 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_KPP_
+#define _CRYPTO_KPP_
+#include <linux/crypto.h>
+
+/**
+ * struct kpp_request
+ *
+ * @base:	Common attributes for async crypto requests
+ * @src:	Source data
+ * @dst:	Destination data
+ * @src_len:	Size of the input buffer
+ * @dst_len:	Size of the output buffer. It needs to be at least
+ *		as big as the expected result, depending on the operation.
+ *		After the operation it will be updated with the actual size
+ *		of the result. In case of error where the dst sgl size was
+ *		insufficient, it will be updated to the size required for
+ *		the operation.
+ * @__ctx:	Start of private context data
+ */
+struct kpp_request {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int src_len;
+	unsigned int dst_len;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_kpp - user-instantiated object which encapsulates
+ * algorithms and core processing logic
+ *
+ * @base:	Common crypto API algorithm data structure
+ */
+struct crypto_kpp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct kpp_alg - generic key-agreement protocol primitives
+ *
+ * @set_secret:		Function invokes the protocol specific function to
+ *			store the secret private key along with parameters.
+ *			The implementation knows how to decode this buffer.
+ * @generate_public_key: Function generates the public key to be sent to the
+ *			counterpart. In case of error, where output is not
+ *			big enough, req->dst_len will be updated to the size
+ *			required.
+ * @compute_shared_secret: Function computes the shared secret as defined by
+ *			the algorithm. The result is given back to the user.
+ *			In case of error, where output is not big enough,
+ *			req->dst_len will be updated to the size required.
+ * @max_size:		Function returns the size of the output buffer
+ * @init:		Initialize the object. This is called only once at
+ *			instantiation time, in case the cryptographic hardware
+ *			needs to be initialized. A software fallback should be
+ *			put in place here.
+ * @exit:		Undo everything @init did.
+ *
+ * @reqsize:		Request context size required by algorithm
+ *			implementation
+ * @base:		Common crypto API algorithm data structure
+ */
+struct kpp_alg {
+	int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
+			  unsigned int len);
+	int (*generate_public_key)(struct kpp_request *req);
+	int (*compute_shared_secret)(struct kpp_request *req);
+
+	int (*max_size)(struct crypto_kpp *tfm);
+
+	int (*init)(struct crypto_kpp *tfm);
+	void (*exit)(struct crypto_kpp *tfm);
+
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Key-agreement Protocol Primitives API
+ *
+ * The KPP API is used with the algorithm type
+ * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
+ */
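Before the individual helpers, a minimal, hypothetical sketch of the consumer-side flow they enable: allocate a transform, feed it a packed secret (for example the ECDH blob from the crypto/ecdh.h sketch above), and ask it for the public part. A synchronous implementation is assumed (no completion callback is set, and -EINPROGRESS is not handled); example_kpp_public_key and all buffers are illustrative, not part of this patch.

/*
 * Illustrative only: consumer-side use of the KPP helpers declared below.
 * The output buffer must live in linear (non-stack) kernel memory.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/kpp.h>

static int example_kpp_public_key(void *packed_secret, unsigned int secret_len,
				  u8 *pub, unsigned int pub_len)
{
	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh", 0, 0);
	struct kpp_request *req;
	struct scatterlist dst;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_kpp_set_secret(tfm, packed_secret, secret_len);
	if (err)
		goto out;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&dst, pub, pub_len);
	kpp_request_set_input(req, NULL, 0);	/* no input when generating */
	kpp_request_set_output(req, &dst, pub_len);
	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
out:
	crypto_free_kpp(tfm);
	return err;
}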
+
+/**
+ * crypto_alloc_kpp() - allocate KPP tfm handle
+ * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for the kpp algorithm. The returned struct crypto_kpp
+ * is required for any following API invocation.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case of
+ * an error, PTR_ERR() returns the error code.
+ */
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct kpp_alg, base);
+}
+
+static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_kpp, base);
+}
+
+static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
+{
+	return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
+{
+	return crypto_kpp_alg(tfm)->reqsize;
+}
+
+static inline void kpp_request_set_tfm(struct kpp_request *req,
+				       struct crypto_kpp *tfm)
+{
+	req->base.tfm = crypto_kpp_tfm(tfm);
+}
+
+static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
+{
+	return __crypto_kpp_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_kpp() - free KPP tfm handle
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ */
+static inline void crypto_free_kpp(struct crypto_kpp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm));
+}
+
+/**
+ * kpp_request_alloc() - allocates kpp request
+ *
+ * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ * @gfp: allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kzfree(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, + CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the len of the secret, include the header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given alg. + * + * @tfm: tfm handle + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer, + unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->set_secret(tfm, buffer, len); +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->generate_public_key(req); +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm. 
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->compute_shared_secret(req); +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * Return: minimum len for output buffer or error code if key hasn't been set + */ +static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index c23ee1f7ee80..4a53c0d38cd2 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h @@ -39,7 +39,7 @@ struct mcryptd_instance_ctx { }; struct mcryptd_hash_ctx { - struct crypto_shash *child; + struct crypto_ahash *child; struct mcryptd_alg_state *alg_state; }; @@ -59,13 +59,13 @@ struct mcryptd_hash_request_ctx { struct crypto_hash_walk walk; u8 *out; int flag; - struct shash_desc desc; + struct ahash_request areq; }; struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask); -struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); -struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); +struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); +struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); void mcryptd_free_ahash(struct mcryptd_ahash *tfm); void mcryptd_flusher(struct work_struct *work); diff --git a/include/crypto/null.h b/include/crypto/null.h index 06dc30d9f56e..3f0c59fb0a61 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -8,7 +8,17 @@ #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 -struct crypto_blkcipher *crypto_get_default_null_skcipher(void); +struct crypto_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); +static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) +{ + return crypto_get_default_null_skcipher(); +} + +static inline void crypto_put_default_null_skcipher2(void) +{ + crypto_put_default_null_skcipher(); +} + #endif diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 35f99b68d037..880e6be9e95e 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -16,14 +16,10 @@ #ifndef _CRYPTO_SCATTERWALK_H #define _CRYPTO_SCATTERWALK_H -#include <asm/kmap_types.h> #include <crypto/algapi.h> -#include <linux/hardirq.h> #include <linux/highmem.h> #include <linux/kernel.h> -#include <linux/mm.h> #include <linux/scatterlist.h> -#include <linux/sched.h> static inline void scatterwalk_crypto_chain(struct scatterlist *head, struct scatterlist *sg, @@ -83,17 +79,53 @@ static inline void scatterwalk_unmap(void *vaddr) kunmap_atomic(vaddr); } -void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) 
+ ((walk->offset - 1) >> PAGE_SHIFT); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. + */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); void *scatterwalk_map(struct scatter_walk *walk); -void scatterwalk_done(struct scatter_walk *walk, int out, int more); void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out); -int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); - struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len); diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000000..f4c9f68f5ffe --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,29 @@ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + +#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int md_len; + unsigned int rsiz; + unsigned int rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +#endif diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 0f987f50bb52..cc4d98a7892e 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -65,86 +65,80 @@ struct crypto_skcipher { struct crypto_tfm base; }; -#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ - char __##name##_desc[sizeof(struct skcipher_request) + \ - crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ - struct skcipher_request *name = (void *)__##name##_desc - -static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( - struct skcipher_givcrypt_request *req) -{ - return crypto_ablkcipher_reqtfm(&req->creq); -} +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. 
Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @base: Definition of a generic crypto algorithm. + * + * All fields except @ivsize are mandatory and must be filled. 
+ */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); -static inline int crypto_skcipher_givencrypt( - struct skcipher_givcrypt_request *req) -{ - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); - return crt->givencrypt(req); -}; + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; -static inline int crypto_skcipher_givdecrypt( - struct skcipher_givcrypt_request *req) -{ - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); - return crt->givdecrypt(req); + struct crypto_alg base; }; -static inline void skcipher_givcrypt_set_tfm( - struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) -{ - req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); -} - -static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast( - struct crypto_async_request *req) -{ - return container_of(ablkcipher_request_cast(req), - struct skcipher_givcrypt_request, creq); -} - -static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc( - struct crypto_ablkcipher *tfm, gfp_t gfp) -{ - struct skcipher_givcrypt_request *req; - - req = kmalloc(sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(tfm), gfp); - - if (likely(req)) - skcipher_givcrypt_set_tfm(req, tfm); - - return req; -} - -static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) -{ - kfree(req); -} - -static inline void skcipher_givcrypt_set_callback( - struct skcipher_givcrypt_request *req, u32 flags, - crypto_completion_t compl, void *data) -{ - ablkcipher_request_set_callback(&req->creq, flags, compl, data); -} - -static inline void skcipher_givcrypt_set_crypt( - struct skcipher_givcrypt_request *req, - struct scatterlist *src, struct scatterlist *dst, - unsigned int nbytes, void *iv) -{ - ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); -} - -static inline void skcipher_givcrypt_set_giv( - struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) -{ - req->giv = giv; - req->seq = seq; -} +#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ + char __##name##_desc[sizeof(struct skcipher_request) + \ + crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ + struct skcipher_request *name = (void *)__##name##_desc /** * DOC: Symmetric Key Cipher API @@ -231,12 +225,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type, crypto_skcipher_mask(mask)); } +/** + * crypto_has_skcipher2() - Search for the availability of an skcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_skcipher_driver_name( struct crypto_skcipher *tfm) { return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + + return alg->ivsize; +} + /** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle @@ -251,6 +276,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) return tfm->ivsize; } +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
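+ *
+ * For example, building on the CTR case above: a CTR transform over a
+ * cipher with a 16-byte block reports a block size of one byte but a
+ * chunk size of 16 bytes, so a caller feeding a stream incrementally
+ * would bound each partial step by the chunk size (editorial sketch;
+ * assumes the chunk size is a power of two, as round_down() requires):
+ *
+ *	nbytes = round_down(nbytes, crypto_skcipher_chunksize(tfm));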
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 288fac5294f5..cc63aef07249 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -208,7 +208,6 @@ void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); -void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@ -232,12 +231,26 @@ int acpi_table_parse_madt(enum acpi_madt_type id, int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); -/* the following four functions are architecture-dependent */ +/* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); -void acpi_numa_arch_fixup(void); #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; @@ -444,8 +457,12 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 +#define OSC_SB_CPCV2_SUPPORT 0x00000040 +#define OSC_SB_PCLPI_SUPPORT 0x00000080 +#define OSC_SB_OSLPI_SUPPORT 0x00000100 extern bool osc_sb_apei_support_acked; +extern bool osc_pc_lpi_support_confirmed; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 @@ -532,6 +549,24 @@ void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 @@ -588,7 +623,6 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return NULL; } -static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } @@ -678,6 +712,24 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) #define ACPI_PTR(_ptr) (NULL) +static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void 
acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI @@ -997,4 +1049,10 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif +#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 52f3b7da4f2d..9d8031257a90 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -26,10 +26,10 @@ enum alarmtimer_restart { * struct alarm - Alarm timer structure * @node: timerqueue node for adding to the event list this value * also includes the expiration time. - * @period: Period for recuring alarms + * @timer: hrtimer used to schedule events while running * @function: Function pointer to be executed when the timer fires. - * @type: Alarm type (BOOTTIME/REALTIME) - * @enabled: Flag that represents if the alarm is set to fire or not + * @type: Alarm type (BOOTTIME/REALTIME). + * @state: Flag that represents if the alarm is set to fire or not. * @data: Internal data value. */ struct alarm { diff --git a/include/linux/ata.h b/include/linux/ata.h index 99346be5a7ca..adbc812c009b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -46,8 +46,9 @@ enum { ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, - ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ + ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ ATA_ID_WORDS = 256, ATA_ID_CONFIG = 0, @@ -409,6 +410,9 @@ enum { SETFEATURES_WC_ON = 0x02, /* Enable write cache */ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */ + SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */ + /* Enable/Disable Automatic Acoustic Management */ SETFEATURES_AAM_ON = 0x42, SETFEATURES_AAM_OFF = 0xC2, @@ -519,16 +523,23 @@ enum { SERR_DEV_XCHG = (1 << 26), /* device exchanged */ }; -enum ata_tf_protocols { - /* ATA taskfile protocols */ - ATA_PROT_UNKNOWN, /* unknown/invalid */ - ATA_PROT_NODATA, /* no data */ - ATA_PROT_PIO, /* PIO data xfer */ - ATA_PROT_DMA, /* DMA */ - ATA_PROT_NCQ, /* NCQ */ - ATAPI_PROT_NODATA, /* packet command, no data */ - ATAPI_PROT_PIO, /* packet command, PIO data xfer*/ - ATAPI_PROT_DMA, /* packet command with special DMA sauce */ +enum ata_prot_flags { + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ + + /* taskfile protocols */ + ATA_PROT_UNKNOWN = (u8)-1, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = ATA_PROT_FLAG_PIO, + ATA_PROT_DMA = ATA_PROT_FLAG_DMA, + ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ, + ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ, + ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI, + ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO, + ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA, }; enum ata_ioctls { @@ -1066,12 +1077,12 @@ static inline void ata_id_to_hd_driveid(u16 *id) * TO NV CACHE PINNED SET. 
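 *
 *	Layout note (editorial, restating the ATA_MAX_TRIM_RNUM comment
 *	added above): each 8-byte entry packs a 48-bit starting LBA in its
 *	low bits and a 16-bit sector count, capped at 0xffff, in bits
 *	48-63, so a single 512-byte TRIM payload holds 512 / 8 == 64
 *	entries, matching ATA_MAX_TRIM_RNUM:
 *
 *		entry = __cpu_to_le64(sector | ((u64)count << 48));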
*/ static inline unsigned ata_set_lba_range_entries(void *_buffer, - unsigned buf_size, u64 sector, unsigned long count) + unsigned num, u64 sector, unsigned long count) { __le64 *buffer = _buffer; unsigned i = 0, used_bytes; - while (i < buf_size / 8 ) { /* 6-byte LBA + 2-byte range per entry */ + while (i < num) { u64 entry = sector | ((u64)(count > 0xffff ? 0xffff : count) << 48); buffer[i++] = __cpu_to_le64(entry); @@ -1095,13 +1106,13 @@ static inline bool ata_ok(u8 status) static inline bool lba_28_ok(u64 block, u32 n_block) { /* check the ending block number: must be LESS THAN 0x0fffffff */ - return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS); } static inline bool lba_48_ok(u64 block, u32 n_block) { /* check the ending block number */ - return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536); + return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48); } #define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) diff --git a/include/linux/atomic.h b/include/linux/atomic.h index e451534fe54d..e71835bf60a9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -163,206 +163,265 @@ #endif #endif /* atomic_dec_return_relaxed */ -/* atomic_xchg_relaxed */ -#ifndef atomic_xchg_relaxed -#define atomic_xchg_relaxed atomic_xchg -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg -#else /* atomic_xchg_relaxed */ +/* atomic_fetch_add_relaxed */ +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add -#ifndef atomic_xchg_acquire -#define atomic_xchg_acquire(...) \ - __atomic_op_acquire(atomic_xchg, __VA_ARGS__) +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +#define atomic_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) #endif -#ifndef atomic_xchg_release -#define atomic_xchg_release(...) \ - __atomic_op_release(atomic_xchg, __VA_ARGS__) +#ifndef atomic_fetch_add_release +#define atomic_fetch_add_release(...) \ + __atomic_op_release(atomic_fetch_add, __VA_ARGS__) #endif -#ifndef atomic_xchg -#define atomic_xchg(...) \ - __atomic_op_fence(atomic_xchg, __VA_ARGS__) +#ifndef atomic_fetch_add +#define atomic_fetch_add(...) \ + __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic_fetch_add_relaxed */ + +/* atomic_fetch_inc_relaxed */ +#ifndef atomic_fetch_inc_relaxed + +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) +#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) +#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) +#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) +#else /* atomic_fetch_inc */ +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +#define atomic_fetch_inc_acquire(...) 
\ + __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) #endif -#endif /* atomic_xchg_relaxed */ -/* atomic_cmpxchg_relaxed */ -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg +#ifndef atomic_fetch_inc_release +#define atomic_fetch_inc_release(...) \ + __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) +#endif -#else /* atomic_cmpxchg_relaxed */ +#ifndef atomic_fetch_inc +#define atomic_fetch_inc(...) \ + __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic_fetch_inc_relaxed */ -#ifndef atomic_cmpxchg_acquire -#define atomic_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) +/* atomic_fetch_sub_relaxed */ +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub + +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +#define atomic_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) #endif -#ifndef atomic_cmpxchg_release -#define atomic_cmpxchg_release(...) \ - __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) +#ifndef atomic_fetch_sub_release +#define atomic_fetch_sub_release(...) \ + __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) #endif -#ifndef atomic_cmpxchg -#define atomic_cmpxchg(...) \ - __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) +#ifndef atomic_fetch_sub +#define atomic_fetch_sub(...) \ + __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic_fetch_sub_relaxed */ + +/* atomic_fetch_dec_relaxed */ +#ifndef atomic_fetch_dec_relaxed + +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) +#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) +#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) +#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) +#else /* atomic_fetch_dec */ +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +#define atomic_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) #endif -#endif /* atomic_cmpxchg_relaxed */ -#ifndef atomic64_read_acquire -#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#ifndef atomic_fetch_dec_release +#define atomic_fetch_dec_release(...) \ + __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) #endif -#ifndef atomic64_set_release -#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#ifndef atomic_fetch_dec +#define atomic_fetch_dec(...) \ + __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) #endif +#endif /* atomic_fetch_dec_relaxed */ -/* atomic64_add_return_relaxed */ -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_relaxed atomic64_add_return -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return +/* atomic_fetch_or_relaxed */ +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or -#else /* atomic64_add_return_relaxed */ +#else /* atomic_fetch_or_relaxed */ -#ifndef atomic64_add_return_acquire -#define atomic64_add_return_acquire(...) 
\ - __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or_acquire +#define atomic_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) #endif -#ifndef atomic64_add_return_release -#define atomic64_add_return_release(...) \ - __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or_release +#define atomic_fetch_or_release(...) \ + __atomic_op_release(atomic_fetch_or, __VA_ARGS__) #endif -#ifndef atomic64_add_return -#define atomic64_add_return(...) \ - __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#ifndef atomic_fetch_or +#define atomic_fetch_or(...) \ + __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) #endif -#endif /* atomic64_add_return_relaxed */ +#endif /* atomic_fetch_or_relaxed */ -/* atomic64_inc_return_relaxed */ -#ifndef atomic64_inc_return_relaxed -#define atomic64_inc_return_relaxed atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return +/* atomic_fetch_and_relaxed */ +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_relaxed atomic_fetch_and +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and -#else /* atomic64_inc_return_relaxed */ +#else /* atomic_fetch_and_relaxed */ -#ifndef atomic64_inc_return_acquire -#define atomic64_inc_return_acquire(...) \ - __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and_acquire +#define atomic_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) #endif -#ifndef atomic64_inc_return_release -#define atomic64_inc_return_release(...) \ - __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and_release +#define atomic_fetch_and_release(...) \ + __atomic_op_release(atomic_fetch_and, __VA_ARGS__) #endif -#ifndef atomic64_inc_return -#define atomic64_inc_return(...) \ - __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#ifndef atomic_fetch_and +#define atomic_fetch_and(...) \ + __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) #endif -#endif /* atomic64_inc_return_relaxed */ - +#endif /* atomic_fetch_and_relaxed */ -/* atomic64_sub_return_relaxed */ -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_relaxed atomic64_sub_return -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return +#ifdef atomic_andnot +/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_fetch_andnot_relaxed +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot -#else /* atomic64_sub_return_relaxed */ +#else /* atomic_fetch_andnot_relaxed */ -#ifndef atomic64_sub_return_acquire -#define atomic64_sub_return_acquire(...) \ - __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot_acquire +#define atomic_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) #endif -#ifndef atomic64_sub_return_release -#define atomic64_sub_return_release(...) \ - __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot_release +#define atomic_fetch_andnot_release(...) \ + __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) #endif -#ifndef atomic64_sub_return -#define atomic64_sub_return(...) \ - __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(...) 
\ + __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) #endif -#endif /* atomic64_sub_return_relaxed */ +#endif /* atomic_fetch_andnot_relaxed */ +#endif /* atomic_andnot */ -/* atomic64_dec_return_relaxed */ -#ifndef atomic64_dec_return_relaxed -#define atomic64_dec_return_relaxed atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return +/* atomic_fetch_xor_relaxed */ +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor -#else /* atomic64_dec_return_relaxed */ +#else /* atomic_fetch_xor_relaxed */ -#ifndef atomic64_dec_return_acquire -#define atomic64_dec_return_acquire(...) \ - __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor_acquire +#define atomic_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) #endif -#ifndef atomic64_dec_return_release -#define atomic64_dec_return_release(...) \ - __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor_release +#define atomic_fetch_xor_release(...) \ + __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) #endif -#ifndef atomic64_dec_return -#define atomic64_dec_return(...) \ - __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#ifndef atomic_fetch_xor +#define atomic_fetch_xor(...) \ + __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) #endif -#endif /* atomic64_dec_return_relaxed */ +#endif /* atomic_fetch_xor_relaxed */ -/* atomic64_xchg_relaxed */ -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_relaxed atomic64_xchg -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg -#else /* atomic64_xchg_relaxed */ +/* atomic_xchg_relaxed */ +#ifndef atomic_xchg_relaxed +#define atomic_xchg_relaxed atomic_xchg +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg -#ifndef atomic64_xchg_acquire -#define atomic64_xchg_acquire(...) \ - __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +#define atomic_xchg_acquire(...) \ + __atomic_op_acquire(atomic_xchg, __VA_ARGS__) #endif -#ifndef atomic64_xchg_release -#define atomic64_xchg_release(...) \ - __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#ifndef atomic_xchg_release +#define atomic_xchg_release(...) \ + __atomic_op_release(atomic_xchg, __VA_ARGS__) #endif -#ifndef atomic64_xchg -#define atomic64_xchg(...) \ - __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#ifndef atomic_xchg +#define atomic_xchg(...) \ + __atomic_op_fence(atomic_xchg, __VA_ARGS__) #endif -#endif /* atomic64_xchg_relaxed */ +#endif /* atomic_xchg_relaxed */ -/* atomic64_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg +/* atomic_cmpxchg_relaxed */ +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg -#else /* atomic64_cmpxchg_relaxed */ +#else /* atomic_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_acquire -#define atomic64_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg_acquire +#define atomic_cmpxchg_acquire(...) 
\ + __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) #endif -#ifndef atomic64_cmpxchg_release -#define atomic64_cmpxchg_release(...) \ - __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg_release +#define atomic_cmpxchg_release(...) \ + __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) #endif -#ifndef atomic64_cmpxchg -#define atomic64_cmpxchg(...) \ - __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(...) \ + __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) #endif -#endif /* atomic64_cmpxchg_relaxed */ +#endif /* atomic_cmpxchg_relaxed */ /* cmpxchg_relaxed */ #ifndef cmpxchg_relaxed @@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); } -#endif -static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +static inline int atomic_fetch_andnot(int i, atomic_t *v) { - atomic_andnot(mask, v); + return atomic_fetch_and(~i, v); } -static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - atomic_or(mask, v); + return atomic_fetch_and_relaxed(~i, v); } +static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} + +static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#endif + /** * atomic_inc_not_zero_hint - increment if not null * @v: pointer of type atomic_t @@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -/** - * atomic_fetch_or - perform *p |= mask and return old value of *p - * @mask: mask to OR on the atomic_t - * @p: pointer to atomic_t - */ -#ifndef atomic_fetch_or -static inline int atomic_fetch_or(int mask, atomic_t *p) -{ - int old, val = atomic_read(p); +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif - for (;;) { - old = atomic_cmpxchg(p, val, val | mask); - if (old == val) - break; - val = old; - } +#ifndef atomic64_read_acquire +#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif - return old; -} +#ifndef atomic64_set_release +#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) #endif -#ifdef CONFIG_GENERIC_ATOMIC64 -#include <asm-generic/atomic64.h> +/* atomic64_add_return_relaxed */ +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_relaxed atomic64_add_return +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return + +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +#define atomic64_add_return_acquire(...) \ + __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_release +#define atomic64_add_return_release(...) \ + __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return +#define atomic64_add_return(...) \ + __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#endif +#endif /* atomic64_add_return_relaxed */ + +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) 
\ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + +/* atomic64_sub_return_relaxed */ +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return + +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +#define atomic64_sub_return_acquire(...) \ + __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) #endif +#ifndef atomic64_sub_return_release +#define atomic64_sub_return_release(...) \ + __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return +#define atomic64_sub_return(...) \ + __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#endif +#endif /* atomic64_sub_return_relaxed */ + +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) \ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + + +/* atomic64_fetch_add_relaxed */ +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add + +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire +#define atomic64_fetch_add_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add_release +#define atomic64_fetch_add_release(...) \ + __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_add +#define atomic64_fetch_add(...) \ + __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_add_relaxed */ + +/* atomic64_fetch_inc_relaxed */ +#ifndef atomic64_fetch_inc_relaxed + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) +#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) +#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) +#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) +#else /* atomic64_fetch_inc */ +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +#define atomic64_fetch_inc_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc_release +#define atomic64_fetch_inc_release(...) 
\ + __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_inc +#define atomic64_fetch_inc(...) \ + __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_inc_relaxed */ + +/* atomic64_fetch_sub_relaxed */ +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub + +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +#define atomic64_fetch_sub_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub_release +#define atomic64_fetch_sub_release(...) \ + __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_sub +#define atomic64_fetch_sub(...) \ + __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_sub_relaxed */ + +/* atomic64_fetch_dec_relaxed */ +#ifndef atomic64_fetch_dec_relaxed + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) +#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) +#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) +#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) +#else /* atomic64_fetch_dec */ +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +#define atomic64_fetch_dec_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec_release +#define atomic64_fetch_dec_release(...) \ + __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_dec +#define atomic64_fetch_dec(...) \ + __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_dec_relaxed */ + +/* atomic64_fetch_or_relaxed */ +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or + +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +#define atomic64_fetch_or_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or_release +#define atomic64_fetch_or_release(...) \ + __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_or +#define atomic64_fetch_or(...) \ + __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_or_relaxed */ + +/* atomic64_fetch_and_relaxed */ +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and + +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +#define atomic64_fetch_and_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and_release +#define atomic64_fetch_and_release(...) \ + __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_and +#define atomic64_fetch_and(...) 
\ + __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_and_relaxed */ + +#ifdef atomic64_andnot +/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_fetch_andnot_relaxed +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +#define atomic64_fetch_andnot_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot_release +#define atomic64_fetch_andnot_release(...) \ + __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(...) \ + __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_andnot_relaxed */ +#endif /* atomic64_andnot */ + +/* atomic64_fetch_xor_relaxed */ +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor + +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +#define atomic64_fetch_xor_acquire(...) \ + __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor_release +#define atomic64_fetch_xor_release(...) \ + __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) +#endif + +#ifndef atomic64_fetch_xor +#define atomic64_fetch_xor(...) \ + __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) +#endif +#endif /* atomic64_fetch_xor_relaxed */ + + +/* atomic64_xchg_relaxed */ +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_relaxed atomic64_xchg +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg + +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +#define atomic64_xchg_acquire(...) \ + __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_release +#define atomic64_xchg_release(...) \ + __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg +#define atomic64_xchg(...) \ + __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#endif +#endif /* atomic64_xchg_relaxed */ + +/* atomic64_cmpxchg_relaxed */ +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg + +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +#define atomic64_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_release +#define atomic64_cmpxchg_release(...) \ + __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg +#define atomic64_cmpxchg(...) 
\ + __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic64_cmpxchg_relaxed */ + #ifndef atomic64_andnot static inline void atomic64_andnot(long long i, atomic64_t *v) { atomic64_and(~i, v); } + +static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} + +static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} + +static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} + +static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} #endif #include <asm-generic/atomic-long.h> diff --git a/include/linux/bio.h b/include/linux/bio.h index 9faebf7f9a33..583c10810e32 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -41,44 +41,9 @@ #endif #define BIO_MAX_PAGES 256 -#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT) -#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) -/* - * upper 16 bits of bi_rw define the io priority of this bio - */ -#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) -#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) -#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) - -#define bio_set_prio(bio, prio) do { \ - WARN_ON(prio >= (1 << IOPRIO_BITS)); \ - (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ - (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ -} while (0) - -/* - * various member access, note that bio_data should of course not be used - * on highmem page vectors - */ -#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) - -#define bvec_iter_page(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_page) - -#define bvec_iter_len(bvec, iter) \ - min((iter).bi_size, \ - __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) - -#define bvec_iter_offset(bvec, iter) \ - (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) - -#define bvec_iter_bvec(bvec, iter) \ -((struct bio_vec) { \ - .bv_page = bvec_iter_page((bvec), (iter)), \ - .bv_len = bvec_iter_len((bvec), (iter)), \ - .bv_offset = bvec_iter_offset((bvec), (iter)), \ -}) +#define bio_prio(bio) (bio)->bi_ioprio +#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) @@ -106,18 +71,23 @@ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && - !(bio->bi_rw & REQ_DISCARD)) + bio_op(bio) != REQ_OP_DISCARD) return true; return false; } +static inline bool bio_no_advance_iter(struct bio *bio) +{ + return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; +} + static inline bool bio_is_rw(struct bio *bio) { if (!bio_has_data(bio)) return false; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) return false; return true; @@ -193,39 +163,12 @@ static inline void *bio_data(struct bio *bio) #define bio_for_each_segment_all(bvl, bio, i) \ for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) -static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, - unsigned bytes) -{ - WARN_ONCE(bytes > iter->bi_size, - "Attempted to advance past end of bvec iter\n"); - - while (bytes) { - unsigned len = min(bytes, bvec_iter_len(bv, *iter)); - - bytes -= len; - iter->bi_size -= len; - iter->bi_bvec_done += len; - - if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { - 
iter->bi_bvec_done = 0; - iter->bi_idx++; - } - } -} - -#define for_each_bvec(bvl, bio_vec, iter, start) \ - for (iter = (start); \ - (iter).bi_size && \ - ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) - - static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned bytes) { iter->bi_sector += bytes >> 9; - if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) + if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, iter, bytes); @@ -253,10 +196,10 @@ static inline unsigned bio_segments(struct bio *bio) * differently: */ - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) return 1; - if (bio->bi_rw & REQ_WRITE_SAME) + if (bio_op(bio) == REQ_OP_WRITE_SAME) return 1; bio_for_each_segment(bv, bio, iter) @@ -473,7 +416,7 @@ static inline void bio_io_error(struct bio *bio) struct request_queue; extern int bio_phys_segments(struct request_queue *, struct bio *); -extern int submit_bio_wait(int rw, struct bio *bio); +extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); extern void bio_init(struct bio *); @@ -720,8 +663,6 @@ static inline void bio_inc_remaining(struct bio *bio) * and the bvec_slabs[]. */ #define BIO_POOL_SIZE 2 -#define BIOVEC_NR_POOLS 6 -#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { struct kmem_cache *bio_slab; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e9b0b9ab07e5..27bfc0b631a9 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -267,6 +267,10 @@ static inline int bitmap_equal(const unsigned long *src1, { if (small_const_nbits(nbits)) return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); +#ifdef CONFIG_S390 + else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) + return !memcmp(src1, src2, nbits / 8); +#endif else return __bitmap_equal(src1, src2, nbits); } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index c02e669945e9..f77150a4a96a 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) /** * blkg_rwstat_add - add a value to a blkg_rwstat * @rwstat: target blkg_rwstat - * @rw: mask of REQ_{WRITE|SYNC} + * @op: REQ_OP + * @op_flags: rq_flag_bits * @val: value to add * * Add @val to @rwstat. The counters are chosen according to @rw. The * caller is responsible for synchronizing calls to this function. 
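 *
 * A usage sketch, mirroring the blkcg_bio_issue_check() call sites
 * updated later in this header (not a new call site):
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
 *			bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);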
*/ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, - int rw, uint64_t val) + int op, int op_flags, uint64_t val) { struct percpu_counter *cnt; - if (rw & REQ_WRITE) + if (op_is_write(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); - if (rw & REQ_SYNC) + if (op_flags & REQ_SYNC) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; @@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, + blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); + blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2498fdf3a503..e43bbffb5b7a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -96,6 +96,7 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int, unsigned int, unsigned int); typedef void (exit_request_fn)(void *, struct request *, unsigned int, unsigned int); +typedef int (reinit_request_fn)(void *, struct request *); typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); @@ -145,6 +146,7 @@ struct blk_mq_ops { */ init_request_fn *init_request; exit_request_fn *exit_request; + reinit_request_fn *reinit_request; }; enum { @@ -196,6 +198,8 @@ enum { struct request *blk_mq_alloc_request(struct request_queue *q, int rw, unsigned int flags); +struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op, + unsigned int flags, unsigned int hctx_idx); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags); @@ -243,6 +247,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 77e5d81f07aa..f254eb264924 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -6,6 +6,7 @@ #define __LINUX_BLK_TYPES_H #include <linux/types.h> +#include <linux/bvec.h> struct bio_set; struct bio; @@ -17,28 +18,7 @@ struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); typedef void (bio_destructor_t) (struct bio *); -/* - * was unsigned short, but we might as well be ready for > 64kB I/O pages - */ -struct bio_vec { - struct page *bv_page; - unsigned int bv_len; - unsigned int bv_offset; -}; - #ifdef CONFIG_BLOCK - -struct bvec_iter { - sector_t bi_sector; /* device address in 512 byte - sectors */ - unsigned int bi_size; /* residual I/O count */ - - unsigned int bi_idx; /* current index into bvl_vec */ - - unsigned int bi_bvec_done; /* number of bytes completed in - current bvec */ -}; - /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -46,11 +26,12 @@ struct bvec_iter { struct bio { struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; - unsigned int bi_flags; /* status, command, etc */ int bi_error; - unsigned long bi_rw; /* bottom bits READ/WRITE, - 
* top bits priority + unsigned int bi_rw; /* bottom bits req flags, + * top bits REQ_OP */ + unsigned short bi_flags; /* status, command, etc */ + unsigned short bi_ioprio; struct bvec_iter bi_iter; @@ -107,6 +88,16 @@ struct bio { struct bio_vec bi_inline_vecs[0]; }; +#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) +#define bio_op(bio) ((bio)->bi_rw >> BIO_OP_SHIFT) + +#define bio_set_op_attrs(bio, op, op_flags) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \ + (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \ + (bio)->bi_rw |= op_flags; \ +} while (0) + #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* @@ -123,19 +114,25 @@ struct bio { /* * Flags starting here get preserved by bio_reset() - this includes - * BIO_POOL_IDX() + * BVEC_POOL_IDX() + */ +#define BIO_RESET_BITS 10 + +/* + * We support 6 different bvec pools, the last one is magic in that it + * is backed by a mempool. */ -#define BIO_RESET_BITS 13 -#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */ +#define BVEC_POOL_NR 6 +#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) /* - * top 4 bits of bio flags indicate the pool this bio came from + * Top 4 bits of bio flags indicate the pool the bvecs came from. We add + * 1 to the actual index so that 0 indicates that there are no bvecs to be + * freed. */ -#define BIO_POOL_BITS (4) -#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) -#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS) -#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) -#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) +#define BVEC_POOL_BITS (4) +#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) +#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) #endif /* CONFIG_BLOCK */ @@ -145,7 +142,6 @@ struct bio { */ enum rq_flag_bits { /* common flags */ - __REQ_WRITE, /* not set, read. 
set, write */ __REQ_FAILFAST_DEV, /* no driver retries of device errors */ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ @@ -153,14 +149,11 @@ enum rq_flag_bits { __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ - __REQ_DISCARD, /* request to discard sectors */ - __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ - __REQ_WRITE_SAME, /* write same block many times */ __REQ_NOIDLE, /* don't anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ - __REQ_FLUSH, /* request for cache flush */ + __REQ_PREFLUSH, /* request for cache flush */ /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ @@ -191,31 +184,25 @@ enum rq_flag_bits { __REQ_NR_BITS, /* stops here */ }; -#define REQ_WRITE (1ULL << __REQ_WRITE) #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) -#define REQ_DISCARD (1ULL << __REQ_DISCARD) -#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_COMMON_MASK \ - (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ - REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ - REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE) + (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ + REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) #define REQ_CLONE_MASK REQ_COMMON_MASK -#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) - /* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) @@ -233,15 +220,25 @@ enum rq_flag_bits { #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) -#define REQ_FLUSH (1ULL << __REQ_FLUSH) +#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) -#define REQ_SECURE (1ULL << __REQ_SECURE) #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +enum req_op { + REQ_OP_READ, + REQ_OP_WRITE, + REQ_OP_DISCARD, /* request to discard sectors */ + REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ + REQ_OP_WRITE_SAME, /* write same block many times */ + REQ_OP_FLUSH, /* request for cache flush */ +}; + +#define REQ_OP_BITS 3 + typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U #define BLK_QC_T_SHIFT 16 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3d9cf326574f..c96db9c22d10 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -90,18 +90,17 @@ struct request { struct list_head queuelist; union { struct 
call_single_data csd; - unsigned long fifo_time; + u64 fifo_time; }; struct request_queue *q; struct blk_mq_ctx *mq_ctx; - u64 cmd_flags; + int cpu; unsigned cmd_type; + u64 cmd_flags; unsigned long atomic_flags; - int cpu; - /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ @@ -200,6 +199,20 @@ struct request { struct request *next_rq; }; +#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) +#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) + +#define req_set_op(req, op) do { \ + WARN_ON(op >= (1 << REQ_OP_BITS)); \ + (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ + (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ +} while (0) + +#define req_set_op_attrs(req, op, flags) do { \ + req_set_op(req, op); \ + (req)->cmd_flags |= flags; \ +} while (0) + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -483,7 +496,7 @@ struct request_queue { #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ -#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ +#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ @@ -492,6 +505,7 @@ struct request_queue { #define QUEUE_FLAG_WC 23 /* Write back caching */ #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ +#define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -579,8 +593,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) -#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ - test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) +#define blk_queue_secure_erase(q) \ + (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) +#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? 
WRITE : READ) /* * Driver can handle struct request, if it either has an old style @@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) /* * We regard a request as sync, if either a read or a sync write */ -static inline bool rw_is_sync(unsigned int rw_flags) +static inline bool rw_is_sync(int op, unsigned int rw_flags) { - return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); + return op == REQ_OP_READ || (rw_flags & REQ_SYNC); } static inline bool rq_is_sync(struct request *rq) { - return rw_is_sync(rq->cmd_flags); + return rw_is_sync(req_op(rq), rq->cmd_flags); } static inline bool blk_rl_full(struct request_list *rl, bool sync) @@ -652,22 +667,10 @@ static inline bool rq_mergeable(struct request *rq) if (rq->cmd_type != REQ_TYPE_FS) return false; - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + if (req_op(rq) == REQ_OP_FLUSH) return false; - return true; -} - -static inline bool blk_check_merge_flags(unsigned int flags1, - unsigned int flags2) -{ - if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) - return false; - - if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) - return false; - - if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; return true; @@ -786,8 +789,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); -extern struct request *blk_make_request(struct request_queue *, struct bio *, - gfp_t); extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, @@ -800,6 +801,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, extern void blk_rq_unprep_clone(struct request *rq); extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_delay_queue(struct request_queue *, unsigned long); extern void blk_queue_split(struct request_queue *, struct bio **, struct bio_set *); @@ -879,12 +881,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, - unsigned int cmd_flags) + int op) { - if (unlikely(cmd_flags & REQ_DISCARD)) + if (unlikely(op == REQ_OP_DISCARD)) return min(q->limits.max_discard_sectors, UINT_MAX >> 9); - if (unlikely(cmd_flags & REQ_WRITE_SAME)) + if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; return q->limits.max_sectors; @@ -904,18 +906,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, (offset & (q->limits.chunk_sectors - 1)); } -static inline unsigned int blk_rq_get_max_sectors(struct request *rq) +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) { struct request_queue *q = rq->q; if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; - if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) - return blk_queue_get_max_sectors(q, rq->cmd_flags); + if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) + return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, blk_rq_pos(rq)), - blk_queue_get_max_sectors(q, 
rq->cmd_flags)); + return min(blk_max_size_offset(q, offset), + blk_queue_get_max_sectors(q, req_op(rq))); } static inline unsigned int blk_rq_count_bios(struct request *rq) @@ -1135,13 +1138,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, return bqt->tag_index[tag]; } -#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ + +#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ +#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop); + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0f3172b8b225..cceb72f9e29f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) } extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); +extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d48daa3f6f20..ebbacd14d450 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); void __lock_buffer(struct buffer_head *bh); -void ll_rw_block(int, int, struct buffer_head * bh[]); +void ll_rw_block(int, int, int, struct buffer_head * bh[]); int sync_dirty_buffer(struct buffer_head *bh); -int __sync_dirty_buffer(struct buffer_head *bh, int rw); -void write_dirty_buffer(struct buffer_head *bh, int rw); -int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags); -int submit_bh(int, struct buffer_head *); +int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); +void write_dirty_buffer(struct buffer_head *bh, int op_flags); +int _submit_bh(int op, int op_flags, struct buffer_head *bh, + unsigned long bio_flags); +int submit_bh(int, int, struct buffer_head *); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize); int bh_uptodate_or_lock(struct buffer_head *bh); @@ -208,6 +209,9 @@ void block_invalidatepage(struct page *page, unsigned int offset, unsigned int length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +int __block_write_full_page(struct inode *inode, struct page *page, + get_block_t *get_block, struct writeback_control *wbc, + bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 000000000000..701b64a3b7c5 --- /dev/null +++ b/include/linux/bvec.h @@ -0,0 +1,96 @@ +/* + * bvec iterator + * + * Copyright (C) 2001 Ming Lei 
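The blkdev.h hunks above move the operation out of the request's flag bits: the op now lives in the top REQ_OP_BITS of cmd_flags and must be read back with req_op() rather than by masking. A minimal consumer sketch, assuming the REQ_OP_* constants from blk_types.h (not shown in this hunk) and hypothetical do_rw()/do_discard() helpers:

#include <linux/blkdev.h>

/* do_rw() and do_discard() are hypothetical driver helpers. */
static int do_rw(struct request *rq, int dir);
static int do_discard(struct request *rq);

static int hypothetical_handle_rq(struct request *rq)
{
	switch (req_op(rq)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		/* rq_data_dir() now derives READ/WRITE from the op bits */
		return do_rw(rq, rq_data_dir(rq));
	case REQ_OP_DISCARD:
		return do_discard(rq);
	default:
		return -EIO;
	}
}

Producers use req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC) instead of or-ing a REQ_WRITE bit into cmd_flags, and the buffer_head.h changes follow the same op/flags split, e.g. submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh) where callers previously passed a single rw argument.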
<ming.lei@canonical.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + */ +#ifndef __LINUX_BVEC_ITER_H +#define __LINUX_BVEC_ITER_H + +#include <linux/kernel.h> +#include <linux/bug.h> + +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bvec_iter { + sector_t bi_sector; /* device address in 512 byte + sectors */ + unsigned int bi_size; /* residual I/O count */ + + unsigned int bi_idx; /* current index into bvl_vec */ + + unsigned int bi_bvec_done; /* number of bytes completed in + current bvec */ +}; + +/* + * various member access, note that bio_data should of course not be used + * on highmem page vectors + */ +#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) + +#define bvec_iter_page(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_page) + +#define bvec_iter_len(bvec, iter) \ + min((iter).bi_size, \ + __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) + +#define bvec_iter_offset(bvec, iter) \ + (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) + +#define bvec_iter_bvec(bvec, iter) \ +((struct bio_vec) { \ + .bv_page = bvec_iter_page((bvec), (iter)), \ + .bv_len = bvec_iter_len((bvec), (iter)), \ + .bv_offset = bvec_iter_offset((bvec), (iter)), \ +}) + +static inline void bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, + unsigned bytes) +{ + WARN_ONCE(bytes > iter->bi_size, + "Attempted to advance past end of bvec iter\n"); + + while (bytes) { + unsigned len = min(bytes, bvec_iter_len(bv, *iter)); + + bytes -= len; + iter->bi_size -= len; + iter->bi_bvec_done += len; + + if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { + iter->bi_bvec_done = 0; + iter->bi_idx++; + } + } +} + +#define for_each_bvec(bvl, bio_vec, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ + bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + +#endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 0df4a51e1a78..834179f3fa72 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk) return NULL; } +static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) +{ + return NULL; +} #endif /* clk_prepare_enable helps cases using clk_enable in non-atomic context. 
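The point of the new bvec.h header is that the iterator is self-contained: callers can walk a bare bio_vec array without pulling in bio.h or the rest of the block layer. A sketch under that assumption; nr_bytes must not exceed the bytes the array actually describes, and the designated initializer zeroes bi_idx and bi_bvec_done:

#include <linux/bvec.h>

static unsigned int count_bvec_segments(const struct bio_vec *bvecs,
					unsigned int nr_bytes)
{
	struct bvec_iter start = { .bi_size = nr_bytes };
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int segs = 0;

	/* bvec_iter_bvec() materializes one bounded segment per pass */
	for_each_bvec(bv, bvecs, iter, start)
		segs++;

	return segs;
}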
*/ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 44a1aff22566..08398182f56e 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *, extern int clocksource_i8253_init(void); #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - OF_DECLARE_1(clksrc, name, compat, fn) + OF_DECLARE_1_RET(clksrc, name, compat, fn) #ifdef CONFIG_CLKSRC_PROBE extern void clocksource_probe(void); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 793c0829e3a3..2e853b679a5d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -304,23 +304,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering - * @cond: boolean expression to wait for - * - * Equivalent to using smp_load_acquire() on the condition variable but employs - * the control dependency of the wait to reduce the barrier on many platforms. - * - * The control dependency provides a LOAD->STORE order, the additional RMB - * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, - * aka. ACQUIRE. - */ -#define smp_cond_acquire(cond) do { \ - while (!(cond)) \ - cpu_relax(); \ - smp_rmb(); /* ctrl + rmb := acquire */ \ -} while (0) - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -545,10 +528,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * Similar to rcu_dereference(), but for situations where the pointed-to * object's lifetime is managed by something other than RCU. That * "something other" might be reference counting or simple immortality. + * + * The seemingly unused void * variable is to validate @p is indeed a pointer + * type. All pointer types silently cast to void *. */ #define lockless_dereference(p) \ ({ \ typeof(p) _________p1 = READ_ONCE(p); \ + __maybe_unused const void * const _________p2 = _________p1; \ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ (_________p1); \ }) diff --git a/include/linux/console.h b/include/linux/console.h index 98c8615dc300..d530c4627e54 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -28,6 +28,13 @@ struct tty_struct; #define VT100ID "\033[?1;2c" #define VT102ID "\033[?6c" +/** + * struct consw - callbacks for consoles + * + * @con_set_palette: sets the palette of the console to @table (optional) + * @con_scrolldelta: the contents of the console should be scrolled by @lines. + * Invoked by user. 
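The extra _________p2 line added to lockless_dereference() is purely a compile-time check: every pointer type converts implicitly to const void *, while integers do not, so passing a non-pointer now fails the build instead of silently "working". Roughly:

struct foo { int val; };
struct foo *shared_foo;	/* published elsewhere, e.g. via smp_store_release() */

static int read_shared_val(void)
{
	/* Fine: a pointer converts silently to const void *. */
	struct foo *f = lockless_dereference(shared_foo);

	return f ? f->val : 0;
}

/*
 * lockless_dereference(jiffies), by contrast, is now rejected at compile
 * time: an unsigned long has no implicit conversion to const void *.
 */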
(optional) + */ struct consw { struct module *owner; const char *(*con_startup)(void); @@ -38,7 +45,6 @@ struct consw { void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); void (*con_cursor)(struct vc_data *, int); int (*con_scroll)(struct vc_data *, int, int, int, int); - void (*con_bmove)(struct vc_data *, int, int, int, int, int, int); int (*con_switch)(struct vc_data *); int (*con_blank)(struct vc_data *, int, int); int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); @@ -47,8 +53,9 @@ struct consw { int (*con_font_copy)(struct vc_data *, int); int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); - int (*con_set_palette)(struct vc_data *, const unsigned char *); - int (*con_scrolldelta)(struct vc_data *, int); + void (*con_set_palette)(struct vc_data *, + const unsigned char *table); + void (*con_scrolldelta)(struct vc_data *, int lines); int (*con_set_origin)(struct vc_data *); void (*con_save_screen)(struct vc_data *); u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index e329ee2667e1..6fd3c908a340 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,38 @@ struct uni_pagedir; #define NPAR 16 +/* + * Example: vc_data of a console that was scrolled 3 lines down. + * + * Console buffer + * vc_screenbuf ---------> +----------------------+-. + * | initializing W | \ + * | initializing X | | + * | initializing Y | > scroll-back area + * | initializing Z | | + * | | / + * vc_visible_origin ---> ^+----------------------+-: + * (changes by scroll) || Welcome to linux | \ + * || | | + * vc_rows --->< | login: root | | visible on console + * || password: | > (vc_screenbuf_size is + * vc_origin -----------> || | | vc_size_row * vc_rows) + * (start when no scroll) || Last login: 12:28 | / + * v+----------------------+-: + * | Have a lot of fun... | \ + * vc_pos -----------------|--------v | > scroll-front area + * | ~ # cat_ | / + * vc_scr_end -----------> +----------------------+-: + * (vc_origin + | | \ EMPTY, to be filled by + * vc_screenbuf_size) | | / vc_video_erase_char + * +----------------------+-' + * <---- 2 * vc_cols -----> + * <---- vc_size_row -----> + * + * Note that every character in the console buffer is accompanied with an + * attribute in the buffer right after the character. This is not depicted + * in the figure. + */ struct vc_data { struct tty_port port; /* Upper level data */ @@ -74,7 +106,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ /* attribute flags */ unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ unsigned int vc_italic:1; @@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work); #define CUR_DEFAULT CUR_UNDERLINE -#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp) +static inline bool con_is_visible(const struct vc_data *vc) +{ + return *vc->vc_display_fg == vc; +} #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d259274238db..d9aef2a0ec8e 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -31,6 +31,19 @@ static inline void user_exit(void) context_tracking_exit(CONTEXT_USER); } +/* Called with interrupts disabled. 
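Turning the CON_IS_VISIBLE macro into the con_is_visible() inline gains type checking of the vc_data pointer. A typical use in a console driver's callback, with a hypothetical drawing helper standing in for real hardware access:

/* hypothetical_draw_cursor() stands in for real hardware access. */
static void hypothetical_draw_cursor(struct vc_data *vc, int mode);

static void hypothetical_con_cursor(struct vc_data *vc, int mode)
{
	/* Only the foreground console owns the screen; background vcs bail. */
	if (!con_is_visible(vc))
		return;

	hypothetical_draw_cursor(vc, mode);
}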
*/ +static inline void user_enter_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_enter(CONTEXT_USER); + +} +static inline void user_exit_irqoff(void) +{ + if (context_tracking_is_enabled()) + __context_tracking_exit(CONTEXT_USER); +} + static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void) #else static inline void user_enter(void) { } static inline void user_exit(void) { } +static inline void user_enter_irqoff(void) { } +static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 4e81e08db752..631ba33bbe9f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -36,6 +36,12 @@ struct cpufreq_governor; +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED, + CPUFREQ_TABLE_SORTED_ASCENDING, + CPUFREQ_TABLE_SORTED_DESCENDING +}; + struct cpufreq_freqs { unsigned int cpu; /* cpu nr */ unsigned int old; @@ -87,6 +93,7 @@ struct cpufreq_policy { struct cpufreq_user_policy user_policy; struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; struct list_head policy_list; struct kobject kobj; @@ -113,6 +120,10 @@ struct cpufreq_policy { bool fast_switch_possible; bool fast_switch_enabled; + /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ + unsigned int cached_target_freq; + int cached_resolved_idx; + /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; @@ -185,6 +196,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) static inline void disable_cpufreq(void) { } #endif +#ifdef CONFIG_CPU_FREQ_STAT +void cpufreq_stats_create_table(struct cpufreq_policy *policy); +void cpufreq_stats_free_table(struct cpufreq_policy *policy); +void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq); +#else +static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { } +static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq) { } +#endif /* CONFIG_CPU_FREQ_STAT */ + /********************************************************************* * CPUFREQ DRIVER INTERFACE * *********************************************************************/ @@ -251,6 +274,16 @@ struct cpufreq_driver { unsigned int index); unsigned int (*fast_switch)(struct cpufreq_policy *policy, unsigned int target_freq); + + /* + * Caches and returns the lowest driver-supported frequency greater than + * or equal to the target frequency, subject to any driver limitations. + * Does not set the frequency. Only to be implemented for drivers with + * target(). + */ + unsigned int (*resolve_freq)(struct cpufreq_policy *policy, + unsigned int target_freq); + /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. 
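The new _irqoff context-tracking helpers are for callers that already run with interrupts disabled, typically an architecture's entry/exit code, where the irq save/restore performed by plain user_enter()/user_exit() is pure overhead. A hedged sketch of an arch exit path; the mirror-image user_exit_irqoff() goes at kernel entry:

/* Hypothetical arch return-to-user path; interrupts are already off. */
static void hypothetical_prepare_exit_to_usermode(void)
{
	/* ... finish pending work with IRQs disabled ... */

	/* Cheaper than user_enter(): no redundant irq_save/irq_restore. */
	user_enter_irqoff();
}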
@@ -455,18 +488,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define MIN_LATENCY_MULTIPLIER (20) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) -/* Governor Events */ -#define CPUFREQ_GOV_START 1 -#define CPUFREQ_GOV_STOP 2 -#define CPUFREQ_GOV_LIMITS 3 -#define CPUFREQ_GOV_POLICY_INIT 4 -#define CPUFREQ_GOV_POLICY_EXIT 5 - struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; - int initialized; - int (*governor) (struct cpufreq_policy *policy, - unsigned int event); + int (*init)(struct cpufreq_policy *policy); + void (*exit)(struct cpufreq_policy *policy); + int (*start)(struct cpufreq_policy *policy); + void (*stop)(struct cpufreq_policy *policy); + void (*limits)(struct cpufreq_policy *policy); ssize_t (*show_setspeed) (struct cpufreq_policy *policy, char *buf); int (*store_setspeed) (struct cpufreq_policy *policy, @@ -487,12 +515,22 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) +{ + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); +} + /* Governor attribute set */ struct gov_attr_set { struct kobject kobj; @@ -582,11 +620,9 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); -int cpufreq_frequency_table_target(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table, - unsigned int target_freq, - unsigned int relation, - unsigned int *index); +int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq); @@ -597,6 +633,227 @@ int cpufreq_boost_trigger_state(int state); int cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); + +/* Find lowest freq at or above target in a table in ascending order */ +static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq >= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Find lowest freq at or above target in a table in descending order */ +static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if 
(best == -1) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_al(policy, target_freq); + else + return cpufreq_table_find_index_dl(policy, target_freq); +} + +/* Find highest freq at or below target in a table in ascending order */ +static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + return best; + } + + return best; +} + +/* Find highest freq at or below target in a table in descending order */ +static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq <= target_freq) + return i; + + best = i; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ah(policy, target_freq); + else + return cpufreq_table_find_index_dh(policy, target_freq); +} + +/* Find closest freq to target in a table in ascending order */ +static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq < target_freq) { + best = i; + continue; + } + + /* No freq found below target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return i; + + return best; + } + + return best; +} + +/* Find closest freq to target in a table in descending order */ +static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + unsigned int freq; + int i, best = -1; + + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + freq = table[i].frequency; + + if (freq == target_freq) + return i; + + if (freq > target_freq) { + best = i; + continue; + } + + /* No freq found above target_freq */ + if (best == -1) + return i; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return i; + + return best; + } + + return best; +} + +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + + 
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_index_ac(policy, target_freq); + else + return cpufreq_table_find_index_dc(policy, target_freq); +} + +static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) + return cpufreq_table_index_unsorted(policy, target_freq, + relation); + + switch (relation) { + case CPUFREQ_RELATION_L: + return cpufreq_table_find_index_l(policy, target_freq); + case CPUFREQ_RELATION_H: + return cpufreq_table_find_index_h(policy, target_freq); + case CPUFREQ_RELATION_C: + return cpufreq_table_find_index_c(policy, target_freq); + default: + pr_err("%s: Invalid relation: %d\n", __func__, relation); + return -EINVAL; + } +} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -617,8 +874,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } #endif -/* the following funtion is for cpufreq core use only */ -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 07b83d32f66c..bb31373c3478 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -252,4 +252,22 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) #define CPUIDLE_DRIVER_STATE_START 0 #endif +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ +({ \ + int __ret; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ +}) + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 6e28c895c376..7cee5551625b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -47,16 +47,18 @@ #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 -#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 -#define CRYPTO_ALG_TYPE_HASH 0x00000008 -#define CRYPTO_ALG_TYPE_SHASH 0x00000009 -#define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d +#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e +#define CRYPTO_ALG_TYPE_HASH 0x0000000e +#define CRYPTO_ALG_TYPE_SHASH 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 @@ -486,8 +488,6 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); struct crypto_ablkcipher *base; @@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask) * state information is unused by the kernel crypto API. 
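With the sorted-table helpers above, cpufreq_frequency_table_target() shrinks to a relation-based dispatch that returns an index directly instead of filling an out-parameter as the old variant did. A sketch of a driver ->target() callback built on it, assuming a hypothetical write_freq_register() helper:

/* write_freq_register() is a hypothetical register-poking helper. */
static int write_freq_register(unsigned int freq);

static int hypothetical_target(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int relation)
{
	int index = cpufreq_frequency_table_target(policy, target_freq,
						   relation);

	if (index < 0)
		return index;	/* -EINVAL for an unknown relation */

	return write_freq_register(policy->freq_table[index].frequency);
}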
*/ -/** - * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * ablkcipher cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for an ablkcipher. The returned struct - * crypto_ablkcipher is the cipher handle that is required for any subsequent - * API invocation for that ablkcipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask); - static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 0830c9e86f0d..b0db857f334b 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -19,6 +19,15 @@ struct dm_table; struct mapped_device; struct bio_vec; +/* + * Type of table, mapped_device's mempool and request_queue + */ +#define DM_TYPE_NONE 0 +#define DM_TYPE_BIO_BASED 1 +#define DM_TYPE_REQUEST_BASED 2 +#define DM_TYPE_MQ_REQUEST_BASED 3 +#define DM_TYPE_DAX_BIO_BASED 4 + typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { @@ -116,6 +125,14 @@ typedef void (*dm_io_hints_fn) (struct dm_target *ti, */ typedef int (*dm_busy_fn) (struct dm_target *ti); +/* + * Returns: + * < 0 : error + * >= 0 : the number of bytes accessible at the address + */ +typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector, + void __pmem **kaddr, pfn_t *pfn, long size); + void dm_error(const char *message); struct dm_dev { @@ -162,6 +179,7 @@ struct target_type { dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; + dm_direct_access_fn direct_access; /* For internal device-mapper use. */ struct list_head list; @@ -444,6 +462,14 @@ int dm_table_add_target(struct dm_table *t, const char *type, void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); /* + * Target can use this to set the table's type. + * Can only ever be called from a target's ctr. + * Useful for "hybrid" target (supports both bio-based + * and request-based). + */ +void dm_table_set_type(struct dm_table *t, unsigned type); + +/* * Finally call this to make the table ready for use. 
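dm_table_set_type() lets a "hybrid" target pin the table type from its constructor, and direct_access is the new per-target hook behind DAX-capable devices. Illustrative stubs only; none of these names come from the patch:

static int hypothetical_ctr(struct dm_target *ti, unsigned int argc,
			    char **argv)
{
	/* Must be decided in the ctr; the type is fixed once the table loads. */
	dm_table_set_type(ti->table, DM_TYPE_BIO_BASED);
	return 0;
}

static long hypothetical_direct_access(struct dm_target *ti, sector_t sector,
				       void __pmem **kaddr, pfn_t *pfn,
				       long size)
{
	/* < 0 on error, otherwise the number of bytes accessible at *kaddr */
	return -EIO;
}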
*/ int dm_table_complete(struct dm_table *t); diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a68cbe59e6ad..b91b023deffb 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h @@ -57,7 +57,8 @@ struct dm_io_notify { */ struct dm_io_client; struct dm_io_request { - int bi_rw; /* READ|WRITE - not READA */ + int bi_op; /* REQ_OP */ + int bi_op_flags; /* rq_flag_bits */ struct dm_io_memory mem; /* Memory to use for io */ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ struct dm_io_client *client; /* Client memory handler */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 79df69dc629c..aaff68efba5d 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -39,14 +39,22 @@ struct hsu_dma_chip { #if IS_ENABLED(CONFIG_HSU_DMA) /* Export to the internal users */ -irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status); +irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); int hsu_dma_remove(struct hsu_dma_chip *chip); #else -static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, - unsigned short nr) +static inline int hsu_dma_get_status(struct hsu_dma_chip *chip, + unsigned short nr, u32 *status) +{ + return 0; +} +static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, + unsigned short nr, u32 status) { return IRQ_NONE; } diff --git a/include/linux/drbd.h b/include/linux/drbd.h index d6b3c9943a2c..002611c85318 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -51,7 +51,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.6" +#define REL_VERSION "8.4.7" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 @@ -370,6 +370,14 @@ enum drbd_notification_type { NOTIFY_FLAGS = NOTIFY_CONTINUES, }; +enum drbd_peer_state { + P_INCONSISTENT = 3, + P_OUTDATED = 4, + P_DOWN = 5, + P_PRIMARY = 6, + P_FENCING = 7, +}; + #define UUID_JUST_CREATED ((__u64)4) enum write_ordering_e { diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index 2d0e5ad5de9d..c934d3a96b5e 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -123,15 +123,16 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf, __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF) __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF) __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF) + __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) + __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) + __u32_field_def(25, 0 /* OPTIONAL */, rs_discard_granularity, DRBD_RS_DISCARD_GRANULARITY_DEF) __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF) __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF) __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) - __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF) - __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF) - /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */ __flg_field_def(23, 0 /* OPTIONAL */, al_updates, 
DRBD_AL_UPDATES_DEF) + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED) ) GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts, diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 8ac8c5d9a3ad..ddac68422a96 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -126,8 +126,7 @@ #define DRBD_RESYNC_RATE_DEF 250 #define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */ - /* less than 7 would hit performance unnecessarily. */ -#define DRBD_AL_EXTENTS_MIN 7 +#define DRBD_AL_EXTENTS_MIN 67 /* we use u16 as "slot number", (u16)~0 is "FREE". * If you use >= 292 kB on-disk ring buffer, * this is the maximum you can use: */ @@ -210,6 +209,12 @@ #define DRBD_MD_FLUSHES_DEF 1 #define DRBD_TCP_CORK_DEF 1 #define DRBD_AL_UPDATES_DEF 1 +/* We used to ignore the discard_zeroes_data setting. + * To not change established (and expected) behaviour, + * by default assume that, for discard_zeroes_data=0, + * we can make that an effective discard_zeroes_data=1, + * if we only explicitly zero-out unaligned partial chunks. */ +#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 #define DRBD_ALWAYS_ASBP_DEF 0 @@ -230,4 +235,10 @@ #define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX #define DRBD_SOCKET_CHECK_TIMEO_DEF 0 #define DRBD_SOCKET_CHECK_TIMEO_SCALE '1' + +#define DRBD_RS_DISCARD_GRANULARITY_MIN 0 +#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */ +#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */ +#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */ + #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index f196dd0b0f2f..7f80a75ee9e3 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -536,116 +536,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes, void efi_native_runtime_setup(void); /* - * EFI Configuration Table and GUID definitions + * EFI Configuration Table and GUID definitions + * + * These are all defined in a single line to make them easier to + * grep for and to see them at a glance - while still having a + * similar structure to the definitions in the spec. + * + * Here's how they are structured: + * + * GUID: 12345678-1234-1234-1234-123456789012 + * Spec: + * #define EFI_SOME_PROTOCOL_GUID \ + * {0x12345678,0x1234,0x1234,\ + * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}} + * Here: + * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12) + * ^ tabs ^extra space + * + * Note that the 'extra space' separates the values at the same place + * where the UEFI SPEC breaks the line. 
*/ -#define NULL_GUID \ - EFI_GUID(0x00000000, 0x0000, 0x0000, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) - -#define MPS_TABLE_GUID \ - EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_TABLE_GUID \ - EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define ACPI_20_TABLE_GUID \ - EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \ - 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) - -#define SMBIOS_TABLE_GUID \ - EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define SMBIOS3_TABLE_GUID \ - EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \ - 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) - -#define SAL_SYSTEM_TABLE_GUID \ - EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \ - 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) - -#define HCDP_TABLE_GUID \ - EFI_GUID(0xf951938d, 0x620b, 0x42ef, \ - 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) - -#define UGA_IO_PROTOCOL_GUID \ - EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \ - 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) - -#define EFI_GLOBAL_VARIABLE_GUID \ - EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \ - 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) - -#define UV_SYSTEM_TABLE_GUID \ - EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \ - 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) - -#define LINUX_EFI_CRASH_GUID \ - EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \ - 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) - -#define LOADED_IMAGE_PROTOCOL_GUID \ - EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \ - 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ - EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \ - 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) - -#define EFI_UGA_PROTOCOL_GUID \ - EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \ - 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) - -#define EFI_PCI_IO_PROTOCOL_GUID \ - EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \ - 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) - -#define EFI_FILE_INFO_ID \ - EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define EFI_SYSTEM_RESOURCE_TABLE_GUID \ - EFI_GUID(0xb122a263, 0x3661, 0x4f68, \ - 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) - -#define EFI_FILE_SYSTEM_GUID \ - EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \ - 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) - -#define DEVICE_TREE_GUID \ - EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \ - 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) - -#define EFI_PROPERTIES_TABLE_GUID \ - EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \ - 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) - -#define EFI_RNG_PROTOCOL_GUID \ - EFI_GUID(0x3152bca5, 0xeade, 0x433d, \ - 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) - -#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \ - EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \ - 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) - -#define EFI_CONSOLE_OUT_DEVICE_GUID \ - EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \ - 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) +#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 
0x3f, 0xc1, 0x4d) +#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) +#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) +#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) +#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) +#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) +#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) +#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) +#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) +#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) +#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) +#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) +#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) +#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) +#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) +#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) +#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) /* * This GUID is used to pass to the kernel proper the struct screen_info * structure that was populated by the stub based on the GOP protocol instance * associated with ConOut */ -#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \ - EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \ - 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) - -#define LINUX_EFI_LOADER_ENTRY_GUID \ - EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \ - 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) +#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) typedef struct { efi_guid_t guid; @@ -975,7 +917,6 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern void efi_get_time(struct timespec *now); extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; @@ -1465,4 +1406,55 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, 
unsigned long size); bool efi_runtime_disabled(void); +extern void efi_call_virt_check_flags(unsigned long flags, const char *call); + +/* + * Arch code can implement the following three template macros, avoiding + * repetition for the void/non-void return cases of {__,}efi_call_virt(): + * + * * arch_efi_call_virt_setup() + * + * Sets up the environment for the call (e.g. switching page tables, + * allowing kernel-mode use of floating point, if required). + * + * * arch_efi_call_virt() + * + * Performs the call. The last expression in the macro must be the call + * itself, allowing the logic to be shared by the void and non-void + * cases. + * + * * arch_efi_call_virt_teardown() + * + * Restores the usual kernel environment once the call has returned. + */ + +#define efi_call_virt_pointer(p, f, args...) \ +({ \ + efi_status_t __s; \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + __s = arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ + \ + __s; \ +}) + +#define __efi_call_virt_pointer(p, f, args...) \ +({ \ + unsigned long __flags; \ + \ + arch_efi_call_virt_setup(); \ + \ + local_save_flags(__flags); \ + arch_efi_call_virt(p, f, args); \ + efi_call_virt_check_flags(__flags, __stringify(f)); \ + \ + arch_efi_call_virt_teardown(); \ +}) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 638b324f0291..e7f358d2e5fc 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); -typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); +typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, + struct request *, struct request *); typedef void (elevator_bio_merged_fn) (struct request_queue *, struct request *, struct bio *); @@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, int); +typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); @@ -46,7 +50,8 @@ struct elevator_ops elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; - elevator_allow_merge_fn *elevator_allow_merge_fn; + elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; + elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; @@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); -extern int elv_may_queue(struct request_queue *, int); +extern int
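The three arch_efi_call_virt* template macros documented above might look as follows for a hypothetical architecture whose only requirement is switching mappings; real implementations (x86, ARM) do considerably more:

/* Both helpers are hypothetical; real arches switch page tables, FPU, etc. */
void hypothetical_switch_to_efi_mappings(void);
void hypothetical_restore_kernel_mappings(void);

#define arch_efi_call_virt_setup()					\
({									\
	preempt_disable();						\
	hypothetical_switch_to_efi_mappings();				\
})

#define arch_efi_call_virt(p, f, args...)				\
	((p)->f(args))	/* the call is the last, value-yielding expression */

#define arch_efi_call_virt_teardown()					\
({									\
	hypothetical_restore_kernel_mappings();				\
	preempt_enable();						\
})

An arch can then define efi_call_virt(f, args...) as efi_call_virt_pointer(efi.systab->runtime, f, args), and efi_call_virt_check_flags() reports any runtime service that returns with the IRQ flags corrupted.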
elv_may_queue(struct request_queue *, int, int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask); @@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); -extern bool elv_rq_merge_ok(struct request *, struct bio *); +extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, struct elevator_type *); diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 7abf674c388c..61004413dc64 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -126,42 +126,6 @@ struct extcon_dev { struct device_attribute *d_attrs_muex; }; -/** - * struct extcon_cable - An internal data for each cable of extcon device. - * @edev: The extcon device - * @cable_index: Index of this cable in the edev - * @attr_g: Attribute group for the cable - * @attr_name: "name" sysfs entry - * @attr_state: "state" sysfs entry - * @attrs: Array pointing to attr_name and attr_state for attr_g - */ -struct extcon_cable { - struct extcon_dev *edev; - int cable_index; - - struct attribute_group attr_g; - struct device_attribute attr_name; - struct device_attribute attr_state; - - struct attribute *attrs[3]; /* to be fed to attr_g.attrs */ -}; - -/** - * struct extcon_specific_cable_nb - An internal data for - * extcon_register_interest(). - * @user_nb: user provided notifier block for events from - * a specific cable. - * @cable_index: the target cable. - * @edev: the target extcon device. - * @previous_value: the saved previous event value. - */ -struct extcon_specific_cable_nb { - struct notifier_block *user_nb; - int cable_index; - struct extcon_dev *edev; - unsigned long previous_value; -}; - #if IS_ENABLED(CONFIG_EXTCON) /* @@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state); /* * get/set_cable_state access each bit of the 32b encoded state value. - * They are used to access the status of each cable based on the cable_name. + * They are used to access the status of each cable based on the cable id. */ extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id); extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, bool cable_state); -extern int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name); -extern int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, bool cable_state); - -/* - * Following APIs are for notifiees (those who want to be notified) - * to register a callback for events from a specific cable of the extcon. - * Notifiees are the connected device drivers wanting to get notified by - * a specific external port of a connection device. - */ -extern int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb); -extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb); - /* * Following APIs are to monitor every action of a notifier. * Registrar gets notified for every external port of a connection device. 
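The extcon_register_interest()/extcon_unregister_interest() externs removed above survive only as deprecated inline stubs; new consumers use the id-based notifiers instead, including the devm_ variants declared just below, which tie the registration to the client device's lifetime. A sketch of a consumer probe(), where my_nb and the phandle index 0 are assumptions:

#include <linux/extcon.h>
#include <linux/platform_device.h>

static struct notifier_block my_nb;	/* .notifier_call set elsewhere */

static int hypothetical_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(&pdev->dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* Unregistered automatically when the driver unbinds. */
	return devm_extcon_register_notifier(&pdev->dev, edev, EXTCON_USB,
					     &my_nb);
}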
@@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); +extern int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); +extern void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb); /* * Following API get the extcon device from devicetree. @@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, /* Following API to get information of extcon device */ extern const char *extcon_get_edev_name(struct extcon_dev *edev); + #else /* CONFIG_EXTCON */ static inline int extcon_dev_register(struct extcon_dev *edev) { @@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev, return 0; } -static inline int extcon_get_cable_state(struct extcon_dev *edev, - const char *cable_name) -{ - return 0; -} - -static inline int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, int state) -{ - return 0; -} - static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { return NULL; @@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev, return 0; } -static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, - const char *cable_name, - struct notifier_block *nb) +static inline int devm_extcon_register_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { - return 0; + return -ENOSYS; } -static inline int extcon_unregister_interest(struct extcon_specific_cable_nb - *obj) -{ - return 0; -} +static inline void devm_extcon_unregister_notifier(struct device *dev, + struct extcon_dev *edev, unsigned int id, + struct notifier_block *nb) { } static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) @@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, return ERR_PTR(-ENODEV); } #endif /* CONFIG_EXTCON */ + +/* + * The following structure and APIs are deprecated. Their definitions are + * kept only so that existing users continue to build. + */ +struct extcon_specific_cable_nb { + struct notifier_block *user_nb; + int cable_index; + struct extcon_dev *edev; + unsigned long previous_value; +}; + +static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, + const char *extcon_name, const char *cable_name, + struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int extcon_unregister_interest(struct extcon_specific_cable_nb + *obj) +{ + return -EINVAL; +} #endif /* __LINUX_EXTCON_H__ */ diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index 53c60806bcfb..ac85f2061351 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h @@ -53,6 +53,7 @@ struct adc_jack_cond { * milli-seconds after the interrupt occurs. You may * describe such delays with @handling_delay_ms, which * is rounded-off by jiffies. + * @wakeup_source: flag to wake up the system for extcon events.
*/ struct adc_jack_pdata { const char *name; @@ -65,6 +66,7 @@ struct adc_jack_pdata { unsigned long irq_flags; unsigned long handling_delay_ms; /* in ms */ + bool wakeup_source; }; #endif /* _EXTCON_ADC_JACK_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h index 2056e9fd0138..1de1b3f6fb76 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -81,8 +81,6 @@ struct fence { unsigned long flags; ktime_t timestamp; int status; - struct list_head child_list; - struct list_head active_list; }; enum fence_flag_bits { diff --git a/include/linux/fs.h b/include/linux/fs.h index dd288148a6b1..dc488662ce0b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -152,9 +152,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define CHECK_IOVEC_ONLY -1 /* - * The below are the various read and write types that we support. Some of + * The below are the various read and write flags that we support. Some of * them include behavioral modifiers that send information down to the - * block layer and IO scheduler. Terminology: + * block layer and IO scheduler. They should be used along with a req_op. + * Terminology: * * The block layer uses device plugging to defer IO a little bit, in * the hope that we will see more IO very shortly. This increases @@ -177,9 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * READ_SYNC A synchronous read. Device is not plugged, caller can * immediately wait on this read without caring about * unplugging. - * READA Used for read-ahead operations. Lower priority, and the - * block layer could (in theory) choose to ignore this - * request if it runs into resource problems. * WRITE A normal async write. Device will be plugged. * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down * the hint that someone will be waiting on this IO @@ -193,19 +191,17 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * non-volatile media on completion. * */ -#define RW_MASK REQ_WRITE -#define RWA_MASK REQ_RAHEAD +#define RW_MASK REQ_OP_WRITE -#define READ 0 -#define WRITE RW_MASK -#define READA RWA_MASK +#define READ REQ_OP_READ +#define WRITE REQ_OP_WRITE -#define READ_SYNC (READ | REQ_SYNC) -#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) -#define WRITE_ODIRECT (WRITE | REQ_SYNC) -#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) -#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) -#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) +#define READ_SYNC REQ_SYNC +#define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) +#define WRITE_ODIRECT REQ_SYNC +#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) +#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) +#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) /* * Attribute flags. These should be or-ed together to figure out what @@ -2464,15 +2460,18 @@ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK -/* - * return READ, READA, or WRITE - */ -#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) +static inline bool op_is_write(unsigned int op) +{ + return op == REQ_OP_READ ? false : true; +} /* * return data direction, READ or WRITE */ -#define bio_data_dir(bio) ((bio)->bi_rw & 1) +static inline int bio_data_dir(struct bio *bio) +{ + return op_is_write(bio_op(bio)) ? 
WRITE : READ; +} extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); @@ -2747,7 +2746,7 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK -extern blk_qc_t submit_bio(int, struct bio *); +extern blk_qc_t submit_bio(struct bio *); extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); @@ -2802,7 +2801,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); #ifdef CONFIG_BLOCK -typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, +typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, loff_t file_offset); enum { diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 359a8e4bd44d..1dbf52f9c24b 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -205,7 +205,6 @@ struct gendisk { void *private_data; int flags; - struct device *driverfs_dev; // FIXME: remove struct kobject *slave_dir; struct timer_rand_state *random; @@ -414,7 +413,12 @@ static inline void free_part_info(struct hd_struct *part) extern void part_round_stats(int cpu, struct hd_struct *part); /* block/genhd.c */ -extern void add_disk(struct gendisk *disk); +extern void device_add_disk(struct device *parent, struct gendisk *disk); +static inline void add_disk(struct gendisk *disk) +{ + device_add_disk(NULL, disk); +} + extern void del_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 99403b19092f..228bd44efa4c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -223,6 +223,7 @@ struct st_sensor_settings { * @get_irq_data_ready: Function to get the IRQ used for data ready signal. * @tf: Transfer function structure used by I/O operations. * @tb: Transfer buffers and mutex used by I/O operations. + * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. 
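The genhd change inverts the old two-step registration: rather than poking the parent into disk->driverfs_dev before calling add_disk(), the parent is passed at registration time, with add_disk() kept as a wrapper for the parentless case. For a driver with a platform device parent the conversion is roughly:

static void hypothetical_register_disk(struct platform_device *pdev,
				       struct gendisk *disk)
{
	/*
	 * Before this patch:
	 *	disk->driverfs_dev = &pdev->dev;
	 *	add_disk(disk);
	 */
	device_add_disk(&pdev->dev, disk);
}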
*/ @@ -250,14 +251,13 @@ struct st_sensor_data { const struct st_sensor_transfer_function *tf; struct st_sensor_transfer_buffer tb; + bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; }; #ifdef CONFIG_IIO_BUFFER irqreturn_t st_sensors_trigger_handler(int irq, void *p); - -int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); #endif #ifdef CONFIG_IIO_TRIGGER @@ -287,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); -void st_sensors_power_enable(struct iio_dev *indio_dev); +int st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 7c29cb0124ae..854e2dad1e0d 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -312,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, }, \ } -/** - * iio_get_time_ns() - utility function to get a time stamp for events etc - **/ -static inline s64 iio_get_time_ns(void) -{ - return ktime_get_real_ns(); -} +s64 iio_get_time_ns(const struct iio_dev *indio_dev); +unsigned int iio_get_time_res(const struct iio_dev *indio_dev); /* Device operating modes */ #define INDIO_DIRECT_MODE 0x01 @@ -497,6 +492,7 @@ struct iio_buffer_setup_ops { * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. * @info: [DRIVER] callbacks and constant info from driver + * @clock_id: [INTERN] timestamping clock posix identifier * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable @@ -537,6 +533,7 @@ struct iio_dev { struct attribute_group chan_attr_group; const char *name; const struct iio_info *info; + clockid_t clock_id; struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; @@ -565,7 +562,7 @@ extern struct bus_type iio_bus_type; /** * iio_device_put() - reference counted deallocation of struct device - * @indio_dev: IIO device structure containing the device + * @indio_dev: IIO device structure containing the device **/ static inline void iio_device_put(struct iio_dev *indio_dev) { @@ -574,6 +571,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev) } /** + * iio_device_get_clock() - Retrieve current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + */ +static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) +{ + return indio_dev->clock_id; +} + +/** * dev_to_iio_dev() - Get IIO device struct from a device struct * @dev: The device embedded in the IIO device * diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h new file mode 100644 index 000000000000..23ca41515527 --- /dev/null +++ b/include/linux/iio/sw_device.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software device interface + * + * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
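Two caller-visible IIO changes sit in the hunks above: st_sensors_power_enable() can now fail and must be checked, and iio_get_time_ns() is no longer a bare ktime_get_real_ns() wrapper but takes the device so timestamps follow the POSIX clock recorded in clock_id (readable via iio_device_get_clock()). A hedged driver fragment, with the surrounding probe/event code assumed:

    int err = st_sensors_power_enable(indio_dev);   /* was void, now returns an errno */
    if (err)
            return err;

    /* timestamp with whichever clock userspace selected for this device;
     * event_code is a hypothetical IIO_UNMOD_EVENT_CODE() value */
    iio_push_event(indio_dev, event_code, iio_get_time_ns(indio_dev));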
+ */ + +#ifndef __IIO_SW_DEVICE +#define __IIO_SW_DEVICE + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/configfs.h> + +#define module_iio_sw_device_driver(__iio_sw_device_type) \ + module_driver(__iio_sw_device_type, iio_register_sw_device_type, \ + iio_unregister_sw_device_type) + +struct iio_sw_device_ops; + +struct iio_sw_device_type { + const char *name; + struct module *owner; + const struct iio_sw_device_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_device { + struct iio_dev *device; + struct iio_sw_device_type *device_type; + struct config_group group; +}; + +struct iio_sw_device_ops { + struct iio_sw_device* (*probe)(const char *); + int (*remove)(struct iio_sw_device *); +}; + +static inline +struct iio_sw_device *to_iio_sw_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_device, + group); +} + +int iio_register_sw_device_type(struct iio_sw_device_type *dt); +void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); + +struct iio_sw_device *iio_sw_device_create(const char *, const char *); +void iio_sw_device_destroy(struct iio_sw_device *); + +int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); +void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); + +static inline +void iio_swd_group_init_type_name(struct iio_sw_device *d, + const char *name, + struct config_item_type *type) +{ +#ifdef CONFIG_CONFIGFS_FS + config_group_init_type_name(&d->group, name, type); +#endif +} + +#endif /* __IIO_SW_DEVICE */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9fcabeb07787..b6683f0ffc9f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -278,6 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); +struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); + #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) @@ -308,6 +310,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { return 0; } + +static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) +{ + *nr_vecs = 1; + return NULL; +} #endif /* CONFIG_SMP */ /* diff --git a/include/linux/irq.h b/include/linux/irq.h index 4d758a7c604a..b52424eaa0ed 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -197,6 +197,7 @@ struct irq_data { * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU + * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -212,6 +213,7 @@ enum { IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), IRQD_FORWARDED_TO_VCPU = (1 << 20), + IRQD_AFFINITY_MANAGED = (1 << 21), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } +static inline bool irqd_affinity_is_managed(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) @@ -315,6 +322,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) /** * struct 
irq_chip - hardware interrupt chip descriptor * + * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) @@ -354,6 +362,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @flags: chip specific flags */ struct irq_chip { + struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); @@ -482,12 +491,15 @@ extern void handle_fasteoi_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_untracked_irq(struct irq_desc *desc); extern void handle_percpu_irq(struct irq_desc *desc); extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +extern int irq_chip_pm_get(struct irq_data *data); +extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); @@ -701,11 +713,11 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner); + struct module *owner, const struct cpumask *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ - __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ irq_alloc_descs(-1, 0, 1, node) diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index dc493e0f0ff7..107eed475b94 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -204,6 +204,7 @@ #define GITS_BASER_NR_REGS 8 #define GITS_BASER_VALID (1UL << 63) +#define GITS_BASER_INDIRECT (1UL << 62) #define GITS_BASER_nCnB (0UL << 59) #define GITS_BASER_nC (1UL << 59) #define GITS_BASER_RaWt (2UL << 59) @@ -228,6 +229,7 @@ #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -238,6 +240,8 @@ #define GITS_BASER_TYPE_RESERVED6 6 #define GITS_BASER_TYPE_RESERVED7 7 +#define GITS_LVL1_ENTRY_SIZE (8UL) + /* * ITS commands */ diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index fd051855539b..eafc965b3eb8 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -101,9 +101,14 @@ #include <linux/irqdomain.h> struct device_node; +struct gic_chip_data; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); +void gic_cpu_save(struct gic_chip_data *gic); +void gic_cpu_restore(struct gic_chip_data *gic); +void gic_dist_save(struct gic_chip_data *gic); +void gic_dist_restore(struct gic_chip_data *gic); /* * Subdrivers that need some preparatory work can initialize 
their @@ -112,6 +117,12 @@ int gic_cpu_if_down(unsigned int gic_nr); int gic_of_init(struct device_node *node, struct device_node *parent); /* + * Initialises and registers a non-root or child GIC chip. Memory for + * the gic_chip_data structure is dynamically allocated. + */ +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); + +/* * Legacy platforms not converted to DT yet must use this to init * their GIC */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index f1f36e04d885..ffb84604c1de 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -39,6 +39,7 @@ struct irq_domain; struct of_device_id; struct irq_chip; struct irq_data; +struct cpumask; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -217,7 +218,8 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, - irq_hw_number_t hwirq, int node); + irq_hw_number_t hwirq, int node, + const struct cpumask *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -389,7 +391,7 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc); + bool realloc, const struct cpumask *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern void irq_domain_activate_irq(struct irq_data *irq_data); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); @@ -397,7 +399,8 @@ extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { - return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, + NULL); } extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, @@ -452,6 +455,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, return -1; } +static inline void irq_domain_free_irqs(unsigned int virq, + unsigned int nr_irqs) { } + static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index efb232c5f668..dfaa1f4dcb0c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -491,10 +491,6 @@ struct jbd2_journal_handle unsigned long h_start_jiffies; unsigned int h_requested_credits; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map h_lockdep_map; -#endif }; @@ -793,6 +789,7 @@ jbd2_time_diff(unsigned long start, unsigned long end) * @j_proc_entry: procfs entry for the jbd statistics directory * @j_stats: Overall statistics * @j_private: An opaque pointer to fs-private information. + * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ struct journal_s @@ -1035,8 +1032,26 @@ struct journal_s /* Precomputed journal UUID checksum for seeding other checksums */ __u32 j_csum_seed; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Lockdep entity to track transaction commit dependencies. Handles + * hold this "lock" for read; when we wait for commit, we acquire the + * "lock" for writing.
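(The jbd2_might_wait_for_commit() annotation defined just below turns this convention into something lockdep can check: it takes a momentary write-side acquire/release on j_trans_commit_map, which collides with any handle still holding the map for read. A hypothetical wait-side call site, with journal and tid assumed in scope:)

    /* anything that may block until a transaction commits should be annotated */
    jbd2_might_wait_for_commit(journal);
    jbd2_log_wait_commit(journal, tid);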
This matches the properties of jbd2 journalling + * where the running transaction has to wait for all handles to be + * dropped to commit that transaction and also acquiring a handle may + * require transaction commit to finish. + */ + struct lockdep_map j_trans_commit_map; +#endif }; +#define jbd2_might_wait_for_commit(j) \ + do { \ + rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ + } while (0) + /* journal feature predicate functions */ #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 94aa10ffe156..c42082112ec8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -451,6 +451,7 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; +extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; diff --git a/include/linux/libata.h b/include/linux/libata.h index d15c19e331d1..e37d4f99f510 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -146,13 +146,6 @@ enum { ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ - /* protocol flags */ - ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ - ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ - ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA, - ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ - ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ - /* struct ata_device stuff */ ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ @@ -1039,58 +1032,29 @@ extern const unsigned long sata_deb_timing_long[]; extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; -/* - * protocol tests - */ -static inline unsigned int ata_prot_flags(u8 prot) -{ - switch (prot) { - case ATA_PROT_NODATA: - return 0; - case ATA_PROT_PIO: - return ATA_PROT_FLAG_PIO; - case ATA_PROT_DMA: - return ATA_PROT_FLAG_DMA; - case ATA_PROT_NCQ: - return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ; - case ATAPI_PROT_NODATA: - return ATA_PROT_FLAG_ATAPI; - case ATAPI_PROT_PIO: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO; - case ATAPI_PROT_DMA: - return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA; - } - return 0; -} - -static inline int ata_is_atapi(u8 prot) -{ - return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI; -} - -static inline int ata_is_nodata(u8 prot) +static inline bool ata_is_atapi(u8 prot) { - return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA); + return prot & ATA_PROT_FLAG_ATAPI; } -static inline int ata_is_pio(u8 prot) +static inline bool ata_is_pio(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO; + return prot & ATA_PROT_FLAG_PIO; } -static inline int ata_is_dma(u8 prot) +static inline bool ata_is_dma(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA; + return prot & ATA_PROT_FLAG_DMA; } -static inline int ata_is_ncq(u8 prot) +static inline bool ata_is_ncq(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ; + return prot & ATA_PROT_FLAG_NCQ; } -static inline int ata_is_data(u8 prot) +static inline bool ata_is_data(u8 prot) { - return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA; + return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA); } static inline int is_multi_taskfile(struct ata_taskfile *tf) @@ -1407,7 +1371,7 @@ static inline bool 
sata_pmp_attached(struct ata_port *ap) return ap->nr_pmp_links != 0; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return link == &link->ap->link || link == link->ap->slave_link; } @@ -1422,7 +1386,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) return false; } -static inline int ata_is_host_link(const struct ata_link *link) +static inline bool ata_is_host_link(const struct ata_link *link) { return 1; } diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index ef2c7d2e76c4..ba78b8306674 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,7 +1,9 @@ #ifndef NVM_H #define NVM_H +#include <linux/blkdev.h> #include <linux/types.h> +#include <uapi/linux/lightnvm.h> enum { NVM_IO_OK = 0, @@ -269,24 +271,15 @@ struct nvm_lun { int lun_id; int chnl_id; - /* It is up to the target to mark blocks as closed. If the target does - * not do it, all blocks are marked as open, and nr_open_blocks - * represents the number of blocks in use - */ - unsigned int nr_open_blocks; /* Number of used, writable blocks */ - unsigned int nr_closed_blocks; /* Number of used, read-only blocks */ - unsigned int nr_free_blocks; /* Number of unused blocks */ - unsigned int nr_bad_blocks; /* Number of bad blocks */ - spinlock_t lock; + unsigned int nr_free_blocks; /* Number of unused blocks */ struct nvm_block *blocks; }; enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */ - NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */ + NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; @@ -385,6 +378,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, { struct ppa_addr l; + l.ppa = 0; /* * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. 
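The simplified bool helpers in the libata.h hunk above only work if the ATA_PROT_* opcode values are themselves bit-encoded; the flag bits deleted from this header presumably move next to the protocol definitions. A sketch of the assumed companion encoding (not part of this diff):

    enum {
            ATA_PROT_FLAG_PIO   = (1 << 0),
            ATA_PROT_FLAG_DMA   = (1 << 1),
            ATA_PROT_FLAG_NCQ   = (1 << 2),
            ATA_PROT_FLAG_ATAPI = (1 << 3),

            ATA_PROT_NODATA     = 0,
            ATA_PROT_PIO        = ATA_PROT_FLAG_PIO,
            ATA_PROT_DMA        = ATA_PROT_FLAG_DMA,
            ATA_PROT_NCQ        = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ,
            ATAPI_PROT_PIO      = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO,
    };

With that encoding, ata_is_ncq(prot) reduces to a single AND instead of the old switch-based table lookup.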
*/ @@ -455,6 +449,8 @@ struct nvm_tgt_type { struct list_head list; }; +extern struct nvm_tgt_type *nvm_find_target_type(const char *, int); + extern int nvm_register_tgt_type(struct nvm_tgt_type *); extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); @@ -463,6 +459,9 @@ extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); typedef int (nvmm_register_fn)(struct nvm_dev *); typedef void (nvmm_unregister_fn)(struct nvm_dev *); + +typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); +typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, struct nvm_lun *, unsigned long); typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); @@ -488,9 +487,10 @@ struct nvmm_type { nvmm_register_fn *register_mgr; nvmm_unregister_fn *unregister_mgr; + nvmm_create_tgt_fn *create_tgt; + nvmm_remove_tgt_fn *remove_tgt; + /* Block administration callbacks */ - nvmm_get_blk_fn *get_blk_unlocked; - nvmm_put_blk_fn *put_blk_unlocked; nvmm_get_blk_fn *get_blk; nvmm_put_blk_fn *put_blk; nvmm_open_blk_fn *open_blk; @@ -520,10 +520,6 @@ struct nvmm_type { extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); -extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, - struct nvm_lun *, unsigned long); -extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); - extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, unsigned long); extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); @@ -532,11 +528,13 @@ extern int nvm_register(struct request_queue *, char *, struct nvm_dev_ops *); extern void nvm_unregister(char *); +void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); + extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, - struct ppa_addr *, int, int); + const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); diff --git a/include/linux/list.h b/include/linux/list.h index 5356f4d661a7..5183138aa932 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h) } /* + * Check whether the node is the only node of the head without + * accessing head: + */ +static inline bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} + +/* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. 
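hlist_is_singular_node() answers "is n the only entry?" from the node's own links alone: it compares against the head's address but never loads from the head's memory, which matters when the head lives in a remote cacheline. A hypothetical removal path (obj and bucket are illustrative):

    if (hlist_is_singular_node(&obj->node, head)) {
            /* last entry: the bucket goes idle once we unlink */
            bucket->active = false;         /* hypothetical bookkeeping */
    }
    hlist_del_init(&obj->node);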
 */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ca3e517980a0..917f2b6a0cde 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -594,6 +594,9 @@ struct vm_special_mapping { int (*fault)(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf); + + int (*mremap)(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma); }; enum tlb_flush_reason { diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6e4c645e1c0d..ed84c07f6a51 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -657,4 +657,18 @@ struct ulpi_device_id { kernel_ulong_t driver_data; }; +/** + * struct fsl_mc_device_id - MC object device identifier + * @vendor: vendor ID + * @obj_type: MC object type + * + * Type of entries in the "device Id" table for MC object devices supported by + * an MC object device driver. The last entry of the table has vendor set to 0x0. + */ +struct fsl_mc_device_id { + __u16 vendor; + const char obj_type[16]; +}; + + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3a5abe95affd..1cc5ffb769af 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); -int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); -int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); #define log_mpidump g10_log_mpidump diff --git a/include/linux/msi.h b/include/linux/msi.h index 8b425c66305a..4f0bfe5912b2 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -47,6 +47,7 @@ struct fsl_mc_msi_desc { * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse + * @affinity: Optional pointer to a cpu affinity mask for this descriptor * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X @@ -67,6 +68,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; + const struct cpumask *affinity; union { /* PCI MSI/X specific data */ @@ -264,12 +266,10 @@ enum { * callbacks. */ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), - /* Build identity map between hwirq and irq */ - MSI_FLAG_IDENTITY_MAP = (1 << 2), /* Support multiple PCI MSI interrupts */ - MSI_FLAG_MULTI_PCI_MSI = (1 << 3), + MSI_FLAG_MULTI_PCI_MSI = (1 << 2), /* Support PCI MSIX interrupts */ - MSI_FLAG_PCI_MSIX = (1 << 4), + MSI_FLAG_PCI_MSIX = (1 << 3), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h new file mode 100644 index 000000000000..bf240a3cbf99 --- /dev/null +++ b/include/linux/nvme-rdma.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
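The new ->mremap() hook on vm_special_mapping (in the mm_types.h hunk above) gives a special mapping a callback when userspace moves it; the natural consumer is a vDSO that caches its base address. A sketch, where the context.vdso field name is an assumption (it is arch-specific):

    static int vdso_mremap(const struct vm_special_mapping *sm,
                           struct vm_area_struct *new_vma)
    {
            /* remember the new base so signal delivery keeps working;
             * mm->context.vdso is assumed, not shown in this diff */
            current->mm->context.vdso = (void *)new_vma->vm_start;
            return 0;
    }

    static const struct vm_special_mapping vdso_mapping = {
            .name   = "[vdso]",
            .mremap = vdso_mremap,
    };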
 + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _LINUX_NVME_RDMA_H +#define _LINUX_NVME_RDMA_H + +enum nvme_rdma_cm_fmt { + NVME_RDMA_CM_FMT_1_0 = 0x0, +}; + +enum nvme_rdma_cm_status { + NVME_RDMA_CM_INVALID_LEN = 0x01, + NVME_RDMA_CM_INVALID_RECFMT = 0x02, + NVME_RDMA_CM_INVALID_QID = 0x03, + NVME_RDMA_CM_INVALID_HSQSIZE = 0x04, + NVME_RDMA_CM_INVALID_HRQSIZE = 0x05, + NVME_RDMA_CM_NO_RSC = 0x06, + NVME_RDMA_CM_INVALID_IRD = 0x07, + NVME_RDMA_CM_INVALID_ORD = 0x08, +}; + +/** + * struct nvme_rdma_cm_req - rdma connect request + * + * @recfmt: format of the RDMA Private Data + * @qid: queue Identifier for the Admin or I/O Queue + * @hrqsize: host receive queue size to be created + * @hsqsize: host send queue size to be created + */ +struct nvme_rdma_cm_req { + __le16 recfmt; + __le16 qid; + __le16 hrqsize; + __le16 hsqsize; + u8 rsvd[24]; +}; + +/** + * struct nvme_rdma_cm_rep - rdma connect reply + * + * @recfmt: format of the RDMA Private Data + * @crqsize: controller receive queue size + */ +struct nvme_rdma_cm_rep { + __le16 recfmt; + __le16 crqsize; + u8 rsvd[28]; +}; + +/** + * struct nvme_rdma_cm_rej - rdma connect reject + * + * @recfmt: format of the RDMA Private Data + * @sts: error status for the associated connect request + */ +struct nvme_rdma_cm_rej { + __le16 recfmt; + __le16 sts; +}; + +#endif /* _LINUX_NVME_RDMA_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 7d51b2904cb7..d8b37bab2887 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -16,6 +16,78 @@ #define _LINUX_NVME_H #include <linux/types.h> +#include <linux/uuid.h> + +/* NQN names in command fields are specified at one fixed size */ +#define NVMF_NQN_FIELD_LEN 256 + +/* However, the max length of a qualified name is a smaller size */ +#define NVMF_NQN_SIZE 223 + +#define NVMF_TRSVCID_SIZE 32 +#define NVMF_TRADDR_SIZE 256 +#define NVMF_TSAS_SIZE 256 + +#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" + +#define NVME_RDMA_IP_PORT 4420 + +enum nvme_subsys_type { + NVME_NQN_DISC = 1, /* Discovery type target subsystem */ + NVME_NQN_NVME = 2, /* NVME type target subsystem */ +}; + +/* Address Family codes for Discovery Log Page entry ADRFAM field */ +enum { + NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ + NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */ + NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ + NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ + NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ +}; + +/* Transport Type codes for Discovery Log Page entry TRTYPE field */ +enum { + NVMF_TRTYPE_RDMA = 1, /* RDMA */ + NVMF_TRTYPE_FC = 2, /* Fibre Channel */ + NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ + NVMF_TRTYPE_MAX, +}; + +/* Transport Requirements codes for Discovery Log Page entry TREQ field */ +enum { + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ +}; + +/* RDMA QP Service Type codes for Discovery Log Page entry TSAS + * RDMA_QPTYPE field + */ +enum { + NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ +}; + +/* RDMA Provider Type codes for Discovery Log Page entry TSAS + * RDMA_PRTYPE field + */ +enum { + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE
= 2, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ +}; + +/* RDMA Connection Management Service Type codes for Discovery Log Page + * entry TSAS RDMA_CMS field + */ +enum { + NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based endpoint addressing */ +}; + +#define NVMF_AQ_DEPTH 32 enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ @@ -50,6 +122,13 @@ enum { #define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) #define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) +/* + * Submission and Completion Queue Entry Sizes for the NVM command set. + * (In bytes and specified as a power of two (2^n)). + */ +#define NVME_NVM_IOSQES 6 +#define NVME_NVM_IOCQES 4 + enum { NVME_CC_ENABLE = 1 << 0, NVME_CC_CSS_NVM = 0 << 4, @@ -61,8 +140,8 @@ enum { NVME_CC_SHN_NORMAL = 1 << 14, NVME_CC_SHN_ABRUPT = 2 << 14, NVME_CC_SHN_MASK = 3 << 14, - NVME_CC_IOSQES = 6 << 16, - NVME_CC_IOCQES = 4 << 20, + NVME_CC_IOSQES = NVME_NVM_IOSQES << 16, + NVME_CC_IOCQES = NVME_NVM_IOCQES << 20, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, @@ -107,7 +186,11 @@ struct nvme_id_ctrl { __u8 mdts; __le16 cntlid; __le32 ver; - __u8 rsvd84[172]; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[156]; __le16 oacs; __u8 acl; __u8 aerl; @@ -119,10 +202,12 @@ struct nvme_id_ctrl { __u8 apsta; __le16 wctemp; __le16 cctemp; - __u8 rsvd270[242]; + __u8 rsvd270[50]; + __le16 kas; + __u8 rsvd322[190]; __u8 sqes; __u8 cqes; - __u8 rsvd514[2]; + __le16 maxcmd; __le32 nn; __le16 oncs; __le16 fuses; @@ -135,7 +220,15 @@ struct nvme_id_ctrl { __le16 acwu; __u8 rsvd534[2]; __le32 sgls; - __u8 rsvd540[1508]; + __u8 rsvd540[228]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[244]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; @@ -274,6 +367,12 @@ struct nvme_reservation_status { } regctl_ds[]; }; +enum nvme_async_event_type { + NVME_AER_TYPE_ERROR = 0, + NVME_AER_TYPE_SMART = 1, + NVME_AER_TYPE_NOTICE = 2, +}; + /* I/O commands */ enum nvme_opcode { @@ -290,6 +389,84 @@ enum nvme_opcode { nvme_cmd_resv_release = 0x15, }; +/* + * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier + * + * @NVME_SGL_FMT_ADDRESS: absolute address of the data block + * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block + * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation + * request subtype + */ +enum { + NVME_SGL_FMT_ADDRESS = 0x00, + NVME_SGL_FMT_OFFSET = 0x01, + NVME_SGL_FMT_INVALIDATE = 0x0f, +}; + +/* + * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier + * + * For struct nvme_sgl_desc: + * @NVME_SGL_FMT_DATA_DESC: data block descriptor + * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor + * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor + * + * For struct nvme_keyed_sgl_desc: + * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor + */ +enum { + NVME_SGL_FMT_DATA_DESC = 0x00, + NVME_SGL_FMT_SEG_DESC = 0x02, + NVME_SGL_FMT_LAST_SEG_DESC = 0x03, + NVME_KEY_SGL_FMT_DATA_DESC = 0x04, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +/* + * Lowest two bits of our flags field (FUSE field in the spec):
 + * + * @NVME_CMD_FUSE_FIRST: Fused Operation, first command + * @NVME_CMD_FUSE_SECOND: Fused Operation, second command + * + * Highest two bits in our flags field (PSDT field in the spec): + * + * @NVME_CMD_SGL_METABUF: Use SGLs for this transfer; + * if used, MPTR contains addr of single physical buffer (byte aligned). + * @NVME_CMD_SGL_METASEG: Use SGLs for this transfer; + * if used, MPTR contains an address of an SGL segment containing + * exactly 1 SGL descriptor (qword aligned). + */ +enum { + NVME_CMD_FUSE_FIRST = (1 << 0), + NVME_CMD_FUSE_SECOND = (1 << 1), + + NVME_CMD_SGL_METABUF = (1 << 6), + NVME_CMD_SGL_METASEG = (1 << 7), + NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG, +}; + struct nvme_common_command { __u8 opcode; __u8 flags; @@ -297,8 +474,7 @@ struct nvme_common_command { __le32 nsid; __le32 cdw2[2]; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cdw10[6]; }; @@ -309,8 +485,7 @@ struct nvme_rw_command { __le32 nsid; __u64 rsvd2; __le64 metadata; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; @@ -350,8 +525,7 @@ struct nvme_dsm_cmd { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 nr; __le32 attributes; __u32 rsvd12[4]; @@ -384,6 +558,7 @@ enum nvme_admin_opcode { nvme_admin_async_event = 0x0c, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, + nvme_admin_keep_alive = 0x18, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, @@ -408,6 +583,7 @@ enum { NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_KATO = 0x0f, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, @@ -415,6 +591,7 @@ enum { NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), NVME_FWACT_REPL_ACTV = (1 << 3), @@ -427,8 +604,7 @@ struct nvme_identify { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 cns; __u32 rsvd11[5]; }; @@ -439,8 +615,7 @@ struct nvme_features { __u16 command_id; __le32 nsid; __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 fid; __le32 dword11; __u32 rsvd12[4]; @@ -499,8 +674,7 @@ struct nvme_download_firmware { __u8 flags; __u16 command_id; __u32 rsvd1[5]; - __le64 prp1; - __le64 prp2; + union nvme_data_ptr dptr; __le32 numd; __le32 offset; __u32 rsvd12[4]; @@ -516,6 +690,143 @@ struct nvme_format_cmd { __u32 rsvd11[5]; }; +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 rsvd10; + __le16 numdl; + __le16 numdu; + __u16 rsvd11; + __le32 lpol; + __le32 lpou; + __u32 rsvd14[2]; +}; + +/* + * Fabrics subcommands. + */ +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 0x7f, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0x00, + nvme_fabrics_type_connect = 0x01, + nvme_fabrics_type_property_get = 0x04, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +/* + * The legal cntlid range an NVMe Target will provide. + * Note that cntlid of value 0 is considered illegal in the fabrics world.
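Stepping back to the data-pointer rework above: swapping the bare prp1/prp2 pair for union nvme_data_ptr lets one command layout carry either a PRP list (PCIe transports) or an SGL descriptor (fabrics transports). A hedged sketch of both encodings, with the DMA addresses as placeholders:

    struct nvme_command cmd = { };

    cmd.rw.opcode = nvme_cmd_read;
    /* PCIe-style: physical region pages */
    cmd.rw.dptr.prp1 = cpu_to_le64(prp1_dma);
    cmd.rw.dptr.prp2 = cpu_to_le64(prp2_dma);

    /* fabrics-style: the same bytes hold a keyed SGL descriptor instead;
     * the type sits in the high nibble, the subtype in the low nibble */
    cmd.rw.dptr.ksgl.addr = cpu_to_le64(sgl_dma);
    cmd.rw.dptr.ksgl.type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
                            NVME_SGL_FMT_INVALIDATE;
    /* ksgl.length is a 3-byte little-endian count; ksgl.key carries the rkey */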
+ * Devices based on earlier specs did not have the subsystem concept; + * therefore, those devices had their cntlid value set to 0 as a result. + */ +#define NVME_CNTLID_MIN 1 +#define NVME_CNTLID_MAX 0xffef +#define NVME_CNTLID_DYNAMIC 0xffff + +#define MAX_DISC_LOGS 255 + +/* Discovery log page entry */ +struct nvmf_disc_rsp_page_entry { + __u8 trtype; + __u8 adrfam; + __u8 nqntype; + __u8 treq; + __le16 portid; + __le16 cntlid; + __le16 asqsz; + __u8 resv8[22]; + char trsvcid[NVMF_TRSVCID_SIZE]; + __u8 resv64[192]; + char subnqn[NVMF_NQN_FIELD_LEN]; + char traddr[NVMF_TRADDR_SIZE]; + union tsas { + char common[NVMF_TSAS_SIZE]; + struct rdma { + __u8 qptype; + __u8 prtype; + __u8 cms; + __u8 resv3[5]; + __u16 pkey; + __u8 resv10[246]; + } rdma; + } tsas; +}; + +/* Discovery log page header */ +struct nvmf_disc_rsp_page_hdr { + __le64 genctr; + __le64 numrec; + __le16 recfmt; + __u8 resv14[1006]; + struct nvmf_disc_rsp_page_entry entries[0]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_connect_data { + uuid_le hostid; + __le16 cntlid; + char resv4[238]; + char subsysnqn[NVMF_NQN_FIELD_LEN]; + char hostnqn[NVMF_NQN_FIELD_LEN]; + char resv5[256]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + struct nvme_command { union { struct nvme_common_command common; @@ -529,10 +840,30 @@ struct nvme_command { struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; }; }; +static inline bool nvme_is_write(struct nvme_command *cmd) +{ + /* + * What a mess... + * + * Why can't we simply have a Fabrics In and Fabrics out command? 
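Back in the connect capsule above, the data block has a fixed layout; filling it on the host side might look like the following sketch (the host NQN literal is a placeholder, not a real name):

    struct nvmf_connect_data data = { };

    uuid_le_gen(&data.hostid);                       /* random host identifier */
    data.cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);  /* ask the target to assign one */
    strncpy(data.subsysnqn, NVME_DISC_SUBSYS_NAME, NVMF_NQN_FIELD_LEN);
    strncpy(data.hostnqn, "nqn.2016-06.example.org:host1", NVMF_NQN_FIELD_LEN);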
+ */ + if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + return cmd->fabrics.opcode & 1; + return cmd->common.opcode & 1; +} + enum { + /* + * Generic Command Status: + */ NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, @@ -551,10 +882,18 @@ enum { NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, + + NVME_SC_SGL_INVALID_OFFSET = 0x16, + NVME_SC_SGL_INVALID_SUBTYPE = 0x17, + NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, + + /* + * Command Specific Status: + */ NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -572,9 +911,29 @@ enum { NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, NVME_SC_FEATURE_NOT_PER_NS = 0x10f, NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + + /* + * I/O Command Set Specific - NVM commands: + */ NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, + + /* + * I/O Command Set Specific - Fabrics commands: + */ + NVME_SC_CONNECT_FORMAT = 0x180, + NVME_SC_CONNECT_CTRL_BUSY = 0x181, + NVME_SC_CONNECT_INVALID_PARAM = 0x182, + NVME_SC_CONNECT_RESTART_DISC = 0x183, + NVME_SC_CONNECT_INVALID_HOST = 0x184, + + NVME_SC_DISCOVERY_RESTART = 0x190, + NVME_SC_AUTH_REQUIRED = 0x191, + + /* + * Media and Data Integrity Errors: + */ NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -582,12 +941,19 @@ enum { NVME_SC_REFTAG_CHECK = 0x284, NVME_SC_COMPARE_FAILED = 0x285, NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, }; struct nvme_completion { - __le32 result; /* Used by admin commands to return data */ - __u32 rsvd; + /* + * Used by Admin and Fabrics commands to return data: + */ + union { + __le16 result16; + __le32 result; + __le64 result64; + }; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 9bb77d3ed6e0..c2256d746543 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) { } -static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/of.h b/include/linux/of.h index 74eb28cadbef..15c43f076b23 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); +typedef int (*of_init_fn_1_ret)(struct device_node *); typedef void (*of_init_fn_1)(struct device_node *); #define OF_DECLARE_1(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1) +#define OF_DECLARE_1_RET(table, name, compat, fn) \ + _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) #define OF_DECLARE_2(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_2) diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 84f542df7ff5..1c7eec09e5eb 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * used as a pointer. 
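A few hunks up, of.h grows OF_DECLARE_1_RET so table-driven init hooks can report failure instead of returning void. A hypothetical entry (the table name, compatible string, and function are all illustrative):

    static int __init foo_timer_init(struct device_node *np)
    {
            void __iomem *base = of_iomap(np, 0);   /* needs <linux/of_address.h> */

            if (!base)
                    return -ENXIO;
            /* ... program the hardware ... */
            return 0;
    }
    OF_DECLARE_1_RET(clksrc, foo_timer, "acme,foo-timer", foo_timer_init);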
If the compiler generates a separate fetch * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that - * ACCESS_ONCE() is required when fetching it. - * - * Also, we need a data dependency barrier to be paired with - * smp_store_release() in __percpu_ref_switch_to_percpu(). - * - * Use lockless deref which contains both. + * READ_ONCE() is required when fetching it. */ - percpu_ptr = lockless_dereference(ref->percpu_count_ptr); + percpu_ptr = READ_ONCE(ref->percpu_count_ptr); + + /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */ + smp_read_barrier_depends(); /* * Theoretically, the following could test just ATOMIC; however, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1a827cecd62f..7921f4f20a58 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -517,6 +517,11 @@ struct swevent_hlist { struct perf_cgroup; struct ring_buffer; +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + /** * struct perf_event - performance event kernel representation: */ @@ -675,6 +680,7 @@ struct perf_event { int cgrp_defer_enabled; #endif + struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ }; @@ -1074,7 +1080,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); -extern int get_callchain_buffers(void); +extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); extern int sysctl_perf_event_max_stack; @@ -1326,6 +1332,13 @@ struct perf_pmu_events_attr { const char *event_str; }; +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index a810f2a18842..f08b67238b58 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -22,12 +22,20 @@ struct phy; +enum phy_mode { + PHY_MODE_INVALID, + PHY_MODE_USB_HOST, + PHY_MODE_USB_DEVICE, + PHY_MODE_USB_OTG, +}; + /** * struct phy_ops - set of function pointers for performing phy operations * @init: operation to be performed for initializing phy * @exit: operation to be performed while exiting * @power_on: powering on the phy * @power_off: powering off the phy + * @set_mode: set the mode of the phy * @owner: the module owner containing the ops */ struct phy_ops { @@ -35,6 +43,7 @@ struct phy_ops { int (*exit)(struct phy *phy); int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); + int (*set_mode)(struct phy *phy, enum phy_mode mode); struct module *owner; }; @@ -126,6 +135,7 @@ int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); +int phy_set_mode(struct phy *phy, enum phy_mode mode); static inline int phy_get_bus_width(struct phy *phy) { return phy->attrs.bus_width; @@ -233,6 +243,13 @@ static inline int phy_power_off(struct phy *phy) return -ENOSYS; } +static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) +{ + if (!phy) + return 0; + return -ENOSYS; +} + static inline int phy_get_bus_width(struct phy *phy) { return -ENOSYS; diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h new file mode 
100644 index 000000000000..2e5eea358194 --- /dev/null +++ b/include/linux/platform_data/sht3x.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 Sensirion AG, Switzerland + * Author: David Frey <david.frey@sensirion.com> + * Author: Pascal Sachs <pascal.sachs@sensirion.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SHT3X_H_ +#define __SHT3X_H_ + +struct sht3x_platform_data { + bool blocking_io; + bool high_precision; +}; +#endif /* __SHT3X_H_ */ diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 308d6044f153..09779b0ae720 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -42,6 +42,7 @@ extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); extern int pm_clk_add_clk(struct device *dev, struct clk *clk); +extern int of_pm_clk_add_clk(struct device *dev, const char *name); extern int of_pm_clk_add_clks(struct device *dev); extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 39285c7bd3f5..31fec858088c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -57,7 +57,6 @@ struct generic_pm_domain { unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ - bool suspend_power_off; /* Power status before system suspend */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); struct gpd_dev_ops dev_ops; @@ -128,8 +127,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -164,9 +163,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off) +static inline int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) { + return -ENOSYS; } #endif diff --git a/include/linux/printk.h b/include/linux/printk.h index f4da695fd615..f136b22c7772 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -108,11 +108,14 @@ struct va_format { * Dummy printk for disabled debugging statements to use whilst maintaining * gcc's format checking. */ -#define no_printk(fmt, ...) 
\ -do { \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ -} while (0) +#define no_printk(fmt, ...) \ +({ \ + do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + } while (0); \ + 0; \ +}) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) @@ -309,20 +312,24 @@ extern asmlinkage void dump_stack(void) __cold; #define printk_once(fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #define printk_deferred_once(fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ + bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk_deferred(fmt, ##__VA_ARGS__); \ } \ + unlikely(__ret_print_once); \ }) #else #define printk_once(fmt, ...) \ diff --git a/include/linux/random.h b/include/linux/random.h index e47e533742b5..3d6e9815cd85 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) #ifdef CONFIG_ARCH_RANDOM # include <asm/archrandom.h> #else -static inline int arch_get_random_long(unsigned long *v) +static inline bool arch_get_random_long(unsigned long *v) { return 0; } -static inline int arch_get_random_int(unsigned int *v) +static inline bool arch_get_random_int(unsigned int *v) { return 0; } -static inline int arch_has_random(void) +static inline bool arch_has_random(void) { return 0; } -static inline int arch_get_random_seed_long(unsigned long *v) +static inline bool arch_get_random_seed_long(unsigned long *v) { return 0; } -static inline int arch_get_random_seed_int(unsigned int *v) +static inline bool arch_get_random_seed_int(unsigned int *v) { return 0; } -static inline int arch_has_random_seed(void) +static inline bool arch_has_random_seed(void) { return 0; } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5f1533e3d032..3bc5de08c0b7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -45,6 +45,7 @@ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/ktime.h> +#include <linux/irqflags.h> #include <asm/barrier.h> @@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void) * in the inner idle loop. * * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) - * will tell RCU that it needs to pay attending, invoke its argument - * (in this example, a call to the do_something_with_RCU() function), + * will tell RCU that it needs to pay attention, invoke its argument + * (in this example, calling the do_something_with_RCU() function), * and then tell RCU to go back to ignoring this CPU. It is permissible - * to nest RCU_NONIDLE() wrappers, but the nesting level is currently - * quite limited. If deeper nesting is required, it will be necessary - * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. + * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is + * on the order of a million or so, even on 32-bit systems). It is + * not legal to block within RCU_NONIDLE(), nor is it permissible to + * transfer control either into or out of RCU_NONIDLE()'s statement. */ #define RCU_NONIDLE(a) \ do { \ @@ -649,7 +651,16 @@ static inline void rcu_preempt_sleep_check(void) * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. 
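Returning to the printk.h change above: because printk_once() and printk_deferred_once() now evaluate to whether this particular invocation printed, callers can hang one-time work off the first occurrence. A hypothetical use:

    /* the fallback runs exactly once, together with the first warning */
    if (printk_once(KERN_WARNING "foo: DMA unreliable, falling back to PIO\n"))
            foo_disable_dma();      /* hypothetical one-shot fixup */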
*/ -#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index d37fbb34d06f..dd1d14250340 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -23,10 +23,11 @@ struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK #include <linux/rwsem-spinlock.h> /* use a generic implementation */ +#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE #else /* All arch specific implementations share the same struct */ struct rw_semaphore { - long count; + atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER @@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { - return sem->count != 0; + return atomic_long_read(&sem->count) != 0; } +#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) #endif /* Common initializer macros and functions */ @@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) #endif #define __RWSEM_INITIALIZER(name) \ - { .count = RWSEM_UNLOCKED_VALUE, \ + { __RWSEM_INIT_COUNT(name), \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 253538f29ade..d99218a1e043 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -219,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p); #define TASK_WAKING 256 #define TASK_PARKED 512 #define TASK_NOLOAD 1024 -#define TASK_STATE_MAX 2048 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN" +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -2139,6 +2140,9 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 48ec7651989b..923266cd294a 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -111,6 +111,7 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; + struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index a3d7c0d4a03e..2f44e2013654 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -352,9 +352,15 @@ struct earlycon_id { extern const struct earlycon_id __earlycon_table[]; extern const struct earlycon_id __earlycon_table_end[]; +#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) +#define EARLYCON_USED_OR_UNUSED __used +#else +#define EARLYCON_USED_OR_UNUSED __maybe_unused +#endif + #define 
OF_EARLYCON_DECLARE(_name, compat, fn) \ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ - __used __section(__earlycon_table) \ + EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ = { .name = __stringify(_name), \ .compatible = compat, \ .setup = fn } diff --git a/include/linux/sfi.h b/include/linux/sfi.h index d9b436f09925..e0e1597ef9e6 100644 --- a/include/linux/sfi.h +++ b/include/linux/sfi.h @@ -156,6 +156,7 @@ struct sfi_device_table_entry { #define SFI_DEV_TYPE_UART 2 #define SFI_DEV_TYPE_HSI 3 #define SFI_DEV_TYPE_IPC 4 +#define SFI_DEV_TYPE_SD 5 u8 host_num; /* attached to host 0, 1...*/ u16 addr; diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 8b3ac0d718eb..0d9848de677d 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -6,6 +6,7 @@ #endif #include <asm/processor.h> /* for cpu_relax() */ +#include <asm/barrier.h> /* * include/linux/spinlock_up.h - UP-debug version of spinlocks. @@ -25,6 +26,11 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define arch_spin_is_locked(x) ((x)->slock == 0) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + smp_cond_load_acquire(&lock->slock, VAL); +} + static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; @@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0) /* for sched/core.c and kernel_lock.c: */ # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) @@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_read_can_lock(lock) (((void)(lock), 1)) #define arch_write_can_lock(lock) (((void)(lock), 1)) -#define arch_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (arch_spin_is_locked(lock)) - #endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 8b6ec7ef0854..7693e39b14fe 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -18,12 +18,11 @@ static inline void pm_set_vt_switch(int do_switch) #endif #ifdef CONFIG_VT_CONSOLE_SLEEP -extern int pm_prepare_console(void); +extern void pm_prepare_console(void); extern void pm_restore_console(void); #else -static inline int pm_prepare_console(void) +static inline void pm_prepare_console(void) { - return 0; } static inline void pm_restore_console(void) diff --git a/include/linux/time.h b/include/linux/time.h index 297f09f23896..4cea09d94208 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -205,7 +205,20 @@ struct tm { int tm_yday; }; -void time_to_tm(time_t totalsecs, int offset, struct tm *result); +void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); + +/** + * time_to_tm - converts the calendar time to local broken-down time + * + * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, + * Coordinated Universal Time (UTC). + * @offset offset seconds adding to totalsecs. 
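Keeping time_to_tm() as an inline wrapper lets existing callers compile unchanged, while new code can pass a full time64_t and stay correct past 2038. For example (a sketch; ktime_get_real_seconds() returns time64_t):

    struct tm tm;

    time64_to_tm(ktime_get_real_seconds(), 0, &tm);  /* no 32-bit truncation */
    pr_info("date: %04ld-%02d-%02d\n",
            tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);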
+ * @result pointer to struct tm variable to receive broken-down time + */ +static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) +{ + time64_to_tm(totalsecs, offset, result); +} /** * timespec_to_ns - Convert timespec to nanoseconds diff --git a/include/linux/timer.h b/include/linux/timer.h index 20ac746f3eb3..4419506b564e 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -19,7 +19,6 @@ struct timer_list { void (*function)(unsigned long); unsigned long data; u32 flags; - int slack; #ifdef CONFIG_TIMER_STATS int start_pid; @@ -58,11 +57,14 @@ struct timer_list { * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! */ -#define TIMER_CPUMASK 0x0007FFFF -#define TIMER_MIGRATING 0x00080000 +#define TIMER_CPUMASK 0x0003FFFF +#define TIMER_MIGRATING 0x00040000 #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) -#define TIMER_DEFERRABLE 0x00100000 +#define TIMER_DEFERRABLE 0x00080000 +#define TIMER_PINNED 0x00100000 #define TIMER_IRQSAFE 0x00200000 +#define TIMER_ARRAYSHIFT 22 +#define TIMER_ARRAYMASK 0xFFC00000 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ @@ -70,7 +72,6 @@ struct timer_list { .expires = (_expires), \ .data = (_data), \ .flags = (_flags), \ - .slack = -1, \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } @@ -78,9 +79,15 @@ struct timer_list { #define TIMER_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), 0) +#define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) + #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) +#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) + #define DEFINE_TIMER(_name, _function, _expires, _data) \ struct timer_list _name = \ TIMER_INITIALIZER(_function, _expires, _data) @@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define init_timer(timer) \ __init_timer((timer), 0) +#define init_timer_pinned(timer) \ + __init_timer((timer), TIMER_PINNED) #define init_timer_deferrable(timer) \ __init_timer((timer), TIMER_DEFERRABLE) +#define init_timer_pinned_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) #define init_timer_on_stack(timer) \ __init_timer_on_stack((timer), 0) @@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), 0) +#define setup_pinned_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_PINNED) #define setup_deferrable_timer(timer, fn, data) \ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) #define setup_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), 0) +#define setup_pinned_timer_on_stack(timer, fn, data) \ + __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED) #define setup_deferrable_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) +#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ + 
__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) /** * timer_pending - is a timer pending? @@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); -extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); - -extern void set_timer_slack(struct timer_list *time, int slack_hz); -#define TIMER_NOT_PINNED 0 -#define TIMER_PINNED 1 /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: diff --git a/include/linux/torture.h b/include/linux/torture.h index 7759fc3c622d..6685a73736a2 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -50,6 +50,10 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. */ +bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, + unsigned long *sum_offl, int *min_onl, int *max_onl); +bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, + unsigned long *sum_onl, int *min_onl, int *max_onl); int torture_onoff_init(long ooholdoff, long oointerval); void torture_onoff_stats(void); bool torture_onoff_failures(void); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index fefe8b06a63d..612dbdfa388e 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -25,6 +25,8 @@ #include <linux/workqueue.h> #include <linux/usb/ch9.h> +#define UDC_TRACE_STR_MAX 512 + struct usb_ep; /** @@ -228,307 +230,49 @@ struct usb_ep { /*-------------------------------------------------------------------------*/ -/** - * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint - * @ep:the endpoint being configured - * @maxpacket_limit:value of maximum packet size limit - * - * This function should be used only in UDC drivers to initialize endpoint - * (usually in probe function). - */ +#if IS_ENABLED(CONFIG_USB_GADGET) +void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit); +int usb_ep_enable(struct usb_ep *ep); +int usb_ep_disable(struct usb_ep *ep); +struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req); +int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); +int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req); +int usb_ep_set_halt(struct usb_ep *ep); +int usb_ep_clear_halt(struct usb_ep *ep); +int usb_ep_set_wedge(struct usb_ep *ep); +int usb_ep_fifo_status(struct usb_ep *ep); +void usb_ep_fifo_flush(struct usb_ep *ep); +#else static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, - unsigned maxpacket_limit) -{ - ep->maxpacket_limit = maxpacket_limit; - ep->maxpacket = maxpacket_limit; -} - -/** - * usb_ep_enable - configure endpoint, making it usable - * @ep:the endpoint being configured. may not be the endpoint named "ep0". - * drivers discover endpoints through the ep_list of a usb_gadget. - * - * When configurations are set, or when interface settings change, the driver - * will enable or disable the relevant endpoints. while it is enabled, an - * endpoint may be used for i/o until the driver receives a disconnect() from - * the host or until the endpoint is disabled. 
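The timer.h hunks above drop mod_timer_pinned() and set_timer_slack() in favor of a TIMER_PINNED flag fixed at setup time. A minimal sketch of the resulting driver-side usage; my_poll_timer, my_poll_fn, and the HZ period are illustrative names, not from the patch:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void my_poll_fn(unsigned long data);	/* illustrative callback */
	static struct timer_list my_poll_timer;

	static void my_poll_init(void)
	{
		/* Pinning is now declared once, when the timer is set up... */
		setup_pinned_timer(&my_poll_timer, my_poll_fn, 0);
	}

	static void my_poll_rearm(void)
	{
		/* ...so every re-arm uses plain mod_timer(); the TIMER_PINNED
		 * flag keeps the timer on the CPU that armed it. */
		mod_timer(&my_poll_timer, jiffies + HZ);
	}

Moving the pinned/unpinned decision from each call site into the timer's definition is what lets the API drop the separate mod_timer_pinned() variant.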
- * - * the ep0 implementation (which calls this routine) must ensure that the - * hardware capabilities of each endpoint match the descriptor provided - * for it. for example, an endpoint named "ep2in-bulk" would be usable - * for interrupt transfers as well as bulk, but it likely couldn't be used - * for iso transfers or for endpoint 14. some endpoints are fully - * configurable, with more generic names like "ep-a". (remember that for - * USB, "in" means "towards the USB master".) - * - * returns zero, or a negative error code. - */ + unsigned maxpacket_limit) +{ } static inline int usb_ep_enable(struct usb_ep *ep) -{ - int ret; - - if (ep->enabled) - return 0; - - ret = ep->ops->enable(ep, ep->desc); - if (ret) - return ret; - - ep->enabled = true; - - return 0; -} - -/** - * usb_ep_disable - endpoint is no longer usable - * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". - * - * no other task may be using this endpoint when this is called. - * any pending and uncompleted requests will complete with status - * indicating disconnect (-ESHUTDOWN) before this call returns. - * gadget drivers must call usb_ep_enable() again before queueing - * requests to the endpoint. - * - * returns zero, or a negative error code. - */ +{ return 0; } static inline int usb_ep_disable(struct usb_ep *ep) -{ - int ret; - - if (!ep->enabled) - return 0; - - ret = ep->ops->disable(ep); - if (ret) - return ret; - - ep->enabled = false; - - return 0; -} - -/** - * usb_ep_alloc_request - allocate a request object to use with this endpoint - * @ep:the endpoint to be used with with the request - * @gfp_flags:GFP_* flags to use - * - * Request objects must be allocated with this call, since they normally - * need controller-specific setup and may even need endpoint-specific - * resources such as allocation of DMA descriptors. - * Requests may be submitted with usb_ep_queue(), and receive a single - * completion callback. Free requests with usb_ep_free_request(), when - * they are no longer needed. - * - * Returns the request, or null if one could not be allocated. - */ +{ return 0; } static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, - gfp_t gfp_flags) -{ - return ep->ops->alloc_request(ep, gfp_flags); -} - -/** - * usb_ep_free_request - frees a request object - * @ep:the endpoint associated with the request - * @req:the request being freed - * - * Reverses the effect of usb_ep_alloc_request(). - * Caller guarantees the request is not queued, and that it will - * no longer be requeued (or otherwise used). - */ + gfp_t gfp_flags) +{ return NULL; } static inline void usb_ep_free_request(struct usb_ep *ep, - struct usb_request *req) -{ - ep->ops->free_request(ep, req); -} - -/** - * usb_ep_queue - queues (submits) an I/O request to an endpoint. - * @ep:the endpoint associated with the request - * @req:the request being submitted - * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't - * pre-allocate all necessary memory with the request. - * - * This tells the device controller to perform the specified request through - * that endpoint (reading or writing a buffer). When the request completes, - * including being canceled by usb_ep_dequeue(), the request's completion - * routine is called to return the request to the driver. Any endpoint - * (except control endpoints like ep0) may have more than one transfer - * request queued; they complete in FIFO order. 
Once a gadget driver - * submits a request, that request may not be examined or modified until it - * is given back to that driver through the completion callback. - * - * Each request is turned into one or more packets. The controller driver - * never merges adjacent requests into the same packet. OUT transfers - * will sometimes use data that's already buffered in the hardware. - * Drivers can rely on the fact that the first byte of the request's buffer - * always corresponds to the first byte of some USB packet, for both - * IN and OUT transfers. - * - * Bulk endpoints can queue any amount of data; the transfer is packetized - * automatically. The last packet will be short if the request doesn't fill it - * out completely. Zero length packets (ZLPs) should be avoided in portable - * protocols since not all usb hardware can successfully handle zero length - * packets. (ZLPs may be explicitly written, and may be implicitly written if - * the request 'zero' flag is set.) Bulk endpoints may also be used - * for interrupt transfers; but the reverse is not true, and some endpoints - * won't support every interrupt transfer. (Such as 768 byte packets.) - * - * Interrupt-only endpoints are less functional than bulk endpoints, for - * example by not supporting queueing or not handling buffers that are - * larger than the endpoint's maxpacket size. They may also treat data - * toggle differently. - * - * Control endpoints ... after getting a setup() callback, the driver queues - * one response (even if it would be zero length). That enables the - * status ack, after transferring data as specified in the response. Setup - * functions may return negative error codes to generate protocol stalls. - * (Note that some USB device controllers disallow protocol stall responses - * in some cases.) When control responses are deferred (the response is - * written after the setup callback returns), then usb_ep_set_halt() may be - * used on ep0 to trigger protocol stalls. Depending on the controller, - * it may not be possible to trigger a status-stage protocol stall when the - * data stage is over, that is, from within the response's completion - * routine. - * - * For periodic endpoints, like interrupt or isochronous ones, the usb host - * arranges to poll once per interval, and the gadget driver usually will - * have queued some data to transfer at that time. - * - * Returns zero, or a negative error code. Endpoints that are not enabled - * report errors; errors will also be - * reported when the usb peripheral is disconnected. - */ -static inline int usb_ep_queue(struct usb_ep *ep, - struct usb_request *req, gfp_t gfp_flags) -{ - if (WARN_ON_ONCE(!ep->enabled && ep->address)) - return -ESHUTDOWN; - - return ep->ops->queue(ep, req, gfp_flags); -} - -/** - * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint - * @ep:the endpoint associated with the request - * @req:the request being canceled - * - * If the request is still active on the endpoint, it is dequeued and its - * completion routine is called (with status -ECONNRESET); else a negative - * error code is returned. This is guaranteed to happen before the call to - * usb_ep_dequeue() returns. - * - * Note that some hardware can't clear out write fifos (to unlink the request - * at the head of the queue) except as part of disconnecting from usb. Such - * restrictions prevent drivers from supporting configuration changes, - * even to configuration zero (a "chapter 9" requirement). 
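The usb_ep_* request API keeps its calling convention even though the bodies move out of line behind IS_ENABLED(CONFIG_USB_GADGET). A minimal sketch of the queue/complete cycle from a function driver; my_submit, my_complete, and the atomic context are illustrative assumptions:

	#include <linux/usb/gadget.h>
	#include <linux/errno.h>

	/* Completion runs once per request, in FIFO completion order. */
	static void my_complete(struct usb_ep *ep, struct usb_request *req)
	{
		if (req->status)	/* e.g. -ECONNRESET after a dequeue */
			pr_debug("transfer ended: %d\n", req->status);
		usb_ep_free_request(ep, req);
	}

	static int my_submit(struct usb_ep *ep, void *buf, unsigned len)
	{
		struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

		if (!req)
			return -ENOMEM;
		req->buf = buf;
		req->length = len;
		req->complete = my_complete;
		return usb_ep_queue(ep, req, GFP_ATOMIC);
	}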
- */ + struct usb_request *req) +{ } +static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags) +{ return 0; } static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) -{ - return ep->ops->dequeue(ep, req); -} - -/** - * usb_ep_set_halt - sets the endpoint halt feature. - * @ep: the non-isochronous endpoint being stalled - * - * Use this to stall an endpoint, perhaps as an error report. - * Except for control endpoints, - * the endpoint stays halted (will not stream any data) until the host - * clears this feature; drivers may need to empty the endpoint's request - * queue first, to make sure no inappropriate transfers happen. - * - * Note that while an endpoint CLEAR_FEATURE will be invisible to the - * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the - * current altsetting, see usb_ep_clear_halt(). When switching altsettings, - * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. - * - * Returns zero, or a negative error code. On success, this call sets - * underlying hardware state that blocks data transfers. - * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any - * transfer requests are still queued, or if the controller hardware - * (usually a FIFO) still holds bytes that the host hasn't collected. - */ +{ return 0; } static inline int usb_ep_set_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_clear_halt - clears endpoint halt, and resets toggle - * @ep:the bulk or interrupt endpoint being reset - * - * Use this when responding to the standard usb "set interface" request, - * for endpoints that aren't reconfigured, after clearing any other state - * in the endpoint's i/o queue. - * - * Returns zero, or a negative error code. On success, this call clears - * the underlying hardware state reflecting endpoint halt and data toggle. - * Note that some hardware can't support this request (like pxa2xx_udc), - * and accordingly can't correctly implement interface altsettings. - */ +{ return 0; } static inline int usb_ep_clear_halt(struct usb_ep *ep) -{ - return ep->ops->set_halt(ep, 0); -} - -/** - * usb_ep_set_wedge - sets the halt feature and ignores clear requests - * @ep: the endpoint being wedged - * - * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT) - * requests. If the gadget driver clears the halt status, it will - * automatically unwedge the endpoint. - * - * Returns zero on success, else negative errno. - */ -static inline int -usb_ep_set_wedge(struct usb_ep *ep) -{ - if (ep->ops->set_wedge) - return ep->ops->set_wedge(ep); - else - return ep->ops->set_halt(ep, 1); -} - -/** - * usb_ep_fifo_status - returns number of bytes in fifo, or error - * @ep: the endpoint whose fifo status is being checked. - * - * FIFO endpoints may have "unclaimed data" in them in certain cases, - * such as after aborted transfers. Hosts may not have collected all - * the IN data written by the gadget driver (and reported by a request - * completion). The gadget driver may not have collected all the data - * written OUT to it by the host. Drivers that need precise handling for - * fault reporting or recovery may need to use this call. - * - * This returns the number of such bytes in the fifo, or a negative - * errno if the endpoint doesn't use a FIFO or doesn't support such - * precise handling. 
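The shape of this conversion is the stock kconfig stub pattern: real prototypes when the subsystem can be present, empty static inline stubs otherwise, so callers build without #ifdefs. A generic sketch with a made-up CONFIG_FOO subsystem:

	#include <linux/kconfig.h>

	struct foo_dev;

	#if IS_ENABLED(CONFIG_FOO)
	int foo_start(struct foo_dev *dev);
	void foo_stop(struct foo_dev *dev);
	#else
	static inline int foo_start(struct foo_dev *dev)
	{
		return 0;	/* succeed silently, like the usb_ep_* stubs */
	}
	static inline void foo_stop(struct foo_dev *dev)
	{
	}
	#endif

Note that the usb_ep_* stubs succeed rather than fail, which suits callers that treat a missing gadget layer as a no-op.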
- */ +{ return 0; } +static inline int usb_ep_set_wedge(struct usb_ep *ep) +{ return 0; } static inline int usb_ep_fifo_status(struct usb_ep *ep) -{ - if (ep->ops->fifo_status) - return ep->ops->fifo_status(ep); - else - return -EOPNOTSUPP; -} - -/** - * usb_ep_fifo_flush - flushes contents of a fifo - * @ep: the endpoint whose fifo is being flushed. - * - * This call may be used to flush the "unclaimed data" that may exist in - * an endpoint fifo after abnormal transaction terminations. The call - * must never be used except when endpoint is not being used for any - * protocol translation. - */ +{ return 0; } static inline void usb_ep_fifo_flush(struct usb_ep *ep) -{ - if (ep->ops->fifo_flush) - ep->ops->fifo_flush(ep); -} - +{ } +#endif /* USB_GADGET */ /*-------------------------------------------------------------------------*/ @@ -582,6 +326,7 @@ struct usb_gadget_ops { * @dev: Driver model state for this abstract device. * @out_epnum: last used out ep number * @in_epnum: last used in ep number + * @mA: last set mA value * @otg_caps: OTG capabilities of this gadget. * @sg_supported: true if we can handle scatter-gather * @is_otg: True if the USB device port uses a Mini-AB jack, so that the @@ -638,6 +383,7 @@ struct usb_gadget { struct device dev; unsigned out_epnum; unsigned in_epnum; + unsigned mA; struct usb_otg_caps *otg_caps; unsigned sg_supported:1; @@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g) #endif } -/** - * usb_gadget_frame_number - returns the current frame number - * @gadget: controller that reports the frame number - * - * Returns the usb frame number, normally eleven bits from a SOF packet, - * or negative errno if this device doesn't support this capability. - */ -static inline int usb_gadget_frame_number(struct usb_gadget *gadget) -{ - return gadget->ops->get_frame(gadget); -} +/*-------------------------------------------------------------------------*/ -/** - * usb_gadget_wakeup - tries to wake up the host connected to this gadget - * @gadget: controller used to wake up the host - * - * Returns zero on success, else negative error code if the hardware - * doesn't support such attempts, or its support has not been enabled - * by the usb host. Drivers must return device descriptors that report - * their ability to support this, or hosts won't enable it. - * - * This may also try to use SRP to wake the host and start enumeration, - * even if OTG isn't otherwise in use. OTG devices may also start - * remote wakeup even when hosts don't explicitly enable it. - */ +#if IS_ENABLED(CONFIG_USB_GADGET) +int usb_gadget_frame_number(struct usb_gadget *gadget); +int usb_gadget_wakeup(struct usb_gadget *gadget); +int usb_gadget_set_selfpowered(struct usb_gadget *gadget); +int usb_gadget_clear_selfpowered(struct usb_gadget *gadget); +int usb_gadget_vbus_connect(struct usb_gadget *gadget); +int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA); +int usb_gadget_vbus_disconnect(struct usb_gadget *gadget); +int usb_gadget_connect(struct usb_gadget *gadget); +int usb_gadget_disconnect(struct usb_gadget *gadget); +int usb_gadget_deactivate(struct usb_gadget *gadget); +int usb_gadget_activate(struct usb_gadget *gadget); +#else +static inline int usb_gadget_frame_number(struct usb_gadget *gadget) +{ return 0; } static inline int usb_gadget_wakeup(struct usb_gadget *gadget) -{ - if (!gadget->ops->wakeup) - return -EOPNOTSUPP; - return gadget->ops->wakeup(gadget); -} - -/** - * usb_gadget_set_selfpowered - sets the device selfpowered feature. 
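struct usb_gadget also gains an mA field documented as the "last set mA value", presumably so the UDC core can remember the budget passed to usb_gadget_vbus_draw(). A sketch of the usual caller, typically the SET_CONFIGURATION path; my_report_power is illustrative, and the scaling assumes the USB 2.0 convention that bMaxPower is in 2 mA units:

	#include <linux/usb/gadget.h>

	static int my_report_power(struct usb_gadget *gadget, u8 bMaxPower)
	{
		unsigned mA = bMaxPower * 2;	/* USB 2.0: 2 mA units */

		return usb_gadget_vbus_draw(gadget, mA);
	}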
- * @gadget:the device being declared as self-powered - * - * this affects the device status reported by the hardware driver - * to reflect that it now has a local power supply. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 1); -} - -/** - * usb_gadget_clear_selfpowered - clear the device selfpowered feature. - * @gadget:the device being declared as bus-powered - * - * this affects the device status reported by the hardware driver. - * some hardware may not support bus-powered operation, in which - * case this feature's value can never change. - * - * returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) -{ - if (!gadget->ops->set_selfpowered) - return -EOPNOTSUPP; - return gadget->ops->set_selfpowered(gadget, 0); -} - -/** - * usb_gadget_vbus_connect - Notify controller that VBUS is powered - * @gadget:The device which now has VBUS power. - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session starting. Common responses include - * resuming the controller, activating the D+ (or D-) pullup to let the - * host detect that a USB device is attached, and starting to draw power - * (8mA or possibly more, especially after SET_CONFIGURATION). - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 1); -} - -/** - * usb_gadget_vbus_draw - constrain controller's VBUS power usage - * @gadget:The device whose VBUS usage is being described - * @mA:How much current to draw, in milliAmperes. This should be twice - * the value listed in the configuration descriptor bMaxPower field. - * - * This call is used by gadget drivers during SET_CONFIGURATION calls, - * reporting how much power the device may consume. For example, this - * could affect how quickly batteries are recharged. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) -{ - if (!gadget->ops->vbus_draw) - return -EOPNOTSUPP; - return gadget->ops->vbus_draw(gadget, mA); -} - -/** - * usb_gadget_vbus_disconnect - notify controller about VBUS session end - * @gadget:the device whose VBUS supply is being described - * Context: can sleep - * - * This call is used by a driver for an external transceiver (or GPIO) - * that detects a VBUS power session ending. Common responses include - * reversing everything done in usb_gadget_vbus_connect(). - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) -{ - if (!gadget->ops->vbus_session) - return -EOPNOTSUPP; - return gadget->ops->vbus_session(gadget, 0); -} - -/** - * usb_gadget_connect - software-controlled connect to USB host - * @gadget:the peripheral being connected - * - * Enables the D+ (or potentially D-) pullup. The host will start - * enumerating this gadget when the pullup is active and a VBUS session - * is active (the link is powered). This pullup is always enabled unless - * usb_gadget_disconnect() has been used to disable it. 
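The removed inline bodies around here show the contract these wrappers presumably keep after moving out of line: optional usb_gadget_ops hooks are checked for NULL and report -EOPNOTSUPP when absent. A caller-side sketch (my_try_wakeup is an illustrative name):

	#include <linux/usb/gadget.h>
	#include <linux/errno.h>

	static void my_try_wakeup(struct usb_gadget *gadget)
	{
		int ret = usb_gadget_wakeup(gadget);

		if (ret == -EOPNOTSUPP)
			pr_debug("UDC lacks remote-wakeup support\n");
		else if (ret)
			pr_debug("remote wakeup failed: %d\n", ret);
	}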
- * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_connect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will be connected automatically after activation. - */ - gadget->connected = true; - return 0; - } - - ret = gadget->ops->pullup(gadget, 1); - if (!ret) - gadget->connected = 1; - return ret; -} - -/** - * usb_gadget_disconnect - software-controlled disconnect from USB host - * @gadget:the peripheral being disconnected - * - * Disables the D+ (or potentially D-) pullup, which the host may see - * as a disconnect (when a VBUS session is active). Not all systems - * support software pullup controls. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_disconnect(struct usb_gadget *gadget) -{ - int ret; - - if (!gadget->ops->pullup) - return -EOPNOTSUPP; - - if (gadget->deactivated) { - /* - * If gadget is deactivated we only save new state. - * Gadget will stay disconnected after activation. - */ - gadget->connected = false; - return 0; - } - - ret = gadget->ops->pullup(gadget, 0); - if (!ret) - gadget->connected = 0; - return ret; -} - -/** - * usb_gadget_deactivate - deactivate function which is not ready to work - * @gadget: the peripheral being deactivated - * - * This routine may be used during the gadget driver bind() call to prevent - * the peripheral from ever being visible to the USB host, unless later - * usb_gadget_activate() is called. For example, user mode components may - * need to be activated before the system can talk to hosts. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_deactivate(struct usb_gadget *gadget) -{ - int ret; - - if (gadget->deactivated) - return 0; - - if (gadget->connected) { - ret = usb_gadget_disconnect(gadget); - if (ret) - return ret; - /* - * If gadget was being connected before deactivation, we want - * to reconnect it in usb_gadget_activate(). - */ - gadget->connected = true; - } - gadget->deactivated = true; - - return 0; -} - -/** - * usb_gadget_activate - activate function which is not ready to work - * @gadget: the peripheral being activated - * - * This routine activates gadget which was previously deactivated with - * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. - * - * Returns zero on success, else negative errno. - */ +{ return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) -{ - if (!gadget->deactivated) - return 0; - - gadget->deactivated = false; - - /* - * If gadget has been connected before deactivation, or became connected - * while it was being deactivated, we call usb_gadget_connect(). - */ - if (gadget->connected) - return usb_gadget_connect(gadget); - - return 0; -} +{ return 0; } +#endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h deleted file mode 100644 index 8c8f6854c993..000000000000 --- a/include/linux/usb/msm_hsusb.h +++ /dev/null @@ -1,200 +0,0 @@ -/* linux/include/asm-arm/arch-msm/hsusb.h - * - * Copyright (C) 2008 Google, Inc. - * Author: Brian Swetland <swetland@google.com> - * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. 
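Likewise for usb_gadget_deactivate()/usb_gadget_activate(): the deleted bodies above encode the invariant that while deactivated, connect/disconnect only toggle gadget->connected, and the pullup is re-applied on activation. A sketch of a function driver deferring visibility to the host until its userspace side is ready (my_bind and my_userspace_ready are illustrative):

	#include <linux/usb/gadget.h>

	/* Keep the D+ pullup off until a userspace component attaches. */
	static int my_bind(struct usb_gadget *gadget)
	{
		return usb_gadget_deactivate(gadget);
	}

	/* Re-applies the pullup if we were logically 'connected' meanwhile. */
	static void my_userspace_ready(struct usb_gadget *gadget)
	{
		usb_gadget_activate(gadget);
	}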
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __ASM_ARCH_MSM_HSUSB_H -#define __ASM_ARCH_MSM_HSUSB_H - -#include <linux/extcon.h> -#include <linux/types.h> -#include <linux/usb/otg.h> -#include <linux/clk.h> - -/** - * OTG control - * - * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host - * only configuration. - * OTG_PHY_CONTROL Id/VBUS notifications come from USB PHY. - * OTG_PMIC_CONTROL Id/VBUS notifications come from PMIC hardware. - * OTG_USER_CONTROL Id/VBUS notifications come from User via sysfs. - * - */ -enum otg_control_type { - OTG_NO_CONTROL = 0, - OTG_PHY_CONTROL, - OTG_PMIC_CONTROL, - OTG_USER_CONTROL, -}; - -/** - * PHY used in - * - * INVALID_PHY Unsupported PHY - * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY - * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY - * - */ -enum msm_usb_phy_type { - INVALID_PHY = 0, - CI_45NM_INTEGRATED_PHY, - SNPS_28NM_INTEGRATED_PHY, -}; - -#define IDEV_CHG_MAX 1500 -#define IUNIT 100 - -/** - * Different states involved in USB charger detection. - * - * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection - * process is not yet started. - * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. - * USB_CHG_STATE_DCD_DONE Data pin contact is detected. - * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects - * between SDP and DCP/CDP). - * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects - * between DCP and CDP). - * USB_CHG_STATE_DETECTED USB charger type is determined. - * - */ -enum usb_chg_state { - USB_CHG_STATE_UNDEFINED = 0, - USB_CHG_STATE_WAIT_FOR_DCD, - USB_CHG_STATE_DCD_DONE, - USB_CHG_STATE_PRIMARY_DONE, - USB_CHG_STATE_SECONDARY_DONE, - USB_CHG_STATE_DETECTED, -}; - -/** - * USB charger types - * - * USB_INVALID_CHARGER Invalid USB charger. - * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port - * on USB2.0 compliant host/hub. - * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger). - * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and - * IDEV_CHG_MAX can be drawn irrespective of USB state. - * - */ -enum usb_chg_type { - USB_INVALID_CHARGER = 0, - USB_SDP_CHARGER, - USB_DCP_CHARGER, - USB_CDP_CHARGER, -}; - -/** - * struct msm_otg_platform_data - platform device data - * for msm_otg driver. - * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as - * "do not overwrite default value at this address". - * @phy_init_sz: PHY configuration sequence size. - * @vbus_power: VBUS power on/off routine. - * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). - * @mode: Supported mode (OTG/peripheral/host). 
- * @otg_control: OTG switch controlled by user/Id pin - */ -struct msm_otg_platform_data { - int *phy_init_seq; - int phy_init_sz; - void (*vbus_power)(bool on); - unsigned power_budget; - enum usb_dr_mode mode; - enum otg_control_type otg_control; - enum msm_usb_phy_type phy_type; - void (*setup_gpio)(enum usb_otg_state state); -}; - -/** - * struct msm_usb_cable - structure for external connector cable - * state tracking - * @nb: holds the event notification callback - * @extcon: used for notification registration - */ -struct msm_usb_cable { - struct notifier_block nb; - struct extcon_dev *extcon; -}; - -/** - * struct msm_otg: OTG driver data. Shared by HCD and DCD. - * @otg: USB OTG Transceiver structure. - * @pdata: otg device platform data. - * @irq: IRQ number assigned for HSUSB controller. - * @clk: clock struct of usb_hs_clk. - * @pclk: clock struct of usb_hs_pclk. - * @core_clk: clock struct of usb_hs_core_clk. - * @regs: ioremapped register base address. - * @inputs: OTG state machine inputs (Id, SessValid etc). - * @sm_work: OTG state machine work. - * @in_lpm: indicates low power mode (LPM) state. - * @async_int: Async interrupt arrived. - * @cur_power: The amount of mA available from downstream port. - * @chg_work: Charger detection work. - * @chg_state: The state of charger detection process. - * @chg_type: The type of charger attached. - * @dcd_retries: The retry count used to track Data contact - * detection process. - * @manual_pullup: true if VBUS is not routed to USB controller/phy - * and controller driver therefore enables pull-up explicitly before - * starting controller using usbcmd run/stop bit. - * @vbus: VBUS signal state tracking, using extcon framework - * @id: ID signal state tracking, using extcon framework - * @switch_gpio: Descriptor for GPIO used to control external Dual - * SPDT USB Switch. 
- * @reboot: Used to inform the driver to route USB D+/D- line to Device - * connector - */ -struct msm_otg { - struct usb_phy phy; - struct msm_otg_platform_data *pdata; - int irq; - struct clk *clk; - struct clk *pclk; - struct clk *core_clk; - void __iomem *regs; -#define ID 0 -#define B_SESS_VLD 1 - unsigned long inputs; - struct work_struct sm_work; - atomic_t in_lpm; - int async_int; - unsigned cur_power; - int phy_number; - struct delayed_work chg_work; - enum usb_chg_state chg_state; - enum usb_chg_type chg_type; - u8 dcd_retries; - struct regulator *v3p3; - struct regulator *v1p8; - struct regulator *vddcx; - - struct reset_control *phy_rst; - struct reset_control *link_rst; - int vdd_levels[3]; - - bool manual_pullup; - - struct msm_usb_cable vbus; - struct msm_usb_cable id; - - struct gpio_desc *switch_gpio; - struct notifier_block reboot; -}; - -#endif diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index de3237fce6b2..5ff9032ee1b4 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,7 +12,7 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) -enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); @@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent, int portnum); #else static inline enum usb_dr_mode -of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) { return USB_DR_MODE_UNKNOWN; } diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h deleted file mode 100644 index 376654b5b0f7..000000000000 --- a/include/linux/usb/xhci_pdriver.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
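The struct msm_usb_cable just removed pairs a notifier_block with an extcon device, the standard extcon consumer pattern, which now lives inside the driver rather than a shared header. A minimal sketch, assuming a provider that reports EXTCON_USB; the my_* names are illustrative:

	#include <linux/extcon.h>
	#include <linux/notifier.h>

	static int my_vbus_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		/* 'event' carries the new cable state (attach/detach) */
		return NOTIFY_DONE;
	}

	static int my_register_vbus(struct extcon_dev *edev,
				    struct notifier_block *nb)
	{
		nb->notifier_call = my_vbus_notifier;
		return extcon_register_notifier(edev, EXTCON_USB, nb);
	}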
- * - */ - -#ifndef __USB_CORE_XHCI_PDRIVER_H -#define __USB_CORE_XHCI_PDRIVER_H - -/** - * struct usb_xhci_pdata - platform_data for generic xhci platform driver - * - * @usb3_lpm_capable: determines if this xhci platform supports USB3 - * LPM capability - * - */ -struct usb_xhci_pdata { - unsigned usb3_lpm_capable:1; -}; - -#endif /* __USB_CORE_XHCI_PDRIVER_H */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 8d7634247fb4..6abd24f258bc 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -45,7 +45,7 @@ void poke_blanked_console(void); int con_font_op(struct vc_data *vc, struct console_font_op *op); int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); -void scrollback(struct vc_data *vc, int lines); +void scrollback(struct vc_data *vc); void scrollfront(struct vc_data *vc, int lines); void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); @@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg); #ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ -struct unimapinit; struct unipair; int con_set_trans_old(unsigned char __user * table); int con_get_trans_old(unsigned char __user * table); int con_set_trans_new(unsigned short __user * table); int con_get_trans_new(unsigned short __user * table); -int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui); +int con_clear_unimap(struct vc_data *vc); int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); int con_set_default_unimap(struct vc_data *vc); @@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table) { return -EINVAL; } -static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) +static inline int con_clear_unimap(struct vc_data *vc) { return 0; } diff --git a/include/linux/vtime.h b/include/linux/vtime.h index fa2196990f84..aa9bfea8804a 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -12,11 +12,9 @@ struct task_struct; /* * vtime_accounting_cpu_enabled() definitions/declarations */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) static inline bool vtime_accounting_cpu_enabled(void) { return true; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* * Checks if vtime is enabled on some CPU. Cputime readers want to be careful * in that case and compute the tickless cputime. 
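These vtime.h hunks collapse three separate #ifdef blocks into one #if/#elif/#else chain over the mutually exclusive accounting options. The comment about careful cputime readers suggests the usage pattern; a hedged sketch, where my_read_cputime and my_tickless_cputime are hypothetical helpers, not scheduler code:

	#include <linux/vtime.h>
	#include <linux/sched.h>
	#include <linux/types.h>

	static u64 my_tickless_cputime(struct task_struct *t);	/* hypothetical */

	static u64 my_read_cputime(struct task_struct *t, u64 tick_based)
	{
		/* Without vtime accounting, tick counts are authoritative;
		 * with it, the reader must compute the tickless value. */
		if (!vtime_accounting_cpu_enabled())
			return tick_based;
		return my_tickless_cputime(t);
	}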
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void) return false; } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ - -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline bool vtime_accounting_cpu_enabled(void) { return false; } -#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* @@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); extern void vtime_account_user(struct task_struct *tsk); -#ifdef __ARCH_HAS_VTIME_ACCOUNT -extern void vtime_account_irq_enter(struct task_struct *tsk); -#else -extern void vtime_common_account_irq_enter(struct task_struct *tsk); -static inline void vtime_account_irq_enter(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_common_account_irq_enter(tsk); -} -#endif /* __ARCH_HAS_VTIME_ACCOUNT */ - #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } static inline void vtime_account_user(struct task_struct *tsk) { } -static inline void vtime_account_irq_enter(struct task_struct *tsk) { } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_gen_account_irq_exit(struct task_struct *tsk); - -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - if (vtime_accounting_cpu_enabled()) - vtime_gen_account_irq_exit(tsk); -} - extern void vtime_user_enter(struct task_struct *tsk); static inline void vtime_user_exit(struct task_struct *tsk) @@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - /* On hard|softirq exit we always account to hard|softirq cputime */ - vtime_account_system(tsk); -} static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } @@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { } static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_irq_enter(struct task_struct *tsk); +static inline void vtime_account_irq_exit(struct task_struct *tsk) +{ + /* On hard|softirq exit we always account to hard|softirq cputime */ + vtime_account_system(tsk); +} +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ +static inline void vtime_account_irq_enter(struct task_struct *tsk) { } +static inline void vtime_account_irq_exit(struct task_struct *tsk) { } +#endif + + #ifdef CONFIG_IRQ_TIME_ACCOUNTING extern void irqtime_account_irq(struct task_struct *tsk); #else diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 981acf74b14f..65673d8b81ac 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -27,7 +27,8 @@ DECLARE_EVENT_CLASS(bcache_request, __entry->sector = bio->bi_iter.bi_sector; __entry->orig_sector = bio->bi_iter.bi_sector - 16; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + 
bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", @@ -101,7 +102,8 @@ DECLARE_EVENT_CLASS(bcache_bio, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u", @@ -136,7 +138,8 @@ TRACE_EVENT(bcache_read, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); __entry->cache_hit = hit; __entry->bypass = bypass; ), @@ -167,7 +170,8 @@ TRACE_EVENT(bcache_write, __entry->inode = inode; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); __entry->writeback = writeback; __entry->bypass = bypass; ), diff --git a/include/trace/events/block.h b/include/trace/events/block.h index e8a5eca1dbe5..5a2a7592068f 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -84,7 +84,8 @@ DECLARE_EVENT_CLASS(block_rq_with_error, 0 : blk_rq_sectors(rq); __entry->errors = rq->errors; - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, + blk_rq_bytes(rq)); blk_dump_cmd(__get_str(cmd), rq); ), @@ -162,7 +163,7 @@ TRACE_EVENT(block_rq_complete, __entry->nr_sector = nr_bytes >> 9; __entry->errors = rq->errors; - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); + blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes); blk_dump_cmd(__get_str(cmd), rq); ), @@ -198,7 +199,8 @@ DECLARE_EVENT_CLASS(block_rq, __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
blk_rq_bytes(rq) : 0; - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, + blk_rq_bytes(rq)); blk_dump_cmd(__get_str(cmd), rq); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -272,7 +274,8 @@ TRACE_EVENT(block_bio_bounce, bio->bi_bdev->bd_dev : 0; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -310,7 +313,8 @@ TRACE_EVENT(block_bio_complete, __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = error; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u [%d]", @@ -337,7 +341,8 @@ DECLARE_EVENT_CLASS(block_bio_merge, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -404,7 +409,8 @@ TRACE_EVENT(block_bio_queue, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -432,7 +438,7 @@ DECLARE_EVENT_CLASS(block_get_rq, __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; __entry->sector = bio ? bio->bi_iter.bi_sector : 0; __entry->nr_sector = bio ? bio_sectors(bio) : 0; - blk_fill_rwbs(__entry->rwbs, + blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, bio ? 
bio->bi_rw : 0, __entry->nr_sector); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -567,7 +573,8 @@ TRACE_EVENT(block_split, __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -610,7 +617,8 @@ TRACE_EVENT(block_bio_remap, __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, + bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", @@ -656,7 +664,8 @@ TRACE_EVENT(block_rq_remap, __entry->old_dev = dev; __entry->old_sector = from; __entry->nr_bios = blk_rq_count_bios(rq); - blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); + blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, + blk_rq_bytes(rq)); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 3a09bb4dc3b2..ff95fd02116f 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -31,10 +31,9 @@ TRACE_DEFINE_ENUM(BG_GC); TRACE_DEFINE_ENUM(LFS); TRACE_DEFINE_ENUM(SSR); TRACE_DEFINE_ENUM(__REQ_RAHEAD); -TRACE_DEFINE_ENUM(__REQ_WRITE); TRACE_DEFINE_ENUM(__REQ_SYNC); TRACE_DEFINE_ENUM(__REQ_NOIDLE); -TRACE_DEFINE_ENUM(__REQ_FLUSH); +TRACE_DEFINE_ENUM(__REQ_PREFLUSH); TRACE_DEFINE_ENUM(__REQ_FUA); TRACE_DEFINE_ENUM(__REQ_PRIO); TRACE_DEFINE_ENUM(__REQ_META); @@ -56,17 +55,21 @@ TRACE_DEFINE_ENUM(CP_DISCARD); { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) -#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) +#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA)) #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) -#define show_bio_type(type) show_bio_base(type), show_bio_extra(type) +#define show_bio_type(op, op_flags) show_bio_op(op), \ + show_bio_op_flags(op_flags), show_bio_extra(op_flags) -#define show_bio_base(type) \ - __print_symbolic(F2FS_BIO_MASK(type), \ +#define show_bio_op(op) \ + __print_symbolic(op, \ { READ, "READ" }, \ - { READA, "READAHEAD" }, \ + { WRITE, "WRITE" }) + +#define show_bio_op_flags(flags) \ + __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ + { REQ_RAHEAD, "READAHEAD" }, \ { READ_SYNC, "READ_SYNC" }, \ - { WRITE, "WRITE" }, \ { WRITE_SYNC, "WRITE_SYNC" }, \ { WRITE_FLUSH, "WRITE_FLUSH" }, \ { WRITE_FUA, "WRITE_FUA" }, \ @@ -734,7 +737,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __field(pgoff_t, index) __field(block_t, old_blkaddr) __field(block_t, new_blkaddr) - __field(int, rw) + __field(int, op) + __field(int, op_flags) __field(int, type) ), @@ -744,17 +748,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __entry->index = page->index; __entry->old_blkaddr = fio->old_blkaddr; __entry->new_blkaddr = fio->new_blkaddr; - __entry->rw = fio->rw; + __entry->op = fio->op; + __entry->op_flags = fio->op_flags; __entry->type = fio->type; ), TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " - "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s", + "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s", show_dev_ino(__entry), (unsigned long)__entry->index, (unsigned long long)__entry->old_blkaddr, (unsigned long long)__entry->new_blkaddr, - show_bio_type(__entry->rw), + show_bio_type(__entry->op, __entry->op_flags), 
show_block_type(__entry->type)) ); @@ -785,7 +790,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_STRUCT__entry( __field(dev_t, dev) - __field(int, rw) + __field(int, op) + __field(int, op_flags) __field(int, type) __field(sector_t, sector) __field(unsigned int, size) @@ -793,15 +799,16 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_fast_assign( __entry->dev = sb->s_dev; - __entry->rw = fio->rw; + __entry->op = fio->op; + __entry->op_flags = fio->op_flags; __entry->type = fio->type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), - TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", + TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u", show_dev(__entry), - show_bio_type(__entry->rw), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type), (unsigned long long)__entry->sector, __entry->size) diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h index 75fff8696bae..2fbbf990e4b3 100644 --- a/include/trace/events/libata.h +++ b/include/trace/events/libata.h @@ -126,6 +126,7 @@ ata_protocol_name(ATA_PROT_PIO), \ ata_protocol_name(ATA_PROT_DMA), \ ata_protocol_name(ATA_PROT_NCQ), \ + ata_protocol_name(ATA_PROT_NCQ_NODATA), \ ata_protocol_name(ATAPI_PROT_NODATA), \ ata_protocol_name(ATAPI_PROT_PIO), \ ata_protocol_name(ATAPI_PROT_DMA)) diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 2e67bb64c1da..79b5ded2001a 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -45,6 +45,7 @@ enum crypto_attr_type_t { CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ + CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ __CRYPTOCFGA_MAX #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) @@ -107,5 +108,9 @@ struct crypto_report_akcipher { char type[CRYPTO_MAX_NAME]; }; +struct crypto_report_kpp { + char type[CRYPTO_MAX_NAME]; +}; + #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ sizeof(struct crypto_report_blkcipher)) diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 30afd0a23c4b..4bf9f1eabffc 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -267,9 +267,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 34 +#define DM_VERSION_MINOR 35 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2015-10-28)" +#define DM_VERSION_EXTRA "-ioctl (2016-06-23)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index b0916fc72cce..22e5e589a274 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -39,6 +39,7 @@ enum iio_chan_type { IIO_RESISTANCE, IIO_PH, IIO_UVINDEX, + IIO_ELECTRICALCONDUCTIVITY, }; enum iio_modifier { diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 36ce552cf6a9..c66a485a24ac 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -276,6 +276,9 @@ enum perf_event_read_format { /* * Hardware event_id to monitor via a performance monitoring event: + * + * @sample_max_stack: Max number of frame pointers in a callchain, + * should be < /proc/sys/kernel/perf_event_max_stack */ struct perf_event_attr { @@ -385,7 +388,8 @@ struct perf_event_attr { * 
Wakeup watermark for AUX area */ __u32 aux_watermark; - __u32 __reserved_2; /* align to __u64 */ + __u16 sample_max_stack; + __u16 __reserved_2; /* align to __u64 */ }; #define perf_flags(attr) (*(&(attr)->read_format + 1)) |
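Finally, perf_event_attr turns half of the old __reserved_2 padding into sample_max_stack, capping the frames recorded per callchain. A userspace sketch, assuming headers new enough to carry the field; the event choice and sample period are arbitrary:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int open_counter_with_callchain(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;
		/* meaningful with PERF_SAMPLE_CALLCHAIN; per the new comment,
		 * must stay below /proc/sys/kernel/perf_event_max_stack */
		attr.sample_max_stack = 32;

		/* pid=0, cpu=-1: this task on any CPU; no group, no flags */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}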