author		Ingo Molnar <mingo@kernel.org>	2018-11-03 23:42:16 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-11-03 23:42:16 +0100
commit		23a12ddee1ce28065b71f14ccc695b5a0c8a64ff (patch)
tree		cedaa1cde5b2557116e523c31552187804704093 /include/linux
parent		98f76206b33504b934209d16196477dfa519a807 (diff)
parent		bcb6fb5da77c2a228adf07cc9cb1a0c2aa2001c6 (diff)
Merge branch 'core/urgent' into x86/urgent, to pick up objtool fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
67 files changed, 3362 insertions, 1174 deletions
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acf5e8df3504..f58e97446abc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -28,8 +28,8 @@
  * The available bitmap operations and their rough meaning in the
  * case that the bitmap is a single unsigned long are thus:
  *
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
  *
  * ::
  *
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
 
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
 #define small_const_nbits(nbits) \
-	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
 
 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = 0UL;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memset(dst, 0, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memset(dst, 0, len);
 }
 
 static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = ~0UL;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memset(dst, 0xff, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memset(dst, 0xff, len);
 }
 
 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 			unsigned int nbits)
 {
-	if (small_const_nbits(nbits))
-		*dst = *src;
-	else {
-		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
-		memcpy(dst, src, len);
-	}
+	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+	memcpy(dst, src, len);
 }
 
 /*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
 }
 
 static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
-			unsigned int shift, int nbits)
+			unsigned int shift, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 7ddb1349394d..705f7c442691 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -236,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
 #ifdef __KERNEL__
 
 #ifndef set_mask_bits
-#define set_mask_bits(ptr, _mask, _bits)	\
+#define set_mask_bits(ptr, mask, bits)	\
 ({								\
-	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
-	typeof(*ptr) old, new;					\
+	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
+	typeof(*(ptr)) old__, new__;				\
 								\
 	do {							\
-		old = READ_ONCE(*ptr);				\
-		new = (old & ~mask) | bits;			\
-	} while (cmpxchg(ptr, old, new) != old);		\
+		old__ = READ_ONCE(*(ptr));			\
+		new__ = (old__ & ~mask__) | bits__;		\
+	} while (cmpxchg(ptr, old__, new__) != old__);		\
 								\
-	new;							\
+	new__;							\
 })
 #endif
 
 #ifndef bit_clear_unless
-#define bit_clear_unless(ptr, _clear, _test)	\
+#define bit_clear_unless(ptr, clear, test)	\
 ({								\
-	const typeof(*ptr) clear = (_clear), test = (_test);	\
-	typeof(*ptr) old, new;					\
+	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+	typeof(*(ptr)) old__, new__;				\
 								\
 	do {							\
-		old = READ_ONCE(*ptr);				\
-		new = old & ~clear;				\
-	} while (!(old & test) &&				\
-		 cmpxchg(ptr, old, new) != old);		\
+		old__ = READ_ONCE(*(ptr));			\
+		new__ = old__ & ~clear__;			\
+	} while (!(old__ & test__) &&				\
+		 cmpxchg(ptr, old__, new__) != old__);		\
 								\
-	!(old & test);						\
+	!(old__ & test__);					\
 })
 #endif
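[Editor's note: the double-underscore temporaries above exist so the macro
arguments no longer shadow the caller's identifiers. A minimal illustrative
sketch of the two macros' behaviour after this change; the FOO_* masks and
the flags word are invented for illustration and are not part of the patch:

	/* Illustrative only. */
	static unsigned long foo_flags;

	#define FOO_MODE_MASK	0x3UL
	#define FOO_MODE_FAST	0x1UL
	#define FOO_PENDING	0x4UL
	#define FOO_BUSY	0x8UL

	static void foo_example(void)
	{
		/* Atomically replace the 2-bit mode field. */
		set_mask_bits(&foo_flags, FOO_MODE_MASK, FOO_MODE_FAST);

		/* Clears PENDING and returns true only while BUSY is not set. */
		if (bit_clear_unless(&foo_flags, FOO_PENDING, FOO_BUSY))
			; /* PENDING was cleared */
	}
]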
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index 42515195d7d8..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/**
- * struct bootmem_data - per-node information used by the bootmem allocator
- * @node_min_pfn: the starting physical address of the node's memory
- * @node_low_pfn: the end physical address of the directly addressable memory
- * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
- *	memory pages (including holes) on the node.
- * @last_end_off: the offset within the page of the end of the last allocation;
- *	if 0, the page used is full
- * @hint_idx: the PFN of the page used with the last allocation;
- *	together with using this with the @last_end_offset field,
- *	a test can be made to see if allocations can be merged
- *	with the page used for the last allocation rather than
- *	using up a full new page.
- * @list: list entry in the linked list ordered by the memory addresses
- */
-typedef struct bootmem_data {
-	unsigned long node_min_pfn;
-	unsigned long node_low_pfn;
-	void *node_bootmem_map;
-	unsigned long last_end_off;
-	unsigned long hint_idx;
-	struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
-				       unsigned long freepfn,
-				       unsigned long startpfn,
-				       unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
-			      unsigned long addr,
-			      unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */ -#define BOOTMEM_DEFAULT 0 -#define BOOTMEM_EXCLUSIVE (1<<0) - -extern int reserve_bootmem(unsigned long addr, - unsigned long size, - int flags); -extern int reserve_bootmem_node(pg_data_t *pgdat, - unsigned long physaddr, - unsigned long size, - int flags); - -extern void *__alloc_bootmem(unsigned long size, - unsigned long align, - unsigned long goal); -extern void *__alloc_bootmem_nopanic(unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -extern void *__alloc_bootmem_node(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -void *__alloc_bootmem_node_high(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal, - unsigned long limit) __malloc; -extern void *__alloc_bootmem_low(unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -void *__alloc_bootmem_low_nopanic(unsigned long size, - unsigned long align, - unsigned long goal) __malloc; -extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal) __malloc; - -#ifdef CONFIG_NO_BOOTMEM -/* We are using top down, so it is safe to use 0 here */ -#define BOOTMEM_LOW_LIMIT 0 -#else -#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS) -#endif - -#ifndef ARCH_LOW_ADDRESS_LIMIT -#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL -#endif - -#define alloc_bootmem(x) \ - __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_align(x, align) \ - __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_nopanic(x) \ - __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_pages(x) \ - __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_pages_nopanic(x) \ - __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_node(pgdat, x) \ - __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_node_nopanic(pgdat, x) \ - __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_pages_node(pgdat, x) \ - __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) -#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ - __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) - -#define alloc_bootmem_low(x) \ - __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) -#define alloc_bootmem_low_pages_nopanic(x) \ - __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_low(x, PAGE_SIZE, 0) -#define alloc_bootmem_low_pages_node(pgdat, x) \ - __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) - - -#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) - -/* FIXME: use MEMBLOCK_ALLOC_* variants here */ -#define BOOTMEM_ALLOC_ACCESSIBLE 0 -#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) - -/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ -void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, - phys_addr_t max_addr, int nid); -void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size, - phys_addr_t align, phys_addr_t min_addr, - phys_addr_t max_addr, int nid); -void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align, - 
phys_addr_t min_addr, phys_addr_t max_addr, int nid); -void __memblock_free_early(phys_addr_t base, phys_addr_t size); -void __memblock_free_late(phys_addr_t base, phys_addr_t size); - -static inline void * __init memblock_virt_alloc( - phys_addr_t size, phys_addr_t align) -{ - return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_virt_alloc_raw( - phys_addr_t size, phys_addr_t align) -{ - return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_virt_alloc_nopanic( - phys_addr_t size, phys_addr_t align) -{ - return memblock_virt_alloc_try_nid_nopanic(size, align, - BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_virt_alloc_low( - phys_addr_t size, phys_addr_t align) -{ - return memblock_virt_alloc_try_nid(size, align, - BOOTMEM_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, - NUMA_NO_NODE); -} -static inline void * __init memblock_virt_alloc_low_nopanic( - phys_addr_t size, phys_addr_t align) -{ - return memblock_virt_alloc_try_nid_nopanic(size, align, - BOOTMEM_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, - NUMA_NO_NODE); -} - -static inline void * __init memblock_virt_alloc_from_nopanic( - phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) -{ - return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_virt_alloc_node( - phys_addr_t size, int nid) -{ - return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, nid); -} - -static inline void * __init memblock_virt_alloc_node_nopanic( - phys_addr_t size, int nid) -{ - return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - nid); -} - -static inline void __init memblock_free_early( - phys_addr_t base, phys_addr_t size) -{ - __memblock_free_early(base, size); -} - -static inline void __init memblock_free_early_nid( - phys_addr_t base, phys_addr_t size, int nid) -{ - __memblock_free_early(base, size); -} - -static inline void __init memblock_free_late( - phys_addr_t base, phys_addr_t size) -{ - __memblock_free_late(base, size); -} - -#else - -#define BOOTMEM_ALLOC_ACCESSIBLE 0 - - -/* Fall back to all the existing bootmem APIs */ -static inline void * __init memblock_virt_alloc( - phys_addr_t size, phys_addr_t align) -{ - if (!align) - align = SMP_CACHE_BYTES; - return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT); -} - -static inline void * __init memblock_virt_alloc_raw( - phys_addr_t size, phys_addr_t align) -{ - if (!align) - align = SMP_CACHE_BYTES; - return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); -} - -static inline void * __init memblock_virt_alloc_nopanic( - phys_addr_t size, phys_addr_t align) -{ - if (!align) - align = SMP_CACHE_BYTES; - return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); -} - -static inline void * __init memblock_virt_alloc_low( - phys_addr_t size, phys_addr_t align) -{ - if (!align) - align = SMP_CACHE_BYTES; - return __alloc_bootmem_low(size, align, 0); -} - -static inline void * __init memblock_virt_alloc_low_nopanic( - phys_addr_t size, phys_addr_t align) -{ - if (!align) - align = SMP_CACHE_BYTES; - return __alloc_bootmem_low_nopanic(size, align, 0); -} - -static inline void * __init memblock_virt_alloc_from_nopanic( - phys_addr_t size, phys_addr_t align, 
phys_addr_t min_addr) -{ - return __alloc_bootmem_nopanic(size, align, min_addr); -} - -static inline void * __init memblock_virt_alloc_node( - phys_addr_t size, int nid) -{ - return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES, - BOOTMEM_LOW_LIMIT); -} - -static inline void * __init memblock_virt_alloc_node_nopanic( - phys_addr_t size, int nid) -{ - return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size, - SMP_CACHE_BYTES, - BOOTMEM_LOW_LIMIT); -} - -static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size, - phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) -{ - return __alloc_bootmem_node_high(NODE_DATA(nid), size, align, - min_addr); -} - -static inline void * __init memblock_virt_alloc_try_nid_raw( - phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, int nid) -{ - return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, - min_addr, max_addr); -} - -static inline void * __init memblock_virt_alloc_try_nid_nopanic( - phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, int nid) -{ - return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, - min_addr, max_addr); -} - -static inline void __init memblock_free_early( - phys_addr_t base, phys_addr_t size) -{ - free_bootmem(base, size); -} - -static inline void __init memblock_free_early_nid( - phys_addr_t base, phys_addr_t size, int nid) -{ - free_bootmem_node(NODE_DATA(nid), base, size); -} - -static inline void __init memblock_free_late( - phys_addr_t base, phys_addr_t size) -{ - free_bootmem_late(base, size); -} -#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */ - -extern void *alloc_large_system_hash(const char *tablename, - unsigned long bucketsize, - unsigned long numentries, - int scale, - int flags, - unsigned int *_hash_shift, - unsigned int *_hash_mask, - unsigned long low_limit, - unsigned long high_limit); - -#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ -#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min - * shift passed via *_hash_shift */ -#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ - -/* Only NUMA needs hash distribution. 64bit NUMA architectures have - * sufficient vmalloc space. - */ -#ifdef CONFIG_NUMA -#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) -extern int hashdist; /* Distribute hashes across NUMA nodes? */ -#else -#define hashdist (0) -#endif - - -#endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 49c93b9308d7..68bb09c29ce8 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -81,7 +81,13 @@ struct ceph_options { #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) -#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) + +/* + * Handle the largest possible rbd object in one message. + * There is no limit on the size of cephfs objects, but it has to obey + * rsize and wsize mount options anyway. 
+ */ +#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index fc2b4491ee0a..800a2128d411 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -82,22 +82,6 @@ enum ceph_msg_data_type { CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ }; -static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) -{ - switch (type) { - case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: - case CEPH_MSG_DATA_PAGELIST: -#ifdef CONFIG_BLOCK - case CEPH_MSG_DATA_BIO: -#endif /* CONFIG_BLOCK */ - case CEPH_MSG_DATA_BVECS: - return true; - default: - return false; - } -} - #ifdef CONFIG_BLOCK struct ceph_bio_iter { @@ -181,7 +165,6 @@ struct ceph_bvec_iter { } while (0) struct ceph_msg_data { - struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK @@ -202,7 +185,6 @@ struct ceph_msg_data { struct ceph_msg_data_cursor { size_t total_resid; /* across all data items */ - struct list_head *data_head; /* = &ceph_msg->data */ struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ @@ -240,7 +222,9 @@ struct ceph_msg { struct ceph_buffer *middle; size_t data_length; - struct list_head data; + struct ceph_msg_data *data; + int num_data_items; + int max_data_items; struct ceph_msg_data_cursor cursor; struct ceph_connection *con; @@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, void ceph_msg_data_add_bvecs(struct ceph_msg *msg, struct ceph_bvec_iter *bvec_pos); +struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, + gfp_t flags, bool can_fail); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h index 76c98a512758..729cdf700eae 100644 --- a/include/linux/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h @@ -13,14 +13,15 @@ struct ceph_msgpool { mempool_t *pool; int type; /* preallocated message type */ int front_len; /* preallocated payload size */ + int max_data_items; }; -extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, - int front_len, int size, bool blocking, - const char *name); +int ceph_msgpool_init(struct ceph_msgpool *pool, int type, + int front_len, int max_data_items, int size, + const char *name); extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); -extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, - int front_len); +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, + int max_data_items); extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); #endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 02096da01845..7a2af5034278 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -136,6 +136,13 @@ struct ceph_osd_req_op { u64 expected_object_size; u64 expected_write_size; } alloc_hint; + struct { + u64 snapid; + u64 src_version; + u8 flags; + u32 src_fadvise_flags; + struct ceph_osd_data osd_data; + } copy_from; }; }; @@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); -extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, - const char *class, 
const char *method); +int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, + const char *class, const char *method); extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode); @@ -511,6 +517,16 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct timespec64 *mtime, struct page **pages, int nr_pages); +int ceph_osdc_copy_from(struct ceph_osd_client *osdc, + u64 src_snapid, u64 src_version, + struct ceph_object_id *src_oid, + struct ceph_object_locator *src_oloc, + u32 src_fadvise_flags, + struct ceph_object_id *dst_oid, + struct ceph_object_locator *dst_oloc, + u32 dst_fadvise_flags, + u8 copy_from_flags); + /* watch/notify */ struct ceph_osd_linger_request * ceph_osdc_watch(struct ceph_osd_client *osdc, diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index d0223364349f..5dead8486fd8 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -23,16 +23,7 @@ struct ceph_pagelist_cursor { size_t room; /* room remaining to reset to */ }; -static inline void ceph_pagelist_init(struct ceph_pagelist *pl) -{ - INIT_LIST_HEAD(&pl->head); - pl->mapped_tail = NULL; - pl->length = 0; - pl->room = 0; - INIT_LIST_HEAD(&pl->free_list); - pl->num_pages_free = 0; - refcount_set(&pl->refcnt, 1); -} +struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); extern void ceph_pagelist_release(struct ceph_pagelist *pl); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index f1988387c5ad..3eb0e55665b4 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -410,6 +410,14 @@ enum { enum { CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */ + CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */ + CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */ + CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in + the near future */ + CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed + in the near future */ + CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only + once by this client */ }; #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ @@ -432,6 +440,15 @@ enum { }; enum { + CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */ + CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to + * cloneid */ + CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */ +}; + +enum { CEPH_OSD_WATCH_OP_UNWATCH = 0, CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, /* note: use only ODD ids to prevent pre-giant code from @@ -497,6 +514,17 @@ struct ceph_osd_op { __le64 expected_object_size; __le64 expected_write_size; } __attribute__ ((packed)) alloc_hint; + struct { + __le64 snapid; + __le64 src_version; + __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */ + /* + * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags + * for src object, flags for dest object are in + * ceph_osd_op::flags. 
+	 */
+	__le32 src_fadvise_flags;
+	} __attribute__ ((packed)) copy_from;
 	};
 	__le32 payload_len;
 } __attribute__ ((packed));
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 08b1aa70a38d..60c51871b04b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -119,6 +119,11 @@ struct clk_duty {
  *		Called with enable_lock held. This function must not
  *		sleep.
  *
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ *		of power.
+ *
  * @recalc_rate	Recalculate the rate of this clock, by querying hardware. The
  *		parent rate is an input parameter. It is up to the caller to
  *		ensure that the prepare_mutex is held across this call.
@@ -223,6 +228,8 @@ struct clk_ops {
 	void		(*disable)(struct clk_hw *hw);
 	int		(*is_enabled)(struct clk_hw *hw);
 	void		(*disable_unused)(struct clk_hw *hw);
+	int		(*save_context)(struct clk_hw *hw);
+	void		(*restore_context)(struct clk_hw *hw);
 	unsigned long	(*recalc_rate)(struct clk_hw *hw,
 					unsigned long parent_rate);
 	long		(*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -1011,5 +1018,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 
 #endif	/* platform dependent I/O accessors */
 
+void clk_gate_restore_context(struct clk_hw *hw);
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4f750c481b82..a7773b5c0b9f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id);
  */
 int __must_check clk_bulk_get(struct device *dev, int num_clks,
 			      struct clk_bulk_data *clks);
-
+/**
+ * clk_bulk_get_all - lookup and obtain all available references to clock
+ *		      producer.
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get all clk consumers in one
+ * operation. If any of the clk cannot be acquired then any clks
+ * that were obtained will be freed before returning to the caller.
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table in @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get_all(struct device *dev,
+				  struct clk_bulk_data **clks);
 /**
  * devm_clk_bulk_get - managed get multiple clk consumers
  * @dev: device for clock "consumer"
@@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
  */
 int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
 				   struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_all - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table in @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get several clk
+ * consumers in one operation with management, the clks will
+ * automatically be freed when the device is unbound.
+ */
+
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+				       struct clk_bulk_data **clks);
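[Editor's note: since the kernel-doc above is terse, here is a minimal
probe-time sketch of the new bulk-get-all API; the foo_probe() driver and
its error handling are hypothetical, only the clk_bulk_* calls come from
this header:

	/* Illustrative only: grab every clock listed for the device,
	 * enable them, and let devres drop the references on unbind. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct clk_bulk_data *clks;
		int num_clks, ret;

		num_clks = devm_clk_bulk_get_all(&pdev->dev, &clks);
		if (num_clks < 0)
			return num_clks;

		ret = clk_bulk_prepare_enable(num_clks, clks);
		if (ret)
			return ret;

		return 0;
	}
]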
 /**
  * devm_clk_get - lookup and obtain a managed reference to a clock producer.
@@ -488,6 +523,19 @@ void clk_put(struct clk *clk);
 void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
 
 /**
+ * clk_bulk_put_all - "free" all the clock source
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
  * devm_clk_put	- "free" a managed clock source
  * @dev: device used to acquire the clock
  * @clk: clock source acquired with devm_clk_get()
@@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk);
  */
 struct clk *clk_get_sys(const char *dev_id, const char *con_id);
 
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock register for powerstates in which the
+ * contents of the registers will be lost. Occurs deep within the suspend
+ * code so locking is not necessary.
+ */
+int clk_save_context(void);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * This occurs with all clocks enabled. Occurs deep within the resume code
+ * so locking is not necessary.
+ */
+void clk_restore_context(void);
+
 #else /* !CONFIG_HAVE_CLK */
 
 static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
 	return 0;
 }
 
+static inline int __must_check clk_bulk_get_all(struct device *dev,
+						struct clk_bulk_data **clks)
+{
+	return 0;
+}
+
 static inline struct clk *devm_clk_get(struct device *dev, const char *id)
 {
 	return NULL;
 }
@@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
 	return 0;
 }
 
+static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+						     struct clk_bulk_data **clks)
+{
+
+	return 0;
+}
+
 static inline struct clk *devm_get_clk_from_child(struct device *dev,
 				struct device_node *np, const char *con_id)
 {
@@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {}
 
 static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
 
+static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
 static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
 
@@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
 	return NULL;
 }
+
+static inline int clk_save_context(void)
+{
+	return 0;
+}
+
+static inline void clk_restore_context(void) {}
+
 #endif
 
 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 9ebf1f8243bb..0ebbe2f0b45e 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -1,14 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
  * Copyright 2013 Ideas On Board SPRL
  * Copyright 2013, 2014 Horms Solutions Ltd.
  *
  * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  * Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #ifndef __LINUX_CLK_RENESAS_H_
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index a8faa38b1ed6..eacc5df57b99 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -159,6 +159,7 @@ struct clk_hw_omap {
 	const char		*clkdm_name;
 	struct clockdomain	*clkdm;
 	const struct clk_hw_omap_ops	*ops;
+	u32			context;
 };
 
 /*
@@ -290,9 +291,15 @@ struct ti_clk_features {
 #define TI_CLK_DPLL4_DENY_REPROGRAM	BIT(1)
 #define TI_CLK_DISABLE_CLKDM_CONTROL	BIT(2)
 #define TI_CLK_ERRATA_I810		BIT(3)
+#define TI_CLK_CLKCTRL_COMPAT		BIT(4)
 
 void ti_clk_setup_features(struct ti_clk_features *features);
 const struct ti_clk_features *ti_clk_get_features(void);
+int omap3_noncore_dpll_save_context(struct clk_hw *hw);
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
+
+int omap3_core_dpll_save_context(struct clk_hw *hw);
+void omap3_core_dpll_restore_context(struct clk_hw *hw);
 
 extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f1ef24c68fcc..88720b443cd6 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -488,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
 	compat_sigset_t v;
 	switch (_NSIG_WORDS) {
 	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+		/* fall through */
 	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+		/* fall through */
 	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+		/* fall through */
 	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
 	}
 	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1921545c6351..4170fcee5adb 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -344,29 +344,14 @@ static inline void *offset_to_ptr(const int *off)
 #endif
 #ifndef __compiletime_error
 # define __compiletime_error(message)
-/*
- * Sparse complains of variable sized arrays due to the temporary variable in
- * __compiletime_assert. Unfortunately we can't just expand it out to make
- * sparse see a constant array size without breaking compiletime_assert on old
- * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
- */
-# ifndef __CHECKER__
-#  define __compiletime_error_fallback(condition) \
-	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
-# endif
-#endif
-#ifndef __compiletime_error_fallback
-# define __compiletime_error_fallback(condition) do { } while (0)
 #endif
 
 #ifdef __OPTIMIZE__
 # define __compiletime_assert(condition, msg, prefix, suffix)		\
 	do {								\
-		int __cond = !(condition);				\
 		extern void prefix ## suffix(void) __compiletime_error(msg); \
-		if (__cond)						\
+		if (!(condition))					\
 			prefix ## suffix();				\
-		__compiletime_error_fallback(__cond);			\
 	} while (0)
 #else
 # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
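[Editor's note: for readers unfamiliar with the macro simplified above, a
small sketch of how compiletime_assert() is typically used; the struct name
is invented for illustration:

	/* Illustrative only: the assertion compiles to nothing when the
	 * condition holds; when it does not, the branch survives and
	 * references the __compiletime_error-annotated function, failing
	 * the build with the given message. */
	struct foo_hdr {
		u32 magic;
		u32 len;
	};

	static void foo_check(void)
	{
		compiletime_assert(sizeof(struct foo_hdr) == 8,
				   "foo_hdr must be exactly 8 bytes");
	}
]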
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..ab137f97ecbd 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -141,7 +141,6 @@ struct vc_data {
 	struct uni_pagedir *vc_uni_pagedir;
 	struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
 	struct uni_screen *vc_uni_screen;	/* unicode screen content */
-	bool vc_panic_force_write;		/* when oops/panic this VC can accept forced output/blanking */
 	/* additional information is in vt_kern.h */
 };
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 096c96f4f16a..a5a60691e48b 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -4,6 +4,61 @@
 
 #include <uapi/linux/fanotify.h>
 
-/* not valid from userspace, only kernel internal */
-#define FAN_MARK_ONDIR		0x00000100
+#define FAN_GROUP_FLAG(group, flag) \
+	((group)->fanotify_data.flags & (flag))
+
+/*
+ * Flags allowed to be passed from/to userspace.
+ *
+ * We intentionally do not add new bits to the old FAN_ALL_* constants, because
+ * they are uapi exposed constants. If there are programs out there using
+ * these constants, the programs may break if re-compiled with new uapi headers
+ * and then run on an old kernel.
+ */
+#define FANOTIFY_CLASS_BITS	(FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
+				 FAN_CLASS_PRE_CONTENT)
+
+#define FANOTIFY_INIT_FLAGS	(FANOTIFY_CLASS_BITS | \
+				 FAN_REPORT_TID | \
+				 FAN_CLOEXEC | FAN_NONBLOCK | \
+				 FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
+
+#define FANOTIFY_MARK_TYPE_BITS	(FAN_MARK_INODE | FAN_MARK_MOUNT | \
+				 FAN_MARK_FILESYSTEM)
+
+#define FANOTIFY_MARK_FLAGS	(FANOTIFY_MARK_TYPE_BITS | \
+				 FAN_MARK_ADD | \
+				 FAN_MARK_REMOVE | \
+				 FAN_MARK_DONT_FOLLOW | \
+				 FAN_MARK_ONLYDIR | \
+				 FAN_MARK_IGNORED_MASK | \
+				 FAN_MARK_IGNORED_SURV_MODIFY | \
+				 FAN_MARK_FLUSH)
+
+/* Events that user can request to be notified on */
+#define FANOTIFY_EVENTS		(FAN_ACCESS | FAN_MODIFY | \
+				 FAN_CLOSE | FAN_OPEN)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS	(FAN_OPEN_PERM | FAN_ACCESS_PERM)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define FANOTIFY_EVENT_FLAGS	(FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+/* Events that may be reported to user */
+#define FANOTIFY_OUTGOING_EVENTS	(FANOTIFY_EVENTS | \
+					 FANOTIFY_PERM_EVENTS | \
+					 FAN_Q_OVERFLOW)
+
+#define ALL_FANOTIFY_EVENT_BITS	(FANOTIFY_OUTGOING_EVENTS | \
+				 FANOTIFY_EVENT_FLAGS)
+
+/* Do not use these old uapi constants internally */
+#undef FAN_ALL_CLASS_BITS
+#undef FAN_ALL_INIT_FLAGS
+#undef FAN_ALL_MARK_FLAGS
+#undef FAN_ALL_EVENTS
+#undef FAN_ALL_PERM_EVENTS
+#undef FAN_ALL_OUTGOING_EVENTS
+
 #endif	/* _LINUX_FANOTIFY_H */
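[Editor's note: the grouped masks above are kernel-internal; a simplified
sketch of the kind of validation they enable in fanotify_init() handling.
The helper name is hypothetical, not the literal fs/notify code:

	/* Illustrative only: reject any flag bit the kernel does not
	 * know about, instead of silently accepting it. */
	static int example_check_init_flags(unsigned int flags)
	{
		if (flags & ~FANOTIFY_INIT_FLAGS)
			return -EINVAL;
		return 0;
	}
]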
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3e7e75383d32..a3cab6dc9b44 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -456,10 +456,13 @@ struct fb_tile_ops {
  * and host endianness. Drivers should not use this flag.
  */
 #define FBINFO_BE_MATH  0x100000
+/*
+ * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
+ * drivers to stop userspace from trying to share buffers behind the kernel's
+ * back. Instead dma-buf based buffer sharing should be used.
+ */
+#define FBINFO_HIDE_SMEM_START  0x200000
 
-/* report to the VT layer that this fb driver can accept forced console
-   output like oopses */
-#define FBINFO_CAN_FORCE_OUTPUT 0x200000
 
 struct fb_info {
 	atomic_t count;
@@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
 extern int unlink_framebuffer(struct fb_info *fb_info);
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+					       const char *name);
 extern int remove_conflicting_framebuffers(struct apertures_struct *a,
 					   const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 91b4c934f02e..de629b706d1d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
+extern int bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
new file mode 100644
index 000000000000..6312c8cb084a
--- /dev/null
+++ b/include/linux/firmware/imx/ipc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _SC_IPC_H
+#define _SC_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define IMX_SC_RPC_VERSION	1
+#define IMX_SC_RPC_MAX_MSG	8
+
+struct imx_sc_ipc;
+
+enum imx_sc_rpc_svc {
+	IMX_SC_RPC_SVC_UNKNOWN = 0,
+	IMX_SC_RPC_SVC_RETURN = 1,
+	IMX_SC_RPC_SVC_PM = 2,
+	IMX_SC_RPC_SVC_RM = 3,
+	IMX_SC_RPC_SVC_TIMER = 5,
+	IMX_SC_RPC_SVC_PAD = 6,
+	IMX_SC_RPC_SVC_MISC = 7,
+	IMX_SC_RPC_SVC_IRQ = 8,
+	IMX_SC_RPC_SVC_ABORT = 9
+};
+
+struct imx_sc_rpc_msg {
+	uint8_t ver;
+	uint8_t size;
+	uint8_t svc;
+	uint8_t func;
+};
+
+/*
+ * This is a function to send an RPC message over an IPC channel.
+ * It is called by client-side SCFW API function shims.
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in,out] msg         handle to a message
+ * @param[in]     have_resp   response flag
+ *
+ * If have_resp is true then this function waits for a response
+ * and returns the result in msg.
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+
+/*
+ * This function gets the default ipc handle used by SCU
+ *
+ * @param[out]	ipc	sc ipc handle
+ *
+ * @return Returns an error code (0 = success, failed if < 0)
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#endif /* _SC_IPC_H */
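[Editor's note: to make the calling convention concrete, a hedged sketch of
a client shim built on imx_scu_call_rpc(). The message struct, its packing,
and the size value are assumptions for illustration (the real shims live in
drivers/firmware/imx/); IMX_SC_MISC_FUNC_SET_CONTROL comes from the MISC
service header introduced just below:

	/* Illustrative only: a hypothetical request layout for a MISC
	 * "set control" RPC. */
	struct example_msg_misc_set_ctrl {
		struct imx_sc_rpc_msg hdr;
		u32 ctrl;
		u32 val;
		u16 resource;
	} __packed;

	static int example_set_ctrl(struct imx_sc_ipc *ipc, u16 resource,
				    u32 ctrl, u32 val)
	{
		struct example_msg_misc_set_ctrl msg;
		struct imx_sc_rpc_msg *hdr = &msg.hdr;

		hdr->ver  = IMX_SC_RPC_VERSION;
		hdr->svc  = IMX_SC_RPC_SVC_MISC;	/* service selector */
		hdr->func = IMX_SC_MISC_FUNC_SET_CONTROL;
		hdr->size = 3;	/* message size in 32-bit words (assumed) */

		msg.ctrl = ctrl;
		msg.val = val;
		msg.resource = resource;

		/* Blocking call: waits for the SCU's response in msg. */
		return imx_scu_call_rpc(ipc, &msg, true);
	}
]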
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
new file mode 100644
index 000000000000..29ada609de03
--- /dev/null
+++ b/include/linux/firmware/imx/sci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public System Controller Interface (SCI)
+ * definitions.
+ */
+
+#ifndef _SC_SCI_H
+#define _SC_SCI_H
+
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/types.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
new file mode 100644
index 000000000000..e21c49aba92f
--- /dev/null
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Miscellaneous (MISC) function.
+ *
+ * MISC_SVC (SVC) Miscellaneous Service
+ *
+ * Module for the Miscellaneous (MISC) service.
+ */
+
+#ifndef _SC_MISC_API_H
+#define _SC_MISC_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC MISC function calls.
+ */ +enum imx_misc_func { + IMX_SC_MISC_FUNC_UNKNOWN = 0, + IMX_SC_MISC_FUNC_SET_CONTROL = 1, + IMX_SC_MISC_FUNC_GET_CONTROL = 2, + IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4, + IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5, + IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8, + IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9, + IMX_SC_MISC_FUNC_DEBUG_OUT = 10, + IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6, + IMX_SC_MISC_FUNC_BUILD_INFO = 15, + IMX_SC_MISC_FUNC_UNIQUE_ID = 19, + IMX_SC_MISC_FUNC_SET_ARI = 3, + IMX_SC_MISC_FUNC_BOOT_STATUS = 7, + IMX_SC_MISC_FUNC_BOOT_DONE = 14, + IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11, + IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17, + IMX_SC_MISC_FUNC_SET_TEMP = 12, + IMX_SC_MISC_FUNC_GET_TEMP = 13, + IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16, + IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18, +}; + +/* + * Control Functions + */ + +int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource, + u8 ctrl, u32 val); + +int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, + u8 ctrl, u32 *val); + +#endif /* _SC_MISC_API_H */ diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h new file mode 100644 index 000000000000..9cbf0c4a6069 --- /dev/null +++ b/include/linux/firmware/imx/types.h @@ -0,0 +1,617 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017~2018 NXP + * + * Header file containing types used across multiple service APIs. + */ + +#ifndef _SC_TYPES_H +#define _SC_TYPES_H + +/* + * This type is used to indicate a resource. Resources include peripherals + * and bus masters (but not memory regions). Note items from list should + * never be changed or removed (only added to at the end of the list). + */ +enum imx_sc_rsrc { + IMX_SC_R_A53 = 0, + IMX_SC_R_A53_0 = 1, + IMX_SC_R_A53_1 = 2, + IMX_SC_R_A53_2 = 3, + IMX_SC_R_A53_3 = 4, + IMX_SC_R_A72 = 5, + IMX_SC_R_A72_0 = 6, + IMX_SC_R_A72_1 = 7, + IMX_SC_R_A72_2 = 8, + IMX_SC_R_A72_3 = 9, + IMX_SC_R_CCI = 10, + IMX_SC_R_DB = 11, + IMX_SC_R_DRC_0 = 12, + IMX_SC_R_DRC_1 = 13, + IMX_SC_R_GIC_SMMU = 14, + IMX_SC_R_IRQSTR_M4_0 = 15, + IMX_SC_R_IRQSTR_M4_1 = 16, + IMX_SC_R_SMMU = 17, + IMX_SC_R_GIC = 18, + IMX_SC_R_DC_0_BLIT0 = 19, + IMX_SC_R_DC_0_BLIT1 = 20, + IMX_SC_R_DC_0_BLIT2 = 21, + IMX_SC_R_DC_0_BLIT_OUT = 22, + IMX_SC_R_DC_0_CAPTURE0 = 23, + IMX_SC_R_DC_0_CAPTURE1 = 24, + IMX_SC_R_DC_0_WARP = 25, + IMX_SC_R_DC_0_INTEGRAL0 = 26, + IMX_SC_R_DC_0_INTEGRAL1 = 27, + IMX_SC_R_DC_0_VIDEO0 = 28, + IMX_SC_R_DC_0_VIDEO1 = 29, + IMX_SC_R_DC_0_FRAC0 = 30, + IMX_SC_R_DC_0_FRAC1 = 31, + IMX_SC_R_DC_0 = 32, + IMX_SC_R_GPU_2_PID0 = 33, + IMX_SC_R_DC_0_PLL_0 = 34, + IMX_SC_R_DC_0_PLL_1 = 35, + IMX_SC_R_DC_1_BLIT0 = 36, + IMX_SC_R_DC_1_BLIT1 = 37, + IMX_SC_R_DC_1_BLIT2 = 38, + IMX_SC_R_DC_1_BLIT_OUT = 39, + IMX_SC_R_DC_1_CAPTURE0 = 40, + IMX_SC_R_DC_1_CAPTURE1 = 41, + IMX_SC_R_DC_1_WARP = 42, + IMX_SC_R_DC_1_INTEGRAL0 = 43, + IMX_SC_R_DC_1_INTEGRAL1 = 44, + IMX_SC_R_DC_1_VIDEO0 = 45, + IMX_SC_R_DC_1_VIDEO1 = 46, + IMX_SC_R_DC_1_FRAC0 = 47, + IMX_SC_R_DC_1_FRAC1 = 48, + IMX_SC_R_DC_1 = 49, + IMX_SC_R_GPU_3_PID0 = 50, + IMX_SC_R_DC_1_PLL_0 = 51, + IMX_SC_R_DC_1_PLL_1 = 52, + IMX_SC_R_SPI_0 = 53, + IMX_SC_R_SPI_1 = 54, + IMX_SC_R_SPI_2 = 55, + IMX_SC_R_SPI_3 = 56, + IMX_SC_R_UART_0 = 57, + IMX_SC_R_UART_1 = 58, + IMX_SC_R_UART_2 = 59, + IMX_SC_R_UART_3 = 60, + IMX_SC_R_UART_4 = 61, + IMX_SC_R_EMVSIM_0 = 62, + IMX_SC_R_EMVSIM_1 = 63, + IMX_SC_R_DMA_0_CH0 = 64, + IMX_SC_R_DMA_0_CH1 = 65, + IMX_SC_R_DMA_0_CH2 = 66, + IMX_SC_R_DMA_0_CH3 = 67, + IMX_SC_R_DMA_0_CH4 = 68, + 
IMX_SC_R_DMA_0_CH5 = 69, + IMX_SC_R_DMA_0_CH6 = 70, + IMX_SC_R_DMA_0_CH7 = 71, + IMX_SC_R_DMA_0_CH8 = 72, + IMX_SC_R_DMA_0_CH9 = 73, + IMX_SC_R_DMA_0_CH10 = 74, + IMX_SC_R_DMA_0_CH11 = 75, + IMX_SC_R_DMA_0_CH12 = 76, + IMX_SC_R_DMA_0_CH13 = 77, + IMX_SC_R_DMA_0_CH14 = 78, + IMX_SC_R_DMA_0_CH15 = 79, + IMX_SC_R_DMA_0_CH16 = 80, + IMX_SC_R_DMA_0_CH17 = 81, + IMX_SC_R_DMA_0_CH18 = 82, + IMX_SC_R_DMA_0_CH19 = 83, + IMX_SC_R_DMA_0_CH20 = 84, + IMX_SC_R_DMA_0_CH21 = 85, + IMX_SC_R_DMA_0_CH22 = 86, + IMX_SC_R_DMA_0_CH23 = 87, + IMX_SC_R_DMA_0_CH24 = 88, + IMX_SC_R_DMA_0_CH25 = 89, + IMX_SC_R_DMA_0_CH26 = 90, + IMX_SC_R_DMA_0_CH27 = 91, + IMX_SC_R_DMA_0_CH28 = 92, + IMX_SC_R_DMA_0_CH29 = 93, + IMX_SC_R_DMA_0_CH30 = 94, + IMX_SC_R_DMA_0_CH31 = 95, + IMX_SC_R_I2C_0 = 96, + IMX_SC_R_I2C_1 = 97, + IMX_SC_R_I2C_2 = 98, + IMX_SC_R_I2C_3 = 99, + IMX_SC_R_I2C_4 = 100, + IMX_SC_R_ADC_0 = 101, + IMX_SC_R_ADC_1 = 102, + IMX_SC_R_FTM_0 = 103, + IMX_SC_R_FTM_1 = 104, + IMX_SC_R_CAN_0 = 105, + IMX_SC_R_CAN_1 = 106, + IMX_SC_R_CAN_2 = 107, + IMX_SC_R_DMA_1_CH0 = 108, + IMX_SC_R_DMA_1_CH1 = 109, + IMX_SC_R_DMA_1_CH2 = 110, + IMX_SC_R_DMA_1_CH3 = 111, + IMX_SC_R_DMA_1_CH4 = 112, + IMX_SC_R_DMA_1_CH5 = 113, + IMX_SC_R_DMA_1_CH6 = 114, + IMX_SC_R_DMA_1_CH7 = 115, + IMX_SC_R_DMA_1_CH8 = 116, + IMX_SC_R_DMA_1_CH9 = 117, + IMX_SC_R_DMA_1_CH10 = 118, + IMX_SC_R_DMA_1_CH11 = 119, + IMX_SC_R_DMA_1_CH12 = 120, + IMX_SC_R_DMA_1_CH13 = 121, + IMX_SC_R_DMA_1_CH14 = 122, + IMX_SC_R_DMA_1_CH15 = 123, + IMX_SC_R_DMA_1_CH16 = 124, + IMX_SC_R_DMA_1_CH17 = 125, + IMX_SC_R_DMA_1_CH18 = 126, + IMX_SC_R_DMA_1_CH19 = 127, + IMX_SC_R_DMA_1_CH20 = 128, + IMX_SC_R_DMA_1_CH21 = 129, + IMX_SC_R_DMA_1_CH22 = 130, + IMX_SC_R_DMA_1_CH23 = 131, + IMX_SC_R_DMA_1_CH24 = 132, + IMX_SC_R_DMA_1_CH25 = 133, + IMX_SC_R_DMA_1_CH26 = 134, + IMX_SC_R_DMA_1_CH27 = 135, + IMX_SC_R_DMA_1_CH28 = 136, + IMX_SC_R_DMA_1_CH29 = 137, + IMX_SC_R_DMA_1_CH30 = 138, + IMX_SC_R_DMA_1_CH31 = 139, + IMX_SC_R_UNUSED1 = 140, + IMX_SC_R_UNUSED2 = 141, + IMX_SC_R_UNUSED3 = 142, + IMX_SC_R_UNUSED4 = 143, + IMX_SC_R_GPU_0_PID0 = 144, + IMX_SC_R_GPU_0_PID1 = 145, + IMX_SC_R_GPU_0_PID2 = 146, + IMX_SC_R_GPU_0_PID3 = 147, + IMX_SC_R_GPU_1_PID0 = 148, + IMX_SC_R_GPU_1_PID1 = 149, + IMX_SC_R_GPU_1_PID2 = 150, + IMX_SC_R_GPU_1_PID3 = 151, + IMX_SC_R_PCIE_A = 152, + IMX_SC_R_SERDES_0 = 153, + IMX_SC_R_MATCH_0 = 154, + IMX_SC_R_MATCH_1 = 155, + IMX_SC_R_MATCH_2 = 156, + IMX_SC_R_MATCH_3 = 157, + IMX_SC_R_MATCH_4 = 158, + IMX_SC_R_MATCH_5 = 159, + IMX_SC_R_MATCH_6 = 160, + IMX_SC_R_MATCH_7 = 161, + IMX_SC_R_MATCH_8 = 162, + IMX_SC_R_MATCH_9 = 163, + IMX_SC_R_MATCH_10 = 164, + IMX_SC_R_MATCH_11 = 165, + IMX_SC_R_MATCH_12 = 166, + IMX_SC_R_MATCH_13 = 167, + IMX_SC_R_MATCH_14 = 168, + IMX_SC_R_PCIE_B = 169, + IMX_SC_R_SATA_0 = 170, + IMX_SC_R_SERDES_1 = 171, + IMX_SC_R_HSIO_GPIO = 172, + IMX_SC_R_MATCH_15 = 173, + IMX_SC_R_MATCH_16 = 174, + IMX_SC_R_MATCH_17 = 175, + IMX_SC_R_MATCH_18 = 176, + IMX_SC_R_MATCH_19 = 177, + IMX_SC_R_MATCH_20 = 178, + IMX_SC_R_MATCH_21 = 179, + IMX_SC_R_MATCH_22 = 180, + IMX_SC_R_MATCH_23 = 181, + IMX_SC_R_MATCH_24 = 182, + IMX_SC_R_MATCH_25 = 183, + IMX_SC_R_MATCH_26 = 184, + IMX_SC_R_MATCH_27 = 185, + IMX_SC_R_MATCH_28 = 186, + IMX_SC_R_LCD_0 = 187, + IMX_SC_R_LCD_0_PWM_0 = 188, + IMX_SC_R_LCD_0_I2C_0 = 189, + IMX_SC_R_LCD_0_I2C_1 = 190, + IMX_SC_R_PWM_0 = 191, + IMX_SC_R_PWM_1 = 192, + IMX_SC_R_PWM_2 = 193, + IMX_SC_R_PWM_3 = 194, + IMX_SC_R_PWM_4 = 195, + IMX_SC_R_PWM_5 = 196, + IMX_SC_R_PWM_6 = 197, + IMX_SC_R_PWM_7 = 198, + IMX_SC_R_GPIO_0 = 199, + 
IMX_SC_R_GPIO_1 = 200, + IMX_SC_R_GPIO_2 = 201, + IMX_SC_R_GPIO_3 = 202, + IMX_SC_R_GPIO_4 = 203, + IMX_SC_R_GPIO_5 = 204, + IMX_SC_R_GPIO_6 = 205, + IMX_SC_R_GPIO_7 = 206, + IMX_SC_R_GPT_0 = 207, + IMX_SC_R_GPT_1 = 208, + IMX_SC_R_GPT_2 = 209, + IMX_SC_R_GPT_3 = 210, + IMX_SC_R_GPT_4 = 211, + IMX_SC_R_KPP = 212, + IMX_SC_R_MU_0A = 213, + IMX_SC_R_MU_1A = 214, + IMX_SC_R_MU_2A = 215, + IMX_SC_R_MU_3A = 216, + IMX_SC_R_MU_4A = 217, + IMX_SC_R_MU_5A = 218, + IMX_SC_R_MU_6A = 219, + IMX_SC_R_MU_7A = 220, + IMX_SC_R_MU_8A = 221, + IMX_SC_R_MU_9A = 222, + IMX_SC_R_MU_10A = 223, + IMX_SC_R_MU_11A = 224, + IMX_SC_R_MU_12A = 225, + IMX_SC_R_MU_13A = 226, + IMX_SC_R_MU_5B = 227, + IMX_SC_R_MU_6B = 228, + IMX_SC_R_MU_7B = 229, + IMX_SC_R_MU_8B = 230, + IMX_SC_R_MU_9B = 231, + IMX_SC_R_MU_10B = 232, + IMX_SC_R_MU_11B = 233, + IMX_SC_R_MU_12B = 234, + IMX_SC_R_MU_13B = 235, + IMX_SC_R_ROM_0 = 236, + IMX_SC_R_FSPI_0 = 237, + IMX_SC_R_FSPI_1 = 238, + IMX_SC_R_IEE = 239, + IMX_SC_R_IEE_R0 = 240, + IMX_SC_R_IEE_R1 = 241, + IMX_SC_R_IEE_R2 = 242, + IMX_SC_R_IEE_R3 = 243, + IMX_SC_R_IEE_R4 = 244, + IMX_SC_R_IEE_R5 = 245, + IMX_SC_R_IEE_R6 = 246, + IMX_SC_R_IEE_R7 = 247, + IMX_SC_R_SDHC_0 = 248, + IMX_SC_R_SDHC_1 = 249, + IMX_SC_R_SDHC_2 = 250, + IMX_SC_R_ENET_0 = 251, + IMX_SC_R_ENET_1 = 252, + IMX_SC_R_MLB_0 = 253, + IMX_SC_R_DMA_2_CH0 = 254, + IMX_SC_R_DMA_2_CH1 = 255, + IMX_SC_R_DMA_2_CH2 = 256, + IMX_SC_R_DMA_2_CH3 = 257, + IMX_SC_R_DMA_2_CH4 = 258, + IMX_SC_R_USB_0 = 259, + IMX_SC_R_USB_1 = 260, + IMX_SC_R_USB_0_PHY = 261, + IMX_SC_R_USB_2 = 262, + IMX_SC_R_USB_2_PHY = 263, + IMX_SC_R_DTCP = 264, + IMX_SC_R_NAND = 265, + IMX_SC_R_LVDS_0 = 266, + IMX_SC_R_LVDS_0_PWM_0 = 267, + IMX_SC_R_LVDS_0_I2C_0 = 268, + IMX_SC_R_LVDS_0_I2C_1 = 269, + IMX_SC_R_LVDS_1 = 270, + IMX_SC_R_LVDS_1_PWM_0 = 271, + IMX_SC_R_LVDS_1_I2C_0 = 272, + IMX_SC_R_LVDS_1_I2C_1 = 273, + IMX_SC_R_LVDS_2 = 274, + IMX_SC_R_LVDS_2_PWM_0 = 275, + IMX_SC_R_LVDS_2_I2C_0 = 276, + IMX_SC_R_LVDS_2_I2C_1 = 277, + IMX_SC_R_M4_0_PID0 = 278, + IMX_SC_R_M4_0_PID1 = 279, + IMX_SC_R_M4_0_PID2 = 280, + IMX_SC_R_M4_0_PID3 = 281, + IMX_SC_R_M4_0_PID4 = 282, + IMX_SC_R_M4_0_RGPIO = 283, + IMX_SC_R_M4_0_SEMA42 = 284, + IMX_SC_R_M4_0_TPM = 285, + IMX_SC_R_M4_0_PIT = 286, + IMX_SC_R_M4_0_UART = 287, + IMX_SC_R_M4_0_I2C = 288, + IMX_SC_R_M4_0_INTMUX = 289, + IMX_SC_R_M4_0_SIM = 290, + IMX_SC_R_M4_0_WDOG = 291, + IMX_SC_R_M4_0_MU_0B = 292, + IMX_SC_R_M4_0_MU_0A0 = 293, + IMX_SC_R_M4_0_MU_0A1 = 294, + IMX_SC_R_M4_0_MU_0A2 = 295, + IMX_SC_R_M4_0_MU_0A3 = 296, + IMX_SC_R_M4_0_MU_1A = 297, + IMX_SC_R_M4_1_PID0 = 298, + IMX_SC_R_M4_1_PID1 = 299, + IMX_SC_R_M4_1_PID2 = 300, + IMX_SC_R_M4_1_PID3 = 301, + IMX_SC_R_M4_1_PID4 = 302, + IMX_SC_R_M4_1_RGPIO = 303, + IMX_SC_R_M4_1_SEMA42 = 304, + IMX_SC_R_M4_1_TPM = 305, + IMX_SC_R_M4_1_PIT = 306, + IMX_SC_R_M4_1_UART = 307, + IMX_SC_R_M4_1_I2C = 308, + IMX_SC_R_M4_1_INTMUX = 309, + IMX_SC_R_M4_1_SIM = 310, + IMX_SC_R_M4_1_WDOG = 311, + IMX_SC_R_M4_1_MU_0B = 312, + IMX_SC_R_M4_1_MU_0A0 = 313, + IMX_SC_R_M4_1_MU_0A1 = 314, + IMX_SC_R_M4_1_MU_0A2 = 315, + IMX_SC_R_M4_1_MU_0A3 = 316, + IMX_SC_R_M4_1_MU_1A = 317, + IMX_SC_R_SAI_0 = 318, + IMX_SC_R_SAI_1 = 319, + IMX_SC_R_SAI_2 = 320, + IMX_SC_R_IRQSTR_SCU2 = 321, + IMX_SC_R_IRQSTR_DSP = 322, + IMX_SC_R_UNUSED5 = 323, + IMX_SC_R_UNUSED6 = 324, + IMX_SC_R_AUDIO_PLL_0 = 325, + IMX_SC_R_PI_0 = 326, + IMX_SC_R_PI_0_PWM_0 = 327, + IMX_SC_R_PI_0_PWM_1 = 328, + IMX_SC_R_PI_0_I2C_0 = 329, + IMX_SC_R_PI_0_PLL = 330, + IMX_SC_R_PI_1 = 331, + IMX_SC_R_PI_1_PWM_0 = 332, + IMX_SC_R_PI_1_PWM_1 
= 333, + IMX_SC_R_PI_1_I2C_0 = 334, + IMX_SC_R_PI_1_PLL = 335, + IMX_SC_R_SC_PID0 = 336, + IMX_SC_R_SC_PID1 = 337, + IMX_SC_R_SC_PID2 = 338, + IMX_SC_R_SC_PID3 = 339, + IMX_SC_R_SC_PID4 = 340, + IMX_SC_R_SC_SEMA42 = 341, + IMX_SC_R_SC_TPM = 342, + IMX_SC_R_SC_PIT = 343, + IMX_SC_R_SC_UART = 344, + IMX_SC_R_SC_I2C = 345, + IMX_SC_R_SC_MU_0B = 346, + IMX_SC_R_SC_MU_0A0 = 347, + IMX_SC_R_SC_MU_0A1 = 348, + IMX_SC_R_SC_MU_0A2 = 349, + IMX_SC_R_SC_MU_0A3 = 350, + IMX_SC_R_SC_MU_1A = 351, + IMX_SC_R_SYSCNT_RD = 352, + IMX_SC_R_SYSCNT_CMP = 353, + IMX_SC_R_DEBUG = 354, + IMX_SC_R_SYSTEM = 355, + IMX_SC_R_SNVS = 356, + IMX_SC_R_OTP = 357, + IMX_SC_R_VPU_PID0 = 358, + IMX_SC_R_VPU_PID1 = 359, + IMX_SC_R_VPU_PID2 = 360, + IMX_SC_R_VPU_PID3 = 361, + IMX_SC_R_VPU_PID4 = 362, + IMX_SC_R_VPU_PID5 = 363, + IMX_SC_R_VPU_PID6 = 364, + IMX_SC_R_VPU_PID7 = 365, + IMX_SC_R_VPU_UART = 366, + IMX_SC_R_VPUCORE = 367, + IMX_SC_R_VPUCORE_0 = 368, + IMX_SC_R_VPUCORE_1 = 369, + IMX_SC_R_VPUCORE_2 = 370, + IMX_SC_R_VPUCORE_3 = 371, + IMX_SC_R_DMA_4_CH0 = 372, + IMX_SC_R_DMA_4_CH1 = 373, + IMX_SC_R_DMA_4_CH2 = 374, + IMX_SC_R_DMA_4_CH3 = 375, + IMX_SC_R_DMA_4_CH4 = 376, + IMX_SC_R_ISI_CH0 = 377, + IMX_SC_R_ISI_CH1 = 378, + IMX_SC_R_ISI_CH2 = 379, + IMX_SC_R_ISI_CH3 = 380, + IMX_SC_R_ISI_CH4 = 381, + IMX_SC_R_ISI_CH5 = 382, + IMX_SC_R_ISI_CH6 = 383, + IMX_SC_R_ISI_CH7 = 384, + IMX_SC_R_MJPEG_DEC_S0 = 385, + IMX_SC_R_MJPEG_DEC_S1 = 386, + IMX_SC_R_MJPEG_DEC_S2 = 387, + IMX_SC_R_MJPEG_DEC_S3 = 388, + IMX_SC_R_MJPEG_ENC_S0 = 389, + IMX_SC_R_MJPEG_ENC_S1 = 390, + IMX_SC_R_MJPEG_ENC_S2 = 391, + IMX_SC_R_MJPEG_ENC_S3 = 392, + IMX_SC_R_MIPI_0 = 393, + IMX_SC_R_MIPI_0_PWM_0 = 394, + IMX_SC_R_MIPI_0_I2C_0 = 395, + IMX_SC_R_MIPI_0_I2C_1 = 396, + IMX_SC_R_MIPI_1 = 397, + IMX_SC_R_MIPI_1_PWM_0 = 398, + IMX_SC_R_MIPI_1_I2C_0 = 399, + IMX_SC_R_MIPI_1_I2C_1 = 400, + IMX_SC_R_CSI_0 = 401, + IMX_SC_R_CSI_0_PWM_0 = 402, + IMX_SC_R_CSI_0_I2C_0 = 403, + IMX_SC_R_CSI_1 = 404, + IMX_SC_R_CSI_1_PWM_0 = 405, + IMX_SC_R_CSI_1_I2C_0 = 406, + IMX_SC_R_HDMI = 407, + IMX_SC_R_HDMI_I2S = 408, + IMX_SC_R_HDMI_I2C_0 = 409, + IMX_SC_R_HDMI_PLL_0 = 410, + IMX_SC_R_HDMI_RX = 411, + IMX_SC_R_HDMI_RX_BYPASS = 412, + IMX_SC_R_HDMI_RX_I2C_0 = 413, + IMX_SC_R_ASRC_0 = 414, + IMX_SC_R_ESAI_0 = 415, + IMX_SC_R_SPDIF_0 = 416, + IMX_SC_R_SPDIF_1 = 417, + IMX_SC_R_SAI_3 = 418, + IMX_SC_R_SAI_4 = 419, + IMX_SC_R_SAI_5 = 420, + IMX_SC_R_GPT_5 = 421, + IMX_SC_R_GPT_6 = 422, + IMX_SC_R_GPT_7 = 423, + IMX_SC_R_GPT_8 = 424, + IMX_SC_R_GPT_9 = 425, + IMX_SC_R_GPT_10 = 426, + IMX_SC_R_DMA_2_CH5 = 427, + IMX_SC_R_DMA_2_CH6 = 428, + IMX_SC_R_DMA_2_CH7 = 429, + IMX_SC_R_DMA_2_CH8 = 430, + IMX_SC_R_DMA_2_CH9 = 431, + IMX_SC_R_DMA_2_CH10 = 432, + IMX_SC_R_DMA_2_CH11 = 433, + IMX_SC_R_DMA_2_CH12 = 434, + IMX_SC_R_DMA_2_CH13 = 435, + IMX_SC_R_DMA_2_CH14 = 436, + IMX_SC_R_DMA_2_CH15 = 437, + IMX_SC_R_DMA_2_CH16 = 438, + IMX_SC_R_DMA_2_CH17 = 439, + IMX_SC_R_DMA_2_CH18 = 440, + IMX_SC_R_DMA_2_CH19 = 441, + IMX_SC_R_DMA_2_CH20 = 442, + IMX_SC_R_DMA_2_CH21 = 443, + IMX_SC_R_DMA_2_CH22 = 444, + IMX_SC_R_DMA_2_CH23 = 445, + IMX_SC_R_DMA_2_CH24 = 446, + IMX_SC_R_DMA_2_CH25 = 447, + IMX_SC_R_DMA_2_CH26 = 448, + IMX_SC_R_DMA_2_CH27 = 449, + IMX_SC_R_DMA_2_CH28 = 450, + IMX_SC_R_DMA_2_CH29 = 451, + IMX_SC_R_DMA_2_CH30 = 452, + IMX_SC_R_DMA_2_CH31 = 453, + IMX_SC_R_ASRC_1 = 454, + IMX_SC_R_ESAI_1 = 455, + IMX_SC_R_SAI_6 = 456, + IMX_SC_R_SAI_7 = 457, + IMX_SC_R_AMIX = 458, + IMX_SC_R_MQS_0 = 459, + IMX_SC_R_DMA_3_CH0 = 460, + IMX_SC_R_DMA_3_CH1 = 461, + IMX_SC_R_DMA_3_CH2 = 462, + 
IMX_SC_R_DMA_3_CH3 = 463, + IMX_SC_R_DMA_3_CH4 = 464, + IMX_SC_R_DMA_3_CH5 = 465, + IMX_SC_R_DMA_3_CH6 = 466, + IMX_SC_R_DMA_3_CH7 = 467, + IMX_SC_R_DMA_3_CH8 = 468, + IMX_SC_R_DMA_3_CH9 = 469, + IMX_SC_R_DMA_3_CH10 = 470, + IMX_SC_R_DMA_3_CH11 = 471, + IMX_SC_R_DMA_3_CH12 = 472, + IMX_SC_R_DMA_3_CH13 = 473, + IMX_SC_R_DMA_3_CH14 = 474, + IMX_SC_R_DMA_3_CH15 = 475, + IMX_SC_R_DMA_3_CH16 = 476, + IMX_SC_R_DMA_3_CH17 = 477, + IMX_SC_R_DMA_3_CH18 = 478, + IMX_SC_R_DMA_3_CH19 = 479, + IMX_SC_R_DMA_3_CH20 = 480, + IMX_SC_R_DMA_3_CH21 = 481, + IMX_SC_R_DMA_3_CH22 = 482, + IMX_SC_R_DMA_3_CH23 = 483, + IMX_SC_R_DMA_3_CH24 = 484, + IMX_SC_R_DMA_3_CH25 = 485, + IMX_SC_R_DMA_3_CH26 = 486, + IMX_SC_R_DMA_3_CH27 = 487, + IMX_SC_R_DMA_3_CH28 = 488, + IMX_SC_R_DMA_3_CH29 = 489, + IMX_SC_R_DMA_3_CH30 = 490, + IMX_SC_R_DMA_3_CH31 = 491, + IMX_SC_R_AUDIO_PLL_1 = 492, + IMX_SC_R_AUDIO_CLK_0 = 493, + IMX_SC_R_AUDIO_CLK_1 = 494, + IMX_SC_R_MCLK_OUT_0 = 495, + IMX_SC_R_MCLK_OUT_1 = 496, + IMX_SC_R_PMIC_0 = 497, + IMX_SC_R_PMIC_1 = 498, + IMX_SC_R_SECO = 499, + IMX_SC_R_CAAM_JR1 = 500, + IMX_SC_R_CAAM_JR2 = 501, + IMX_SC_R_CAAM_JR3 = 502, + IMX_SC_R_SECO_MU_2 = 503, + IMX_SC_R_SECO_MU_3 = 504, + IMX_SC_R_SECO_MU_4 = 505, + IMX_SC_R_HDMI_RX_PWM_0 = 506, + IMX_SC_R_A35 = 507, + IMX_SC_R_A35_0 = 508, + IMX_SC_R_A35_1 = 509, + IMX_SC_R_A35_2 = 510, + IMX_SC_R_A35_3 = 511, + IMX_SC_R_DSP = 512, + IMX_SC_R_DSP_RAM = 513, + IMX_SC_R_CAAM_JR1_OUT = 514, + IMX_SC_R_CAAM_JR2_OUT = 515, + IMX_SC_R_CAAM_JR3_OUT = 516, + IMX_SC_R_VPU_DEC_0 = 517, + IMX_SC_R_VPU_ENC_0 = 518, + IMX_SC_R_CAAM_JR0 = 519, + IMX_SC_R_CAAM_JR0_OUT = 520, + IMX_SC_R_PMIC_2 = 521, + IMX_SC_R_DBLOGIC = 522, + IMX_SC_R_HDMI_PLL_1 = 523, + IMX_SC_R_BOARD_R0 = 524, + IMX_SC_R_BOARD_R1 = 525, + IMX_SC_R_BOARD_R2 = 526, + IMX_SC_R_BOARD_R3 = 527, + IMX_SC_R_BOARD_R4 = 528, + IMX_SC_R_BOARD_R5 = 529, + IMX_SC_R_BOARD_R6 = 530, + IMX_SC_R_BOARD_R7 = 531, + IMX_SC_R_MJPEG_DEC_MP = 532, + IMX_SC_R_MJPEG_ENC_MP = 533, + IMX_SC_R_VPU_TS_0 = 534, + IMX_SC_R_VPU_MU_0 = 535, + IMX_SC_R_VPU_MU_1 = 536, + IMX_SC_R_VPU_MU_2 = 537, + IMX_SC_R_VPU_MU_3 = 538, + IMX_SC_R_VPU_ENC_1 = 539, + IMX_SC_R_VPU = 540, + IMX_SC_R_LAST +}; + +/* NOTE - please add by replacing some of the UNUSED from above! */ + +/* + * This type is used to indicate a control. 
+ */ +enum imx_sc_ctrl { + IMX_SC_C_TEMP = 0, + IMX_SC_C_TEMP_HI = 1, + IMX_SC_C_TEMP_LOW = 2, + IMX_SC_C_PXL_LINK_MST1_ADDR = 3, + IMX_SC_C_PXL_LINK_MST2_ADDR = 4, + IMX_SC_C_PXL_LINK_MST_ENB = 5, + IMX_SC_C_PXL_LINK_MST1_ENB = 6, + IMX_SC_C_PXL_LINK_MST2_ENB = 7, + IMX_SC_C_PXL_LINK_SLV1_ADDR = 8, + IMX_SC_C_PXL_LINK_SLV2_ADDR = 9, + IMX_SC_C_PXL_LINK_MST_VLD = 10, + IMX_SC_C_PXL_LINK_MST1_VLD = 11, + IMX_SC_C_PXL_LINK_MST2_VLD = 12, + IMX_SC_C_SINGLE_MODE = 13, + IMX_SC_C_ID = 14, + IMX_SC_C_PXL_CLK_POLARITY = 15, + IMX_SC_C_LINESTATE = 16, + IMX_SC_C_PCIE_G_RST = 17, + IMX_SC_C_PCIE_BUTTON_RST = 18, + IMX_SC_C_PCIE_PERST = 19, + IMX_SC_C_PHY_RESET = 20, + IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21, + IMX_SC_C_PANIC = 22, + IMX_SC_C_PRIORITY_GROUP = 23, + IMX_SC_C_TXCLK = 24, + IMX_SC_C_CLKDIV = 25, + IMX_SC_C_DISABLE_50 = 26, + IMX_SC_C_DISABLE_125 = 27, + IMX_SC_C_SEL_125 = 28, + IMX_SC_C_MODE = 29, + IMX_SC_C_SYNC_CTRL0 = 30, + IMX_SC_C_KACHUNK_CNT = 31, + IMX_SC_C_KACHUNK_SEL = 32, + IMX_SC_C_SYNC_CTRL1 = 33, + IMX_SC_C_DPI_RESET = 34, + IMX_SC_C_MIPI_RESET = 35, + IMX_SC_C_DUAL_MODE = 36, + IMX_SC_C_VOLTAGE = 37, + IMX_SC_C_PXL_LINK_SEL = 38, + IMX_SC_C_OFS_SEL = 39, + IMX_SC_C_OFS_AUDIO = 40, + IMX_SC_C_OFS_PERIPH = 41, + IMX_SC_C_OFS_IRQ = 42, + IMX_SC_C_RST0 = 43, + IMX_SC_C_RST1 = 44, + IMX_SC_C_SEL0 = 45, + IMX_SC_C_LAST +}; + +#endif /* _SC_TYPES_H */ diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 37a5eaea69dd..f98c20dd266e 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -17,6 +17,7 @@ enum { SM_EFUSE_READ, SM_EFUSE_WRITE, SM_EFUSE_USER_MAX, + SM_GET_CHIP_ID, }; struct meson_sm_firmware; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h new file mode 100644 index 000000000000..3c3c28eff56a --- /dev/null +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Xilinx Zynq MPSoC Firmware layer + * + * Copyright (C) 2014-2018 Xilinx + * + * Michal Simek <michal.simek@xilinx.com> + * Davorin Mista <davorin.mista@aggios.com> + * Jolly Shah <jollys@xilinx.com> + * Rajan Vaja <rajanv@xilinx.com> + */ + +#ifndef __FIRMWARE_ZYNQMP_H__ +#define __FIRMWARE_ZYNQMP_H__ + +#define ZYNQMP_PM_VERSION_MAJOR 1 +#define ZYNQMP_PM_VERSION_MINOR 0 + +#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \ + ZYNQMP_PM_VERSION_MINOR) + +#define ZYNQMP_TZ_VERSION_MAJOR 1 +#define ZYNQMP_TZ_VERSION_MINOR 0 + +#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \ + ZYNQMP_TZ_VERSION_MINOR) + +/* SMC SIP service Call Function Identifier Prefix */ +#define PM_SIP_SVC 0xC2000000 +#define PM_GET_TRUSTZONE_VERSION 0xa03 + +/* Number of 32bits values in payload */ +#define PAYLOAD_ARG_CNT 4U + +enum pm_api_id { + PM_GET_API_VERSION = 1, + PM_IOCTL = 34, + PM_QUERY_DATA, + PM_CLOCK_ENABLE, + PM_CLOCK_DISABLE, + PM_CLOCK_GETSTATE, + PM_CLOCK_SETDIVIDER, + PM_CLOCK_GETDIVIDER, + PM_CLOCK_SETRATE, + PM_CLOCK_GETRATE, + PM_CLOCK_SETPARENT, + PM_CLOCK_GETPARENT, +}; + +/* PMU-FW return status codes */ +enum pm_ret_status { + XST_PM_SUCCESS = 0, + XST_PM_INTERNAL = 2000, + XST_PM_CONFLICT, + XST_PM_NO_ACCESS, + XST_PM_INVALID_NODE, + XST_PM_DOUBLE_REQ, + XST_PM_ABORT_SUSPEND, +}; + +enum pm_ioctl_id { + IOCTL_SET_PLL_FRAC_MODE = 8, + IOCTL_GET_PLL_FRAC_MODE, + IOCTL_SET_PLL_FRAC_DATA, + IOCTL_GET_PLL_FRAC_DATA, +}; + +enum pm_query_id { + PM_QID_INVALID, + PM_QID_CLOCK_GET_NAME, + PM_QID_CLOCK_GET_TOPOLOGY, 
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS, + PM_QID_CLOCK_GET_PARENTS, + PM_QID_CLOCK_GET_ATTRIBUTES, + PM_QID_CLOCK_GET_NUM_CLOCKS = 12, +}; + +/** + * struct zynqmp_pm_query_data - PM query data + * @qid: query ID + * @arg1: Argument 1 of query data + * @arg2: Argument 2 of query data + * @arg3: Argument 3 of query data + */ +struct zynqmp_pm_query_data { + u32 qid; + u32 arg1; + u32 arg2; + u32 arg3; +}; + +struct zynqmp_eemi_ops { + int (*get_api_version)(u32 *version); + int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); + int (*clock_enable)(u32 clock_id); + int (*clock_disable)(u32 clock_id); + int (*clock_getstate)(u32 clock_id, u32 *state); + int (*clock_setdivider)(u32 clock_id, u32 divider); + int (*clock_getdivider)(u32 clock_id, u32 *divider); + int (*clock_setrate)(u32 clock_id, u64 rate); + int (*clock_getrate)(u32 clock_id, u64 *rate); + int (*clock_setparent)(u32 clock_id, u32 parent_id); + int (*clock_getparent)(u32 clock_id, u32 *parent_id); + int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); +}; + +#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) +const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); +#else +static inline const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) +{ + return NULL; +} +#endif + +#endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 897eae8faee1..8252df30b9a1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); +/** + * struct address_space - Contents of a cacheable, mappable object. + * @host: Owner, either the inode or the block_device. + * @i_pages: Cached pages. + * @gfp_mask: Memory allocation flags to use for allocating pages. + * @i_mmap_writable: Number of VM_SHARED mappings. + * @i_mmap: Tree of private and shared mappings. + * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. + * @nrpages: Number of page entries, protected by the i_pages lock. + * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. + * @writeback_index: Writeback starts here. + * @a_ops: Methods. + * @flags: Error bits and flags (AS_*). + * @wb_err: The most recent error which has occurred. + * @private_lock: For use by the owner of the address_space. + * @private_list: For use by the owner of the address_space. + * @private_data: For use by the owner of the address_space.
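+ *
+ * For illustration only (not part of this patch): with @i_pages now an
+ * XArray, a cached page can be looked up under RCU with the generic
+ * XArray API, e.g.:
+ *
+ *	struct page *page = xa_load(&mapping->i_pages, index);
+ *
+ * Real pagecache users go through helpers such as find_get_page(),
+ * which also take a reference on the page.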
+ */ struct address_space { - struct inode *host; /* owner: inode, block_device */ - struct radix_tree_root i_pages; /* cached pages */ - atomic_t i_mmap_writable;/* count VM_SHARED mappings */ - struct rb_root_cached i_mmap; /* tree of private and shared mappings */ - struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ - /* Protected by the i_pages lock */ - unsigned long nrpages; /* number of total pages */ - /* number of shadow or DAX exceptional entries */ + struct inode *host; + struct xarray i_pages; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + unsigned long nrpages; unsigned long nrexceptional; - pgoff_t writeback_index;/* writeback starts here */ - const struct address_space_operations *a_ops; /* methods */ - unsigned long flags; /* error bits */ - spinlock_t private_lock; /* for use by the address_space */ - gfp_t gfp_mask; /* implicit gfp mask for allocations */ - struct list_head private_list; /* for use by the address_space */ - void *private_data; /* ditto */ + pgoff_t writeback_index; + const struct address_space_operations *a_ops; + unsigned long flags; errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -467,15 +483,18 @@ struct block_device { struct mutex bd_fsfreeze_mutex; } __randomize_layout; +/* XArray tags, for tagging dirty and writeback pages in the pagecache. */ +#define PAGECACHE_TAG_DIRTY XA_MARK_0 +#define PAGECACHE_TAG_WRITEBACK XA_MARK_1 +#define PAGECACHE_TAG_TOWRITE XA_MARK_2 + /* - * Radix-tree tags, for tagging dirty and writeback pages within the pagecache - * radix trees + * Returns true if any of the pages in the mapping are marked with the tag. */ -#define PAGECACHE_TAG_DIRTY 0 -#define PAGECACHE_TAG_WRITEBACK 1 -#define PAGECACHE_TAG_TOWRITE 2 - -int mapping_tagged(struct address_space *mapping, int tag); +static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) +{ + return xa_marked(&mapping->i_pages, tag); +} static inline void i_mmap_lock_write(struct address_space *mapping) { @@ -1393,17 +1412,26 @@ struct super_block { struct sb_writers s_writers; + /* + * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and + * s_fsnotify_marks together for cache efficiency. They are frequently + * accessed and rarely modified. + */ + void *s_fs_info; /* Filesystem private info */ + + /* Granularity of c/m/atime in ns (cannot be worse than a second) */ + u32 s_time_gran; +#ifdef CONFIG_FSNOTIFY + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector __rcu *s_fsnotify_marks; +#endif + char s_id[32]; /* Informational name */ uuid_t s_uuid; /* UUID */ - void *s_fs_info; /* Filesystem private info */ unsigned int s_max_links; fmode_t s_mode; - /* Granularity of c/m/atime in ns. - Cannot be worse than a second */ - u32 s_time_gran; - /* * The next field is for VFS *only*. No filesystems have any business * even looking at it. You had been warned. 
@@ -1428,6 +1456,9 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; + /* Pending fsnotify inode refs */ + atomic_long_t s_fsnotify_inode_refs; + /* Being remounted read-only */ int s_readonly_remount; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index b8f4182f42f1..135b973e44d1 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -68,15 +68,20 @@ #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) +/* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ - FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ - FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ + FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME) + +/* Extra flags that may be reported with event or control handling of events */ +#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) +#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) + struct fsnotify_group; struct fsnotify_event; struct fsnotify_mark; @@ -189,10 +194,10 @@ struct fsnotify_group { /* allows a group to block waiting for a userspace response */ struct list_head access_list; wait_queue_head_t access_waitq; - int f_flags; + int flags; /* flags from fanotify_init() */ + int f_flags; /* event_f_flags from fanotify_init() */ unsigned int max_marks; struct user_struct *user; - bool audit; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; @@ -206,12 +211,14 @@ struct fsnotify_group { enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, FSNOTIFY_OBJ_TYPE_VFSMOUNT, + FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) +#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) static inline bool fsnotify_valid_obj_type(unsigned int type) @@ -255,6 +262,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ FSNOTIFY_ITER_FUNCS(inode, INODE) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) +FSNOTIFY_ITER_FUNCS(sb, SB) #define fsnotify_foreach_obj_type(type) \ for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) @@ -267,8 +275,8 @@ struct fsnotify_mark_connector; typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; /* - * Inode / vfsmount point to this structure which tracks all marks attached to - * the inode / vfsmount. The reference to inode / vfsmount is held by this + * Inode/vfsmount/sb point to this structure which tracks all marks attached to + * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this * structure. We destroy this structure when there are no more marks attached * to it. The structure is protected by fsnotify_mark_srcu. 
*/ @@ -335,6 +343,7 @@ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int dat extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); +extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); static inline int fsnotify_inode_watches_children(struct inode *inode) @@ -455,9 +464,13 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); } +/* run all the marks in a group, and clear all of the sb marks */ +static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL); +} extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); -extern void fsnotify_unmount_inodes(struct super_block *sb); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); @@ -484,6 +497,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode) static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} +static inline void fsnotify_sb_delete(struct super_block *sb) +{} + static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index d271ff23984f..4f3febc0f971 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_XV_YCC_601, HDMI_EXTENDED_COLORIMETRY_XV_YCC_709, HDMI_EXTENDED_COLORIMETRY_S_YCC_601, - HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601, - HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB, + HDMI_EXTENDED_COLORIMETRY_OPYCC_601, + HDMI_EXTENDED_COLORIMETRY_OPRGB, /* The following EC values are only defined in CEA-861-F. */ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, diff --git a/include/linux/hmm.h b/include/linux/hmm.h index dde947083d4e..c6fb869a81c0 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * Authors: Jérôme Glisse <jglisse@redhat.com> + * Authors: Jérôme Glisse <jglisse@redhat.com> */ /* * Heterogeneous Memory Management (HMM) @@ -274,14 +274,29 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, struct hmm_mirror; /* - * enum hmm_update_type - type of update + * enum hmm_update_event - type of update * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) */ -enum hmm_update_type { +enum hmm_update_event { HMM_UPDATE_INVALIDATE, }; /* + * struct hmm_update - HMM update information for the callback + * + * @start: virtual start address of the range to update + * @end: virtual end address of the range to update + * @event: event triggering the update (what is happening) + * @blockable: can the callback block/sleep?
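+ *
+ * Illustrative sketch (not part of this patch; mirror_lock is a
+ * hypothetical driver lock): a callback that cannot sleep is expected
+ * to honour @blockable, e.g.:
+ *
+ *	if (!update->blockable && !mutex_trylock(&mirror_lock))
+ *		return -EAGAIN;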
+ */ +struct hmm_update { + unsigned long start; + unsigned long end; + enum hmm_update_event event; + bool blockable; +}; + +/* * struct hmm_mirror_ops - HMM mirror device operations callback * * @update: callback to update range on a device @@ -300,9 +315,9 @@ struct hmm_mirror_ops { /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror - * @update_type: type of update that occurred to the CPU page table - * @start: virtual start address of the range to update - * @end: virtual end address of the range to update + * @update: update information (see struct hmm_update) + * Returns: -EAGAIN if update.blockable is false and the callback needs + * to block, 0 otherwise. * * This callback ultimately originates from mmu_notifiers when the CPU * page table is updated. The device driver must update its page table @@ -313,10 +328,8 @@ struct hmm_mirror_ops { * page tables are completely updated (TLBs flushed, etc); this is a * synchronous call. */ - void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, - enum hmm_update_type update_type, - unsigned long start, - unsigned long end); + int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, + const struct hmm_update *update); }; /* diff --git a/include/linux/idr.h b/include/linux/idr.h index 3ec8628ce17f..60daf34b625d 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -214,8 +214,7 @@ static inline void idr_preload_end(void) ++id, (entry) = idr_get_next((idr), &(id))) /* - * IDA - IDR based id allocator, use when translation from id to - * pointer isn't necessary. + * IDA - ID Allocator, use when translation from id to pointer isn't necessary. */ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) @@ -225,14 +224,14 @@ struct ida_bitmap { unsigned long bitmap[IDA_BITMAP_LONGS]; }; -DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); - struct ida { - struct radix_tree_root ida_rt; + struct xarray xa; }; + +#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC) + #define IDA_INIT(name) { \ - .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ + .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \ } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) @@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) static inline void ida_init(struct ida *ida) { - INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); + xa_init_flags(&ida->xa, IDA_INIT_FLAGS); } #define ida_simple_get(ida, start, end, gfp) \ @@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida) static inline bool ida_is_empty(const struct ida *ida) { - return radix_tree_empty(&ida->ida_rt); + return xa_empty(&ida->xa); } - -/* in lib/radix-tree.c */ -int ida_pre_get(struct ida *ida, gfp_t gfp_mask); #endif /* __IDR_H__ */ diff --git a/include/linux/list.h b/include/linux/list.h index de04cc5ed536..edb7628e46ed 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -184,6 +184,29 @@ static inline void list_move_tail(struct list_head *list, } /** + * list_bulk_move_tail - move a subsection of a list to its tail + * @head: the head that will follow our entry + * @first: first entry to move + * @last: last entry to move, can be the same as first + * + * Move all entries between @first and including @last before @head. + * All three entries must belong to the same linked list.
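+ *
+ * Illustrative use (not part of this patch; names are hypothetical):
+ * given @first and @last pointing into the same source list, the whole
+ * run can be spliced onto the tail of another list in O(1):
+ *
+ *	LIST_HEAD(dst);
+ *	list_bulk_move_tail(&dst, first, last);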
+ */ +static inline void list_bulk_move_tail(struct list_head *head, + struct list_head *first, + struct list_head *last) +{ + first->prev->next = last->next; + last->next->prev = first->prev; + + head->prev->next = first; + first->prev = head->prev; + + last->next = head; + head->prev = last; +} + +/** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 2acdd046df2d..aee299a6aa76 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -2,7 +2,6 @@ #define _LINUX_MEMBLOCK_H #ifdef __KERNEL__ -#ifdef CONFIG_HAVE_MEMBLOCK /* * Logical memory blocks. * @@ -16,6 +15,19 @@ #include <linux/init.h> #include <linux/mm.h> +#include <asm/dma.h> + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; + +/* + * highest page + */ +extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; #define INIT_MEMBLOCK_REGIONS 128 #define INIT_PHYSMEM_REGIONS 4 @@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); enum memblock_flags choose_memblock_flags(void); +unsigned long memblock_free_all(void); +void reset_node_managed_pages(pg_data_t *pgdat); +void reset_all_zones_managed_pages(void); + /* Low level functions */ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, @@ -301,10 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r) } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ -phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); -phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); +/* Flags for memblock allocation APIs */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + +/* We are using top down, so it is safe to use 0 here */ +#define MEMBLOCK_LOW_LIMIT 0 + +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + +phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); + +phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align); + +void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); +void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); +void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); + +static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_raw(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_from(phys_addr_t size, + phys_addr_t align, + phys_addr_t min_addr) +{ + return memblock_alloc_try_nid(size, align, min_addr, + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_nopanic(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid_nopanic(size, align, 
MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_low(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); +} +static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size, + phys_addr_t align, + phys_addr_t min_addr) +{ + return memblock_alloc_try_nid_nopanic(size, align, min_addr, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_node(phys_addr_t size, + phys_addr_t align, int nid) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, + int nid) +{ + return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, + MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static inline void __init memblock_free_early(phys_addr_t base, + phys_addr_t size) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_early_nid(phys_addr_t base, + phys_addr_t size, int nid) +{ + __memblock_free_early(base, size); +} -phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); +static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) +{ + __memblock_free_late(base, size); +} /* * Set the allocation direction to bottom-up or top-down. @@ -324,10 +446,6 @@ static inline bool memblock_bottom_up(void) return memblock.bottom_up; } -/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ -#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) -#define MEMBLOCK_ALLOC_ACCESSIBLE 0 - phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, enum memblock_flags flags); @@ -433,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo i < memblock_type->cnt; \ i++, rgn = &memblock_type->regions[i]) +extern void *alloc_large_system_hash(const char *tablename, + unsigned long bucketsize, + unsigned long numentries, + int scale, + int flags, + unsigned int *_hash_shift, + unsigned int *_hash_mask, + unsigned long low_limit, + unsigned long high_limit); + +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ +#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ + +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. + */ +#ifdef CONFIG_NUMA +#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) +extern int hashdist; /* Distribute hashes across NUMA nodes? 
*/ +#else +#define hashdist (0) +#endif + #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); #else @@ -440,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } #endif -#else -static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) -{ - return 0; -} -#endif /* CONFIG_HAVE_MEMBLOCK */ #endif /* __KERNEL__ */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 34a28227068d..ffd9cd10fcf3 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern void remove_memory(int nid, u64 start, u64 size); +extern void __remove_memory(int nid, u64 start, u64 size); #else static inline bool is_mem_section_removable(unsigned long pfn, @@ -317,11 +318,13 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) } static inline void remove_memory(int nid, u64 start, u64 size) {} +static inline void __remove_memory(int nid, u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void __ref free_area_init_core_hotplug(int nid); extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); +extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource, bool online); extern int arch_add_memory(int nid, u64 start, u64 size, @@ -330,7 +333,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); -extern void remove_memory(int nid, u64 start, u64 size); extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn, struct vmem_altmap *altmap); extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 20949dde35cd..e44e3ec8a9c7 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -36,7 +36,7 @@ * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. * SPI requires up to 32 additional bytes for responses. - * */ + */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 #define EC_MAX_RESPONSE_OVERHEAD 32 @@ -58,13 +58,14 @@ enum { EC_MAX_MSG_BYTES = 64 * 1024, }; -/* - * @version: Command version number (often 0) - * @command: Command to send (EC_CMD_...) - * @outsize: Outgoing length in bytes - * @insize: Max number of bytes to accept from EC - * @result: EC's response to the command (separate from communication failure) - * @data: Where to put the incoming data from EC and outgoing data to EC +/** + * struct cros_ec_command - Information about a ChromeOS EC command. + * @version: Command version number (often 0). + * @command: Command to send (EC_CMD_...). + * @outsize: Outgoing length in bytes. + * @insize: Max number of bytes to accept from the EC. + * @result: EC's response to the command (separate from communication failure). + * @data: Where to put the incoming data from EC and outgoing data to EC. 
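+ *
+ * Illustrative use (not part of this patch): to fetch the EC's version
+ * strings, a caller would allocate a command large enough for the
+ * response and fill it roughly as:
+ *
+ *	msg->version = 0;
+ *	msg->command = EC_CMD_GET_VERSION;
+ *	msg->outsize = 0;
+ *	msg->insize = sizeof(struct ec_response_get_version);
+ *
+ * and then hand it to cros_ec_cmd_xfer_status() (declared below).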
*/ struct cros_ec_command { uint32_t version; @@ -76,48 +77,55 @@ struct cros_ec_command { }; /** - * struct cros_ec_device - Information about a ChromeOS EC device - * - * @phys_name: name of physical comms layer (e.g. 'i2c-4') + * struct cros_ec_device - Information about a ChromeOS EC device. + * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). * @dev: Device pointer for physical comms device - * @was_wake_device: true if this device was set to wake the system from - * sleep at the last suspend - * @cmd_readmem: direct read of the EC memory-mapped region, if supported - * @offset is within EC_LPC_ADDR_MEMMAP region. - * @bytes: number of bytes to read. zero means "read a string" (including - * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read. - * Caller must ensure that the buffer is large enough for the result when - * reading a string. - * - * @priv: Private data - * @irq: Interrupt to use - * @id: Device id - * @din: input buffer (for data from EC) - * @dout: output buffer (for data to EC) - * \note - * These two buffers will always be dword-aligned and include enough - * space for up to 7 word-alignment bytes also, so we can ensure that - * the body of the message is always dword-aligned (64-bit). - * We use this alignment to keep ARM and x86 happy. Probably word - * alignment would be OK, there might be a small performance advantage - * to using dword. - * @din_size: size of din buffer to allocate (zero to use static din) - * @dout_size: size of dout buffer to allocate (zero to use static dout) - * @wake_enabled: true if this device can wake the system from sleep - * @suspended: true if this device had been suspended - * @cmd_xfer: send command to EC and get response - * Returns the number of bytes received if the communication succeeded, but - * that doesn't mean the EC was happy with the command. The caller - * should check msg.result for the EC's result code. - * @pkt_xfer: send packet to EC and get response - * @lock: one transaction at a time - * @mkbp_event_supported: true if this EC supports the MKBP event protocol. - * @event_notifier: interrupt event notifier for transport devices. - * @event_data: raw payload transferred with the MKBP event. - * @event_size: size in bytes of the event data. + * @was_wake_device: True if this device was set to wake the system from + * sleep at the last suspend. + * @cros_class: The class structure for this device. + * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. + * @offset: Offset within the EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. Zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be + * read. Caller must ensure that the buffer is large enough for the + * result when reading a string. + * @max_request: Max size of message requested. + * @max_response: Max size of message response. + * @max_passthru: Max size of passthru message. + * @proto_version: The protocol version used for this device. + * @priv: Private data. + * @irq: Interrupt to use. + * @id: Device id. + * @din: Input buffer (for data from EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @dout: Output buffer (for data to EC).
This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @din_size: Size of din buffer to allocate (zero to use static din). + * @dout_size: Size of dout buffer to allocate (zero to use static dout). + * @wake_enabled: True if this device can wake the system from sleep. + * @suspended: True if this device had been suspended. + * @cmd_xfer: Send command to EC and get response. + * Returns the number of bytes received if the communication + * succeeded, but that doesn't mean the EC was happy with the + * command. The caller should check msg.result for the EC's result + * code. + * @pkt_xfer: Send packet to EC and get response. + * @lock: One transaction at a time. + * @mkbp_event_supported: True if this EC supports the MKBP event protocol. + * @event_notifier: Interrupt event notifier for transport devices. + * @event_data: Raw payload transferred with the MKBP event. + * @event_size: Size in bytes of the event data. + * @host_event_wake_mask: Mask of host events that cause wake from suspend. */ struct cros_ec_device { - /* These are used by other drivers that want to talk to the EC */ const char *phys_name; struct device *dev; @@ -153,20 +161,19 @@ struct cros_ec_device { }; /** - * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information - * + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. * @sensor_num: Id of the sensor, as reported by the EC. */ struct cros_ec_sensor_platform { u8 sensor_num; }; -/* struct cros_ec_platform - ChromeOS EC platform information - * - * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) - * used in /dev/ and sysfs. - * @cmd_offset: offset to apply for each command. Set when - * registering a devicde behind another one. +/** + * struct cros_ec_platform - ChromeOS EC platform information. + * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. + * @cmd_offset: Offset to apply for each command. Set when + * registering a device behind another one. */ struct cros_ec_platform { const char *ec_name; @@ -175,16 +182,16 @@ struct cros_ec_platform { struct cros_ec_debugfs; -/* - * struct cros_ec_dev - ChromeOS EC device entry point - * - * @class_dev: Device structure used in sysfs - * @cdev: Character device structure in /dev - * @ec_dev: cros_ec_device structure to talk to the physical device - * @dev: pointer to the platform device - * @debug_info: cros_ec_debugfs structure for debugging information - * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC. - * @cmd_offset: offset to apply for each command. +/** + * struct cros_ec_dev - ChromeOS EC device entry point. + * @class_dev: Device structure used in sysfs. + * @cdev: Character device structure in /dev. + * @ec_dev: cros_ec_device structure to talk to the physical device. + * @dev: Pointer to the platform device. + * @debug_info: cros_ec_debugfs structure for debugging information. + * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC. + * @cmd_offset: Offset to apply for each command. + * @features: Features supported by the EC.
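+ *
+ * For illustration (not part of this patch): sysfs attribute callbacks
+ * receive @class_dev and can recover this structure through the
+ * to_cros_ec_dev() helper defined below, e.g.:
+ *
+ *	struct cros_ec_dev *ec = to_cros_ec_dev(dev);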
*/ struct cros_ec_dev { struct device class_dev; @@ -200,124 +207,129 @@ struct cros_ec_dev { #define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) /** - * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device + * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. * * This can be called by drivers to handle a suspend event. * - * ec_dev: Device to suspend - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_suspend(struct cros_ec_device *ec_dev); /** - * cros_ec_resume - Handle a resume operation for the ChromeOS EC device + * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. + * @ec_dev: Device to resume. * * This can be called by drivers to handle a resume event. * - * @ec_dev: Device to resume - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_resume(struct cros_ec_device *ec_dev); /** - * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer + * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. + * @ec_dev: EC device. + * @msg: Message to write. * * This is intended to be used by all ChromeOS EC drivers, but at present * only SPI uses it. Once LPC uses the same protocol it can start using it. * I2C could use it now, with a refactor of the existing code. * - * @ec_dev: Device to register - * @msg: Message to write + * Return: 0 on success or negative error code. */ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_check_result - Check ec_msg->result + * cros_ec_check_result() - Check ec_msg->result. + * @ec_dev: EC device. + * @msg: Message to check. * * This is used by ChromeOS EC drivers to check the ec_msg->result for * errors and to warn about them. * - * @ec_dev: EC device - * @msg: Message to check + * Return: 0 on success or negative error code. */ int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_cmd_xfer - Send a command to the ChromeOS EC + * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. * * Call this to send a command to the ChromeOS EC. This should be used * instead of calling the EC's cmd_xfer() callback directly. * - * @ec_dev: EC device - * @msg: Message to write + * Return: 0 on success or negative error code. */ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC + * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. * * This function is identical to cros_ec_cmd_xfer, except it returns success * status only if both the command was transmitted successfully and the EC * replied with success status. It's not necessary to check msg->result when * using this function. * - * @ec_dev: EC device - * @msg: Message to write - * @return: Num. of bytes transferred on success, <0 on failure + * Return: The number of bytes transferred on success or negative error code. */ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_remove - Remove a ChromeOS EC + * cros_ec_remove() - Remove a ChromeOS EC. + * @ec_dev: Device to remove. * * Call this to deregister a ChromeOS EC, then clean up any private data.
* - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_remove(struct cros_ec_device *ec_dev); /** - * cros_ec_register - Register a new ChromeOS EC, using the provided info + * cros_ec_register() - Register a new ChromeOS EC, using the provided info. + * @ec_dev: Device to register. * * Before calling this, allocate a pointer to a new device and then fill * in all the fields up to the --private-- marker. * - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_register(struct cros_ec_device *ec_dev); /** - * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC + * cros_ec_query_all() - Query the protocol version supported by the + * ChromeOS EC. + * @ec_dev: Device to query. * - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_query_all(struct cros_ec_device *ec_dev); /** - * cros_ec_get_next_event - Fetch next event from the ChromeOS EC - * - * @ec_dev: Device to fetch event from + * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. + * @ec_dev: Device to fetch event from. * @wake_event: Pointer to a bool set to true upon return if the event might be * treated as a wake event. Ignored if null. * - * Returns: 0 on success, Linux error number on failure + * Return: 0 on success or negative error code. */ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); /** - * cros_ec_get_host_event - Return a mask of event set by the EC. + * cros_ec_get_host_event() - Return a mask of events set by the ChromeOS EC. + * @ec_dev: Device to fetch event from. * - * When MKBP is supported, when the EC raises an interrupt, - * We collect the events raised and call the functions in the ec notifier. + * When MKBP is supported and the EC raises an interrupt, we collect the + * events raised and call the functions in the ec notifier. This function + * is a helper to know which events are raised. + * + * Return: 0 on success or negative error code. */ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 5fd0e429f472..9a9631f0559e 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -306,15 +306,18 @@ enum host_event_code { /* Host event mask */ #define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) -/* Arguments at EC_LPC_ADDR_HOST_ARGS */ +/** + * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS + * @flags: The host argument flags. + * @command_version: Command version. + * @data_size: The length of data. + * @checksum: Checksum; sum of command + flags + command_version + data_size + + * all params/response data bytes. + */ struct ec_lpc_host_args { uint8_t flags; uint8_t command_version; uint8_t data_size; - /* - * Checksum; sum of command + flags + command_version + data_size + - * all params/response data bytes. - */ uint8_t checksum; } __packed; @@ -468,54 +471,43 @@ struct ec_lpc_host_args { #define EC_HOST_REQUEST_VERSION 3 -/* Version 3 request from host */ +/** + * struct ec_host_request - Version 3 request from host. + * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it + * receives a header with a version it doesn't know how to + * parse.
+ * @checksum: Checksum of request and data; sum of all bytes including checksum + * should total to 0. + * @command: Command to send (EC_CMD_...) + * @command_version: Command version. + * @reserved: Unused byte in current protocol version; set to 0. + * @data_len: Length of data which follows this header. + */ struct ec_host_request { - /* Struct version (=3) - * - * EC will return EC_RES_INVALID_HEADER if it receives a header with a - * version it doesn't know how to parse. - */ uint8_t struct_version; - - /* - * Checksum of request and data; sum of all bytes including checksum - * should total to 0. - */ uint8_t checksum; - - /* Command code */ uint16_t command; - - /* Command version */ uint8_t command_version; - - /* Unused byte in current protocol version; set to 0 */ uint8_t reserved; - - /* Length of data which follows this header */ uint16_t data_len; } __packed; #define EC_HOST_RESPONSE_VERSION 3 -/* Version 3 response from EC */ +/** + * struct ec_host_response - Version 3 response from EC. + * @struct_version: Struct version (=3). + * @checksum: Checksum of response and data; sum of all bytes including + * checksum should total to 0. + * @result: EC's response to the command (separate from communication failure) + * @data_len: Length of data which follows this header. + * @reserved: Unused bytes in current protocol version; set to 0. + */ struct ec_host_response { - /* Struct version (=3) */ uint8_t struct_version; - - /* - * Checksum of response and data; sum of all bytes including checksum - * should total to 0. - */ uint8_t checksum; - - /* Result code (EC_RES_*) */ uint16_t result; - - /* Length of data which follows this header */ uint16_t data_len; - - /* Unused bytes in current protocol version; set to 0 */ uint16_t reserved; } __packed; @@ -540,6 +532,10 @@ struct ec_host_response { */ #define EC_CMD_PROTO_VERSION 0x00 +/** + * struct ec_response_proto_version - Response to the proto version command. + * @version: The protocol version. + */ struct ec_response_proto_version { uint32_t version; } __packed; @@ -550,12 +546,20 @@ struct ec_response_proto_version { */ #define EC_CMD_HELLO 0x01 +/** + * struct ec_params_hello - Parameters to the hello command. + * @in_data: Pass anything here. + */ struct ec_params_hello { - uint32_t in_data; /* Pass anything here */ + uint32_t in_data; } __packed; +/** + * struct ec_response_hello - Response to the hello command. + * @out_data: Output will be in_data + 0x01020304. + */ struct ec_response_hello { - uint32_t out_data; /* Output will be in_data + 0x01020304 */ + uint32_t out_data; } __packed; /* Get version number */ @@ -567,22 +571,37 @@ enum ec_current_image { EC_IMAGE_RW }; +/** + * struct ec_response_get_version - Response to the get version command. + * @version_string_ro: Null-terminated RO firmware version string. + * @version_string_rw: Null-terminated RW firmware version string. + * @reserved: Unused bytes; was previously RW-B firmware version string. + * @current_image: One of ec_current_image. + */ struct ec_response_get_version { - /* Null-terminated version strings for RO, RW */ char version_string_ro[32]; char version_string_rw[32]; - char reserved[32]; /* Was previously RW-B string */ - uint32_t current_image; /* One of ec_current_image */ + char reserved[32]; + uint32_t current_image; } __packed; /* Read test */ #define EC_CMD_READ_TEST 0x03 +/** + * struct ec_params_read_test - Parameters for the read test command. + * @offset: Starting value for read buffer. + * @size: Size to read in bytes. 
+ */ struct ec_params_read_test { - uint32_t offset; /* Starting value for read buffer */ - uint32_t size; /* Size to read in bytes */ + uint32_t offset; + uint32_t size; } __packed; +/** + * struct ec_response_read_test - Response to the read test command. + * @data: Data returned by the read test command. + */ struct ec_response_read_test { uint32_t data[32]; } __packed; @@ -597,18 +616,27 @@ struct ec_response_read_test { /* Get chip info */ #define EC_CMD_GET_CHIP_INFO 0x05 +/** + * struct ec_response_get_chip_info - Response to the get chip info command. + * @vendor: Null-terminated string for chip vendor. + * @name: Null-terminated string for chip name. + * @revision: Null-terminated string for chip mask version. + */ struct ec_response_get_chip_info { - /* Null-terminated strings */ char vendor[32]; char name[32]; - char revision[32]; /* Mask version */ + char revision[32]; } __packed; /* Get board HW version */ #define EC_CMD_GET_BOARD_VERSION 0x06 +/** + * struct ec_response_board_version - Response to the board version command. + * @board_version: A monotonically incrementing number. + */ struct ec_response_board_version { - uint16_t board_version; /* A monotonously incrementing number. */ + uint16_t board_version; } __packed; /* @@ -621,27 +649,42 @@ struct ec_response_board_version { */ #define EC_CMD_READ_MEMMAP 0x07 +/** + * struct ec_params_read_memmap - Parameters for the read memory map command. + * @offset: Offset in memmap (EC_MEMMAP_*). + * @size: Size to read in bytes. + */ struct ec_params_read_memmap { - uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */ - uint8_t size; /* Size to read in bytes */ + uint8_t offset; + uint8_t size; } __packed; /* Read versions supported for a command */ #define EC_CMD_GET_CMD_VERSIONS 0x08 +/** + * struct ec_params_get_cmd_versions - Parameters for the get command versions. + * @cmd: Command to check. + */ struct ec_params_get_cmd_versions { - uint8_t cmd; /* Command to check */ + uint8_t cmd; } __packed; +/** + * struct ec_params_get_cmd_versions_v1 - Parameters for the get command + * versions (v1) + * @cmd: Command to check. + */ struct ec_params_get_cmd_versions_v1 { - uint16_t cmd; /* Command to check */ + uint16_t cmd; } __packed; +/** + * struct ec_response_get_cmd_versions - Response to the get command versions. + * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with + * a desired version. + */ struct ec_response_get_cmd_versions { - /* - * Mask of supported versions; use EC_VER_MASK() to compare with a - * desired version. - */ uint32_t version_mask; } __packed; @@ -659,6 +702,11 @@ enum ec_comms_status { EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ }; +/** + * struct ec_response_get_comms_status - Response to the get comms status + * command. + * @flags: Mask of enum ec_comms_status. + */ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ } __packed; @@ -685,19 +733,19 @@ struct ec_response_test_protocol { /* EC_RES_IN_PROGRESS may be returned if a command is slow */ #define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) +/** + * struct ec_response_get_protocol_info - Response to the get protocol info. + * @protocol_versions: Bitmask of protocol versions supported (1 << n means + * version n). + * @max_request_packet_size: Maximum request packet size in bytes. + * @max_response_packet_size: Maximum response packet size in bytes.
+ * @flags: see EC_PROTOCOL_INFO_* + */ struct ec_response_get_protocol_info { /* Fields which exist if at least protocol version 3 supported */ - - /* Bitmask of protocol versions supported (1 << n means version n)*/ uint32_t protocol_versions; - - /* Maximum request packet size, in bytes */ uint16_t max_request_packet_size; - - /* Maximum response packet size, in bytes */ uint16_t max_response_packet_size; - - /* Flags; see EC_PROTOCOL_INFO_* */ uint32_t flags; } __packed; @@ -708,8 +756,10 @@ struct ec_response_get_protocol_info { /* The upper byte of .flags tells what to do (nothing means "get") */ #define EC_GSV_SET 0x80000000 -/* The lower three bytes of .flags identifies the parameter, if that has - meaning for an individual command. */ +/* + * The lower three bytes of .flags identifies the parameter, if that has + * meaning for an individual command. + */ #define EC_GSV_PARAM_MASK 0x00ffffff struct ec_params_get_set_value { @@ -810,6 +860,7 @@ enum ec_feature_code { #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) #define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) + struct ec_response_get_features { uint32_t flags[2]; } __packed; @@ -820,24 +871,22 @@ struct ec_response_get_features { /* Get flash info */ #define EC_CMD_FLASH_INFO 0x10 -/* Version 0 returns these fields */ +/** + * struct ec_response_flash_info - Response to the flash info command. + * @flash_size: Usable flash size in bytes. + * @write_block_size: Write block size. Write offset and size must be a + * multiple of this. + * @erase_block_size: Erase block size. Erase offset and size must be a + * multiple of this. + * @protect_block_size: Protection block size. Protection offset and size + * must be a multiple of this. + * + * Version 0 returns these fields. + */ struct ec_response_flash_info { - /* Usable flash size, in bytes */ uint32_t flash_size; - /* - * Write block size. Write offset and size must be a multiple - * of this. - */ uint32_t write_block_size; - /* - * Erase block size. Erase offset and size must be a multiple - * of this. - */ uint32_t erase_block_size; - /* - * Protection block size. Protection offset and size must be a - * multiple of this. - */ uint32_t protect_block_size; } __packed; @@ -845,7 +894,22 @@ struct ec_response_flash_info { /* EC flash erases bits to 0 instead of 1 */ #define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) -/* +/** + * struct ec_response_flash_info_1 - Response to the flash info v1 command. + * @flash_size: Usable flash size in bytes. + * @write_block_size: Write block size. Write offset and size must be a + * multiple of this. + * @erase_block_size: Erase block size. Erase offset and size must be a + * multiple of this. + * @protect_block_size: Protection block size. Protection offset and size + * must be a multiple of this. + * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if + * size is exactly this and offset is a multiple of this. + * For example, an EC may have a write buffer which can do + * half-page operations if data is aligned, and a slower + * word-at-a-time write mode. + * @flags: Flags; see EC_FLASH_INFO_* + * * Version 1 returns the same initial fields as version 0, with additional * fields following. * @@ -860,15 +924,7 @@ struct ec_response_flash_info_1 { uint32_t protect_block_size; /* Version 1 adds these fields: */ - /* - * Ideal write size in bytes. Writes will be fastest if size is - * exactly this and offset is a multiple of this. 
For example, an EC - * may have a write buffer which can do half-page operations if data is - * aligned, and a slower word-at-a-time write mode. - */ uint32_t write_ideal_size; - - /* Flags; see EC_FLASH_INFO_* */ uint32_t flags; } __packed; @@ -879,9 +935,14 @@ struct ec_response_flash_info_1 { */ #define EC_CMD_FLASH_READ 0x11 +/** + * struct ec_params_flash_read - Parameters for the flash read command. + * @offset: Byte offset to read. + * @size: Size to read in bytes. + */ struct ec_params_flash_read { - uint32_t offset; /* Byte offset to read */ - uint32_t size; /* Size to read in bytes */ + uint32_t offset; + uint32_t size; } __packed; /* Write flash */ @@ -891,18 +952,28 @@ struct ec_params_flash_read { /* Version 0 of the flash command supported only 64 bytes of data */ #define EC_FLASH_WRITE_VER0_SIZE 64 +/** + * struct ec_params_flash_write - Parameters for the flash write command. + * @offset: Byte offset to write. + * @size: Size to write in bytes. + */ struct ec_params_flash_write { - uint32_t offset; /* Byte offset to write */ - uint32_t size; /* Size to write in bytes */ + uint32_t offset; + uint32_t size; /* Followed by data to write */ } __packed; /* Erase flash */ #define EC_CMD_FLASH_ERASE 0x13 +/** + * struct ec_params_flash_erase - Parameters for the flash erase command. + * @offset: Byte offset to erase. + * @size: Size to erase in bytes. + */ struct ec_params_flash_erase { - uint32_t offset; /* Byte offset to erase */ - uint32_t size; /* Size to erase in bytes */ + uint32_t offset; + uint32_t size; } __packed; /* @@ -941,21 +1012,28 @@ struct ec_params_flash_erase { /* Entire flash code protected when the EC boots */ #define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) +/** + * struct ec_params_flash_protect - Parameters for the flash protect command. + * @mask: Bits in flags to apply. + * @flags: New flags to apply. + */ struct ec_params_flash_protect { - uint32_t mask; /* Bits in flags to apply */ - uint32_t flags; /* New flags to apply */ + uint32_t mask; + uint32_t flags; } __packed; +/** + * struct ec_response_flash_protect - Response to the flash protect command. + * @flags: Current value of flash protect flags. + * @valid_flags: Flags which are valid on this platform. This allows the + * caller to distinguish between flags which aren't set vs. flags + * which can't be set on this platform. + * @writable_flags: Flags which can be changed given the current protection + * state. + */ struct ec_response_flash_protect { - /* Current value of flash protect flags */ uint32_t flags; - /* - * Flags which are valid on this platform. This allows the caller - * to distinguish between flags which aren't set vs. flags which can't - * be set on this platform. - */ uint32_t valid_flags; - /* Flags which can be changed given the current protection state */ uint32_t writable_flags; } __packed; @@ -982,8 +1060,13 @@ enum ec_flash_region { EC_FLASH_REGION_COUNT, }; +/** + * struct ec_params_flash_region_info - Parameters for the flash region info + * command. + * @region: Flash region; see EC_FLASH_REGION_* + */ struct ec_params_flash_region_info { - uint32_t region; /* enum ec_flash_region */ + uint32_t region; } __packed; struct ec_response_flash_region_info { @@ -1094,7 +1177,9 @@ struct rgb_s { }; #define LB_BATTERY_LEVELS 4 -/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a + +/* + * List of tweakable parameters. NOTE: It's __packed so it can be sent in a * host command, but the alignment is the same regardless. Keep it that way.
*/ struct lightbar_params_v0 { diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/include/linux/mfd/cros_ec_lpc_mec.h deleted file mode 100644 index 176496ddc66c..000000000000 --- a/include/linux/mfd/cros_ec_lpc_mec.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * cros_ec_lpc_mec - LPC variant I/O for Microchip EC - * - * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. - */ - -#ifndef __LINUX_MFD_CROS_EC_MEC_H -#define __LINUX_MFD_CROS_EC_MEC_H - -#include <linux/mfd/cros_ec_commands.h> - -enum cros_ec_lpc_mec_emi_access_mode { - /* 8-bit access */ - ACCESS_TYPE_BYTE = 0x0, - /* 16-bit access */ - ACCESS_TYPE_WORD = 0x1, - /* 32-bit access */ - ACCESS_TYPE_LONG = 0x2, - /* - * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the - * EC data register to be incremented. - */ - ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3, -}; - -enum cros_ec_lpc_mec_io_type { - MEC_IO_READ, - MEC_IO_WRITE, -}; - -/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */ -#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0 -#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE) - -/* EMI registers are relative to base */ -#define MEC_EMI_BASE 0x800 -#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0) -#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1) -#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2) -#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3) -#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4) -#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5) -#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6) -#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7) - -/* - * cros_ec_lpc_mec_init - * - * Initialize MEC I/O. - */ -void cros_ec_lpc_mec_init(void); - -/* - * cros_ec_lpc_mec_destroy - * - * Cleanup MEC I/O. 
- */ -void cros_ec_lpc_mec_destroy(void); - -/** - * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port - * - * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request - * @offset: Base read / write address - * @length: Number of bytes to read / write - * @buf: Destination / source buffer - * - * @return 8-bit checksum of all bytes read / written - */ -u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, - unsigned int offset, unsigned int length, u8 *buf); - -#endif /* __LINUX_MFD_CROS_EC_MEC_H */ diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/include/linux/mfd/cros_ec_lpc_reg.h deleted file mode 100644 index 5560bef63c2b..000000000000 --- a/include/linux/mfd/cros_ec_lpc_reg.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller - * - * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. - */ - -#ifndef __LINUX_MFD_CROS_EC_REG_H -#define __LINUX_MFD_CROS_EC_REG_H - -/** - * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address. - * Returns 8-bit checksum of all bytes read. - * - * @offset: Base read address - * @length: Number of bytes to read - * @dest: Destination buffer - */ -u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest); - -/** - * cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address. - * Returns 8-bit checksum of all bytes written. - * - * @offset: Base write address - * @length: Number of bytes to write - * @msg: Write data buffer - */ -u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg); - -/** - * cros_ec_lpc_reg_init - * - * Initialize register I/O. - */ -void cros_ec_lpc_reg_init(void); - -/** - * cros_ec_lpc_reg_destroy - * - * Cleanup reg I/O. 
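For reference, a minimal use of the byte-level EMI helper declared in the header above (the wrapper below is hypothetical); EC_LPC_ADDR_MEMMAP and EC_MEMMAP_SIZE come from cros_ec_commands.h:

#include <linux/mfd/cros_ec_commands.h>

/* Hypothetical wrapper: read the EC memory map through the MEC EMI window.
 * The return value is the 8-bit checksum of every byte transferred. */
static u8 ec_memmap_read_emi(u8 *dest, unsigned int len)
{
	if (len > EC_MEMMAP_SIZE)
		len = EC_MEMMAP_SIZE;
	return cros_ec_lpc_io_bytes_mec(MEC_IO_READ, EC_LPC_ADDR_MEMMAP,
					len, dest);
}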
- */ -void cros_ec_lpc_reg_destroy(void); - -#endif /* __LINUX_MFD_CROS_EC_REG_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 1e52b8fd1685..fcf9cc9d535f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2163,7 +2163,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *state); #endif -#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) +#if !defined(CONFIG_FLAT_NODE_MEM_MAP) void zero_resv_unavail(void); #else static inline void zero_resv_unavail(void) {} diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9f0caccd5833..847705a6d0ec 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -633,9 +633,6 @@ typedef struct pglist_data { struct page_ext *node_page_ext; #endif #endif -#ifndef CONFIG_NO_BOOTMEM - struct bootmem_data *bdata; -#endif #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * Must be held any time you expect node_start_pfn, node_present_pages @@ -869,7 +866,7 @@ static inline int is_highmem_idx(enum zone_type idx) } /** - * is_highmem - helper function to quickly check if a struct zone is a + * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. * @zone - pointer to struct zone variable diff --git a/include/linux/of.h b/include/linux/of.h index ab96025b2382..a5aee3c438ad 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -388,6 +388,9 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); +extern int of_alias_get_alias_list(const struct of_device_id *matches, + const char *stem, unsigned long *bitmap, + unsigned int nbits); extern int of_machine_is_compatible(const char *compat); @@ -898,6 +901,13 @@ static inline int of_alias_get_highest_id(const char *stem) return -ENOSYS; } +static inline int of_alias_get_alias_list(const struct of_device_id *matches, + const char *stem, unsigned long *bitmap, + unsigned int nbits) +{ + return -ENOSYS; +} + static inline int of_machine_is_compatible(const char *compat) { return 0; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b1bd2186e6d2..226f96f0dee0 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x) typedef int filler_t(void *, struct page *); -pgoff_t page_cache_next_hole(struct address_space *mapping, +pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); -pgoff_t page_cache_prev_hole(struct address_space *mapping, +pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); #define FGP_ACCESSED 0x00000001 @@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping, unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, - pgoff_t end, int tag, unsigned int nr_pages, + pgoff_t end, xa_mark_t tag, unsigned int nr_pages, struct page **pages); static inline unsigned find_get_pages_tag(struct address_space *mapping, - pgoff_t *index, int tag, unsigned int 
nr_pages, + pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, struct page **pages) { return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, nr_pages, pages); } unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, - int tag, unsigned int nr_entries, + xa_mark_t tag, unsigned int nr_entries, struct page **entries, pgoff_t *indices); struct page *grab_cache_page_write_begin(struct address_space *mapping, diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 6dc456ac6136..081d934eda64 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -9,6 +9,8 @@ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H +#include <linux/xarray.h> + /* 15 pointers + header align the pagevec structure to a power of two */ #define PAGEVEC_SIZE 15 @@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec, unsigned pagevec_lookup_range_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, - int tag); + xa_mark_t tag); unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, - int tag, unsigned max_pages); + xa_mark_t tag, unsigned max_pages); static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, int tag) + struct address_space *mapping, pgoff_t *index, xa_mark_t tag) { return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); } diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 2d2096ba1cfe..1ce8e264a269 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -91,8 +91,7 @@ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ extern __PCPU_ATTRS(sec) __typeof__(type) name; \ - __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ - __typeof__(type) name + __PCPU_ATTRS(sec) __weak __typeof__(type) name #else /* * Normal declaration and definition macros. 
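With tags now typed as xa_mark_t, a tag walk looks the same as before; a sketch in the style of write_cache_pages(), assuming PAGECACHE_TAG_DIRTY from fs.h:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Sketch: visit every dirty page in @mapping, one pagevec at a time. */
static void visit_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned int i, nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
					      (pgoff_t)-1,
					      PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr; i++) {
			/* pvec.pages[i] holds a reference; process it here */
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}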
@@ -101,8 +100,7 @@ extern __PCPU_ATTRS(sec) __typeof__(type) name #define DEFINE_PER_CPU_SECTION(type, name, sec) \ - __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ - __typeof__(type) name + __PCPU_ATTRS(sec) __typeof__(type) name #endif /* diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 8485c6a9a383..6d07eebb3f75 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -24,8 +24,10 @@ #ifndef __ASM_ARCH_OMAP_GPIO_H #define __ASM_ARCH_OMAP_GPIO_H +#ifndef __ASSEMBLER__ #include <linux/io.h> #include <linux/platform_device.h> +#endif #define OMAP1_MPUIO_BASE 0xfffb5000 @@ -157,6 +159,7 @@ #define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) #define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) +#ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; u16 direction; @@ -205,4 +208,6 @@ struct omap_gpio_platform_data { int (*get_context_loss_count)(struct device *dev); }; +#endif /* __ASSEMBLER__ */ + #endif diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index ee495d707f17..fe815d7d9f58 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_H__ diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 2efa3470a451..1ea3aab972b4 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -46,7 +46,6 @@ struct sysc_regbits { s8 emufree_shift; }; -#define SYSC_QUIRK_RESOURCE_PROVIDER BIT(9) #define SYSC_QUIRK_LEGACY_IDLE BIT(8) #define SYSC_QUIRK_RESET_STATUS BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 34149e8b5f73..06c4c7a6c09c 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -28,34 +28,30 @@ #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <linux/xarray.h> + +/* Keep unconverted code working */ +#define radix_tree_root xarray +#define radix_tree_node xa_node /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: * * 00 - data pointer - * 01 - internal entry - * 10 - exceptional entry - * 11 - this bit combination is currently unused/reserved + * 10 - internal entry + * x1 - value entry * * The internal entry may be a pointer to the next level in the tree, a * sibling entry, or an indicator that the entry in this slot has been moved * to another location in the tree and the lookup should be restarted. While * NULL fits the 'data pointer' pattern, it means that there is no entry in * the tree for this index (no matter what level of the tree it is found at). - * This means that you cannot store NULL in the tree as a value for the index. + * This means that storing a NULL entry in the tree is the same as deleting + * the entry from the tree. 
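The new entry encoding can be summarised with a tiny classifier; xa_is_value() and xa_is_internal() are taken from the <linux/xarray.h> additions later in this patch:

#include <linux/xarray.h>

/* Sketch: how the low two bits of a slot are interpreted (see above). */
static const char *classify_entry(const void *entry)
{
	if (xa_is_value(entry))		/* x1: value entry */
		return "value entry";
	if (xa_is_internal(entry))	/* 10: internal entry */
		return "internal entry";
	return "data pointer";		/* 00: plain pointer (NULL = empty) */
}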
*/ #define RADIX_TREE_ENTRY_MASK 3UL -#define RADIX_TREE_INTERNAL_NODE 1UL - -/* - * Most users of the radix tree store pointers but shmem/tmpfs stores swap - * entries in the same tree. They are marked as exceptional entries to - * distinguish them from pointers to struct page. - * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. - */ -#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 -#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 +#define RADIX_TREE_INTERNAL_NODE 2UL static inline bool radix_tree_is_internal_node(void *ptr) { @@ -65,75 +61,32 @@ static inline bool radix_tree_is_internal_node(void *ptr) /*** radix-tree API starts here ***/ -#define RADIX_TREE_MAX_TAGS 3 - -#ifndef RADIX_TREE_MAP_SHIFT -#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) -#endif - +#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) -#define RADIX_TREE_TAG_LONGS \ - ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) +#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS +#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* - * @count is the count of every non-NULL element in the ->slots array - * whether that is an exceptional entry, a retry entry, a user pointer, - * a sibling entry or a pointer to the next level of the tree. - * @exceptional is the count of every element in ->slots which is - * either radix_tree_exceptional_entry() or is a sibling entry for an - * exceptional entry. - */ -struct radix_tree_node { - unsigned char shift; /* Bits remaining in each slot */ - unsigned char offset; /* Slot offset in parent */ - unsigned char count; /* Total entry count */ - unsigned char exceptional; /* Exceptional entry count */ - struct radix_tree_node *parent; /* Used when ascending tree */ - struct radix_tree_root *root; /* The tree we belong to */ - union { - struct list_head private_list; /* For tree user */ - struct rcu_head rcu_head; /* Used when freeing node */ - }; - void __rcu *slots[RADIX_TREE_MAP_SIZE]; - unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; -}; - -/* The IDR tag is stored in the low bits of the GFP flags */ +/* The IDR tag is stored in the low bits of xa_flags */ #define ROOT_IS_IDR ((__force gfp_t)4) -/* The top bits of gfp_mask are used to store the root tags */ +/* The top bits of xa_flags are used to store the root tags */ #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) -struct radix_tree_root { - spinlock_t xa_lock; - gfp_t gfp_mask; - struct radix_tree_node __rcu *rnode; -}; - -#define RADIX_TREE_INIT(name, mask) { \ - .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ - .gfp_mask = (mask), \ - .rnode = NULL, \ -} +#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask) #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(name, mask) -#define INIT_RADIX_TREE(root, mask) \ -do { \ - spin_lock_init(&(root)->xa_lock); \ - (root)->gfp_mask = (mask); \ - (root)->rnode = NULL; \ -} while (0) +#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask) static inline bool radix_tree_empty(const struct radix_tree_root *root) { - return root->rnode == NULL; + return root->xa_head == NULL; } /** @@ -143,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root) * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating * @node: node 
that contains current slot - * @shift: shift for the node that holds our slots * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is @@ -157,20 +109,8 @@ struct radix_tree_iter { unsigned long next_index; unsigned long tags; struct radix_tree_node *node; -#ifdef CONFIG_RADIX_TREE_MULTIORDER - unsigned int shift; -#endif }; -static inline unsigned int iter_shift(const struct radix_tree_iter *iter) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - return iter->shift; -#else - return 0; -#endif -} - /** * Radix-tree synchronization * @@ -194,12 +134,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup - * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * - * The first 8 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -269,17 +208,6 @@ static inline int radix_tree_deref_retry(void *arg) } /** - * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? - * @arg: value returned by radix_tree_deref_slot - * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. - */ -static inline int radix_tree_exceptional_entry(void *arg) -{ - /* Not unlikely because radix_tree_exception often tested first */ - return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; -} - -/** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. 
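A sketch of one of the lockless lookups listed above, including the exception check; note that a real caller must take its own reference on the returned object before dropping the RCU lock:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *lookup_page_like(struct radix_tree_root *root,
			      unsigned long index)
{
	void *entry;

	rcu_read_lock();
	entry = radix_tree_lookup(root, index);
	if (radix_tree_exception(entry))
		entry = NULL;	/* a value entry (e.g. swap/DAX), not a page */
	rcu_read_unlock();

	return entry;		/* only safe if a reference was taken above */
}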
@@ -289,47 +217,28 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *, unsigned long index, - unsigned order, struct radix_tree_node **nodep, - void __rcu ***slotp); -int __radix_tree_insert(struct radix_tree_root *, unsigned long index, - unsigned order, void *); -static inline int radix_tree_insert(struct radix_tree_root *root, - unsigned long index, void *entry) -{ - return __radix_tree_insert(root, index, 0, entry); -} +int radix_tree_insert(struct radix_tree_root *, unsigned long index, + void *); void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp); void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, unsigned long index); -typedef void (*radix_tree_update_node_t)(struct radix_tree_node *); void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, - void __rcu **slot, void *entry, - radix_tree_update_node_t update_node); + void __rcu **slot, void *entry); void radix_tree_iter_replace(struct radix_tree_root *, const struct radix_tree_iter *, void __rcu **slot, void *entry); void radix_tree_replace_slot(struct radix_tree_root *, void __rcu **slot, void *entry); -void __radix_tree_delete_node(struct radix_tree_root *, - struct radix_tree_node *, - radix_tree_update_node_t update_node); void radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, - void __rcu **slot); unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, - void __rcu ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); -int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); @@ -337,8 +246,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *, - const struct radix_tree_iter *iter, unsigned int tag); void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, @@ -354,12 +261,6 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } -int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); -int radix_tree_split(struct radix_tree_root *, unsigned long index, - unsigned new_order); -int radix_tree_join(struct radix_tree_root *, unsigned long index, - unsigned new_order, void *); - void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max); @@ -465,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline unsigned long 
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) { - return iter->index + (slots << iter_shift(iter)); + return iter->index + slots; } /** @@ -490,21 +391,9 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return (iter->next_index - iter->index) >> iter_shift(iter); + return iter->next_index - iter->index; } -#ifdef CONFIG_RADIX_TREE_MULTIORDER -void __rcu **__radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags); -#else -/* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags) -{ - return slot; -} -#endif - /** * radix_tree_next_slot - find next slot in chunk * @@ -563,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, return NULL; found: - if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) - return __radix_tree_next_slot(slot, iter, flags); return slot; } @@ -584,23 +471,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, slot = radix_tree_next_slot(slot, iter, 0)) /** - * radix_tree_for_each_contig - iterate over contiguous slots - * - * @slot: the void** variable for pointer to slot - * @root: the struct radix_tree_root pointer - * @iter: the struct radix_tree_iter pointer - * @start: iteration starting index - * - * @slot points to radix tree slot, @iter->index contains its index. - */ -#define radix_tree_for_each_contig(slot, root, iter, start) \ - for (slot = radix_tree_iter_init(iter, start) ; \ - slot || (slot = radix_tree_next_chunk(root, iter, \ - RADIX_TREE_ITER_CONTIG)) ; \ - slot = radix_tree_next_slot(slot, iter, \ - RADIX_TREE_ITER_CONTIG)) - -/** * radix_tree_for_each_tagged - iterate over tagged slots * * @slot: the void** variable for pointer to slot diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index af8a61be2d8d..9510c677ac70 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node, * * On insertion, the user must update the augmented information on the path * leading to the inserted node, then call rb_link_node() as usual and - * rb_augment_inserted() instead of the usual rb_insert_color() call. - * If rb_augment_inserted() rebalances the rbtree, it will callback into + * rb_insert_augmented() instead of the usual rb_insert_color() call. + * If rb_insert_augmented() rebalances the rbtree, it will callback into * a user provided function to update the augmented information on the * affected subtrees. 
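The corrected insertion sequence, sketched for a hypothetical subtree-maximum augmentation; my_cb stands in for a callback table assumed to be defined elsewhere (typically generated with RB_DECLARE_CALLBACKS()):

#include <linux/rbtree_augmented.h>

struct mynode {
	struct rb_node rb;
	unsigned long key;
	unsigned long max;	/* augmented: max key in this subtree */
};

/* Assumed defined elsewhere, e.g. via RB_DECLARE_CALLBACKS(). */
extern const struct rb_augment_callbacks my_cb;

static void my_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *n = rb_entry(*link, struct mynode, rb);

		parent = *link;
		if (n->max < new->max)	/* maintain the augment going down */
			n->max = new->max;
		link = new->key < n->key ? &parent->rb_left : &parent->rb_right;
	}

	rb_link_node(&new->rb, parent, link);
	rb_insert_augmented(&new->rb, root, &my_cb); /* not rb_insert_color() */
}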
 */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e3c5d856b6da..507a2b524208 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -305,14 +305,22 @@ struct fw_rsc_vdev {
 	struct fw_rsc_vdev_vring vring[0];
 } __packed;
+struct rproc;
+
 /**
  * struct rproc_mem_entry - memory entry descriptor
  * @va: virtual address
  * @dma: dma address
  * @len: length, in bytes
  * @da: device address
+ * @release: release associated memory
  * @priv: associated data
+ * @name: associated memory region name (optional)
  * @node: list node
+ * @rsc_offset: offset in resource table
+ * @flags: iommu protection flags
+ * @of_resm_idx: reserved memory phandle index
+ * @alloc: specific memory allocator function
  */
 struct rproc_mem_entry {
 	void *va;
@@ -320,10 +328,15 @@ struct rproc_mem_entry {
 	int len;
 	u32 da;
 	void *priv;
+	char name[32];
 	struct list_head node;
+	u32 rsc_offset;
+	u32 flags;
+	u32 of_resm_idx;
+	int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
+	int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
 };
-struct rproc;
 struct firmware;
 /**
@@ -399,6 +412,9 @@ enum rproc_crash_type {
  * @node: list node related to the rproc segment list
  * @da: device address of the segment
  * @size: size of the segment
+ * @priv: private data associated with the dump_segment
+ * @dump: custom dump function to fill device memory segment associated
+ *	  with coredump
  */
 struct rproc_dump_segment {
 	struct list_head node;
@@ -406,6 +422,9 @@ struct rproc_dump_segment {
 	dma_addr_t da;
 	size_t size;
+	void *priv;
+	void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
+		     void *dest);
 	loff_t offset;
 };
@@ -439,7 +458,9 @@ struct rproc_dump_segment {
  * @cached_table: copy of the resource table
  * @table_sz: size of @cached_table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
+ * @auto_boot: flag to indicate if remote processor should be auto-started
  * @dump_segments: list of segments in the firmware
+ * @nb_vdev: number of vdev currently handled by rproc
  */
 struct rproc {
 	struct list_head node;
@@ -472,6 +493,7 @@ struct rproc {
 	bool has_iommu;
 	bool auto_boot;
 	struct list_head dump_segments;
+	int nb_vdev;
 };
 /**
@@ -499,7 +521,6 @@ struct rproc_subdev {
 /**
  * struct rproc_vring - remoteproc vring state
  * @va: virtual address
- * @dma: dma address
  * @len: length, in bytes
  * @da: device address
  * @align: vring alignment
@@ -509,7 +530,6 @@ struct rproc_subdev {
  */
 struct rproc_vring {
 	void *va;
-	dma_addr_t dma;
 	int len;
 	u32 da;
 	u32 align;
@@ -528,6 +548,7 @@ struct rproc_vring {
  * @vdev: the virtio device
  * @vring: the vrings for this vdev
  * @rsc_offset: offset of the vdev's resource entry
+ * @index: vdev position versus other vdev declared in resource table
  */
 struct rproc_vdev {
 	struct kref refcount;
@@ -540,6 +561,7 @@ struct rproc_vdev {
 	struct virtio_device vdev;
 	struct rproc_vring vring[RVDEV_NUM_VRINGS];
 	u32 rsc_offset;
+	u32 index;
 };
 struct rproc *rproc_get_by_phandle(phandle phandle);
@@ -553,10 +575,29 @@ int rproc_add(struct rproc *rproc);
 int rproc_del(struct rproc *rproc);
 void rproc_free(struct rproc *rproc);
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
+
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+		     void *va, dma_addr_t dma, int len, u32 da,
+		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+		     int (*release)(struct rproc *, struct rproc_mem_entry *),
+		     const char *name, ...);
+
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device
*dev, u32 of_resm_idx, int len, + u32 da, const char *name, ...); + int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); +int rproc_coredump_add_custom_segment(struct rproc *rproc, + dma_addr_t da, size_t size, + void (*dumpfn)(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest), + void *priv); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { diff --git a/include/linux/reset.h b/include/linux/reset.h index 09732c36f351..29af6d6b2f4b 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -116,7 +116,7 @@ static inline int device_reset_optional(struct device *dev) * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. - * If this function is called more then once for the same reset_control it will + * If this function is called more than once for the same reset_control it will * return -EBUSY. * * See reset_control_get_shared for details on shared references to diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 6aedc30003e7..c8bb4a2b48c3 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -167,17 +167,12 @@ struct rtc_device { #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ -extern struct rtc_device *rtc_device_register(const char *name, - struct device *dev, - const struct rtc_class_ops *ops, - struct module *owner); extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __rtc_register_device(struct module *owner, struct rtc_device *rtc); -extern void rtc_device_unregister(struct rtc_device *rtc); extern void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc); @@ -277,4 +272,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc, static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} #endif +#ifdef CONFIG_RTC_INTF_SYSFS +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); +#else +static inline +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) +{ + return 0; +} + +static inline +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) +{ + return 0; +} +#endif #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index 04f1321d14c4..f30954cc059d 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -20,7 +20,6 @@ extern unsigned long nr_running(void); extern bool single_task_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); static inline int sched_info_on(void) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index f4c9fc0fc755..3105055c00a7 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -91,6 +91,8 @@ struct scmi_clk_ops { * to sustained performance level mapping * @freq_get: gets the frequency for a given device using sustained frequency * to sustained performance level mapping + * @est_power_get: 
gets the estimated power cost for a given performance domain + * at a given frequency */ struct scmi_perf_ops { int (*limits_set)(const struct scmi_handle *handle, u32 domain, @@ -110,6 +112,8 @@ struct scmi_perf_ops { unsigned long rate, bool poll); int (*freq_get)(const struct scmi_handle *handle, u32 domain, unsigned long *rate, bool poll); + int (*est_power_get)(const struct scmi_handle *handle, u32 domain, + unsigned long *rate, unsigned long *power); }; /** diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 406edae44ca3..047fa67d039b 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -144,6 +144,8 @@ struct uart_port { void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct serial_rs485 *rs485); + int (*iso7816_config)(struct uart_port *, + struct serial_iso7816 *iso7816); unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ @@ -260,6 +262,7 @@ struct uart_port { struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; + struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; diff --git a/include/linux/signal.h b/include/linux/signal.h index 200ed96a05af..f428e86f4800 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -129,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ + /* fall through */ \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ + /* fall through */ \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ @@ -161,7 +163,9 @@ static inline void name(sigset_t *set) \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ + /* fall through */ \ case 2: set->sig[1] = op(set->sig[1]); \ + /* fall through */ \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ @@ -182,6 +186,7 @@ static inline void sigemptyset(sigset_t *set) memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; + /* fall through */ case 1: set->sig[0] = 0; break; } @@ -194,6 +199,7 @@ static inline void sigfillset(sigset_t *set) memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; + /* fall through */ case 1: set->sig[0] = -1; break; } diff --git a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h new file mode 100644 index 000000000000..b4dde2fbeb3f --- /dev/null +++ b/include/linux/soc/amlogic/meson-canvas.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 BayLibre, SAS + */ +#ifndef __SOC_MESON_CANVAS_H +#define __SOC_MESON_CANVAS_H + +#include <linux/kernel.h> + +#define MESON_CANVAS_WRAP_NONE 0x00 +#define MESON_CANVAS_WRAP_X 0x01 +#define MESON_CANVAS_WRAP_Y 0x02 + +#define MESON_CANVAS_BLKMODE_LINEAR 0x00 +#define MESON_CANVAS_BLKMODE_32x32 0x01 +#define MESON_CANVAS_BLKMODE_64x64 0x02 + +#define MESON_CANVAS_ENDIAN_SWAP16 0x1 +#define MESON_CANVAS_ENDIAN_SWAP32 0x3 +#define MESON_CANVAS_ENDIAN_SWAP64 0x7 +#define MESON_CANVAS_ENDIAN_SWAP128 0xf + +struct meson_canvas; + +/** + * meson_canvas_get() - get a canvas provider instance + * + * @dev: consumer device pointer + */ +struct meson_canvas *meson_canvas_get(struct device *dev); + +/** + * meson_canvas_alloc() - take 
ownership of a canvas + * + * @canvas: canvas provider instance retrieved from meson_canvas_get() + * @canvas_index: will be filled with the canvas ID + */ +int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index); + +/** + * meson_canvas_free() - remove ownership from a canvas + * + * @canvas: canvas provider instance retrieved from meson_canvas_get() + * @canvas_index: canvas ID that was obtained via meson_canvas_alloc() + */ +int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index); + +/** + * meson_canvas_config() - configure a canvas + * + * @canvas: canvas provider instance retrieved from meson_canvas_get() + * @canvas_index: canvas ID that was obtained via meson_canvas_alloc() + * @addr: physical address to the pixel buffer + * @stride: width of the buffer + * @height: height of the buffer + * @wrap: undocumented + * @blkmode: block mode (linear, 32x32, 64x64) + * @endian: byte swapping (swap16, swap32, swap64, swap128) + */ +int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index, + u32 addr, u32 stride, u32 height, + unsigned int wrap, unsigned int blkmode, + unsigned int endian); + +#endif diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 7e3b9c605ab2..69c285b1c990 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -70,25 +70,51 @@ struct llcc_slice_config { /** * llcc_drv_data - Data associated with the llcc driver * @regmap: regmap associated with the llcc device + * @bcast_regmap: regmap associated with llcc broadcast offset * @cfg: pointer to the data structure for slice configuration * @lock: mutex associated with each slice * @cfg_size: size of the config data table * @max_slices: max slices as read from device tree - * @bcast_off: Offset of the broadcast bank * @num_banks: Number of llcc banks * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array + * @ecc_irq: interrupt for llcc cache error detection and reporting */ struct llcc_drv_data { struct regmap *regmap; + struct regmap *bcast_regmap; const struct llcc_slice_config *cfg; struct mutex lock; u32 cfg_size; u32 max_slices; - u32 bcast_off; u32 num_banks; unsigned long *bitmap; u32 *offsets; + int ecc_irq; +}; + +/** + * llcc_edac_reg_data - llcc edac registers data for each error type + * @name: Name of the error + * @synd_reg: Syndrome register address + * @count_status_reg: Status register address to read the error count + * @ways_status_reg: Status register address to read the error ways + * @reg_cnt: Number of registers + * @count_mask: Mask value to get the error count + * @ways_mask: Mask value to get the error ways + * @count_shift: Shift value to get the error count + * @ways_shift: Shift value to get the error ways + */ +struct llcc_edac_reg_data { + char *name; + u64 synd_reg; + u64 count_status_reg; + u64 ways_status_reg; + u32 reg_cnt; + u32 count_mask; + u32 ways_mask; + u8 count_shift; + u8 ways_shift; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 40d2822f0e2f..5a3e95017fc6 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -67,7 +67,7 @@ struct cache_detail { struct module * owner; int hash_size; struct hlist_head * hash_table; - rwlock_t hash_lock; + spinlock_t hash_lock; char *name; void (*cache_put)(struct kref *); @@ -168,8 +168,8 @@ extern const struct file_operations content_file_operations_pipefs; extern const struct file_operations 
cache_flush_operations_pipefs; extern struct cache_head * -sunrpc_cache_lookup(struct cache_detail *detail, - struct cache_head *key, int hash); +sunrpc_cache_lookup_rcu(struct cache_detail *detail, + struct cache_head *key, int hash); extern struct cache_head * sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash); @@ -186,6 +186,12 @@ static inline struct cache_head *cache_get(struct cache_head *h) return h; } +static inline struct cache_head *cache_get_rcu(struct cache_head *h) +{ + if (kref_get_unless_zero(&h->ref)) + return h; + return NULL; +} static inline void cache_put(struct cache_head *h, struct cache_detail *cd) { @@ -224,9 +230,9 @@ extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ -extern void *cache_seq_start(struct seq_file *file, loff_t *pos); -extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos); -extern void cache_seq_stop(struct seq_file *file, void *p); +extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos); +extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos); +extern void cache_seq_stop_rcu(struct seq_file *file, void *p); extern void qword_add(char **bpp, int *lp, char *str); extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index fd78f78df5c6..e6e26918504c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -113,13 +113,14 @@ struct svcxprt_rdma { /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 -#define RPCRDMA_LISTEN_BACKLOG 10 -#define RPCRDMA_MAX_REQUESTS 32 - -/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our - * current NFSv4.1 implementation supports one backchannel slot. +/* + * Default connection parameters */ -#define RPCRDMA_MAX_BC_REQUESTS 2 +enum { + RPCRDMA_LISTEN_BACKLOG = 10, + RPCRDMA_MAX_REQUESTS = 64, + RPCRDMA_MAX_BC_REQUESTS = 2, +}; #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 04e404a07882..3e53a6e2ada7 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -82,6 +82,7 @@ struct auth_domain { struct hlist_node hash; char *name; struct auth_ops *flavour; + struct rcu_head rcu_head; }; /* diff --git a/include/linux/swap.h b/include/linux/swap.h index 38195f5c96b1..d8a07a4f171d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -300,17 +300,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page); void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); -/* Do not use directly, use workingset_lookup_update */ -void workingset_update_node(struct radix_tree_node *node); - -/* Returns workingset_update_node() if the mapping has shadow entries. 
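cache_get_rcu() is the lookup-side half of the RCU conversion: the reference is only taken if the refcount has not already dropped to zero. Roughly the pattern sunrpc_cache_lookup_rcu() is built on (simplified; expiry and retry handling omitted):

#include <linux/rculist.h>
#include <linux/sunrpc/cache.h>

static struct cache_head *lookup_rcu(struct cache_detail *cd,
				     struct cache_head *key, int hash)
{
	struct cache_head *h;

	rcu_read_lock();
	hlist_for_each_entry_rcu(h, &cd->hash_table[hash], cache_list) {
		if (!cd->match(h, key))
			continue;
		h = cache_get_rcu(h);	/* NULL if racing with the final put */
		rcu_read_unlock();
		return h;
	}
	rcu_read_unlock();
	return NULL;
}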
*/ -#define workingset_lookup_update(mapping) \ -({ \ - radix_tree_update_node_t __helper = workingset_update_node; \ - if (dax_mapping(mapping) || shmem_mapping(mapping)) \ - __helper = NULL; \ - __helper; \ -}) +/* Only track the nodes of mappings with shadow entries */ +void workingset_update_node(struct xa_node *node); +#define mapping_set_update(xas, mapping) do { \ + if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ + xas_set_update(xas, workingset_update_node); \ +} while (0) /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; @@ -409,7 +404,7 @@ extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); extern int __add_to_swap_cache(struct page *page, swp_entry_t entry); -extern void __delete_from_swap_cache(struct page *); +extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); extern void delete_from_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); @@ -563,7 +558,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, return -1; } -static inline void __delete_from_swap_cache(struct page *page) +static inline void __delete_from_swap_cache(struct page *page, + swp_entry_t entry) { } diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 22af9d8a84ae..4d961668e5fc 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -18,9 +18,8 @@ * * swp_entry_t's are *never* stored anywhere in their arch-dependent format. */ -#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ - (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) -#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) +#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) +#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) /* * Store a type+offset into a swp_entry_t in an arch-independent format @@ -29,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) { swp_entry_t ret; - ret.val = (type << SWP_TYPE_SHIFT(ret)) | - (offset & SWP_OFFSET_MASK(ret)); + ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK); return ret; } @@ -40,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) */ static inline unsigned swp_type(swp_entry_t entry) { - return (entry.val >> SWP_TYPE_SHIFT(entry)); + return (entry.val >> SWP_TYPE_SHIFT); } /* @@ -49,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry) */ static inline pgoff_t swp_offset(swp_entry_t entry) { - return entry.val & SWP_OFFSET_MASK(entry); + return entry.val & SWP_OFFSET_MASK; } #ifdef CONFIG_MMU @@ -90,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg) { swp_entry_t entry; - entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; + entry.val = xa_to_value(arg); return entry; } static inline void *swp_to_radix_entry(swp_entry_t entry) { - unsigned long value; - - value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; - return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY); + return xa_mk_value(entry.val); } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index a2b3dfcee0b5..6cfe05893a76 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -453,6 +453,79 @@ static inline int tee_shm_get_id(struct tee_shm *shm) */ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); +/** + * tee_client_open_context() - Open a TEE 
context
+ * @start: if not NULL, continue search after this context
+ * @match: function to check TEE device
+ * @data: data for match function
+ * @vers: if not NULL, version data of TEE device of the context returned
+ *
+ * This function does an operation similar to open("/dev/teeX") in user space.
+ * A returned context must be released with tee_client_close_context().
+ *
+ * Returns a TEE context of the first TEE device matched by the match()
+ * callback or an ERR_PTR.
+ */
+struct tee_context *
+tee_client_open_context(struct tee_context *start,
+			int (*match)(struct tee_ioctl_version_data *,
+				     const void *),
+			const void *data, struct tee_ioctl_version_data *vers);
+
+/**
+ * tee_client_close_context() - Close a TEE context
+ * @ctx: TEE context to close
+ *
+ * Note that all sessions previously opened with this context will be
+ * closed when this function is called.
+ */
+void tee_client_close_context(struct tee_context *ctx);
+
+/**
+ * tee_client_get_version() - Query version of TEE
+ * @ctx: TEE context of the TEE to query
+ * @vers: Pointer to version data
+ */
+void tee_client_get_version(struct tee_context *ctx,
+			    struct tee_ioctl_version_data *vers);
+
+/**
+ * tee_client_open_session() - Open a session to a Trusted Application
+ * @ctx: TEE context
+ * @arg: Open session arguments, see description of
+ *	 struct tee_ioctl_open_session_arg
+ * @param: Parameters passed to the Trusted Application
+ *
+ * Returns < 0 on error else see @arg->ret for result. If @arg->ret
+ * is TEEC_SUCCESS the session identifier is available in @arg->session.
+ */
+int tee_client_open_session(struct tee_context *ctx,
+			    struct tee_ioctl_open_session_arg *arg,
+			    struct tee_param *param);
+
+/**
+ * tee_client_close_session() - Close a session to a Trusted Application
+ * @ctx: TEE Context
+ * @session: Session id
+ *
+ * Returns < 0 on error else 0; regardless, the session will not be
+ * valid after this function has returned.
+ */
+int tee_client_close_session(struct tee_context *ctx, u32 session);
+
+/**
+ * tee_client_invoke_func() - Invoke a function in a Trusted Application
+ * @ctx: TEE Context
+ * @arg: Invoke arguments, see description of
+ *	 struct tee_ioctl_invoke_arg
+ * @param: Parameters passed to the Trusted Application
+ *
+ * Returns < 0 on error else see @arg->ret for result.
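A typical client of this interface; the OP-TEE implementation ID comes from the TEE uapi header, and the function names here are illustrative:

#include <linux/tee.h>
#include <linux/tee_drv.h>

static int optee_ctx_match(struct tee_ioctl_version_data *ver,
			   const void *data)
{
	return ver->impl_id == TEE_IMPL_ID_OPTEE;
}

/* Sketch: bind to the first OP-TEE device in the system. The context
 * must later be released with tee_client_close_context(). */
static struct tee_context *open_optee_context(void)
{
	return tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
}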
+ */ +int tee_client_invoke_func(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + static inline bool tee_param_is_memref(struct tee_param *param) { switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 78a010e19ed4..4130a5497d40 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -575,7 +575,8 @@ extern int bpf_get_kprobe_info(const struct perf_event *event, bool perf_type_tracepoint); #endif #ifdef CONFIG_UPROBE_EVENTS -extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe); +extern int perf_uprobe_init(struct perf_event *event, + unsigned long ref_ctr_offset, bool is_retprobe); extern void perf_uprobe_destroy(struct perf_event *event); extern int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, const char **filename, diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index bb9d2084af03..103a48a48872 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -123,6 +123,7 @@ extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); +extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); @@ -160,6 +161,10 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } +static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) +{ + return -ENOSYS; +} static inline int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) { diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 3fd07912909c..8dc77e40bc03 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); -static inline bool vt_force_oops_output(struct vc_data *vc) -{ - if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0) - return true; - return false; -} - extern char vt_dont_switch; extern int default_utf8; extern int global_cursor_default; diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 2dfc8006fe64..d9514928ddac 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -4,10 +4,432 @@ /* * eXtensible Arrays * Copyright (c) 2017 Microsoft Corporation - * Author: Matthew Wilcox <mawilcox@microsoft.com> + * Author: Matthew Wilcox <willy@infradead.org> + * + * See Documentation/core-api/xarray.rst for how to use the XArray. 
*/ +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/gfp.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/rcupdate.h> #include <linux/spinlock.h> +#include <linux/types.h> + +/* + * The bottom two bits of the entry determine how the XArray interprets + * the contents: + * + * 00: Pointer entry + * 10: Internal entry + * x1: Value entry or tagged pointer + * + * Attempting to store internal entries in the XArray is a bug. + * + * Most internal entries are pointers to the next node in the tree. + * The following internal entries have a special meaning: + * + * 0-62: Sibling entries + * 256: Zero entry + * 257: Retry entry + * + * Errors are also represented as internal entries, but use the negative + * space (-4094 to -2). They're never stored in the slots array; only + * returned by the normal API. + */ + +#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1) + +/** + * xa_mk_value() - Create an XArray entry from an integer. + * @v: Value to store in XArray. + * + * Context: Any context. + * Return: An entry suitable for storing in the XArray. + */ +static inline void *xa_mk_value(unsigned long v) +{ + WARN_ON((long)v < 0); + return (void *)((v << 1) | 1); +} + +/** + * xa_to_value() - Get value stored in an XArray entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value stored in the XArray entry. + */ +static inline unsigned long xa_to_value(const void *entry) +{ + return (unsigned long)entry >> 1; +} + +/** + * xa_is_value() - Determine if an entry is a value. + * @entry: XArray entry. + * + * Context: Any context. + * Return: True if the entry is a value, false if it is a pointer. + */ +static inline bool xa_is_value(const void *entry) +{ + return (unsigned long)entry & 1; +} + +/** + * xa_tag_pointer() - Create an XArray entry for a tagged pointer. + * @p: Plain pointer. + * @tag: Tag value (0, 1 or 3). + * + * If the user of the XArray prefers, they can tag their pointers instead + * of storing value entries. Three tags are available (0, 1 and 3). + * These are distinct from the xa_mark_t as they are not replicated up + * through the array and cannot be searched for. + * + * Context: Any context. + * Return: An XArray entry. + */ +static inline void *xa_tag_pointer(void *p, unsigned long tag) +{ + return (void *)((unsigned long)p | tag); +} + +/** + * xa_untag_pointer() - Turn an XArray entry into a plain pointer. + * @entry: XArray entry. + * + * If you have stored a tagged pointer in the XArray, call this function + * to get the untagged version of the pointer. + * + * Context: Any context. + * Return: A pointer. + */ +static inline void *xa_untag_pointer(void *entry) +{ + return (void *)((unsigned long)entry & ~3UL); +} + +/** + * xa_pointer_tag() - Get the tag stored in an XArray entry. + * @entry: XArray entry. + * + * If you have stored a tagged pointer in the XArray, call this function + * to get the tag of that pointer. + * + * Context: Any context. + * Return: A tag. + */ +static inline unsigned int xa_pointer_tag(void *entry) +{ + return (unsigned long)entry & 3UL; +} + +/* + * xa_mk_internal() - Create an internal entry. + * @v: Value to turn into an internal entry. + * + * Context: Any context. + * Return: An XArray internal entry corresponding to this value. + */ +static inline void *xa_mk_internal(unsigned long v) +{ + return (void *)((v << 2) | 2); +} + +/* + * xa_to_internal() - Extract the value from an internal entry. + * @entry: XArray entry. + * + * Context: Any context. 
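A short round trip with the value-entry helpers above (illustrative only):

#include <linux/printk.h>
#include <linux/xarray.h>

static void value_entry_demo(void)
{
	void *entry = xa_mk_value(12345);	/* becomes (12345 << 1) | 1 */

	if (xa_is_value(entry))
		pr_info("round trip: %lu\n", xa_to_value(entry)); /* 12345 */
}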
+ * Return: The value which was stored in the internal entry. + */ +static inline unsigned long xa_to_internal(const void *entry) +{ + return (unsigned long)entry >> 2; +} + +/* + * xa_is_internal() - Is the entry an internal entry? + * @entry: XArray entry. + * + * Context: Any context. + * Return: %true if the entry is an internal entry. + */ +static inline bool xa_is_internal(const void *entry) +{ + return ((unsigned long)entry & 3) == 2; +} + +/** + * xa_is_err() - Report whether an XArray operation returned an error + * @entry: Result from calling an XArray function + * + * If an XArray operation cannot complete an operation, it will return + * a special value indicating an error. This function tells you + * whether an error occurred; xa_err() tells you which error occurred. + * + * Context: Any context. + * Return: %true if the entry indicates an error. + */ +static inline bool xa_is_err(const void *entry) +{ + return unlikely(xa_is_internal(entry)); +} + +/** + * xa_err() - Turn an XArray result into an errno. + * @entry: Result from calling an XArray function. + * + * If an XArray operation cannot complete an operation, it will return + * a special pointer value which encodes an errno. This function extracts + * the errno from the pointer value, or returns 0 if the pointer does not + * represent an errno. + * + * Context: Any context. + * Return: A negative errno or 0. + */ +static inline int xa_err(void *entry) +{ + /* xa_to_internal() would not do sign extension. */ + if (xa_is_err(entry)) + return (long)entry >> 2; + return 0; +} + +typedef unsigned __bitwise xa_mark_t; +#define XA_MARK_0 ((__force xa_mark_t)0U) +#define XA_MARK_1 ((__force xa_mark_t)1U) +#define XA_MARK_2 ((__force xa_mark_t)2U) +#define XA_PRESENT ((__force xa_mark_t)8U) +#define XA_MARK_MAX XA_MARK_2 +#define XA_FREE_MARK XA_MARK_0 + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +/* + * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags, + * and we remain compatible with that. + */ +#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) +#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) +#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ + (__force unsigned)(mark))) + +#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) + +/** + * struct xarray - The anchor of the XArray. + * @xa_lock: Lock that protects the contents of the XArray. + * + * To use the xarray, define it statically or embed it in your data structure. + * It is a very small data structure, so it does not usually make sense to + * allocate it separately and keep a pointer to it in your data structure. + * + * You may use the xa_lock to protect your own data structures as well. + */ +/* + * If all of the entries in the array are NULL, @xa_head is a NULL pointer. + * If the only non-NULL entry in the array is at index 0, @xa_head is that + * entry. If any other entry in the array is non-NULL, @xa_head points + * to an @xa_node. + */ +struct xarray { + spinlock_t xa_lock; +/* private: The rest of the data structure is not to be used directly. */ + gfp_t xa_flags; + void __rcu * xa_head; +}; + +#define XARRAY_INIT(name, flags) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ + .xa_flags = flags, \ + .xa_head = NULL, \ +} + +/** + * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags. + * @name: A string that names your XArray. + * @flags: XA_FLAG values. 
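The error helpers above are typically used straight after a store; a sketch:

#include <linux/gfp.h>
#include <linux/xarray.h>

/* Sketch: xa_store() returns the old entry, or an internal error entry
 * which xa_err() converts back to a negative errno such as -ENOMEM. */
static int store_checked(struct xarray *xa, unsigned long index, void *item)
{
	void *old = xa_store(xa, index, item, GFP_KERNEL);

	return xa_err(old);	/* 0 on success, even if an old entry existed */
}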
+ * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name and flags. It is + * equivalent to calling xa_init_flags() on the array, but it does the + * initialisation at compiletime instead of runtime. + */ +#define DEFINE_XARRAY_FLAGS(name, flags) \ + struct xarray name = XARRAY_INIT(name, flags) + +/** + * DEFINE_XARRAY() - Define an XArray. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name. It is equivalent + * to calling xa_init() on the array, but it does the initialisation at + * compiletime instead of runtime. + */ +#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) + +/** + * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) + +void xa_init_flags(struct xarray *, gfp_t flags); +void *xa_load(struct xarray *, unsigned long index); +void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *xa_cmpxchg(struct xarray *, unsigned long index, + void *old, void *entry, gfp_t); +int xa_reserve(struct xarray *, unsigned long index, gfp_t); +void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, + void *entry, gfp_t); +bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); +void *xa_find(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +void *xa_find_after(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, + unsigned long max, unsigned int n, xa_mark_t); +void xa_destroy(struct xarray *); + +/** + * xa_init() - Initialise an empty XArray. + * @xa: XArray. + * + * An empty XArray is full of NULL entries. + * + * Context: Any context. + */ +static inline void xa_init(struct xarray *xa) +{ + xa_init_flags(xa, 0); +} + +/** + * xa_empty() - Determine if an array has any present entries. + * @xa: XArray. + * + * Context: Any context. + * Return: %true if the array contains only NULL pointers. + */ +static inline bool xa_empty(const struct xarray *xa) +{ + return xa->xa_head == NULL; +} + +/** + * xa_marked() - Inquire whether any entry in this array has a mark set + * @xa: Array + * @mark: Mark value + * + * Context: Any context. + * Return: %true if any entry has this mark set. + */ +static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) +{ + return xa->xa_flags & XA_FLAGS_MARK(mark); +} + +/** + * xa_erase() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock. + * Return: The entry which used to be at this index. 
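Putting the definitions above together, a minimal file-scope array and round trip (the array name and index are illustrative):

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

DEFINE_XARRAY(my_objects);

static void *round_trip(void *item)
{
	void *old = xa_store(&my_objects, 42, item, GFP_KERNEL);

	WARN_ON(xa_is_err(old));
	WARN_ON(xa_empty(&my_objects));

	return xa_load(&my_objects, 42);	/* returns item */
}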
+ */
+static inline void *xa_erase(struct xarray *xa, unsigned long index)
+{
+ return xa_store(xa, index, NULL, 0);
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+ xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
+/**
+ * xa_for_each() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @entry: Entry retrieved from array.
+ * @index: Index of @entry.
+ * @max: Maximum index to retrieve from array.
+ * @filter: Selection criterion.
+ *
+ * Initialise @index to the lowest index you want to retrieve from the
+ * array. During the iteration, @entry will have the value of the entry
+ * stored in @xa at @index. The iteration will skip all entries in the
+ * array which do not match @filter. You may modify @index during the
+ * iteration if you want to skip or reprocess indices. It is safe to modify
+ * the array during the iteration. At the end of the iteration, @entry will
+ * be set to NULL and @index will have a value less than or equal to @max.
+ *
+ * xa_for_each() is O(n log n) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n log n). xa_for_each()
+ * will spin if it hits a retry entry; if you intend to see retry entries,
+ * you should use the xas_for_each() iterator instead. The xas_for_each()
+ * iterator will expand into more inline code than xa_for_each().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each(xa, entry, index, max, filter) \
+ for (entry = xa_find(xa, &index, max, filter); entry; \
+ entry = xa_find_after(xa, &index, max, filter))

 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa) spin_lock(&(xa)->xa_lock)
@@ -21,4 +443,873 @@
 #define xa_unlock_irqrestore(xa, flags) \
 spin_unlock_irqrestore(&(xa)->xa_lock, flags)

+/*
+ * Versions of the normal API which require the caller to hold the
+ * xa_lock. If the GFP flags allow it, they will drop the lock to
+ * allocate memory, then reacquire it afterwards. These functions
+ * may also re-enable interrupts if the XArray flags indicate the
+ * locking should be interrupt safe.
+ */ +void *__xa_erase(struct xarray *, unsigned long index); +void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, + void *entry, gfp_t); +int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); + +/** + * __xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * If you would rather see the existing entry in the array, use __xa_cmpxchg(). + * This function is for users who don't care what the entry is, only that + * one is present. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int __xa_insert(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); + if (!curr) + return 0; + if (xa_is_err(curr)) + return xa_err(curr); + return -EEXIST; +} + +/** + * xa_erase_bh() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_bh(xa); + entry = __xa_erase(xa, index); + xa_unlock_bh(xa); + + return entry; +} + +/** + * xa_erase_irq() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_irq(xa); + entry = __xa_erase(xa, index); + xa_unlock_irq(xa); + + return entry; +} + +/** + * xa_alloc() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. 
+ */ +static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. + */ +static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. + */ +static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock_irq(xa); + + return err; +} + +/* Everything below here is the Advanced API. Proceed with caution. */ + +/* + * The xarray is constructed out of a set of 'chunks' of pointers. Choosing + * the best chunk size requires some tradeoffs. A power of two recommends + * itself so that we can walk the tree based purely on shifts and masks. + * Generally, the larger the better; as the number of slots per level of the + * tree increases, the less tall the tree needs to be. But that needs to be + * balanced against the memory consumption of each node. On a 64-bit system, + * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we + * doubled the number of slots per node, we'd get only 3 nodes per 4kB page. + */ +#ifndef XA_CHUNK_SHIFT +#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#endif +#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT) +#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1) +#define XA_MAX_MARKS 3 +#define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG) + +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is a value entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @nr_values is the count of every element in ->slots which is + * either a value entry or a sibling of a value entry. 
+ */ +struct xa_node { + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char nr_values; /* Value entry count */ + struct xa_node __rcu *parent; /* NULL at top of tree */ + struct xarray *array; /* The array we belong to */ + union { + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ + }; + void __rcu *slots[XA_CHUNK_SIZE]; + union { + unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS]; + unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS]; + }; +}; + +void xa_dump(const struct xarray *); +void xa_dump_node(const struct xa_node *); + +#ifdef XA_DEBUG +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + xa_dump(xa); \ + BUG(); \ + } \ + } while (0) +#define XA_NODE_BUG_ON(node, x) do { \ + if (x) { \ + if (node) xa_dump_node(node); \ + BUG(); \ + } \ + } while (0) +#else +#define XA_BUG_ON(xa, x) do { } while (0) +#define XA_NODE_BUG_ON(node, x) do { } while (0) +#endif + +/* Private */ +static inline void *xa_head(const struct xarray *xa) +{ + return rcu_dereference_check(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_head_locked(const struct xarray *xa) +{ + return rcu_dereference_protected(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_check(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry_locked(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_protected(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent(const struct xarray *xa, + const struct xa_node *node) +{ + return rcu_dereference_check(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent_locked(const struct xarray *xa, + const struct xa_node *node) +{ + return rcu_dereference_protected(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_mk_node(const struct xa_node *node) +{ + return (void *)((unsigned long)node | 2); +} + +/* Private */ +static inline struct xa_node *xa_to_node(const void *entry) +{ + return (struct xa_node *)((unsigned long)entry - 2); +} + +/* Private */ +static inline bool xa_is_node(const void *entry) +{ + return xa_is_internal(entry) && (unsigned long)entry > 4096; +} + +/* Private */ +static inline void *xa_mk_sibling(unsigned int offset) +{ + return xa_mk_internal(offset); +} + +/* Private */ +static inline unsigned long xa_to_sibling(const void *entry) +{ + return xa_to_internal(entry); +} + +/** + * xa_is_sibling() - Is the entry a sibling entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a sibling entry. + */ +static inline bool xa_is_sibling(const void *entry) +{ + return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) && + (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); +} + +#define XA_ZERO_ENTRY xa_mk_internal(256) +#define XA_RETRY_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a zero entry. 
+ */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + +/** + * xa_is_retry() - Is the entry a retry entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a retry entry. + */ +static inline bool xa_is_retry(const void *entry) +{ + return unlikely(entry == XA_RETRY_ENTRY); +} + +/** + * typedef xa_update_node_t - A callback function from the XArray. + * @node: The node which is being processed + * + * This function is called every time the XArray updates the count of + * present and value entries in a node. It allows advanced users to + * maintain the private_list in the node. + * + * Context: The xa_lock is held and interrupts may be disabled. + * Implementations should not drop the xa_lock, nor re-enable + * interrupts. + */ +typedef void (*xa_update_node_t)(struct xa_node *node); + +/* + * The xa_state is opaque to its users. It contains various different pieces + * of state involved in the current operation on the XArray. It should be + * declared on the stack and passed between the various internal routines. + * The various elements in it should not be accessed directly, but only + * through the provided accessor functions. The below documentation is for + * the benefit of those working on the code, not for users of the XArray. + * + * @xa_node usually points to the xa_node containing the slot we're operating + * on (and @xa_offset is the offset in the slots array). If there is a + * single entry in the array at index 0, there are no allocated xa_nodes to + * point to, and so we store %NULL in @xa_node. @xa_node is set to + * the value %XAS_RESTART if the xa_state is not walked to the correct + * position in the tree of nodes for this operation. If an error occurs + * during an operation, it is set to an %XAS_ERROR value. If we run off the + * end of the allocated nodes, it is set to %XAS_BOUNDS. + */ +struct xa_state { + struct xarray *xa; + unsigned long xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; /* Helps gcc generate better code */ + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; +}; + +/* + * We encode errnos in the xas->xa_node. If an error has happened, we need to + * drop the lock to fix it, and once we've done so the xa_state is invalid. + */ +#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL)) +#define XAS_BOUNDS ((struct xa_node *)1UL) +#define XAS_RESTART ((struct xa_node *)3UL) + +#define __XA_STATE(array, index, shift, sibs) { \ + .xa = array, \ + .xa_index = index, \ + .xa_shift = shift, \ + .xa_sibs = sibs, \ + .xa_offset = 0, \ + .xa_pad = 0, \ + .xa_node = XAS_RESTART, \ + .xa_alloc = NULL, \ + .xa_update = NULL \ +} + +/** + * XA_STATE() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. + * + * Declare and initialise an xa_state on the stack. + */ +#define XA_STATE(name, array, index) \ + struct xa_state name = __XA_STATE(array, index, 0, 0) + +/** + * XA_STATE_ORDER() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. + * @order: Order of entry. + * + * Declare and initialise an xa_state on the stack. 
This variant of
+ * XA_STATE() allows you to specify the 'order' of the element you
+ * want to operate on.
+ */
+#define XA_STATE_ORDER(name, array, index, order) \
+ struct xa_state name = __XA_STATE(array, \
+ (index >> order) << order, \
+ order - (order % XA_CHUNK_SHIFT), \
+ (1U << (order % XA_CHUNK_SHIFT)) - 1)
+
+#define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
+#define xas_trylock(xas) xa_trylock((xas)->xa)
+#define xas_lock(xas) xa_lock((xas)->xa)
+#define xas_unlock(xas) xa_unlock((xas)->xa)
+#define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+ xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+ xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted. A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+ return xa_err(xas->xa_node);
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * Only call this function with a negative @err; zero or positive errors
+ * will probably not behave the way you think they should. If you want
+ * to clear the error from an xa_state, use xas_reset().
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+ xas->xa_node = XA_ERROR(err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+ return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+ return !xas_invalid(xas);
+}
+
+/**
+ * xas_is_node() - Does the xas point to a node?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas currently references a node.
+ */
+static inline bool xas_is_node(const struct xa_state *xas)
+{
+ return xas_valid(xas) && xas->xa_node;
+}
+
+/* True if the pointer is something other than a node */
+static inline bool xas_not_node(struct xa_node *node)
+{
+ return ((unsigned long)node & 3) || !node;
+}
+
+/* True if the node represents RESTART or an error */
+static inline bool xas_frozen(struct xa_node *node)
+{
+ return (unsigned long)node & 2;
+}
+
+/* True if the node represents head-of-tree, RESTART or BOUNDS */
+static inline bool xas_top(struct xa_node *node)
+{
+ return node <= XAS_RESTART;
+}
+
+/**
+ * xas_reset() - Reset an XArray operation state.
+ * @xas: XArray operation state.
+ *
+ * Resets the error or walk state of the @xas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * xarray lock and want to reuse the xa_state.
+ *
+ * Context: Any context.
+ */
+static inline void xas_reset(struct xa_state *xas)
+{
+ xas->xa_node = XAS_RESTART;
+}
+
+/**
+ * xas_retry() - Retry the operation if appropriate.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * The advanced functions may sometimes return an internal entry, such as
+ * a retry entry or a zero entry.
This function sets up the @xas to restart + * the walk from the head of the array if needed. + * + * Context: Any context. + * Return: true if the operation needs to be retried. + */ +static inline bool xas_retry(struct xa_state *xas, const void *entry) +{ + if (xa_is_zero(entry)) + return true; + if (!xa_is_retry(entry)) + return false; + xas_reset(xas); + return true; +} + +void *xas_load(struct xa_state *); +void *xas_store(struct xa_state *, void *entry); +void *xas_find(struct xa_state *, unsigned long max); +void *xas_find_conflict(struct xa_state *); + +bool xas_get_mark(const struct xa_state *, xa_mark_t); +void xas_set_mark(const struct xa_state *, xa_mark_t); +void xas_clear_mark(const struct xa_state *, xa_mark_t); +void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t); +void xas_init_marks(const struct xa_state *); + +bool xas_nomem(struct xa_state *, gfp_t); +void xas_pause(struct xa_state *); + +void xas_create_range(struct xa_state *); + +/** + * xas_reload() - Refetch an entry from the xarray. + * @xas: XArray operation state. + * + * Use this function to check that a previously loaded entry still has + * the same value. This is useful for the lockless pagecache lookup where + * we walk the array with only the RCU lock to protect us, lock the page, + * then check that the page hasn't moved since we looked it up. + * + * The caller guarantees that @xas is still valid. If it may be in an + * error or restart state, call xas_load() instead. + * + * Return: The entry at this location in the xarray. + */ +static inline void *xas_reload(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (node) + return xa_entry(xas->xa, node, xas->xa_offset); + return xa_head(xas->xa); +} + +/** + * xas_set() - Set up XArray operation state for a different index. + * @xas: XArray operation state. + * @index: New index into the XArray. + * + * Move the operation state to refer to a different index. This will + * have the effect of starting a walk from the top; see xas_next() + * to move to an adjacent index. + */ +static inline void xas_set(struct xa_state *xas, unsigned long index) +{ + xas->xa_index = index; + xas->xa_node = XAS_RESTART; +} + +/** + * xas_set_order() - Set up XArray operation state for a multislot entry. + * @xas: XArray operation state. + * @index: Target of the operation. + * @order: Entry occupies 2^@order indices. + */ +static inline void xas_set_order(struct xa_state *xas, unsigned long index, + unsigned int order) +{ +#ifdef CONFIG_XARRAY_MULTI + xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; + xas->xa_shift = order - (order % XA_CHUNK_SHIFT); + xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + xas->xa_node = XAS_RESTART; +#else + BUG_ON(order > 0); + xas_set(xas, index); +#endif +} + +/** + * xas_set_update() - Set up XArray operation state for a callback. + * @xas: XArray operation state. + * @update: Function to call when updating a node. + * + * The XArray can notify a caller after it has updated an xa_node. + * This is advanced functionality and is only needed by the page cache. + */ +static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update) +{ + xas->xa_update = update; +} + +/** + * xas_next_entry() - Advance iterator to next present entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * + * xas_next_entry() is an inline function to optimise xarray traversal for + * speed. 
It is equivalent to calling xas_find(), and will call xas_find() + * for all the hard cases. + * + * Return: The next present entry after the one currently referred to by @xas. + */ +static inline void *xas_next_entry(struct xa_state *xas, unsigned long max) +{ + struct xa_node *node = xas->xa_node; + void *entry; + + if (unlikely(xas_not_node(node) || node->shift || + xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK))) + return xas_find(xas, max); + + do { + if (unlikely(xas->xa_index >= max)) + return xas_find(xas, max); + if (unlikely(xas->xa_offset == XA_CHUNK_MASK)) + return xas_find(xas, max); + entry = xa_entry(xas->xa, node, xas->xa_offset + 1); + if (unlikely(xa_is_internal(entry))) + return xas_find(xas, max); + xas->xa_offset++; + xas->xa_index++; + } while (!entry); + + return entry; +} + +/* Private */ +static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance, + xa_mark_t mark) +{ + unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark]; + unsigned int offset = xas->xa_offset; + + if (advance) + offset++; + if (XA_CHUNK_SIZE == BITS_PER_LONG) { + if (offset < XA_CHUNK_SIZE) { + unsigned long data = *addr & (~0UL << offset); + if (data) + return __ffs(data); + } + return XA_CHUNK_SIZE; + } + + return find_next_bit(addr, XA_CHUNK_SIZE, offset); +} + +/** + * xas_next_marked() - Advance iterator to next marked entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * @mark: Mark to search for. + * + * xas_next_marked() is an inline function to optimise xarray traversal for + * speed. It is equivalent to calling xas_find_marked(), and will call + * xas_find_marked() for all the hard cases. + * + * Return: The next marked entry after the one currently referred to by @xas. + */ +static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, + xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset; + + if (unlikely(xas_not_node(node) || node->shift)) + return xas_find_marked(xas, max, mark); + offset = xas_find_chunk(xas, true, mark); + xas->xa_offset = offset; + xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset; + if (xas->xa_index > max) + return NULL; + if (offset == XA_CHUNK_SIZE) + return xas_find_marked(xas, max, mark); + return xa_entry(xas->xa, node, offset); +} + +/* + * If iterating while holding a lock, drop the lock and reschedule + * every %XA_CHECK_SCHED loops. + */ +enum { + XA_CHECK_SCHED = 4096, +}; + +/** + * xas_for_each() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * @max: Maximum index to retrieve from array. + * + * The loop body will be executed for each entry present in the xarray + * between the current xas position and @max. @entry will be set to + * the entry retrieved from the xarray. It is safe to delete entries + * from the array in the loop body. You should hold either the RCU lock + * or the xa_lock while iterating. If you need to drop the lock, call + * xas_pause() first. + */ +#define xas_for_each(xas, entry, max) \ + for (entry = xas_find(xas, max); entry; \ + entry = xas_next_entry(xas, max)) + +/** + * xas_for_each_marked() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * @max: Maximum index to retrieve from array. + * @mark: Mark to search for. + * + * The loop body will be executed for each marked entry in the xarray + * between the current xas position and @max. 
@entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each_marked(xas, entry, max, mark) \
+ for (entry = xas_find_marked(xas, max, mark); entry; \
+ entry = xas_next_marked(xas, max, mark))
+
+/**
+ * xas_for_each_conflict() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ *
+ * The loop body will be executed for each entry in the XArray that lies
+ * within the range specified by @xas. If the loop completes successfully,
+ * any entries that lie in this range will be replaced by @entry. The caller
+ * may break out of the loop; if they do so, the contents of the XArray will
+ * be unchanged. The operation may fail due to an out of memory condition.
+ * The caller may also call xas_set_err() to exit the loop while setting an
+ * error to record the reason.
+ */
+#define xas_for_each_conflict(xas, entry) \
+ while ((entry = xas_find_conflict(xas)))
+
+void *__xas_next(struct xa_state *);
+void *__xas_prev(struct xa_state *);
+
+/**
+ * xas_prev() - Move iterator to previous index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * subtracted from the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index 0, this function wraps
+ * around to %ULONG_MAX.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_prev(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (unlikely(xas_not_node(node) || node->shift ||
+ xas->xa_offset == 0))
+ return __xas_prev(xas);
+
+ xas->xa_index--;
+ xas->xa_offset--;
+ return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
+/**
+ * xas_next() - Move iterator to next index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * added to the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index %ULONG_MAX, this function wraps
+ * around to 0.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_next(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (unlikely(xas_not_node(node) || node->shift ||
+ xas->xa_offset == XA_CHUNK_MASK))
+ return __xas_next(xas);
+
+ xas->xa_index++;
+ xas->xa_offset++;
+ return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
 #endif /* _LINUX_XARRAY_H */
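
To make the normal API above concrete, here is a minimal usage sketch of storing, loading and erasing entries. The array and function names (object_array, object_add and so on) are hypothetical and not part of this patch; only the xa_*() calls come from the header, with errors detected via xa_err() as documented above.

#include <linux/xarray.h>

static DEFINE_XARRAY(object_array);	/* hypothetical array, for illustration */

int object_add(unsigned long index, void *obj)
{
	/* xa_store() returns the previous entry, or an error entry. */
	void *old = xa_store(&object_array, index, obj, GFP_KERNEL);

	return xa_err(old);	/* 0, or a negative errno such as -ENOMEM */
}

void *object_get(unsigned long index)
{
	/* xa_load() never returns an error entry; NULL means "not stored". */
	return xa_load(&object_array, index);
}

void object_del(unsigned long index)
{
	/* No GFP flags needed: erasing never allocates memory. */
	xa_erase(&object_array, index);
}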
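
An allocating XArray defined with DEFINE_XARRAY_ALLOC() hands out unused IDs via xa_alloc(). A sketch under the same assumptions as above; session_register() and its caller-supplied pointer are hypothetical:

static DEFINE_XARRAY_ALLOC(session_ids);	/* hypothetical allocating array */

int session_register(void *session, u32 *out_id)
{
	u32 id = 0;	/* lowest ID to consider; xa_alloc() stores the result here */
	int err = xa_alloc(&session_ids, &id, UINT_MAX, session, GFP_KERNEL);

	if (err)
		return err;	/* -ENOMEM or -ENOSPC */
	*out_id = id;
	return 0;
}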
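
Iteration with xa_for_each() only needs an index variable initialised to the start of the range; XA_PRESENT as the filter visits every non-NULL entry. A hypothetical dump helper:

void object_dump(struct xarray *xa)
{
	void *entry;
	unsigned long index = 0;	/* start of the range to walk */

	/* XA_PRESENT selects all present entries; a mark would filter instead. */
	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT)
		pr_info("index %lu: entry %p\n", index, entry);
}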
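
Marks let entries be tagged and then filtered during iteration. A sketch assuming an entry has already been stored at index 42 (setting a mark on an absent index has no effect):

void object_tag_and_walk(struct xarray *xa)
{
	void *entry;
	unsigned long index = 0;

	xa_set_mark(xa, 42, XA_MARK_0);	/* tag the entry stored at index 42 */

	if (xa_get_mark(xa, 42, XA_MARK_0))
		pr_info("index 42 is tagged\n");

	/* Passing a mark as the filter visits only tagged entries. */
	xa_for_each(xa, entry, index, ULONG_MAX, XA_MARK_0)
		pr_info("tagged entry at %lu\n", index);
}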
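
The __xa_*() variants let a caller keep private state coherent with the array under the same xa_lock, as the struct xarray documentation suggests. A sketch with a hypothetical object_cache; GFP_ATOMIC does not permit blocking, so per the comment above __xa_erase() the lock is never dropped and @nr stays in sync:

struct object_cache {
	struct xarray entries;
	unsigned long nr;	/* protected by entries.xa_lock */
};

int object_cache_insert(struct object_cache *c, unsigned long index, void *p)
{
	int err;

	xa_lock(&c->entries);
	err = __xa_insert(&c->entries, index, p, GFP_ATOMIC);
	if (!err)
		c->nr++;	/* count only successful insertions */
	xa_unlock(&c->entries);

	return err;
}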
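
Finally, a sketch of the advanced API: an xa_state walk under the RCU lock which copes with internal entries via xas_retry(), as the xas_retry() documentation above describes. The counting helper is hypothetical:

unsigned long object_count(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);	/* cursor starting at index 0 */
	void *entry;
	unsigned long count = 0;

	rcu_read_lock();
	xas_for_each(&xas, entry, max) {
		if (xas_retry(&xas, entry))	/* restart on retry/zero entries */
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}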