author    | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:41:03 -0600
committer | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:41:03 -0600
commit    | 4effe18fc0da27ae5d51a702841e87fa13b8a32d (patch)
tree      | 468f353a3713c93b27e7b2c262efd747e66ff199 /io_uring/alloc_cache.h
parent    | 32e09298c8b3ff29177c825ab711a4a692d4caad (diff)
parent    | f6b543fd03d347e8bf245cee4f2d54eb6ffd8fcb (diff)
Merge branch 'for-5.20/io_uring' into for-5.20/io_uring-zerocopy-send
* for-5.20/io_uring: (716 commits)
io_uring: ensure REQ_F_ISREG is set async offload
net: fix compat pointer in get_compat_msghdr()
io_uring: Don't require reinitable percpu_ref
io_uring: fix types in io_recvmsg_multishot_overflow
io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
io_uring: support multishot in recvmsg
net: copy from user before calling __get_compat_msghdr
net: copy from user before calling __copy_msghdr
io_uring: support 0 length iov in buffer select in compat
io_uring: fix multishot ending when not polled
io_uring: add netmsg cache
io_uring: impose max limit on apoll cache
io_uring: add abstraction around apoll cache
io_uring: move apoll cache to poll.c
io_uring: consolidate hash_locked io-wq handling
io_uring: clear REQ_F_HASH_LOCKED on hash removal
io_uring: don't race double poll setting REQ_F_ASYNC_DATA
io_uring: don't miss setting REQ_F_DOUBLE_POLL
io_uring: disable multishot recvmsg
io_uring: only trace one of complete or overflow
...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/alloc_cache.h')
-rw-r--r-- | io_uring/alloc_cache.h | 53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
new file mode 100644
index 000000000000..729793ae9712
--- /dev/null
+++ b/io_uring/alloc_cache.h
@@ -0,0 +1,53 @@
+#ifndef IOU_ALLOC_CACHE_H
+#define IOU_ALLOC_CACHE_H
+
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX	512
+
+struct io_cache_entry {
+	struct hlist_node	node;
+};
+
+static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
+				      struct io_cache_entry *entry)
+{
+	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+		cache->nr_cached++;
+		hlist_add_head(&entry->node, &cache->list);
+		return true;
+	}
+	return false;
+}
+
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+{
+	if (!hlist_empty(&cache->list)) {
+		struct hlist_node *node = cache->list.first;
+
+		hlist_del(node);
+		return container_of(node, struct io_cache_entry, node);
+	}
+
+	return NULL;
+}
+
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+{
+	INIT_HLIST_HEAD(&cache->list);
+	cache->nr_cached = 0;
+}
+
+static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
+				       void (*free)(struct io_cache_entry *))
+{
+	while (!hlist_empty(&cache->list)) {
+		struct hlist_node *node = cache->list.first;
+
+		hlist_del(node);
+		free(container_of(node, struct io_cache_entry, node));
+	}
+	cache->nr_cached = 0;
+}
+#endif
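The new header implements a small free-object cache: a consumer embeds a struct io_cache_entry in its own type, io_alloc_cache_put() pushes a freed object onto the hlist until the IO_ALLOC_CACHE_MAX cap is reached, and io_alloc_cache_get() pops a cached object for reuse, with container_of() recovering the enclosing structure. A minimal sketch of how a consumer might wire this up is below; the struct and function names are hypothetical illustrations, not from this series, and struct io_alloc_cache itself (the list head plus nr_cached counter) is defined elsewhere in io_uring.

/* Hypothetical consumer type; only the embedded io_cache_entry is required */
struct my_async_data {
	struct io_cache_entry	cache_entry;	/* used by container_of() below */
	/* ... consumer-specific fields ... */
};

static void my_async_data_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct my_async_data, cache_entry));
}

static struct my_async_data *my_async_data_alloc(struct io_alloc_cache *cache)
{
	struct io_cache_entry *entry = io_alloc_cache_get(cache);

	/* Reuse a cached object if one is available, else allocate fresh */
	if (entry)
		return container_of(entry, struct my_async_data, cache_entry);
	return kmalloc(sizeof(struct my_async_data), GFP_KERNEL);
}

static void my_async_data_recycle(struct io_alloc_cache *cache,
				  struct my_async_data *data)
{
	/* Cache the object for reuse unless the cache is already full */
	if (!io_alloc_cache_put(cache, &data->cache_entry))
		kfree(data);
}

On teardown the consumer would call io_alloc_cache_free(cache, my_async_data_free) to drain whatever is still cached. Capping the cache at IO_ALLOC_CACHE_MAX bounds how much memory an idle ring can pin while still avoiding allocator round-trips on hot paths.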