Diffstat (limited to 'fs/fscache')
 -rw-r--r--  fs/fscache/Kconfig     |    3
 -rw-r--r--  fs/fscache/Makefile    |    6
 -rw-r--r--  fs/fscache/cache.c     |  618
 -rw-r--r--  fs/fscache/cookie.c    | 1448
 -rw-r--r--  fs/fscache/fsdef.c     |   98
 -rw-r--r--  fs/fscache/internal.h  |  317
 -rw-r--r--  fs/fscache/io.c        |  376
 -rw-r--r--  fs/fscache/main.c      |  147
 -rw-r--r--  fs/fscache/netfs.c     |   74
 -rw-r--r--  fs/fscache/object.c    | 1125
 -rw-r--r--  fs/fscache/operation.c |  633
 -rw-r--r--  fs/fscache/page.c      | 1242
 -rw-r--r--  fs/fscache/proc.c      |   47
 -rw-r--r--  fs/fscache/stats.c     |  293
 -rw-r--r--  fs/fscache/volume.c    |  517
15 files changed, 2051 insertions(+), 4893 deletions(-)
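The hunks below replace the old tag-based cache registration with refcounted, state-driven cache cookies. For orientation, a cache backend would now drive the lifecycle sketched here. This is an illustrative reconstruction from the functions visible in this diff only: my_cache_start/my_cache_stop, struct my_cache and the backend helpers in the ops table are hypothetical names, and a real struct fscache_cache_ops has more fields than the three this diff happens to dereference.

/* Hypothetical backend registration, sketched from the API in this diff. */
static const struct fscache_cache_ops my_cache_ops = {
	.name			= "mycache",
	.lookup_cookie		= my_lookup_cookie,	/* assumed backend helpers */
	.withdraw_cookie	= my_withdraw_cookie,
	.prepare_to_write	= my_prepare_to_write,
};

static int my_cache_start(struct my_cache *ctx)
{
	struct fscache_cache *cache;

	/* Get or create the cache-level cookie; this moves it to the
	 * IS_PREPARING state, or fails with -EBUSY if the tag is bound.
	 */
	cache = fscache_acquire_cache("mycache");
	if (IS_ERR(cache))
		return PTR_ERR(cache);
	ctx->cache = cache;

	/* Declare the cache open for business: install the ops table and
	 * private data, then switch the state to IS_ACTIVE.
	 */
	return fscache_add_cache(cache, &my_cache_ops, ctx);
}

static void my_cache_stop(struct my_cache *ctx)
{
	/* Stop new accesses and wait for n_accesses to fall to 0... */
	fscache_withdraw_cache(ctx->cache);
	/* ...then reset the state and drop our reference. */
	fscache_relinquish_cache(ctx->cache);
}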
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index b313a978ae0a..76316c4a3fb7 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -38,3 +38,6 @@ config FSCACHE_DEBUG
 	  enabled by setting bits in /sys/modules/fscache/parameter/debug.
 
 	  See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_OLD_API
+	bool
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 03a871d689bb..afb090ea16c4 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -6,13 +6,9 @@
 fscache-y := \
 	cache.o \
 	cookie.o \
-	fsdef.o \
 	io.o \
 	main.o \
-	netfs.o \
-	object.o \
-	operation.o \
-	page.o
+	volume.o
 
 fscache-$(CONFIG_PROC_FS) += proc.o
 fscache-$(CONFIG_FSCACHE_STATS) += stats.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index bd4f44c1cce0..2749933852a9 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -1,209 +1,229 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /* FS-Cache cache handling
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  */
 
 #define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include "internal.h"
 
-LIST_HEAD(fscache_cache_list);
+static LIST_HEAD(fscache_caches);
 DECLARE_RWSEM(fscache_addremove_sem);
-DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
-EXPORT_SYMBOL(fscache_cache_cleared_wq);
+EXPORT_SYMBOL(fscache_addremove_sem);
+DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
+EXPORT_SYMBOL(fscache_clearance_waiters);
 
-static LIST_HEAD(fscache_cache_tag_list);
+static atomic_t fscache_cache_debug_id;
 
 /*
- * look up a cache tag
+ * Allocate a cache cookie.
  */
-struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
+static struct fscache_cache *fscache_alloc_cache(const char *name)
 {
-	struct fscache_cache_tag *tag, *xtag;
-
-	/* firstly check for the existence of the tag under read lock */
-	down_read(&fscache_addremove_sem);
-
-	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
-		if (strcmp(tag->name, name) == 0) {
-			atomic_inc(&tag->usage);
-			up_read(&fscache_addremove_sem);
-			return tag;
-		}
-	}
-
-	up_read(&fscache_addremove_sem);
-
-	/* the tag does not exist - create a candidate */
-	xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
-	if (!xtag)
-		/* return a dummy tag if out of memory */
-		return ERR_PTR(-ENOMEM);
-
-	atomic_set(&xtag->usage, 1);
-	strcpy(xtag->name, name);
-
-	/* write lock, search again and add if still not present */
-	down_write(&fscache_addremove_sem);
+	struct fscache_cache *cache;
 
-	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
-		if (strcmp(tag->name, name) == 0) {
-			atomic_inc(&tag->usage);
-			up_write(&fscache_addremove_sem);
-			kfree(xtag);
-			return tag;
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (cache) {
+		if (name) {
+			cache->name = kstrdup(name, GFP_KERNEL);
+			if (!cache->name) {
+				kfree(cache);
+				return NULL;
+			}
 		}
+		refcount_set(&cache->ref, 1);
+		INIT_LIST_HEAD(&cache->cache_link);
+		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
 	}
-
-	list_add_tail(&xtag->link, &fscache_cache_tag_list);
-	up_write(&fscache_addremove_sem);
-	return xtag;
+	return cache;
 }
 
-/*
- * release a reference to a cache tag
- */
-void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static bool fscache_get_cache_maybe(struct fscache_cache *cache,
+				    enum fscache_cache_trace where)
 {
-	if (tag != ERR_PTR(-ENOMEM)) {
-		down_write(&fscache_addremove_sem);
+	bool success;
+	int ref;
 
-		if (atomic_dec_and_test(&tag->usage))
-			list_del_init(&tag->link);
-		else
-			tag = NULL;
-
-		up_write(&fscache_addremove_sem);
-
-		kfree(tag);
-	}
+	success = __refcount_inc_not_zero(&cache->ref, &ref);
+	if (success)
+		trace_fscache_cache(cache->debug_id, ref + 1, where);
+	return success;
 }
 
 /*
- * select a cache in which to store an object
- * - the cache addremove semaphore must be at least read-locked by the caller
- * - the object will never be an index
+ * Look up a cache cookie.
  */
-struct fscache_cache *fscache_select_cache_for_object(
-	struct fscache_cookie *cookie)
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
 {
-	struct fscache_cache_tag *tag;
-	struct fscache_object *object;
-	struct fscache_cache *cache;
+	struct fscache_cache *candidate, *cache, *unnamed = NULL;
 
-	_enter("");
+	/* firstly check for the existence of the cache under read lock */
+	down_read(&fscache_addremove_sem);
 
-	if (list_empty(&fscache_cache_list)) {
-		_leave(" = NULL [no cache]");
-		return NULL;
+	list_for_each_entry(cache, &fscache_caches, cache_link) {
+		if (cache->name && name && strcmp(cache->name, name) == 0 &&
+		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+			goto got_cache_r;
+		if (!cache->name && !name &&
+		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+			goto got_cache_r;
 	}
 
-	/* we check the parent to determine the cache to use */
-	spin_lock(&cookie->lock);
+	if (!name) {
+		list_for_each_entry(cache, &fscache_caches, cache_link) {
+			if (cache->name &&
+			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+				goto got_cache_r;
+		}
+	}
 
-	/* the first in the parent's backing list should be the preferred
-	 * cache */
-	if (!hlist_empty(&cookie->backing_objects)) {
-		object = hlist_entry(cookie->backing_objects.first,
-				     struct fscache_object, cookie_link);
+	up_read(&fscache_addremove_sem);
 
-		cache = object->cache;
-		if (fscache_object_is_dying(object) ||
-		    test_bit(FSCACHE_IOERROR, &cache->flags))
-			cache = NULL;
+	/* the cache does not exist - create a candidate */
+	candidate = fscache_alloc_cache(name);
+	if (!candidate)
+		return ERR_PTR(-ENOMEM);
 
-		spin_unlock(&cookie->lock);
-		_leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
-		return cache;
-	}
+	/* write lock, search again and add if still not present */
+	down_write(&fscache_addremove_sem);
 
-	/* the parent is unbacked */
-	if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
-		/* cookie not an index and is unbacked */
-		spin_unlock(&cookie->lock);
-		_leave(" = NULL [cookie ub,ni]");
-		return NULL;
+	list_for_each_entry(cache, &fscache_caches, cache_link) {
+		if (cache->name && name && strcmp(cache->name, name) == 0 &&
+		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+			goto got_cache_w;
+		if (!cache->name) {
+			unnamed = cache;
+			if (!name &&
+			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+				goto got_cache_w;
+		}
 	}
 
-	spin_unlock(&cookie->lock);
+	if (unnamed && is_cache &&
+	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
+		goto use_unnamed_cache;
 
-	if (!cookie->def->select_cache)
-		goto no_preference;
+	if (!name) {
+		list_for_each_entry(cache, &fscache_caches, cache_link) {
+			if (cache->name &&
+			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+				goto got_cache_w;
+		}
+	}
 
-	/* ask the netfs for its preference */
-	tag = cookie->def->select_cache(cookie->parent->netfs_data,
-					cookie->netfs_data);
-	if (!tag)
-		goto no_preference;
+	list_add_tail(&candidate->cache_link, &fscache_caches);
+	trace_fscache_cache(candidate->debug_id,
+			    refcount_read(&candidate->ref),
+			    fscache_cache_new_acquire);
+	up_write(&fscache_addremove_sem);
+	return candidate;
 
-	if (tag == ERR_PTR(-ENOMEM)) {
-		_leave(" = NULL [nomem tag]");
-		return NULL;
-	}
+got_cache_r:
+	up_read(&fscache_addremove_sem);
+	return cache;
+use_unnamed_cache:
+	cache = unnamed;
+	cache->name = candidate->name;
+	candidate->name = NULL;
+got_cache_w:
+	up_write(&fscache_addremove_sem);
+	kfree(candidate->name);
+	kfree(candidate);
+	return cache;
+}
 
-	if (!tag->cache) {
-		_leave(" = NULL [unbacked tag]");
-		return NULL;
-	}
+/**
+ * fscache_acquire_cache - Acquire a cache-level cookie.
+ * @name: The name of the cache.
+ *
+ * Get a cookie to represent an actual cache.  If a name is given and there is
+ * a nameless cache record available, this will acquire that and set its name,
+ * directing all the volumes using it to this cache.
+ *
+ * The cache will be switched over to the preparing state if not currently in
+ * use, otherwise -EBUSY will be returned.
+ */
+struct fscache_cache *fscache_acquire_cache(const char *name)
+{
+	struct fscache_cache *cache;
 
-	if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
-		return NULL;
+	ASSERT(name);
+	cache = fscache_lookup_cache(name, true);
+	if (IS_ERR(cache))
+		return cache;
 
-	_leave(" = %s [specific]", tag->name);
-	return tag->cache;
+	if (!fscache_set_cache_state_maybe(cache,
+					   FSCACHE_CACHE_IS_NOT_PRESENT,
+					   FSCACHE_CACHE_IS_PREPARING)) {
+		pr_warn("Cache tag %s in use\n", name);
+		fscache_put_cache(cache, fscache_cache_put_cache);
+		return ERR_PTR(-EBUSY);
+	}
 
-no_preference:
-	/* netfs has no preference - just select first cache */
-	cache = list_entry(fscache_cache_list.next,
-			   struct fscache_cache, link);
-	_leave(" = %s [first]", cache->tag->name);
 	return cache;
 }
+EXPORT_SYMBOL(fscache_acquire_cache);
 
 /**
- * fscache_init_cache - Initialise a cache record
- * @cache: The cache record to be initialised
- * @ops: The cache operations to be installed in that record
- * @idfmt: Format string to define identifier
- * @...: sprintf-style arguments
+ * fscache_put_cache - Release a cache-level cookie.
+ * @cache: The cache cookie to be released
+ * @where: An indication of where the release happened
  *
- * Initialise a record of a cache and fill in the name.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
+ * Release the caller's reference on a cache-level cookie.  The @where
+ * indication should give information about the circumstances in which the call
+ * occurs and will be logged through a tracepoint.
  */
-void fscache_init_cache(struct fscache_cache *cache,
-			const struct fscache_cache_ops *ops,
-			const char *idfmt,
-			...)
+void fscache_put_cache(struct fscache_cache *cache,
+		       enum fscache_cache_trace where)
 {
-	va_list va;
+	unsigned int debug_id = cache->debug_id;
+	bool zero;
+	int ref;
 
-	memset(cache, 0, sizeof(*cache));
+	if (IS_ERR_OR_NULL(cache))
+		return;
 
-	cache->ops = ops;
+	zero = __refcount_dec_and_test(&cache->ref, &ref);
+	trace_fscache_cache(debug_id, ref - 1, where);
 
-	va_start(va, idfmt);
-	vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
-	va_end(va);
+	if (zero) {
+		down_write(&fscache_addremove_sem);
+		list_del_init(&cache->cache_link);
+		up_write(&fscache_addremove_sem);
+		kfree(cache->name);
+		kfree(cache);
+	}
+}
 
-	INIT_WORK(&cache->op_gc, fscache_operation_gc);
-	INIT_LIST_HEAD(&cache->link);
-	INIT_LIST_HEAD(&cache->object_list);
-	INIT_LIST_HEAD(&cache->op_gc_list);
-	spin_lock_init(&cache->object_list_lock);
-	spin_lock_init(&cache->op_gc_list_lock);
+/**
+ * fscache_relinquish_cache - Reset cache state and release cookie
+ * @cache: The cache cookie to be released
+ *
+ * Reset the state of a cache and release the caller's reference on a cache
+ * cookie.
+ */
+void fscache_relinquish_cache(struct fscache_cache *cache)
+{
+	enum fscache_cache_trace where =
+		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
+		fscache_cache_put_prep_failed :
+		fscache_cache_put_relinquish;
+
+	cache->ops = NULL;
+	cache->cache_priv = NULL;
+	smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+	fscache_put_cache(cache, where);
 }
-EXPORT_SYMBOL(fscache_init_cache);
+EXPORT_SYMBOL(fscache_relinquish_cache);
 
 /**
  * fscache_add_cache - Declare a cache as being open for business
- * @cache: The record describing the cache
- * @ifsdef: The record of the cache object describing the top-level index
- * @tagname: The tag describing this cache
+ * @cache: The cache-level cookie representing the cache
+ * @ops: Table of cache operations to use
+ * @cache_priv: Private data for the cache record
  *
  * Add a cache to the system, making it available for netfs's to use.
  *
@@ -211,93 +231,97 @@ EXPORT_SYMBOL(fscache_init_cache);
  * description.
  */
 int fscache_add_cache(struct fscache_cache *cache,
-		      struct fscache_object *ifsdef,
-		      const char *tagname)
+		      const struct fscache_cache_ops *ops,
+		      void *cache_priv)
 {
-	struct fscache_cache_tag *tag;
-
-	ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
-	BUG_ON(!cache->ops);
-	BUG_ON(!ifsdef);
+	int n_accesses;
 
-	cache->flags = 0;
-	ifsdef->event_mask =
-		((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
-		~(1 << FSCACHE_OBJECT_EV_CLEARED);
-	__set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);
+	_enter("{%s,%s}", ops->name, cache->name);
 
-	if (!tagname)
-		tagname = cache->identifier;
+	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
 
-	BUG_ON(!tagname[0]);
-
-	_enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);
-
-	/* we use the cache tag to uniquely identify caches */
-	tag = __fscache_lookup_cache_tag(tagname);
-	if (IS_ERR(tag))
-		goto nomem;
-
-	if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
-		goto tag_in_use;
-
-	cache->kobj = kobject_create_and_add(tagname, fscache_root);
-	if (!cache->kobj)
-		goto error;
-
-	ifsdef->cache = cache;
-	cache->fsdef = ifsdef;
+	/* Get a ref on the cache cookie and keep its n_accesses counter raised
+	 * by 1 to prevent wakeups from transitioning it to 0 until we're
+	 * withdrawing caching services from it.
+	 */
+	n_accesses = atomic_inc_return(&cache->n_accesses);
+	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+				   n_accesses, fscache_access_cache_pin);
 
 	down_write(&fscache_addremove_sem);
 
-	tag->cache = cache;
-	cache->tag = tag;
-
-	/* add the cache to the list */
-	list_add(&cache->link, &fscache_cache_list);
-
-	/* add the cache's netfs definition index object to the cache's
-	 * list */
-	spin_lock(&cache->object_list_lock);
-	list_add_tail(&ifsdef->cache_link, &cache->object_list);
-	spin_unlock(&cache->object_list_lock);
-
-	/* add the cache's netfs definition index object to the top level index
-	 * cookie as a known backing object */
-	spin_lock(&fscache_fsdef_index.lock);
-
-	hlist_add_head(&ifsdef->cookie_link,
-		       &fscache_fsdef_index.backing_objects);
-
-	refcount_inc(&fscache_fsdef_index.ref);
+	cache->ops = ops;
+	cache->cache_priv = cache_priv;
+	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
 
-	/* done */
-	spin_unlock(&fscache_fsdef_index.lock);
 	up_write(&fscache_addremove_sem);
-
-	pr_notice("Cache \"%s\" added (type %s)\n",
-		  cache->tag->name, cache->ops->name);
-	kobject_uevent(cache->kobj, KOBJ_ADD);
-
-	_leave(" = 0 [%s]", cache->identifier);
+	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
+	_leave(" = 0 [%s]", cache->name);
 	return 0;
+}
+EXPORT_SYMBOL(fscache_add_cache);
 
-tag_in_use:
-	pr_err("Cache tag '%s' already in use\n", tagname);
-	__fscache_release_cache_tag(tag);
-	_leave(" = -EXIST");
-	return -EEXIST;
-
-error:
-	__fscache_release_cache_tag(tag);
-	_leave(" = -EINVAL");
-	return -EINVAL;
+/**
+ * fscache_begin_cache_access - Pin a cache so it can be accessed
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing it and returns true if successful.  This works as follows:
+ *
+ *  (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ *      then we return false to indicate access was not permitted.
+ *
+ *  (2) If the cache tests as live, then we increment the n_accesses count and
+ *      then recheck the liveness, ending the access if it ceased to be live.
+ *
+ *  (3) When we end the access, we decrement n_accesses and wake up the any
+ *      waiters if it reaches 0.
+ *
+ *  (4) Whilst the cache is caching, n_accesses is kept artificially
+ *      incremented to prevent wakeups from happening.
+ *
+ *  (5) When the cache is taken offline, the state is changed to prevent new
+ *      accesses, n_accesses is decremented and we wait for n_accesses to
+ *      become 0.
+ */
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+	int n_accesses;
+
+	if (!fscache_cache_is_live(cache))
+		return false;
+
+	n_accesses = atomic_inc_return(&cache->n_accesses);
+	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
+	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+				   n_accesses, why);
+	if (!fscache_cache_is_live(cache)) {
+		fscache_end_cache_access(cache, fscache_access_unlive);
+		return false;
+	}
+	return true;
+}
 
-nomem:
-	_leave(" = -ENOMEM");
-	return -ENOMEM;
+/**
+ * fscache_end_cache_access - Unpin a cache at the end of an access.
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache after we've accessed it.  The @why indicator is merely
+ * provided for tracing purposes.
+ */
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+	int n_accesses;
+
+	smp_mb__before_atomic();
+	n_accesses = atomic_dec_return(&cache->n_accesses);
+	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+				   n_accesses, why);
+	if (n_accesses == 0)
+		wake_up_var(&cache->n_accesses);
 }
-EXPORT_SYMBOL(fscache_add_cache);
 
 /**
  * fscache_io_error - Note a cache I/O error
@@ -311,106 +335,94 @@ EXPORT_SYMBOL(fscache_add_cache);
  */
 void fscache_io_error(struct fscache_cache *cache)
 {
-	if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
+	if (fscache_set_cache_state_maybe(cache,
+					  FSCACHE_CACHE_IS_ACTIVE,
+					  FSCACHE_CACHE_GOT_IOERROR))
 		pr_err("Cache '%s' stopped due to I/O error\n",
-		       cache->ops->name);
+		       cache->name);
 }
 EXPORT_SYMBOL(fscache_io_error);
 
-/*
- * request withdrawal of all the objects in a cache
- * - all the objects being withdrawn are moved onto the supplied list
+/**
+ * fscache_withdraw_cache - Withdraw a cache from the active service
+ * @cache: The cache cookie
+ *
+ * Begin the process of withdrawing a cache from service.  This stops new
+ * cache-level and volume-level accesses from taking place and waits for
+ * currently ongoing cache-level accesses to end.
  */
-static void fscache_withdraw_all_objects(struct fscache_cache *cache,
-					 struct list_head *dying_objects)
+void fscache_withdraw_cache(struct fscache_cache *cache)
 {
-	struct fscache_object *object;
+	int n_accesses;
 
-	while (!list_empty(&cache->object_list)) {
-		spin_lock(&cache->object_list_lock);
+	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
+		  cache->name, atomic_read(&cache->object_count));
 
-		if (!list_empty(&cache->object_list)) {
-			object = list_entry(cache->object_list.next,
-					    struct fscache_object, cache_link);
-			list_move_tail(&object->cache_link, dying_objects);
+	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);
 
-			_debug("withdraw %x", object->cookie->debug_id);
+	/* Allow wakeups on dec-to-0 */
+	n_accesses = atomic_dec_return(&cache->n_accesses);
+	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+				   n_accesses, fscache_access_cache_unpin);
 
-			/* This must be done under object_list_lock to prevent
-			 * a race with fscache_drop_object().
-			 */
-			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
-		}
-
-		spin_unlock(&cache->object_list_lock);
-		cond_resched();
-	}
+	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
 }
+EXPORT_SYMBOL(fscache_withdraw_cache);
 
-/**
- * fscache_withdraw_cache - Withdraw a cache from the active service
- * @cache: The record describing the cache
- *
- * Withdraw a cache from service, unbinding all its cache objects from the
- * netfs cookies they're currently representing.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
+#ifdef CONFIG_PROC_FS
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
+
+/*
+ * Generate a list of caches in /proc/fs/fscache/caches
  */
-void fscache_withdraw_cache(struct fscache_cache *cache)
+static int fscache_caches_seq_show(struct seq_file *m, void *v)
 {
-	LIST_HEAD(dying_objects);
+	struct fscache_cache *cache;
 
-	_enter("");
+	if (v == &fscache_caches) {
+		seq_puts(m,
+			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
+			 "======== ===== ===== ===== ===== = ===============\n"
+			 );
+		return 0;
+	}
 
-	pr_notice("Withdrawing cache \"%s\"\n",
-		  cache->tag->name);
+	cache = list_entry(v, struct fscache_cache, cache_link);
+	seq_printf(m,
+		   "%08x %5d %5d %5d %5d %c %s\n",
+		   cache->debug_id,
+		   refcount_read(&cache->ref),
+		   atomic_read(&cache->n_volumes),
+		   atomic_read(&cache->object_count),
+		   atomic_read(&cache->n_accesses),
+		   fscache_cache_states[cache->state],
+		   cache->name ?: "-");
+	return 0;
+}
 
-	/* make the cache unavailable for cookie acquisition */
-	if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
-		BUG();
+static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
+	__acquires(fscache_addremove_sem)
+{
+	down_read(&fscache_addremove_sem);
+	return seq_list_start_head(&fscache_caches, *_pos);
+}
 
-	down_write(&fscache_addremove_sem);
-	list_del_init(&cache->link);
-	cache->tag->cache = NULL;
-	up_write(&fscache_addremove_sem);
+static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+	return seq_list_next(v, &fscache_caches, _pos);
+}
 
-	/* make sure all pages pinned by operations on behalf of the netfs are
-	 * written to disk */
-	fscache_stat(&fscache_n_cop_sync_cache);
-	cache->ops->sync_cache(cache);
-	fscache_stat_d(&fscache_n_cop_sync_cache);
-
-	/* dissociate all the netfs pages backed by this cache from the block
-	 * mappings in the cache */
-	fscache_stat(&fscache_n_cop_dissociate_pages);
-	cache->ops->dissociate_pages(cache);
-	fscache_stat_d(&fscache_n_cop_dissociate_pages);
-
-	/* we now have to destroy all the active objects pertaining to this
-	 * cache - which we do by passing them off to thread pool to be
-	 * disposed of */
-	_debug("destroy");
-
-	fscache_withdraw_all_objects(cache, &dying_objects);
-
-	/* wait for all extant objects to finish their outstanding operations
-	 * and go away */
-	_debug("wait for finish");
-	wait_event(fscache_cache_cleared_wq,
-		   atomic_read(&cache->object_count) == 0);
-	_debug("wait for clearance");
-	wait_event(fscache_cache_cleared_wq,
-		   list_empty(&cache->object_list));
-	_debug("cleared");
-	ASSERT(list_empty(&dying_objects));
-
-	kobject_put(cache->kobj);
-
-	clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
-	fscache_release_cache_tag(cache->tag);
-	cache->tag = NULL;
-
-	_leave("");
+static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
+{
+	up_read(&fscache_addremove_sem);
 }
-EXPORT_SYMBOL(fscache_withdraw_cache);
+
+const struct seq_operations fscache_caches_seq_ops = {
+	.start  = fscache_caches_seq_start,
+	.next   = fscache_caches_seq_next,
+	.stop   = fscache_caches_seq_stop,
+	.show   = fscache_caches_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
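The begin/end access pair added above is the pattern the rest of the rewrite leans on, so a caller-side illustration may help. This is a sketch only: my_do_cache_io and my_issue_io are hypothetical, and fscache_access_io_write is assumed to be one of the enum fscache_access_trace reasons defined by the accompanying tracepoint changes rather than by this file.

/* Illustrative only: bracket an operation so the cache can't be
 * withdrawn mid-access.
 */
static int my_do_cache_io(struct fscache_cache *cache)
{
	int ret;

	if (!fscache_begin_cache_access(cache, fscache_access_io_write))
		return -ENOBUFS;	/* cache not live; caller falls back */

	ret = my_issue_io(cache->cache_priv);	/* hypothetical helper */

	/* Drop the pin; the last unpin wakes fscache_withdraw_cache(). */
	fscache_end_cache_access(cache, fscache_access_io_write);
	return ret;
}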
cookie->inline_key : cookie->key; - for (loop = 0; loop < cookie->key_len; loop++) - pr_cont("%02x", k[loop]); - pr_cont("'\n"); + pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k); } -void fscache_free_cookie(struct fscache_cookie *cookie) +static void fscache_free_cookie(struct fscache_cookie *cookie) { - if (cookie) { - BUG_ON(!hlist_empty(&cookie->backing_objects)); - write_lock(&fscache_cookies_lock); - list_del(&cookie->proc_link); - write_unlock(&fscache_cookies_lock); - if (cookie->aux_len > sizeof(cookie->inline_aux)) - kfree(cookie->aux); - if (cookie->key_len > sizeof(cookie->inline_key)) - kfree(cookie->key); - kmem_cache_free(fscache_cookie_jar, cookie); + if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) { + spin_lock(&fscache_cookie_lru_lock); + list_del_init(&cookie->commit_link); + spin_unlock(&fscache_cookie_lru_lock); + fscache_stat_d(&fscache_n_cookies_lru); + fscache_stat(&fscache_n_cookies_lru_removed); + } + + if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) { + fscache_print_cookie(cookie, 'F'); + return; } + + write_lock(&fscache_cookies_lock); + list_del(&cookie->proc_link); + write_unlock(&fscache_cookies_lock); + if (cookie->aux_len > sizeof(cookie->inline_aux)) + kfree(cookie->aux); + if (cookie->key_len > sizeof(cookie->inline_key)) + kfree(cookie->key); + fscache_stat_d(&fscache_n_cookies); + kmem_cache_free(fscache_cookie_jar, cookie); +} + +static void __fscache_queue_cookie(struct fscache_cookie *cookie) +{ + if (!queue_work(fscache_wq, &cookie->work)) + fscache_put_cookie(cookie, fscache_cookie_put_over_queued); +} + +static void fscache_queue_cookie(struct fscache_cookie *cookie, + enum fscache_cookie_trace where) +{ + fscache_get_cookie(cookie, where); + __fscache_queue_cookie(cookie); } /* + * Initialise the access gate on a cookie by setting a flag to prevent the + * state machine from being queued when the access counter transitions to 0. + * We're only interested in this when we withdraw caching services from the + * cookie. + */ +static void fscache_init_access_gate(struct fscache_cookie *cookie) +{ + int n_accesses; + + n_accesses = atomic_read(&cookie->n_accesses); + trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref), + n_accesses, fscache_access_cache_pin); + set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags); +} + +/** + * fscache_end_cookie_access - Unpin a cache at the end of an access. + * @cookie: A data file cookie + * @why: An indication of the circumstances of the access for tracing + * + * Unpin a cache cookie after we've accessed it and bring a deferred + * relinquishment or withdrawal state into effect. + * + * The @why indicator is provided for tracing purposes. + */ +void fscache_end_cookie_access(struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + int n_accesses; + + smp_mb__before_atomic(); + n_accesses = atomic_dec_return(&cookie->n_accesses); + trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref), + n_accesses, why); + if (n_accesses == 0 && + !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags)) + fscache_queue_cookie(cookie, fscache_cookie_get_end_access); +} +EXPORT_SYMBOL(fscache_end_cookie_access); + +/* + * Pin the cache behind a cookie so that we can access it. + */ +static void __fscache_begin_cookie_access(struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + int n_accesses; + + n_accesses = atomic_inc_return(&cookie->n_accesses); + smp_mb__after_atomic(); /* (Future) read state after is-caching. 
+ * Reread n_accesses after is-caching + */ + trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref), + n_accesses, why); +} + +/** + * fscache_begin_cookie_access - Pin a cache so data can be accessed + * @cookie: A data file cookie + * @why: An indication of the circumstances of the access for tracing + * + * Attempt to pin the cache to prevent it from going away whilst we're + * accessing data and returns true if successful. This works as follows: + * + * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not + * set), we return false to indicate access was not permitted. + * + * (2) If the cookie is being cached, we increment its n_accesses count and + * then recheck the IS_CACHING flag, ending the access if it got cleared. + * + * (3) When we end the access, we decrement the cookie's n_accesses and wake + * up the any waiters if it reaches 0. + * + * (4) Whilst the cookie is actively being cached, its n_accesses is kept + * artificially incremented to prevent wakeups from happening. + * + * (5) When the cache is taken offline or if the cookie is culled, the flag is + * cleared to prevent new accesses, the cookie's n_accesses is decremented + * and we wait for it to become 0. + * + * The @why indicator are merely provided for tracing purposes. + */ +bool fscache_begin_cookie_access(struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) + return false; + __fscache_begin_cookie_access(cookie, why); + if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) || + !fscache_cache_is_live(cookie->volume->cache)) { + fscache_end_cookie_access(cookie, fscache_access_unlive); + return false; + } + return true; +} + +static inline void wake_up_cookie_state(struct fscache_cookie *cookie) +{ + /* Use a barrier to ensure that waiters see the state variable + * change, as spin_unlock doesn't guarantee a barrier. + * + * See comments over wake_up_bit() and waitqueue_active(). + */ + smp_mb(); + wake_up_var(&cookie->state); +} + +/* + * Change the state a cookie is at and wake up anyone waiting for that. Impose + * an ordering between the stuff stored in the cookie and the state member. + * Paired with fscache_cookie_state(). + */ +static void __fscache_set_cookie_state(struct fscache_cookie *cookie, + enum fscache_cookie_state state) +{ + smp_store_release(&cookie->state, state); +} + +static void fscache_set_cookie_state(struct fscache_cookie *cookie, + enum fscache_cookie_state state) +{ + spin_lock(&cookie->lock); + __fscache_set_cookie_state(cookie, state); + spin_unlock(&cookie->lock); + wake_up_cookie_state(cookie); +} + +/** + * fscache_cookie_lookup_negative - Note negative lookup + * @cookie: The cookie that was being looked up + * + * Note that some part of the metadata path in the cache doesn't exist and so + * we can release any waiting readers in the certain knowledge that there's + * nothing for them to actually read. + * + * This function uses no locking and must only be called from the state machine. + */ +void fscache_cookie_lookup_negative(struct fscache_cookie *cookie) +{ + set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING); +} +EXPORT_SYMBOL(fscache_cookie_lookup_negative); + +/** + * fscache_resume_after_invalidation - Allow I/O to resume after invalidation + * @cookie: The cookie that was invalidated + * + * Tell fscache that invalidation is sufficiently complete that I/O can be + * allowed again. 
+ */ +void fscache_resume_after_invalidation(struct fscache_cookie *cookie) +{ + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE); +} +EXPORT_SYMBOL(fscache_resume_after_invalidation); + +/** + * fscache_caching_failed - Report that a failure stopped caching on a cookie + * @cookie: The cookie that was affected + * + * Tell fscache that caching on a cookie needs to be stopped due to some sort + * of failure. + * + * This function uses no locking and must only be called from the state machine. + */ +void fscache_caching_failed(struct fscache_cookie *cookie) +{ + clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags); + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED); +} +EXPORT_SYMBOL(fscache_caching_failed); + +/* * Set the index key in a cookie. The cookie struct has space for a 16-byte * key plus length and hash, but if that's not big enough, it's instead a * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then @@ -87,38 +275,35 @@ void fscache_free_cookie(struct fscache_cookie *cookie) static int fscache_set_key(struct fscache_cookie *cookie, const void *index_key, size_t index_key_len) { - u32 *buf; - int bufs; + void *buf; + size_t buf_size; - bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf)); + buf_size = round_up(index_key_len, sizeof(__le32)); if (index_key_len > sizeof(cookie->inline_key)) { - buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL); + buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; cookie->key = buf; } else { - buf = (u32 *)cookie->inline_key; + buf = cookie->inline_key; } memcpy(buf, index_key, index_key_len); - cookie->key_hash = fscache_hash(0, buf, bufs); + cookie->key_hash = fscache_hash(cookie->volume->key_hash, + buf, buf_size); return 0; } -static long fscache_compare_cookie(const struct fscache_cookie *a, - const struct fscache_cookie *b) +static bool fscache_cookie_same(const struct fscache_cookie *a, + const struct fscache_cookie *b) { const void *ka, *kb; - if (a->key_hash != b->key_hash) - return (long)a->key_hash - (long)b->key_hash; - if (a->parent != b->parent) - return (long)a->parent - (long)b->parent; - if (a->key_len != b->key_len) - return (long)a->key_len - (long)b->key_len; - if (a->type != b->type) - return (long)a->type - (long)b->type; + if (a->key_hash != b->key_hash || + a->volume != b->volume || + a->key_len != b->key_len) + return false; if (a->key_len <= sizeof(a->inline_key)) { ka = &a->inline_key; @@ -127,7 +312,7 @@ static long fscache_compare_cookie(const struct fscache_cookie *a, ka = a->key; kb = b->key; } - return memcmp(ka, kb, a->key_len); + return memcmp(ka, kb, a->key_len) == 0; } static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1); @@ -135,12 +320,11 @@ static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1); /* * Allocate a cookie. 
*/ -struct fscache_cookie *fscache_alloc_cookie( - struct fscache_cookie *parent, - const struct fscache_cookie_def *def, +static struct fscache_cookie *fscache_alloc_cookie( + struct fscache_volume *volume, + u8 advice, const void *index_key, size_t index_key_len, const void *aux_data, size_t aux_data_len, - void *netfs_data, loff_t object_size) { struct fscache_cookie *cookie; @@ -149,9 +333,15 @@ struct fscache_cookie *fscache_alloc_cookie( cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); if (!cookie) return NULL; + fscache_stat(&fscache_n_cookies); - cookie->key_len = index_key_len; - cookie->aux_len = aux_data_len; + cookie->volume = volume; + cookie->advice = advice; + cookie->key_len = index_key_len; + cookie->aux_len = aux_data_len; + cookie->object_size = object_size; + if (object_size == 0) + __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); if (fscache_set_key(cookie, index_key, index_key_len) < 0) goto nomem; @@ -165,30 +355,16 @@ struct fscache_cookie *fscache_alloc_cookie( } refcount_set(&cookie->ref, 1); - atomic_set(&cookie->n_children, 0); cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id); - - /* We keep the active count elevated until relinquishment to prevent an - * attempt to wake up every time the object operations queue quiesces. - */ - atomic_set(&cookie->n_active, 1); - - cookie->def = def; - cookie->parent = parent; - cookie->netfs_data = netfs_data; - cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET); - cookie->type = def->type; spin_lock_init(&cookie->lock); - spin_lock_init(&cookie->stores_lock); - INIT_HLIST_HEAD(&cookie->backing_objects); - - /* radix tree insertion won't use the preallocation pool unless it's - * told it may not wait */ - INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); + INIT_LIST_HEAD(&cookie->commit_link); + INIT_WORK(&cookie->work, fscache_cookie_worker); + __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); write_lock(&fscache_cookies_lock); list_add_tail(&cookie->proc_link, &fscache_cookies); write_unlock(&fscache_cookies_lock); + fscache_see_cookie(cookie, fscache_cookie_new_acquire); return cookie; nomem: @@ -196,13 +372,28 @@ nomem: return NULL; } +static void fscache_wait_on_collision(struct fscache_cookie *candidate, + struct fscache_cookie *wait_for) +{ + enum fscache_cookie_state *statep = &wait_for->state; + + wait_var_event_timeout(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED, + 20 * HZ); + if (READ_ONCE(*statep) != FSCACHE_COOKIE_STATE_DROPPED) { + pr_notice("Potential collision c=%08x old: c=%08x", + candidate->debug_id, wait_for->debug_id); + wait_var_event(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED); + } +} + /* * Attempt to insert the new cookie into the hash. If there's a collision, we - * return the old cookie if it's not in use and an error otherwise. + * wait for the old cookie to complete if it's being relinquished and an error + * otherwise. 
*/ -struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate) +static bool fscache_hash_cookie(struct fscache_cookie *candidate) { - struct fscache_cookie *cursor; + struct fscache_cookie *cursor, *wait_for = NULL; struct hlist_bl_head *h; struct hlist_bl_node *p; unsigned int bucket; @@ -212,64 +403,53 @@ struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate) hlist_bl_lock(h); hlist_bl_for_each_entry(cursor, p, h, hash_link) { - if (fscache_compare_cookie(candidate, cursor) == 0) - goto collision; + if (fscache_cookie_same(candidate, cursor)) { + if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags)) + goto collision; + wait_for = fscache_get_cookie(cursor, + fscache_cookie_get_hash_collision); + break; + } } - __set_bit(FSCACHE_COOKIE_ACQUIRED, &candidate->flags); - fscache_cookie_get(candidate->parent, fscache_cookie_get_acquire_parent); - atomic_inc(&candidate->parent->n_children); + fscache_get_volume(candidate->volume, fscache_volume_get_cookie); + atomic_inc(&candidate->volume->n_cookies); hlist_bl_add_head(&candidate->hash_link, h); + set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags); hlist_bl_unlock(h); - return candidate; -collision: - if (test_and_set_bit(FSCACHE_COOKIE_ACQUIRED, &cursor->flags)) { - trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref), - fscache_cookie_collision); - pr_err("Duplicate cookie detected\n"); - fscache_print_cookie(cursor, 'O'); - fscache_print_cookie(candidate, 'N'); - hlist_bl_unlock(h); - return NULL; + if (wait_for) { + fscache_wait_on_collision(candidate, wait_for); + fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision); } + return true; - fscache_cookie_get(cursor, fscache_cookie_get_reacquire); +collision: + trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref), + fscache_cookie_collision); + pr_err("Duplicate cookie detected\n"); + fscache_print_cookie(cursor, 'O'); + fscache_print_cookie(candidate, 'N'); hlist_bl_unlock(h); - return cursor; + return false; } /* - * request a cookie to represent an object (index, datafile, xattr, etc) - * - parent specifies the parent object - * - the top level index cookie for each netfs is stored in the fscache_netfs - * struct upon registration - * - def points to the definition - * - the netfs_data will be passed to the functions pointed to in *def - * - all attached caches will be searched to see if they contain this object - * - index objects aren't stored on disk until there's a dependent file that - * needs storing - * - other objects are stored in a selected cache immediately, and all the - * indices forming the path to it are instantiated if necessary - * - we never let on to the netfs about errors - * - we may set a negative cookie pointer, but that's okay + * Request a cookie to represent a data storage object within a volume. + * + * We never let on to the netfs about errors. We may set a negative cookie + * pointer, but that's okay */ struct fscache_cookie *__fscache_acquire_cookie( - struct fscache_cookie *parent, - const struct fscache_cookie_def *def, + struct fscache_volume *volume, + u8 advice, const void *index_key, size_t index_key_len, const void *aux_data, size_t aux_data_len, - void *netfs_data, - loff_t object_size, - bool enable) + loff_t object_size) { - struct fscache_cookie *candidate, *cookie; - - BUG_ON(!def); + struct fscache_cookie *cookie; - _enter("{%s},{%s},%p,%u", - parent ? 
(char *) parent->def->name : "<no-parent>", - def->name, netfs_data, enable); + _enter("V=%x", volume->debug_id); if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255) return NULL; @@ -280,563 +460,440 @@ struct fscache_cookie *__fscache_acquire_cookie( fscache_stat(&fscache_n_acquires); - /* if there's no parent cookie, then we don't create one here either */ - if (!parent) { - fscache_stat(&fscache_n_acquires_null); - _leave(" [no parent]"); - return NULL; - } - - /* validate the definition */ - BUG_ON(!def->name[0]); - - BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX && - parent->type != FSCACHE_COOKIE_TYPE_INDEX); - - candidate = fscache_alloc_cookie(parent, def, - index_key, index_key_len, - aux_data, aux_data_len, - netfs_data, object_size); - if (!candidate) { + cookie = fscache_alloc_cookie(volume, advice, + index_key, index_key_len, + aux_data, aux_data_len, + object_size); + if (!cookie) { fscache_stat(&fscache_n_acquires_oom); - _leave(" [ENOMEM]"); return NULL; } - cookie = fscache_hash_cookie(candidate); - if (!cookie) { - trace_fscache_cookie(candidate->debug_id, 1, - fscache_cookie_discard); - goto out; - } - - if (cookie == candidate) - candidate = NULL; - - switch (cookie->type) { - case FSCACHE_COOKIE_TYPE_INDEX: - fscache_stat(&fscache_n_cookie_index); - break; - case FSCACHE_COOKIE_TYPE_DATAFILE: - fscache_stat(&fscache_n_cookie_data); - break; - default: - fscache_stat(&fscache_n_cookie_special); - break; + if (!fscache_hash_cookie(cookie)) { + fscache_see_cookie(cookie, fscache_cookie_discard); + fscache_free_cookie(cookie); + return NULL; } trace_fscache_acquire(cookie); - - if (enable) { - /* if the object is an index then we need do nothing more here - * - we create indices on disk when we need them as an index - * may exist in multiple caches */ - if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) { - if (fscache_acquire_non_index_cookie(cookie, object_size) == 0) { - set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); - } else { - atomic_dec(&parent->n_children); - fscache_cookie_put(cookie, - fscache_cookie_put_acquire_nobufs); - fscache_stat(&fscache_n_acquires_nobufs); - _leave(" = NULL"); - return NULL; - } - } else { - set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); - } - } - fscache_stat(&fscache_n_acquires_ok); - -out: - fscache_free_cookie(candidate); + _leave(" = c=%08x", cookie->debug_id); return cookie; } EXPORT_SYMBOL(__fscache_acquire_cookie); /* - * Enable a cookie to permit it to accept new operations. + * Prepare a cache object to be written to. 
*/ -void __fscache_enable_cookie(struct fscache_cookie *cookie, - const void *aux_data, - loff_t object_size, - bool (*can_enable)(void *data), - void *data) +static void fscache_prepare_to_write(struct fscache_cookie *cookie) { - _enter("%x", cookie->debug_id); - - trace_fscache_enable(cookie); - - wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK, - TASK_UNINTERRUPTIBLE); - - fscache_update_aux(cookie, aux_data); - - if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags)) - goto out_unlock; - - if (can_enable && !can_enable(data)) { - /* The netfs decided it didn't want to enable after all */ - } else if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) { - /* Wait for outstanding disablement to complete */ - __fscache_wait_on_invalidate(cookie); - - if (fscache_acquire_non_index_cookie(cookie, object_size) == 0) - set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); - } else { - set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); - } - -out_unlock: - clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags); - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK); + cookie->volume->cache->ops->prepare_to_write(cookie); } -EXPORT_SYMBOL(__fscache_enable_cookie); /* - * acquire a non-index cookie - * - this must make sure the index chain is instantiated and instantiate the - * object representation too + * Look up a cookie in the cache. */ -static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie, - loff_t object_size) +static void fscache_perform_lookup(struct fscache_cookie *cookie) { - struct fscache_object *object; - struct fscache_cache *cache; - int ret; + enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed; + bool need_withdraw = false; _enter(""); - set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); - - /* now we need to see whether the backing objects for this cookie yet - * exist, if not there'll be nothing to search */ - down_read(&fscache_addremove_sem); - - if (list_empty(&fscache_cache_list)) { - up_read(&fscache_addremove_sem); - _leave(" = 0 [no caches]"); - return 0; - } - - /* select a cache in which to store the object */ - cache = fscache_select_cache_for_object(cookie->parent); - if (!cache) { - up_read(&fscache_addremove_sem); - fscache_stat(&fscache_n_acquires_no_cache); - _leave(" = -ENOMEDIUM [no cache]"); - return -ENOMEDIUM; - } - - _debug("cache %s", cache->tag->name); - - set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); - - /* ask the cache to allocate objects for this cookie and its parent - * chain */ - ret = fscache_alloc_object(cache, cookie); - if (ret < 0) { - up_read(&fscache_addremove_sem); - _leave(" = %d", ret); - return ret; - } - - spin_lock(&cookie->lock); - if (hlist_empty(&cookie->backing_objects)) { - spin_unlock(&cookie->lock); - goto unavailable; + if (!cookie->volume->cache_priv) { + fscache_create_volume(cookie->volume, true); + if (!cookie->volume->cache_priv) { + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); + goto out; + } } - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - - fscache_set_store_limit(object, object_size); - - /* initiate the process of looking up all the objects in the chain - * (done by fscache_initialise_object()) */ - fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD); - - spin_unlock(&cookie->lock); - - /* we may be required to wait for lookup to complete at this point */ - if (!fscache_defer_lookup) { - wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, - TASK_UNINTERRUPTIBLE); - if 
(test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags)) - goto unavailable; + if (!cookie->volume->cache->ops->lookup_cookie(cookie)) { + if (cookie->state != FSCACHE_COOKIE_STATE_FAILED) + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); + need_withdraw = true; + _leave(" [fail]"); + goto out; } - up_read(&fscache_addremove_sem); - _leave(" = 0 [deferred]"); - return 0; + fscache_see_cookie(cookie, fscache_cookie_see_active); + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE); + trace = fscache_access_lookup_cookie_end; -unavailable: - up_read(&fscache_addremove_sem); - _leave(" = -ENOBUFS"); - return -ENOBUFS; +out: + fscache_end_cookie_access(cookie, trace); + if (need_withdraw) + fscache_withdraw_cookie(cookie); + fscache_end_volume_access(cookie->volume, cookie, trace); } /* - * recursively allocate cache object records for a cookie/cache combination - * - caller must be holding the addremove sem + * Begin the process of looking up a cookie. We offload the actual process to + * a worker thread. */ -static int fscache_alloc_object(struct fscache_cache *cache, - struct fscache_cookie *cookie) +static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify) { - struct fscache_object *object; - int ret; - - _enter("%s,%x{%s}", cache->tag->name, cookie->debug_id, cookie->def->name); - - spin_lock(&cookie->lock); - hlist_for_each_entry(object, &cookie->backing_objects, - cookie_link) { - if (object->cache == cache) - goto object_already_extant; + if (will_modify) { + set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags); + set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags); } - spin_unlock(&cookie->lock); - - /* ask the cache to allocate an object (we may end up with duplicate - * objects at this stage, but we sort that out later) */ - fscache_stat(&fscache_n_cop_alloc_object); - object = cache->ops->alloc_object(cache, cookie); - fscache_stat_d(&fscache_n_cop_alloc_object); - if (IS_ERR(object)) { - fscache_stat(&fscache_n_object_no_alloc); - ret = PTR_ERR(object); - goto error; - } - - ASSERTCMP(object->cookie, ==, cookie); - fscache_stat(&fscache_n_object_alloc); - - object->debug_id = atomic_inc_return(&fscache_object_debug_id); - - _debug("ALLOC OBJ%x: %s {%lx}", - object->debug_id, cookie->def->name, object->events); - - ret = fscache_alloc_object(cache, cookie->parent); - if (ret < 0) - goto error_put; - - /* only attach if we managed to allocate all we needed, otherwise - * discard the object we just allocated and instead use the one - * attached to the cookie */ - if (fscache_attach_object(cookie, object) < 0) { - fscache_stat(&fscache_n_cop_put_object); - cache->ops->put_object(object, fscache_obj_put_attach_fail); - fscache_stat_d(&fscache_n_cop_put_object); - } - - _leave(" = 0"); - return 0; - -object_already_extant: - ret = -ENOBUFS; - if (fscache_object_is_dying(object) || - fscache_cache_is_broken(object)) { - spin_unlock(&cookie->lock); - goto error; - } - spin_unlock(&cookie->lock); - _leave(" = 0 [found]"); - return 0; - -error_put: - fscache_stat(&fscache_n_cop_put_object); - cache->ops->put_object(object, fscache_obj_put_alloc_fail); - fscache_stat_d(&fscache_n_cop_put_object); -error: - _leave(" = %d", ret); - return ret; + if (!fscache_begin_volume_access(cookie->volume, cookie, + fscache_access_lookup_cookie)) + return false; + + __fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie); + __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP); + set_bit(FSCACHE_COOKIE_IS_CACHING, 
&cookie->flags); + set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags); + return true; } /* - * attach a cache object to a cookie + * Start using the cookie for I/O. This prevents the backing object from being + * reaped by VM pressure. */ -static int fscache_attach_object(struct fscache_cookie *cookie, - struct fscache_object *object) +void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify) { - struct fscache_object *p; - struct fscache_cache *cache = object->cache; - int ret; + enum fscache_cookie_state state; + bool queue = false; + int n_active; - _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id); + _enter("c=%08x", cookie->debug_id); - ASSERTCMP(object->cookie, ==, cookie); + if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), + "Trying to use relinquished cookie\n")) + return; spin_lock(&cookie->lock); - /* there may be multiple initial creations of this object, but we only - * want one */ - ret = -EEXIST; - hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) { - if (p->cache == object->cache) { - if (fscache_object_is_dying(p)) - ret = -ENOBUFS; - goto cant_attach_object; - } - } + n_active = atomic_inc_return(&cookie->n_active); + trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref), + n_active, atomic_read(&cookie->n_accesses), + will_modify ? + fscache_active_use_modify : fscache_active_use); + +again: + state = fscache_cookie_state(cookie); + switch (state) { + case FSCACHE_COOKIE_STATE_QUIESCENT: + queue = fscache_begin_lookup(cookie, will_modify); + break; - /* pin the parent object */ - spin_lock_nested(&cookie->parent->lock, 1); - hlist_for_each_entry(p, &cookie->parent->backing_objects, - cookie_link) { - if (p->cache == object->cache) { - if (fscache_object_is_dying(p)) { - ret = -ENOBUFS; - spin_unlock(&cookie->parent->lock); - goto cant_attach_object; - } - object->parent = p; - spin_lock(&p->lock); - p->n_children++; - spin_unlock(&p->lock); - break; + case FSCACHE_COOKIE_STATE_LOOKING_UP: + case FSCACHE_COOKIE_STATE_CREATING: + if (will_modify) + set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags); + break; + case FSCACHE_COOKIE_STATE_ACTIVE: + case FSCACHE_COOKIE_STATE_INVALIDATING: + if (will_modify && + !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) { + set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags); + queue = true; } - } - spin_unlock(&cookie->parent->lock); - - /* attach to the cache's object list */ - if (list_empty(&object->cache_link)) { - spin_lock(&cache->object_list_lock); - list_add(&object->cache_link, &cache->object_list); - spin_unlock(&cache->object_list_lock); - } - - /* Attach to the cookie. The object already has a ref on it. */ - hlist_add_head(&object->cookie_link, &cookie->backing_objects); - ret = 0; - -cant_attach_object: - spin_unlock(&cookie->lock); - _leave(" = %d", ret); - return ret; -} - -/* - * Invalidate an object. Callable with spinlocks held. - */ -void __fscache_invalidate(struct fscache_cookie *cookie) -{ - struct fscache_object *object; - - _enter("{%s}", cookie->def->name); - - fscache_stat(&fscache_n_invalidates); + break; - /* Only permit invalidation of data files. Invalidating an index will - * require the caller to release all its attachments to the tree rooted - * there, and if it's doing that, it may as well just retire the - * cookie. 
- */ - ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE); + case FSCACHE_COOKIE_STATE_FAILED: + case FSCACHE_COOKIE_STATE_WITHDRAWING: + break; - /* If there's an object, we tell the object state machine to handle the - * invalidation on our behalf, otherwise there's nothing to do. - */ - if (!hlist_empty(&cookie->backing_objects)) { + case FSCACHE_COOKIE_STATE_LRU_DISCARDING: + spin_unlock(&cookie->lock); + wait_var_event(&cookie->state, + fscache_cookie_state(cookie) != + FSCACHE_COOKIE_STATE_LRU_DISCARDING); spin_lock(&cookie->lock); + goto again; - if (fscache_cookie_enabled(cookie) && - !hlist_empty(&cookie->backing_objects) && - !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING, - &cookie->flags)) { - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, - cookie_link); - if (fscache_object_is_live(object)) - fscache_raise_event( - object, FSCACHE_OBJECT_EV_INVALIDATE); - } - - spin_unlock(&cookie->lock); + case FSCACHE_COOKIE_STATE_DROPPED: + case FSCACHE_COOKIE_STATE_RELINQUISHING: + WARN(1, "Can't use cookie in state %u\n", state); + break; } + spin_unlock(&cookie->lock); + if (queue) + fscache_queue_cookie(cookie, fscache_cookie_get_use_work); _leave(""); } -EXPORT_SYMBOL(__fscache_invalidate); +EXPORT_SYMBOL(__fscache_use_cookie); -/* - * Wait for object invalidation to complete. - */ -void __fscache_wait_on_invalidate(struct fscache_cookie *cookie) +static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie) { - _enter("%x", cookie->debug_id); + clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags); + if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) + return; - wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING, - TASK_UNINTERRUPTIBLE); + cookie->unused_at = jiffies; + spin_lock(&fscache_cookie_lru_lock); + if (list_empty(&cookie->commit_link)) { + fscache_get_cookie(cookie, fscache_cookie_get_lru); + fscache_stat(&fscache_n_cookies_lru); + } + list_move_tail(&cookie->commit_link, &fscache_cookie_lru); - _leave(""); + spin_unlock(&fscache_cookie_lru_lock); + timer_reduce(&fscache_cookie_lru_timer, + jiffies + fscache_lru_cookie_timeout); } -EXPORT_SYMBOL(__fscache_wait_on_invalidate); /* - * update the index entries backing a cookie + * Stop using the cookie for I/O. */ -void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) +void __fscache_unuse_cookie(struct fscache_cookie *cookie, + const void *aux_data, const loff_t *object_size) { - struct fscache_object *object; - - fscache_stat(&fscache_n_updates); - - if (!cookie) { - fscache_stat(&fscache_n_updates_null); - _leave(" [no cookie]"); + unsigned int debug_id = cookie->debug_id; + unsigned int r = refcount_read(&cookie->ref); + unsigned int a = atomic_read(&cookie->n_accesses); + unsigned int c; + + if (aux_data || object_size) + __fscache_update_cookie(cookie, aux_data, object_size); + + /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ + c = atomic_fetch_add_unless(&cookie->n_active, -1, 1); + if (c != 1) { + trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse); return; } - _enter("{%s}", cookie->def->name); - spin_lock(&cookie->lock); - - fscache_update_aux(cookie, aux_data); - - if (fscache_cookie_enabled(cookie)) { - /* update the index entry on disk in each cache backing this - * cookie. 
- */ - hlist_for_each_entry(object, - &cookie->backing_objects, cookie_link) { - fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); - } - } - + r = refcount_read(&cookie->ref); + a = atomic_read(&cookie->n_accesses); + c = atomic_dec_return(&cookie->n_active); + trace_fscache_active(debug_id, r, c, a, fscache_active_unuse); + if (c == 0) + fscache_unuse_cookie_locked(cookie); spin_unlock(&cookie->lock); - _leave(""); } -EXPORT_SYMBOL(__fscache_update_cookie); +EXPORT_SYMBOL(__fscache_unuse_cookie); /* - * Disable a cookie to stop it from accepting new requests from the netfs. + * Perform work upon the cookie, such as committing its cache state, + * relinquishing it or withdrawing the backing cache. We're protected from the + * cache going away under us as object withdrawal must come through this + * non-reentrant work item. */ -void __fscache_disable_cookie(struct fscache_cookie *cookie, - const void *aux_data, - bool invalidate) +static void fscache_cookie_state_machine(struct fscache_cookie *cookie) { - struct fscache_object *object; - bool awaken = false; + enum fscache_cookie_state state; + bool wake = false; - _enter("%x,%u", cookie->debug_id, invalidate); + _enter("c=%x", cookie->debug_id); - trace_fscache_disable(cookie); - - ASSERTCMP(atomic_read(&cookie->n_active), >, 0); - - if (atomic_read(&cookie->n_children) != 0) { - pr_err("Cookie '%s' still has children\n", - cookie->def->name); - BUG(); - } +again: + spin_lock(&cookie->lock); +again_locked: + state = cookie->state; + switch (state) { + case FSCACHE_COOKIE_STATE_QUIESCENT: + /* The QUIESCENT state is jumped to the LOOKING_UP state by + * fscache_use_cookie(). + */ - wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK, - TASK_UNINTERRUPTIBLE); + if (atomic_read(&cookie->n_accesses) == 0 && + test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) { + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_RELINQUISHING); + wake = true; + goto again_locked; + } + break; - fscache_update_aux(cookie, aux_data); + case FSCACHE_COOKIE_STATE_LOOKING_UP: + spin_unlock(&cookie->lock); + fscache_init_access_gate(cookie); + fscache_perform_lookup(cookie); + goto again; - if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags)) - goto out_unlock_enable; + case FSCACHE_COOKIE_STATE_INVALIDATING: + spin_unlock(&cookie->lock); + fscache_perform_invalidation(cookie); + goto again; + + case FSCACHE_COOKIE_STATE_ACTIVE: + if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) { + spin_unlock(&cookie->lock); + fscache_prepare_to_write(cookie); + spin_lock(&cookie->lock); + } + if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) { + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_LRU_DISCARDING); + wake = true; + goto again_locked; + } + fallthrough; - /* If the cookie is being invalidated, wait for that to complete first - * so that we can reuse the flag. 
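 *
 * The cookie state machine above follows the usual shape for long-running
 * work: drop the lock, do the work, then loop back and re-read the state,
 * since it may have been changed concurrently.  Schematically:
 *
 *	for (;;) {
 *		spin_lock(&cookie->lock);
 *		state = cookie->state;
 *		if (state == FSCACHE_COOKIE_STATE_LOOKING_UP) {
 *			spin_unlock(&cookie->lock);
 *			fscache_perform_lookup(cookie);	// may sleep
 *			continue;			// re-evaluate the state
 *		}
 *		...
 *		spin_unlock(&cookie->lock);
 *		break;
 *	}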
- */ - __fscache_wait_on_invalidate(cookie); + case FSCACHE_COOKIE_STATE_FAILED: + if (atomic_read(&cookie->n_accesses) != 0) + break; + if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) { + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_RELINQUISHING); + wake = true; + goto again_locked; + } + if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) { + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_WITHDRAWING); + wake = true; + goto again_locked; + } + break; - /* Dispose of the backing objects */ - set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags); + case FSCACHE_COOKIE_STATE_LRU_DISCARDING: + case FSCACHE_COOKIE_STATE_RELINQUISHING: + case FSCACHE_COOKIE_STATE_WITHDRAWING: + if (cookie->cache_priv) { + spin_unlock(&cookie->lock); + cookie->volume->cache->ops->withdraw_cookie(cookie); + spin_lock(&cookie->lock); + } - spin_lock(&cookie->lock); - if (!hlist_empty(&cookie->backing_objects)) { - hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { - if (invalidate) - set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); - clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); - fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); + switch (state) { + case FSCACHE_COOKIE_STATE_RELINQUISHING: + fscache_see_cookie(cookie, fscache_cookie_see_relinquish); + fscache_unhash_cookie(cookie); + __fscache_set_cookie_state(cookie, + FSCACHE_COOKIE_STATE_DROPPED); + wake = true; + goto out; + case FSCACHE_COOKIE_STATE_LRU_DISCARDING: + fscache_see_cookie(cookie, fscache_cookie_see_lru_discard); + break; + case FSCACHE_COOKIE_STATE_WITHDRAWING: + fscache_see_cookie(cookie, fscache_cookie_see_withdraw); + break; + default: + BUG(); } - } else { - if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) - awaken = true; - } - spin_unlock(&cookie->lock); - if (awaken) - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); - /* Wait for cessation of activity requiring access to the netfs (when - * n_active reaches 0). This makes sure outstanding reads and writes - * have completed. - */ - if (!atomic_dec_and_test(&cookie->n_active)) { - wait_var_event(&cookie->n_active, - !atomic_read(&cookie->n_active)); - } + clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags); + clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags); + clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags); + clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags); + set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); + wake = true; + goto again_locked; - /* Make sure any pending writes are cancelled. 
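 *
 * External requests reach the new state machine through a flag
 * handshake: the requester sets one of the FSCACHE_COOKIE_DO_* bits and
 * queues the cookie's work item, and the worker consumes the bit (e.g.
 * with test_and_clear_bit()) so each request is acted on at most once.
 * Sketch, ignoring the access-count gating that
 * __fscache_withdraw_cookie() adds:
 *
 *	set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
 *	fscache_queue_cookie(cookie, fscache_cookie_get_end_access);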
*/ - if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) - fscache_invalidate_writes(cookie); + case FSCACHE_COOKIE_STATE_DROPPED: + break; - /* Reset the cookie state if it wasn't relinquished */ - if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { - atomic_inc(&cookie->n_active); - set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + default: + WARN_ONCE(1, "Cookie %x in unexpected state %u\n", + cookie->debug_id, state); + break; } -out_unlock_enable: - clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags); - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK); +out: + spin_unlock(&cookie->lock); + if (wake) + wake_up_cookie_state(cookie); _leave(""); } -EXPORT_SYMBOL(__fscache_disable_cookie); + +static void fscache_cookie_worker(struct work_struct *work) +{ + struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work); + + fscache_see_cookie(cookie, fscache_cookie_see_work); + fscache_cookie_state_machine(cookie); + fscache_put_cookie(cookie, fscache_cookie_put_work); +} /* - * release a cookie back to the cache - * - the object will be marked as recyclable on disk if retire is true - * - all dependents of this cookie must have already been unregistered - * (indices/files/pages) + * Wait for the object to become inactive. The cookie's work item will be + * scheduled when someone transitions n_accesses to 0 - but if someone's + * already done that, schedule it anyway. */ -void __fscache_relinquish_cookie(struct fscache_cookie *cookie, - const void *aux_data, - bool retire) +static void __fscache_withdraw_cookie(struct fscache_cookie *cookie) { - fscache_stat(&fscache_n_relinquishes); - if (retire) - fscache_stat(&fscache_n_relinquishes_retire); + int n_accesses; + bool unpinned; + + unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags); + + /* Need to read the access count after unpinning */ + n_accesses = atomic_read(&cookie->n_accesses); + if (unpinned) + trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref), + n_accesses, fscache_access_cache_unpin); + if (n_accesses == 0) + fscache_queue_cookie(cookie, fscache_cookie_get_end_access); +} - if (!cookie) { - fscache_stat(&fscache_n_relinquishes_null); - _leave(" [no cookie]"); - return; - } +static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie) +{ + fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one); - _enter("%x{%s,%d},%d", - cookie->debug_id, cookie->def->name, - atomic_read(&cookie->n_active), retire); + spin_lock(&cookie->lock); + if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE || + time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) || + atomic_read(&cookie->n_active) > 0) { + spin_unlock(&cookie->lock); + fscache_stat(&fscache_n_cookies_lru_removed); + } else { + set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags); + spin_unlock(&cookie->lock); + fscache_stat(&fscache_n_cookies_lru_expired); + _debug("lru c=%x", cookie->debug_id); + __fscache_withdraw_cookie(cookie); + } - trace_fscache_relinquish(cookie, retire); + fscache_put_cookie(cookie, fscache_cookie_put_lru); +} - /* No further netfs-accessing operations on this cookie permitted */ - if (test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) - BUG(); +static void fscache_cookie_lru_worker(struct work_struct *work) +{ + struct fscache_cookie *cookie; + unsigned long unused_at; - __fscache_disable_cookie(cookie, aux_data, retire); + spin_lock(&fscache_cookie_lru_lock); - /* Clear pointers back to the netfs */ - cookie->netfs_data = NULL; - 
cookie->def = NULL; - BUG_ON(!radix_tree_empty(&cookie->stores)); + while (!list_empty(&fscache_cookie_lru)) { + cookie = list_first_entry(&fscache_cookie_lru, + struct fscache_cookie, commit_link); + unused_at = cookie->unused_at + fscache_lru_cookie_timeout; + if (time_before(jiffies, unused_at)) { + timer_reduce(&fscache_cookie_lru_timer, unused_at); + break; + } - if (cookie->parent) { - ASSERTCMP(refcount_read(&cookie->parent->ref), >, 0); - ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0); - atomic_dec(&cookie->parent->n_children); + list_del_init(&cookie->commit_link); + fscache_stat_d(&fscache_n_cookies_lru); + spin_unlock(&fscache_cookie_lru_lock); + fscache_cookie_lru_do_one(cookie); + spin_lock(&fscache_cookie_lru_lock); } - /* Dispose of the netfs's link to the cookie */ - fscache_cookie_put(cookie, fscache_cookie_put_relinquish); + spin_unlock(&fscache_cookie_lru_lock); +} - _leave(""); +static void fscache_cookie_lru_timed_out(struct timer_list *timer) +{ + queue_work(fscache_wq, &fscache_cookie_lru_work); +} + +static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie) +{ + bool need_put = false; + + if (!list_empty(&cookie->commit_link)) { + spin_lock(&fscache_cookie_lru_lock); + if (!list_empty(&cookie->commit_link)) { + list_del_init(&cookie->commit_link); + fscache_stat_d(&fscache_n_cookies_lru); + fscache_stat(&fscache_n_cookies_lru_dropped); + need_put = true; + } + spin_unlock(&fscache_cookie_lru_lock); + if (need_put) + fscache_put_cookie(cookie, fscache_cookie_put_lru); + } } -EXPORT_SYMBOL(__fscache_relinquish_cookie); /* * Remove a cookie from the hash table. @@ -851,43 +908,91 @@ static void fscache_unhash_cookie(struct fscache_cookie *cookie) hlist_bl_lock(h); hlist_bl_del(&cookie->hash_link); + clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags); hlist_bl_unlock(h); + fscache_stat(&fscache_n_relinquishes_dropped); } +static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie) +{ + fscache_cookie_drop_from_lru(cookie); + __fscache_withdraw_cookie(cookie); +} + +/** + * fscache_withdraw_cookie - Mark a cookie for withdrawal + * @cookie: The cookie to be withdrawn. + * + * Allow the cache backend to withdraw the backing for a cookie for its own + * reasons, even if that cookie is in active use. + */ +void fscache_withdraw_cookie(struct fscache_cookie *cookie) +{ + set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags); + fscache_drop_withdraw_cookie(cookie); +} +EXPORT_SYMBOL(fscache_withdraw_cookie); + /* - * Drop a reference to a cookie. + * Allow the netfs to release a cookie back to the cache. 
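 * The cookie must be idle by this point: every fscache_use_cookie()
 * must have been paired with an fscache_unuse_cookie() so that n_active
 * has reached zero (this is asserted below).  A typical netfs lifetime,
 * sketched with the public wrappers (error handling omitted):
 *
 *	fscache_use_cookie(cookie, false);
 *	...read/write through the cache...
 *	fscache_unuse_cookie(cookie, NULL, NULL);
 *	fscache_relinquish_cookie(cookie, false);
 *
 * Notes: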
+ * - the object will be marked as recyclable on disk if retire is true */ -void fscache_cookie_put(struct fscache_cookie *cookie, - enum fscache_cookie_trace where) +void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) { - struct fscache_cookie *parent; - int ref; + fscache_stat(&fscache_n_relinquishes); + if (retire) + fscache_stat(&fscache_n_relinquishes_retire); + + _enter("c=%08x{%d},%d", + cookie->debug_id, atomic_read(&cookie->n_active), retire); - _enter("%x", cookie->debug_id); + if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), + "Cookie c=%x already relinquished\n", cookie->debug_id)) + return; - do { - unsigned int cookie_debug_id = cookie->debug_id; - bool zero = __refcount_dec_and_test(&cookie->ref, &ref); + if (retire) + set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags); + trace_fscache_relinquish(cookie, retire); - trace_fscache_cookie(cookie_debug_id, ref - 1, where); - if (!zero) - return; + ASSERTCMP(atomic_read(&cookie->n_active), ==, 0); + ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0); + atomic_dec(&cookie->volume->n_cookies); - parent = cookie->parent; + if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) { + set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags); + fscache_drop_withdraw_cookie(cookie); + } else { + fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED); fscache_unhash_cookie(cookie); - fscache_free_cookie(cookie); + } + fscache_put_cookie(cookie, fscache_cookie_put_relinquish); +} +EXPORT_SYMBOL(__fscache_relinquish_cookie); - cookie = parent; - where = fscache_cookie_put_parent; - } while (cookie); +/* + * Drop a reference to a cookie. + */ +void fscache_put_cookie(struct fscache_cookie *cookie, + enum fscache_cookie_trace where) +{ + struct fscache_volume *volume = cookie->volume; + unsigned int cookie_debug_id = cookie->debug_id; + bool zero; + int ref; - _leave(""); + zero = __refcount_dec_and_test(&cookie->ref, &ref); + trace_fscache_cookie(cookie_debug_id, ref - 1, where); + if (zero) { + fscache_free_cookie(cookie); + fscache_put_volume(volume, fscache_volume_put_cookie); + } } +EXPORT_SYMBOL(fscache_put_cookie); /* * Get a reference to a cookie. */ -struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie, +struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie, enum fscache_cookie_trace where) { int ref; @@ -896,85 +1001,73 @@ struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie, trace_fscache_cookie(cookie->debug_id, ref + 1, where); return cookie; } +EXPORT_SYMBOL(fscache_get_cookie); /* - * check the consistency between the netfs inode and the backing cache - * - * NOTE: it only serves no-index type + * Ask the cache to effect invalidation of a cookie. */ -int __fscache_check_consistency(struct fscache_cookie *cookie, - const void *aux_data) +static void fscache_perform_invalidation(struct fscache_cookie *cookie) { - struct fscache_operation *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; - - _enter("%p,", cookie); + if (!cookie->volume->cache->ops->invalidate_cookie(cookie)) + fscache_caching_failed(cookie); + fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end); +} - ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE); +/* + * Invalidate an object. 
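 * Rather than cancelling in-flight operations, the data is marked
 * unreadable (FSCACHE_COOKIE_NO_DATA_TO_READ) and cookie->inval_counter
 * is bumped.  Each operation snapshots the counter when it begins, so a
 * cache backend can spot that its results went stale with a check along
 * the lines of (illustrative only):
 *
 *	if (cres->inval_counter != cookie->inval_counter)
 *		...treat the buffered results as invalid...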
+ */ +void __fscache_invalidate(struct fscache_cookie *cookie, + const void *aux_data, loff_t new_size, + unsigned int flags) +{ + bool is_caching; - if (fscache_wait_for_deferred_lookup(cookie) < 0) - return -ERESTARTSYS; + _enter("c=%x", cookie->debug_id); - if (hlist_empty(&cookie->backing_objects)) - return 0; + fscache_stat(&fscache_n_invalidates); - op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY); - if (!op) - return -ENOMEM; + if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), + "Trying to invalidate relinquished cookie\n")) + return; - fscache_operation_init(cookie, op, NULL, NULL, NULL); - op->flags = FSCACHE_OP_MYTHREAD | - (1 << FSCACHE_OP_WAITING) | - (1 << FSCACHE_OP_UNUSE_COOKIE); - trace_fscache_page_op(cookie, NULL, op, fscache_page_op_check_consistency); + if ((flags & FSCACHE_INVAL_DIO_WRITE) && + test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags)) + return; spin_lock(&cookie->lock); + set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); + fscache_update_aux(cookie, aux_data, &new_size); + cookie->inval_counter++; + trace_fscache_invalidate(cookie, new_size); - fscache_update_aux(cookie, aux_data); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto inconsistent; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - if (test_bit(FSCACHE_IOERROR, &object->cache->flags)) - goto inconsistent; - - op->debug_id = atomic_inc_return(&fscache_op_debug_id); + switch (cookie->state) { + case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */ + default: + spin_unlock(&cookie->lock); + _leave(" [no %u]", cookie->state); + return; - __fscache_use_cookie(cookie); - if (fscache_submit_op(object, op) < 0) - goto submit_failed; + case FSCACHE_COOKIE_STATE_LOOKING_UP: + case FSCACHE_COOKIE_STATE_CREATING: + spin_unlock(&cookie->lock); + _leave(" [look %x]", cookie->inval_counter); + return; - /* the work queue now carries its own ref on the object */ - spin_unlock(&cookie->lock); + case FSCACHE_COOKIE_STATE_ACTIVE: + is_caching = fscache_begin_cookie_access( + cookie, fscache_access_invalidate_cookie); + if (is_caching) + __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING); + spin_unlock(&cookie->lock); + wake_up_cookie_state(cookie); - ret = fscache_wait_for_operation_activation(object, op, NULL, NULL); - if (ret == 0) { - /* ask the cache to honour the operation */ - ret = object->cache->ops->check_consistency(op); - fscache_op_complete(op, false); - } else if (ret == -ENOBUFS) { - ret = 0; + if (is_caching) + fscache_queue_cookie(cookie, fscache_cookie_get_inval_work); + _leave(" [inv]"); + return; } - - fscache_put_operation(op); - _leave(" = %d", ret); - return ret; - -submit_failed: - wake_cookie = __fscache_unuse_cookie(cookie); -inconsistent: - spin_unlock(&cookie->lock); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); - kfree(op); - _leave(" = -ESTALE"); - return -ESTALE; } -EXPORT_SYMBOL(__fscache_check_consistency); +EXPORT_SYMBOL(__fscache_invalidate); /* * Generate a list of extant cookies in /proc/fs/fscache/cookies @@ -983,44 +1076,27 @@ static int fscache_cookies_seq_show(struct seq_file *m, void *v) { struct fscache_cookie *cookie; unsigned int keylen = 0, auxlen = 0; - char _type[3], *type; u8 *p; if (v == &fscache_cookies) { seq_puts(m, - "COOKIE PARENT USAGE CHILD ACT TY FL DEF NETFS_DATA\n" - "======== ======== ===== ===== === == === ================ ==========\n" + "COOKIE VOLUME REF ACT ACC S FL 
DEF \n" + "======== ======== === === === = == ================\n" ); return 0; } cookie = list_entry(v, struct fscache_cookie, proc_link); - switch (cookie->type) { - case 0: - type = "IX"; - break; - case 1: - type = "DT"; - break; - default: - snprintf(_type, sizeof(_type), "%02u", - cookie->type); - type = _type; - break; - } - seq_printf(m, - "%08x %08x %5u %5u %3u %s %03lx %-16s %px", + "%08x %08x %3d %3d %3d %c %02lx", cookie->debug_id, - cookie->parent ? cookie->parent->debug_id : 0, + cookie->volume->debug_id, refcount_read(&cookie->ref), - atomic_read(&cookie->n_children), atomic_read(&cookie->n_active), - type, - cookie->flags, - cookie->def->name, - cookie->netfs_data); + atomic_read(&cookie->n_accesses), + fscache_cookie_states[cookie->state], + cookie->flags); keylen = cookie->key_len; auxlen = cookie->aux_len; diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c deleted file mode 100644 index 0402673c680e..000000000000 --- a/fs/fscache/fsdef.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Filesystem index definition - * - * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - */ - -#define FSCACHE_DEBUG_LEVEL CACHE -#include <linux/module.h> -#include "internal.h" - -static -enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data, - const void *data, - uint16_t datalen, - loff_t object_size); - -/* - * The root index is owned by FS-Cache itself. - * - * When a netfs requests caching facilities, FS-Cache will, if one doesn't - * already exist, create an entry in the root index with the key being the name - * of the netfs ("AFS" for example), and the auxiliary data holding the index - * structure version supplied by the netfs: - * - * FSDEF - * | - * +-----------+ - * | | - * NFS AFS - * [v=1] [v=1] - * - * If an entry with the appropriate name does already exist, the version is - * compared. If the version is different, the entire subtree from that entry - * will be discarded and a new entry created. - * - * The new entry will be an index, and a cookie referring to it will be passed - * to the netfs. This is then the root handle by which the netfs accesses the - * cache. It can create whatever objects it likes in that index, including - * further indices. - */ -static struct fscache_cookie_def fscache_fsdef_index_def = { - .name = ".FS-Cache", - .type = FSCACHE_COOKIE_TYPE_INDEX, -}; - -struct fscache_cookie fscache_fsdef_index = { - .debug_id = 1, - .ref = REFCOUNT_INIT(1), - .n_active = ATOMIC_INIT(1), - .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock), - .backing_objects = HLIST_HEAD_INIT, - .def = &fscache_fsdef_index_def, - .flags = 1 << FSCACHE_COOKIE_ENABLED, - .type = FSCACHE_COOKIE_TYPE_INDEX, -}; -EXPORT_SYMBOL(fscache_fsdef_index); - -/* - * Definition of an entry in the root index. Each entry is an index, keyed to - * a specific netfs and only applicable to a particular version of the index - * structure used by that netfs. 
- */ -struct fscache_cookie_def fscache_fsdef_netfs_def = { - .name = "FSDEF.netfs", - .type = FSCACHE_COOKIE_TYPE_INDEX, - .check_aux = fscache_fsdef_netfs_check_aux, -}; - -/* - * check that the index structure version number stored in the auxiliary data - * matches the one the netfs gave us - */ -static enum fscache_checkaux fscache_fsdef_netfs_check_aux( - void *cookie_netfs_data, - const void *data, - uint16_t datalen, - loff_t object_size) -{ - struct fscache_netfs *netfs = cookie_netfs_data; - uint32_t version; - - _enter("{%s},,%hu", netfs->name, datalen); - - if (datalen != sizeof(version)) { - _leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version)); - return FSCACHE_CHECKAUX_OBSOLETE; - } - - memcpy(&version, data, sizeof(version)); - if (version != netfs->version) { - _leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version); - return FSCACHE_CHECKAUX_OBSOLETE; - } - - _leave(" = OKAY"); - return FSCACHE_CHECKAUX_OKAY; -} diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index c3e4804b8fcb..f121c21590dc 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -1,65 +1,69 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Internal definitions for FS-Cache * - * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ -/* - * Lock order, in the order in which multiple locks should be obtained: - * - fscache_addremove_sem - * - cookie->lock - * - cookie->parent->lock - * - cache->object_list_lock - * - object->lock - * - object->parent->lock - * - cookie->stores_lock - * - fscache_thread_lock - * - */ - #ifdef pr_fmt #undef pr_fmt #endif #define pr_fmt(fmt) "FS-Cache: " fmt +#include <linux/slab.h> #include <linux/fscache-cache.h> #include <trace/events/fscache.h> #include <linux/sched.h> #include <linux/seq_file.h> -#define FSCACHE_MIN_THREADS 4 -#define FSCACHE_MAX_THREADS 32 - /* * cache.c */ -extern struct list_head fscache_cache_list; -extern struct rw_semaphore fscache_addremove_sem; +#ifdef CONFIG_PROC_FS +extern const struct seq_operations fscache_caches_seq_ops; +#endif +bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why); +void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why); +struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache); +void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where); + +static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache) +{ + return smp_load_acquire(&cache->state); +} + +static inline bool fscache_cache_is_live(const struct fscache_cache *cache) +{ + return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE; +} -extern struct fscache_cache *fscache_select_cache_for_object( - struct fscache_cookie *); +static inline void fscache_set_cache_state(struct fscache_cache *cache, + enum fscache_cache_state new_state) +{ + smp_store_release(&cache->state, new_state); + +} + +static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache, + enum fscache_cache_state old_state, + enum fscache_cache_state new_state) +{ + return try_cmpxchg_release(&cache->state, &old_state, new_state); +} /* * cookie.c */ extern struct kmem_cache *fscache_cookie_jar; extern const struct seq_operations fscache_cookies_seq_ops; +extern struct timer_list fscache_cookie_lru_timer; + +extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix); 
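
/*
 * Cache accesses on behalf of a cookie are bracketed by
 * fscache_begin_cookie_access()/fscache_end_cookie_access() so that
 * withdrawal can wait for outstanding I/O to drain before the backing
 * store is torn down.  Typical shape (sketch; "why" is one of the
 * fscache_access_trace reasons, e.g. fscache_access_io_read):
 *
 *	if (!fscache_begin_cookie_access(cookie, why))
 *		return -ENOBUFS;	// cookie or cache not live
 *	...perform the access...
 *	fscache_end_cookie_access(cookie, why);
 */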
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie, + enum fscache_access_trace why); -extern void fscache_free_cookie(struct fscache_cookie *); -extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *, - const struct fscache_cookie_def *, - const void *, size_t, - const void *, size_t, - void *, loff_t); -extern struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *); -extern struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *, - enum fscache_cookie_trace); -extern void fscache_cookie_put(struct fscache_cookie *, - enum fscache_cookie_trace); - -static inline void fscache_cookie_see(struct fscache_cookie *cookie, +static inline void fscache_see_cookie(struct fscache_cookie *cookie, enum fscache_cookie_trace where) { trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref), @@ -67,60 +71,22 @@ static inline void fscache_cookie_see(struct fscache_cookie *cookie, } /* - * fsdef.c + * io.c */ -extern struct fscache_cookie fscache_fsdef_index; -extern struct fscache_cookie_def fscache_fsdef_netfs_def; - -/* - * main.c - */ -extern unsigned fscache_defer_lookup; -extern unsigned fscache_defer_create; -extern unsigned fscache_debug; -extern struct kobject *fscache_root; -extern struct workqueue_struct *fscache_object_wq; -extern struct workqueue_struct *fscache_op_wq; -DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); - -extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n); - -static inline bool fscache_object_congested(void) +static inline void fscache_end_operation(struct netfs_cache_resources *cres) { - return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq); + const struct netfs_cache_ops *ops = fscache_operation_valid(cres); + + if (ops) + ops->end_operation(cres); } /* - * object.c + * main.c */ -extern void fscache_enqueue_object(struct fscache_object *); +extern unsigned fscache_debug; -/* - * operation.c - */ -extern int fscache_submit_exclusive_op(struct fscache_object *, - struct fscache_operation *); -extern int fscache_submit_op(struct fscache_object *, - struct fscache_operation *); -extern int fscache_cancel_op(struct fscache_operation *, bool); -extern void fscache_cancel_all_ops(struct fscache_object *); -extern void fscache_abort_object(struct fscache_object *); -extern void fscache_start_operations(struct fscache_object *); -extern void fscache_operation_gc(struct work_struct *); - -/* - * page.c - */ -extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *); -extern int fscache_wait_for_operation_activation(struct fscache_object *, - struct fscache_operation *, - atomic_t *, - atomic_t *); -extern void fscache_invalidate_writes(struct fscache_cookie *); -struct fscache_retrieval *fscache_alloc_retrieval(struct fscache_cookie *cookie, - struct address_space *mapping, - fscache_rw_complete_t end_io_func, - void *context); +extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len); /* * proc.c @@ -137,125 +103,27 @@ extern void fscache_proc_cleanup(void); * stats.c */ #ifdef CONFIG_FSCACHE_STATS -extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; -extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; - -extern atomic_t fscache_n_op_pend; -extern atomic_t fscache_n_op_run; -extern atomic_t fscache_n_op_enqueue; -extern atomic_t fscache_n_op_deferred_release; -extern atomic_t fscache_n_op_initialised; -extern atomic_t fscache_n_op_release; -extern atomic_t fscache_n_op_gc; -extern atomic_t 
fscache_n_op_cancelled; -extern atomic_t fscache_n_op_rejected; - -extern atomic_t fscache_n_attr_changed; -extern atomic_t fscache_n_attr_changed_ok; -extern atomic_t fscache_n_attr_changed_nobufs; -extern atomic_t fscache_n_attr_changed_nomem; -extern atomic_t fscache_n_attr_changed_calls; - -extern atomic_t fscache_n_allocs; -extern atomic_t fscache_n_allocs_ok; -extern atomic_t fscache_n_allocs_wait; -extern atomic_t fscache_n_allocs_nobufs; -extern atomic_t fscache_n_allocs_intr; -extern atomic_t fscache_n_allocs_object_dead; -extern atomic_t fscache_n_alloc_ops; -extern atomic_t fscache_n_alloc_op_waits; - -extern atomic_t fscache_n_retrievals; -extern atomic_t fscache_n_retrievals_ok; -extern atomic_t fscache_n_retrievals_wait; -extern atomic_t fscache_n_retrievals_nodata; -extern atomic_t fscache_n_retrievals_nobufs; -extern atomic_t fscache_n_retrievals_intr; -extern atomic_t fscache_n_retrievals_nomem; -extern atomic_t fscache_n_retrievals_object_dead; -extern atomic_t fscache_n_retrieval_ops; -extern atomic_t fscache_n_retrieval_op_waits; - -extern atomic_t fscache_n_stores; -extern atomic_t fscache_n_stores_ok; -extern atomic_t fscache_n_stores_again; -extern atomic_t fscache_n_stores_nobufs; -extern atomic_t fscache_n_stores_oom; -extern atomic_t fscache_n_store_ops; -extern atomic_t fscache_n_store_calls; -extern atomic_t fscache_n_store_pages; -extern atomic_t fscache_n_store_radix_deletes; -extern atomic_t fscache_n_store_pages_over_limit; - -extern atomic_t fscache_n_store_vmscan_not_storing; -extern atomic_t fscache_n_store_vmscan_gone; -extern atomic_t fscache_n_store_vmscan_busy; -extern atomic_t fscache_n_store_vmscan_cancelled; -extern atomic_t fscache_n_store_vmscan_wait; - -extern atomic_t fscache_n_marks; -extern atomic_t fscache_n_uncaches; +extern atomic_t fscache_n_volumes; +extern atomic_t fscache_n_volumes_collision; +extern atomic_t fscache_n_volumes_nomem; +extern atomic_t fscache_n_cookies; +extern atomic_t fscache_n_cookies_lru; +extern atomic_t fscache_n_cookies_lru_expired; +extern atomic_t fscache_n_cookies_lru_removed; +extern atomic_t fscache_n_cookies_lru_dropped; extern atomic_t fscache_n_acquires; -extern atomic_t fscache_n_acquires_null; -extern atomic_t fscache_n_acquires_no_cache; extern atomic_t fscache_n_acquires_ok; -extern atomic_t fscache_n_acquires_nobufs; extern atomic_t fscache_n_acquires_oom; extern atomic_t fscache_n_invalidates; -extern atomic_t fscache_n_invalidates_run; - -extern atomic_t fscache_n_updates; -extern atomic_t fscache_n_updates_null; -extern atomic_t fscache_n_updates_run; extern atomic_t fscache_n_relinquishes; -extern atomic_t fscache_n_relinquishes_null; -extern atomic_t fscache_n_relinquishes_waitcrt; extern atomic_t fscache_n_relinquishes_retire; +extern atomic_t fscache_n_relinquishes_dropped; -extern atomic_t fscache_n_cookie_index; -extern atomic_t fscache_n_cookie_data; -extern atomic_t fscache_n_cookie_special; - -extern atomic_t fscache_n_object_alloc; -extern atomic_t fscache_n_object_no_alloc; -extern atomic_t fscache_n_object_lookups; -extern atomic_t fscache_n_object_lookups_negative; -extern atomic_t fscache_n_object_lookups_positive; -extern atomic_t fscache_n_object_lookups_timed_out; -extern atomic_t fscache_n_object_created; -extern atomic_t fscache_n_object_avail; -extern atomic_t fscache_n_object_dead; - -extern atomic_t fscache_n_checkaux_none; -extern atomic_t fscache_n_checkaux_okay; -extern atomic_t fscache_n_checkaux_update; -extern atomic_t fscache_n_checkaux_obsolete; - -extern atomic_t 
fscache_n_cop_alloc_object; -extern atomic_t fscache_n_cop_lookup_object; -extern atomic_t fscache_n_cop_lookup_complete; -extern atomic_t fscache_n_cop_grab_object; -extern atomic_t fscache_n_cop_invalidate_object; -extern atomic_t fscache_n_cop_update_object; -extern atomic_t fscache_n_cop_drop_object; -extern atomic_t fscache_n_cop_put_object; -extern atomic_t fscache_n_cop_sync_cache; -extern atomic_t fscache_n_cop_attr_changed; -extern atomic_t fscache_n_cop_read_or_alloc_page; -extern atomic_t fscache_n_cop_read_or_alloc_pages; -extern atomic_t fscache_n_cop_allocate_page; -extern atomic_t fscache_n_cop_allocate_pages; -extern atomic_t fscache_n_cop_write_page; -extern atomic_t fscache_n_cop_uncache_page; -extern atomic_t fscache_n_cop_dissociate_pages; - -extern atomic_t fscache_n_cache_no_space_reject; -extern atomic_t fscache_n_cache_stale_objects; -extern atomic_t fscache_n_cache_retired_objects; -extern atomic_t fscache_n_cache_culled_objects; +extern atomic_t fscache_n_resizes; +extern atomic_t fscache_n_resizes_null; static inline void fscache_stat(atomic_t *stat) { @@ -278,71 +146,26 @@ int fscache_stats_show(struct seq_file *m, void *v); #endif /* - * raise an event on an object - * - if the event is not masked for that object, then the object is - * queued for attention by the thread pool. - */ -static inline void fscache_raise_event(struct fscache_object *object, - unsigned event) -{ - BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS); -#if 0 - printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n", - object->debug_id, object->event_mask, (1 << event)); -#endif - if (!test_and_set_bit(event, &object->events) && - test_bit(event, &object->event_mask)) - fscache_enqueue_object(object); -} - -/* - * get an extra reference to a netfs retrieval context + * volume.c */ -static inline -void *fscache_get_context(struct fscache_cookie *cookie, void *context) -{ - if (cookie->def->get_context) - cookie->def->get_context(cookie->netfs_data, context); - return context; -} +extern const struct seq_operations fscache_volumes_seq_ops; -/* - * release a reference to a netfs retrieval context - */ -static inline -void fscache_put_context(struct fscache_cookie *cookie, void *context) -{ - if (cookie->def->put_context) - cookie->def->put_context(cookie->netfs_data, context); -} +struct fscache_volume *fscache_get_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); +void fscache_put_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); +bool fscache_begin_volume_access(struct fscache_volume *volume, + struct fscache_cookie *cookie, + enum fscache_access_trace why); +void fscache_create_volume(struct fscache_volume *volume, bool wait); -/* - * Update the auxiliary data on a cookie. - */ -static inline -void fscache_update_aux(struct fscache_cookie *cookie, const void *aux_data) -{ - void *p; - - if (!aux_data) - return; - if (cookie->aux_len <= sizeof(cookie->inline_aux)) - p = cookie->inline_aux; - else - p = cookie->aux; - - if (memcmp(p, aux_data, cookie->aux_len) != 0) { - memcpy(p, aux_data, cookie->aux_len); - set_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags); - } -} /*****************************************************************************/ /* * debug tracing */ #define dbgprintk(FMT, ...) \ - printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) + printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__) #define kleave(FMT, ...) 
dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__) @@ -395,7 +218,7 @@ do { \ #define FSCACHE_DEBUG_CACHE 0 #define FSCACHE_DEBUG_COOKIE 1 -#define FSCACHE_DEBUG_PAGE 2 +#define FSCACHE_DEBUG_OBJECT 2 #define FSCACHE_DEBUG_OPERATION 3 #define FSCACHE_POINT_ENTER 1 diff --git a/fs/fscache/io.c b/fs/fscache/io.c index 8ecc1141802f..7a769ea57720 100644 --- a/fs/fscache/io.c +++ b/fs/fscache/io.c @@ -4,113 +4,323 @@ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ - -#define FSCACHE_DEBUG_LEVEL PAGE -#include <linux/module.h> -#define FSCACHE_USE_NEW_IO_API +#define FSCACHE_DEBUG_LEVEL OPERATION #include <linux/fscache-cache.h> +#include <linux/uio.h> +#include <linux/bvec.h> #include <linux/slab.h> -#include <linux/netfs.h> +#include <linux/uio.h> #include "internal.h" -/* - * Start a cache read operation. - * - we return: - * -ENOMEM - out of memory, some pages may be being read - * -ERESTARTSYS - interrupted, some pages may be being read - * -ENOBUFS - no backing object or space available in which to cache any - * pages not being read - * -ENODATA - no data available in the backing object for some or all of - * the pages - * 0 - dispatched a read on all pages +/** + * fscache_wait_for_operation - Wait for an object become accessible + * @cres: The cache resources for the operation being performed + * @want_state: The minimum state the object must be at + * + * See if the target cache object is at the specified minimum state of + * accessibility yet, and if not, wait for it. */ -int __fscache_begin_read_operation(struct netfs_read_request *rreq, - struct fscache_cookie *cookie) +bool fscache_wait_for_operation(struct netfs_cache_resources *cres, + enum fscache_want_state want_state) { - struct fscache_retrieval *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; + struct fscache_cookie *cookie = fscache_cres_cookie(cres); + enum fscache_cookie_state state; - _enter("rr=%08x", rreq->debug_id); +again: + if (!fscache_cache_is_live(cookie->volume->cache)) { + _leave(" [broken]"); + return false; + } - fscache_stat(&fscache_n_retrievals); + state = fscache_cookie_state(cookie); + _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state); - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; + switch (state) { + case FSCACHE_COOKIE_STATE_CREATING: + case FSCACHE_COOKIE_STATE_INVALIDATING: + if (want_state == FSCACHE_WANT_PARAMS) + goto ready; /* There can be no content */ + fallthrough; + case FSCACHE_COOKIE_STATE_LOOKING_UP: + case FSCACHE_COOKIE_STATE_LRU_DISCARDING: + wait_var_event(&cookie->state, + fscache_cookie_state(cookie) != state); + goto again; - if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { - _leave(" = -ENOBUFS [invalidating]"); - return -ENOBUFS; + case FSCACHE_COOKIE_STATE_ACTIVE: + goto ready; + case FSCACHE_COOKIE_STATE_DROPPED: + case FSCACHE_COOKIE_STATE_RELINQUISHING: + default: + _leave(" [not live]"); + return false; } - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); +ready: + if (!cres->cache_priv2) + return cookie->volume->cache->ops->begin_operation(cres, want_state); + return true; +} +EXPORT_SYMBOL(fscache_wait_for_operation); + +/* + * Begin an I/O operation on the cache, waiting till we reach the right state. + * + * Attaches the resources required to the operation resources record. 
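 *
 * The wait is keyed on the minimum state the caller needs: a request
 * that only wants the cache parameters (FSCACHE_WANT_PARAMS) can go
 * ahead while the backing file is still being created, whereas reads
 * and writes (FSCACHE_WANT_READ, FSCACHE_WANT_WRITE) have to wait for
 * the cookie to become fully active.  For example:
 *
 *	ret = fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
 *				      fscache_access_io_read);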
+ */ +static int fscache_begin_operation(struct netfs_cache_resources *cres, + struct fscache_cookie *cookie, + enum fscache_want_state want_state, + enum fscache_access_trace why) +{ + enum fscache_cookie_state state; + long timeo; + bool once_only = false; - if (fscache_wait_for_deferred_lookup(cookie) < 0) - return -ERESTARTSYS; + cres->ops = NULL; + cres->cache_priv = cookie; + cres->cache_priv2 = NULL; + cres->debug_id = cookie->debug_id; + cres->inval_counter = cookie->inval_counter; - op = fscache_alloc_retrieval(cookie, NULL, NULL, NULL); - if (!op) - return -ENOMEM; - trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi); + if (!fscache_begin_cookie_access(cookie, why)) + return -ENOBUFS; +again: spin_lock(&cookie->lock); - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs_unlock; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); + state = fscache_cookie_state(cookie); + _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state); - __fscache_use_cookie(cookie); - atomic_inc(&object->n_reads); - __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + switch (state) { + case FSCACHE_COOKIE_STATE_LOOKING_UP: + case FSCACHE_COOKIE_STATE_LRU_DISCARDING: + case FSCACHE_COOKIE_STATE_INVALIDATING: + goto wait_for_file_wrangling; + case FSCACHE_COOKIE_STATE_CREATING: + if (want_state == FSCACHE_WANT_PARAMS) + goto ready; /* There can be no content */ + goto wait_for_file_wrangling; + case FSCACHE_COOKIE_STATE_ACTIVE: + goto ready; + case FSCACHE_COOKIE_STATE_DROPPED: + case FSCACHE_COOKIE_STATE_RELINQUISHING: + WARN(1, "Can't use cookie in state %u\n", cookie->state); + goto not_live; + default: + goto not_live; + } - if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock_dec; +ready: spin_unlock(&cookie->lock); + if (!cookie->volume->cache->ops->begin_operation(cres, want_state)) + goto failed; + return 0; - fscache_stat(&fscache_n_retrieval_ops); +wait_for_file_wrangling: + spin_unlock(&cookie->lock); + trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref), + atomic_read(&cookie->n_accesses), + fscache_access_io_wait); + timeo = wait_var_event_timeout(&cookie->state, + fscache_cookie_state(cookie) != state, 20 * HZ); + if (timeo <= 1 && !once_only) { + pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u", + __func__, fscache_cookie_state(cookie), state); + fscache_print_cookie(cookie, 'O'); + once_only = true; + } + goto again; - /* we wait for the operation to become active, and then process it - * *here*, in this thread, and not in the thread pool */ - ret = fscache_wait_for_operation_activation( - object, &op->op, - __fscache_stat(&fscache_n_retrieval_op_waits), - __fscache_stat(&fscache_n_retrievals_object_dead)); - if (ret < 0) - goto error; - - /* ask the cache to honour the operation */ - ret = object->cache->ops->begin_read_operation(rreq, op); - -error: - if (ret == -ENOMEM) - fscache_stat(&fscache_n_retrievals_nomem); - else if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_retrievals_intr); - else if (ret == -ENODATA) - fscache_stat(&fscache_n_retrievals_nodata); - else if (ret < 0) - fscache_stat(&fscache_n_retrievals_nobufs); - else - fscache_stat(&fscache_n_retrievals_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); - return ret; - -nobufs_unlock_dec: - atomic_dec(&object->n_reads); - wake_cookie = __fscache_unuse_cookie(cookie); -nobufs_unlock: +not_live: spin_unlock(&cookie->lock); - fscache_put_retrieval(op); - 
if (wake_cookie) - __fscache_wake_unused_cookie(cookie); -nobufs: - fscache_stat(&fscache_n_retrievals_nobufs); +failed: + cres->cache_priv = NULL; + cres->ops = NULL; + fscache_end_cookie_access(cookie, fscache_access_io_not_live); _leave(" = -ENOBUFS"); return -ENOBUFS; } + +int __fscache_begin_read_operation(struct netfs_cache_resources *cres, + struct fscache_cookie *cookie) +{ + return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS, + fscache_access_io_read); +} EXPORT_SYMBOL(__fscache_begin_read_operation); + +int __fscache_begin_write_operation(struct netfs_cache_resources *cres, + struct fscache_cookie *cookie) +{ + return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS, + fscache_access_io_write); +} +EXPORT_SYMBOL(__fscache_begin_write_operation); + +/** + * fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback + * @page: The page being dirtied + * @cookie: The cookie referring to the cache object + * + * Set the dirty flag on a page and pin an in-use cache object in memory when + * dirtying a page so that writeback can later write to it. This is intended + * to be called from the filesystem's ->set_page_dirty() method. + * + * Returns 1 if PG_dirty was set on the page, 0 otherwise. + */ +int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie) +{ + struct inode *inode = page->mapping->host; + bool need_use = false; + + _enter(""); + + if (!__set_page_dirty_nobuffers(page)) + return 0; + if (!fscache_cookie_valid(cookie)) + return 1; + + if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { + spin_lock(&inode->i_lock); + if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { + inode->i_state |= I_PINNING_FSCACHE_WB; + need_use = true; + } + spin_unlock(&inode->i_lock); + + if (need_use) + fscache_use_cookie(cookie, true); + } + return 1; +} +EXPORT_SYMBOL(fscache_set_page_dirty); + +struct fscache_write_request { + struct netfs_cache_resources cache_resources; + struct address_space *mapping; + loff_t start; + size_t len; + bool set_bits; + netfs_io_terminated_t term_func; + void *term_func_priv; +}; + +void __fscache_clear_page_bits(struct address_space *mapping, + loff_t start, size_t len) +{ + pgoff_t first = start / PAGE_SIZE; + pgoff_t last = (start + len - 1) / PAGE_SIZE; + struct page *page; + + if (len) { + XA_STATE(xas, &mapping->i_pages, first); + + rcu_read_lock(); + xas_for_each(&xas, page, last) { + end_page_fscache(page); + } + rcu_read_unlock(); + } +} +EXPORT_SYMBOL(__fscache_clear_page_bits); + +/* + * Deal with the completion of writing the data to the cache. 
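 *
 * Whatever the outcome, the pages' fscache bits are cleared, the
 * caller's termination function is invoked with the byte count or an
 * error, and the cache resources are released.  A netfs would typically
 * drive the whole sequence from its writepages path with something like
 * the following (sketch; netfs_write_done is a hypothetical callback):
 *
 *	fscache_write_to_cache(cookie, inode->i_mapping, start, len,
 *			       i_size_read(inode),
 *			       netfs_write_done, inode, caching);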
+ */ +static void fscache_wreq_done(void *priv, ssize_t transferred_or_error, + bool was_async) +{ + struct fscache_write_request *wreq = priv; + + fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources), + wreq->mapping, wreq->start, wreq->len, + wreq->set_bits); + + if (wreq->term_func) + wreq->term_func(wreq->term_func_priv, transferred_or_error, + was_async); + fscache_end_operation(&wreq->cache_resources); + kfree(wreq); +} + +void __fscache_write_to_cache(struct fscache_cookie *cookie, + struct address_space *mapping, + loff_t start, size_t len, loff_t i_size, + netfs_io_terminated_t term_func, + void *term_func_priv, + bool cond) +{ + struct fscache_write_request *wreq; + struct netfs_cache_resources *cres; + struct iov_iter iter; + int ret = -ENOBUFS; + + if (len == 0) + goto abandon; + + _enter("%llx,%zx", start, len); + + wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS); + if (!wreq) + goto abandon; + wreq->mapping = mapping; + wreq->start = start; + wreq->len = len; + wreq->set_bits = cond; + wreq->term_func = term_func; + wreq->term_func_priv = term_func_priv; + + cres = &wreq->cache_resources; + if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE, + fscache_access_io_write) < 0) + goto abandon_free; + + ret = cres->ops->prepare_write(cres, &start, &len, i_size, false); + if (ret < 0) + goto abandon_end; + + /* TODO: Consider clearing page bits now for space the write isn't + * covering. This is more complicated than it appears when THPs are + * taken into account. + */ + + iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); + fscache_write(cres, start, &iter, fscache_wreq_done, wreq); + return; + +abandon_end: + return fscache_wreq_done(wreq, ret, false); +abandon_free: + kfree(wreq); +abandon: + fscache_clear_page_bits(cookie, mapping, start, len, cond); + if (term_func) + term_func(term_func_priv, ret, false); +} +EXPORT_SYMBOL(__fscache_write_to_cache); + +/* + * Change the size of a backing object. + */ +void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size) +{ + struct netfs_cache_resources cres; + + trace_fscache_resize(cookie, new_size); + if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE, + fscache_access_io_resize) == 0) { + fscache_stat(&fscache_n_resizes); + set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags); + + /* We cannot defer a resize as we need to do it inside the + * netfs's inode lock so that we're serialised with respect to + * writes. + */ + cookie->volume->cache->ops->resize_cookie(&cres, new_size); + fscache_end_operation(&cres); + } else { + fscache_stat(&fscache_n_resizes_null); + } +} +EXPORT_SYMBOL(__fscache_resize_cookie); diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 4207f98e405f..dad85fd84f6f 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -1,17 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* General filesystem local caching manager * - * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) */ #define FSCACHE_DEBUG_LEVEL CACHE #include <linux/module.h> #include <linux/init.h> -#include <linux/sched.h> -#include <linux/completion.h> -#include <linux/slab.h> -#include <linux/seq_file.h> #define CREATE_TRACE_POINTS #include "internal.h" @@ -19,79 +15,18 @@ MODULE_DESCRIPTION("FS Cache Manager"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); -unsigned fscache_defer_lookup = 1; -module_param_named(defer_lookup, fscache_defer_lookup, uint, - S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(fscache_defer_lookup, - "Defer cookie lookup to background thread"); - -unsigned fscache_defer_create = 1; -module_param_named(defer_create, fscache_defer_create, uint, - S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(fscache_defer_create, - "Defer cookie creation to background thread"); - unsigned fscache_debug; module_param_named(debug, fscache_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(fscache_debug, "FS-Cache debugging mask"); -struct kobject *fscache_root; -struct workqueue_struct *fscache_object_wq; -struct workqueue_struct *fscache_op_wq; - -DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); +EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache); +EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume); +EXPORT_TRACEPOINT_SYMBOL(fscache_access); -/* these values serve as lower bounds, will be adjusted in fscache_init() */ -static unsigned fscache_object_max_active = 4; -static unsigned fscache_op_max_active = 2; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *fscache_sysctl_header; - -static int fscache_max_active_sysctl(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - struct workqueue_struct **wqp = table->extra1; - unsigned int *datap = table->data; - int ret; - - ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret == 0) - workqueue_set_max_active(*wqp, *datap); - return ret; -} - -static struct ctl_table fscache_sysctls[] = { - { - .procname = "object_max_active", - .data = &fscache_object_max_active, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = fscache_max_active_sysctl, - .extra1 = &fscache_object_wq, - }, - { - .procname = "operation_max_active", - .data = &fscache_op_max_active, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = fscache_max_active_sysctl, - .extra1 = &fscache_op_wq, - }, - {} -}; - -static struct ctl_table fscache_sysctls_root[] = { - { - .procname = "fscache", - .mode = 0555, - .child = fscache_sysctls, - }, - {} -}; -#endif +struct workqueue_struct *fscache_wq; +EXPORT_SYMBOL(fscache_wq); /* * Mixing scores (in bits) for (7,20): @@ -118,15 +53,16 @@ static inline unsigned int fold_hash(unsigned long x, unsigned long y) /* * Generate a hash. This is derived from full_name_hash(), but we want to be * sure it is arch independent and that it doesn't change as bits of the - * computed hash value might appear on disk. The caller also guarantees that - * the hashed data will be a series of aligned 32-bit words. + * computed hash value might appear on disk. The caller must guarantee that + * the source data is a multiple of four bytes in size. 
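 *
 * Keys are folded as little-endian 32-bit words, so the result is also
 * stable across architectures of either endianness.  For example
 * (fsid/ino stand in for whatever the caller actually keys on):
 *
 *	__le32 key[2] = { cpu_to_le32(fsid), cpu_to_le32(ino) };
 *	hash = fscache_hash(0, key, sizeof(key));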
*/ -unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n) +unsigned int fscache_hash(unsigned int salt, const void *data, size_t len) { - unsigned int a, x = 0, y = salt; + const __le32 *p = data; + unsigned int a, x = 0, y = salt, n = len / sizeof(__le32); for (; n; n--) { - a = *data++; + a = le32_to_cpu(*p++); HASH_MIX(x, y, a); } return fold_hash(x, y); @@ -137,44 +73,16 @@ unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n) */ static int __init fscache_init(void) { - unsigned int nr_cpus = num_possible_cpus(); - unsigned int cpu; - int ret; - - fscache_object_max_active = - clamp_val(nr_cpus, - fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE); - - ret = -ENOMEM; - fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND, - fscache_object_max_active); - if (!fscache_object_wq) - goto error_object_wq; - - fscache_op_max_active = - clamp_val(fscache_object_max_active / 2, - fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE); + int ret = -ENOMEM; - ret = -ENOMEM; - fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND, - fscache_op_max_active); - if (!fscache_op_wq) - goto error_op_wq; - - for_each_possible_cpu(cpu) - init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); + fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0); + if (!fscache_wq) + goto error_wq; ret = fscache_proc_init(); if (ret < 0) goto error_proc; -#ifdef CONFIG_SYSCTL - ret = -ENOMEM; - fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root); - if (!fscache_sysctl_header) - goto error_sysctl; -#endif - fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar", sizeof(struct fscache_cookie), 0, 0, NULL); @@ -184,26 +92,14 @@ static int __init fscache_init(void) goto error_cookie_jar; } - fscache_root = kobject_create_and_add("fscache", kernel_kobj); - if (!fscache_root) - goto error_kobj; - pr_notice("Loaded\n"); return 0; -error_kobj: - kmem_cache_destroy(fscache_cookie_jar); error_cookie_jar: -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(fscache_sysctl_header); -error_sysctl: -#endif fscache_proc_cleanup(); error_proc: - destroy_workqueue(fscache_op_wq); -error_op_wq: - destroy_workqueue(fscache_object_wq); -error_object_wq: + destroy_workqueue(fscache_wq); +error_wq: return ret; } @@ -216,14 +112,9 @@ static void __exit fscache_exit(void) { _enter(""); - kobject_put(fscache_root); kmem_cache_destroy(fscache_cookie_jar); -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(fscache_sysctl_header); -#endif fscache_proc_cleanup(); - destroy_workqueue(fscache_op_wq); - destroy_workqueue(fscache_object_wq); + destroy_workqueue(fscache_wq); pr_notice("Unloaded\n"); } diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c deleted file mode 100644 index d6bdb7b5e723..000000000000 --- a/fs/fscache/netfs.c +++ /dev/null @@ -1,74 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* FS-Cache netfs (client) registration - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - */ - -#define FSCACHE_DEBUG_LEVEL COOKIE -#include <linux/module.h> -#include <linux/slab.h> -#include "internal.h" - -/* - * register a network filesystem for caching - */ -int __fscache_register_netfs(struct fscache_netfs *netfs) -{ - struct fscache_cookie *candidate, *cookie; - - _enter("{%s}", netfs->name); - - /* allocate a cookie for the primary index */ - candidate = fscache_alloc_cookie(&fscache_fsdef_index, - &fscache_fsdef_netfs_def, - netfs->name, strlen(netfs->name), - &netfs->version, sizeof(netfs->version), - netfs, 0); - if (!candidate) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - candidate->flags = 1 << FSCACHE_COOKIE_ENABLED; - - /* check the netfs type is not already present */ - cookie = fscache_hash_cookie(candidate); - if (!cookie) - goto already_registered; - if (cookie != candidate) { - trace_fscache_cookie(candidate->debug_id, 1, fscache_cookie_discard); - fscache_free_cookie(candidate); - } - - fscache_cookie_get(cookie->parent, fscache_cookie_get_register_netfs); - atomic_inc(&cookie->parent->n_children); - - netfs->primary_index = cookie; - - pr_notice("Netfs '%s' registered for caching\n", netfs->name); - trace_fscache_netfs(netfs); - _leave(" = 0"); - return 0; - -already_registered: - fscache_cookie_put(candidate, fscache_cookie_put_dup_netfs); - _leave(" = -EEXIST"); - return -EEXIST; -} -EXPORT_SYMBOL(__fscache_register_netfs); - -/* - * unregister a network filesystem from the cache - * - all cookies must have been released first - */ -void __fscache_unregister_netfs(struct fscache_netfs *netfs) -{ - _enter("{%s.%u}", netfs->name, netfs->version); - - fscache_relinquish_cookie(netfs->primary_index, NULL, false); - pr_notice("Netfs '%s' unregistered from caching\n", netfs->name); - - _leave(""); -} -EXPORT_SYMBOL(__fscache_unregister_netfs); diff --git a/fs/fscache/object.c b/fs/fscache/object.c deleted file mode 100644 index 6a675652129b..000000000000 --- a/fs/fscache/object.c +++ /dev/null @@ -1,1125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* FS-Cache object state machine handler - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * See Documentation/filesystems/caching/object.rst for a description of the - * object state machine and the in-kernel representations. 
- */ - -#define FSCACHE_DEBUG_LEVEL COOKIE -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/prefetch.h> -#include "internal.h" - -static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int); -static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int); -static const struct fscache_state *fscache_drop_object(struct fscache_object *, int); -static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int); -static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int); -static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int); -static const struct fscache_state *fscache_kill_object(struct fscache_object *, int); -static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int); -static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int); -static const struct fscache_state *fscache_object_available(struct fscache_object *, int); -static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); -static const struct fscache_state *fscache_update_object(struct fscache_object *, int); -static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); - -#define __STATE_NAME(n) fscache_osm_##n -#define STATE(n) (&__STATE_NAME(n)) - -/* - * Define a work state. Work states are execution states. No event processing - * is performed by them. The function attached to a work state returns a - * pointer indicating the next state to which the state machine should - * transition. Returning NO_TRANSIT repeats the current state, but goes back - * to the scheduler first. - */ -#define WORK_STATE(n, sn, f) \ - const struct fscache_state __STATE_NAME(n) = { \ - .name = #n, \ - .short_name = sn, \ - .work = f \ - } - -/* - * Returns from work states. - */ -#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); }) - -#define NO_TRANSIT ((struct fscache_state *)NULL) - -/* - * Define a wait state. Wait states are event processing states. No execution - * is performed by them. Wait states are just tables of "if event X occurs, - * clear it and transition to state Y". The dispatcher returns to the - * scheduler if none of the events in which the wait state has an interest are - * currently pending. - */ -#define WAIT_STATE(n, sn, ...) \ - const struct fscache_state __STATE_NAME(n) = { \ - .name = #n, \ - .short_name = sn, \ - .work = NULL, \ - .transitions = { __VA_ARGS__, { 0, NULL } } \ - } - -#define TRANSIT_TO(state, emask) \ - { .events = (emask), .transit_to = STATE(state) } - -/* - * The object state machine. 
- */
-static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
-static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
-static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
-static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
-static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
-static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
-
-static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
-static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
-
-static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
-static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
-static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
-static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
-
-static WAIT_STATE(WAIT_FOR_INIT, "?INI",
- TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
- TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));
-
-static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
- TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
- TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
- TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
- TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
-
-/*
- * Out-of-band event transition tables. These are for handling unexpected
- * events, such as an I/O error. If an OOB event occurs, the state machine
- * clears and disables the event and forces a transition to the nominated work
- * state (a currently executing work state will complete first).
- *
- * In such a situation, object->state remembers the state the machine should
- * have been in/gone to and returning NO_TRANSIT returns to that.
- */
-static const struct fscache_transition fscache_osm_init_oob[] = {
- TRANSIT_TO(ABORT_INIT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_lookup_oob[] = {
- TRANSIT_TO(LOOKUP_FAILURE,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_run_oob[] = {
- TRANSIT_TO(KILL_OBJECT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static int fscache_get_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static void fscache_put_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static bool fscache_enqueue_dependents(struct fscache_object *, int);
-static void fscache_dequeue_object(struct fscache_object *);
-static void fscache_update_aux_data(struct fscache_object *);
-
-/*
- * we need to notify the parent when an op completes that we had outstanding
- * upon it
- */
-static inline void fscache_done_parent_op(struct fscache_object *object)
-{
- struct fscache_object *parent = object->parent;
-
- _enter("OBJ%x {OBJ%x,%x}",
- object->debug_id, parent->debug_id, parent->n_ops);
-
- spin_lock_nested(&parent->lock, 1);
- parent->n_obj_ops--;
- parent->n_ops--;
- if (parent->n_ops == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
-}
-
-/*
- * Object state machine dispatcher.
- */ -static void fscache_object_sm_dispatcher(struct fscache_object *object) -{ - const struct fscache_transition *t; - const struct fscache_state *state, *new_state; - unsigned long events, event_mask; - bool oob; - int event = -1; - - ASSERT(object != NULL); - - _enter("{OBJ%x,%s,%lx}", - object->debug_id, object->state->name, object->events); - - event_mask = object->event_mask; -restart: - object->event_mask = 0; /* Mask normal event handling */ - state = object->state; -restart_masked: - events = object->events; - - /* Handle any out-of-band events (typically an error) */ - if (events & object->oob_event_mask) { - _debug("{OBJ%x} oob %lx", - object->debug_id, events & object->oob_event_mask); - oob = true; - for (t = object->oob_table; t->events; t++) { - if (events & t->events) { - state = t->transit_to; - ASSERT(state->work != NULL); - event = fls(events & t->events) - 1; - __clear_bit(event, &object->oob_event_mask); - clear_bit(event, &object->events); - goto execute_work_state; - } - } - } - oob = false; - - /* Wait states are just transition tables */ - if (!state->work) { - if (events & event_mask) { - for (t = state->transitions; t->events; t++) { - if (events & t->events) { - new_state = t->transit_to; - event = fls(events & t->events) - 1; - trace_fscache_osm(object, state, - true, false, event); - clear_bit(event, &object->events); - _debug("{OBJ%x} ev %d: %s -> %s", - object->debug_id, event, - state->name, new_state->name); - object->state = state = new_state; - goto execute_work_state; - } - } - - /* The event mask didn't include all the tabled bits */ - BUG(); - } - /* Randomly woke up */ - goto unmask_events; - } - -execute_work_state: - _debug("{OBJ%x} exec %s", object->debug_id, state->name); - - trace_fscache_osm(object, state, false, oob, event); - new_state = state->work(object, event); - event = -1; - if (new_state == NO_TRANSIT) { - _debug("{OBJ%x} %s notrans", object->debug_id, state->name); - if (unlikely(state == STATE(OBJECT_DEAD))) { - _leave(" [dead]"); - return; - } - fscache_enqueue_object(object); - event_mask = object->oob_event_mask; - goto unmask_events; - } - - _debug("{OBJ%x} %s -> %s", - object->debug_id, state->name, new_state->name); - object->state = state = new_state; - - if (state->work) { - if (unlikely(state == STATE(OBJECT_DEAD))) { - _leave(" [dead]"); - return; - } - goto restart_masked; - } - - /* Transited to wait state */ - event_mask = object->oob_event_mask; - for (t = state->transitions; t->events; t++) - event_mask |= t->events; - -unmask_events: - object->event_mask = event_mask; - smp_mb(); - events = object->events; - if (events & event_mask) - goto restart; - _leave(" [msk %lx]", event_mask); -} - -/* - * execute an object - */ -static void fscache_object_work_func(struct work_struct *work) -{ - struct fscache_object *object = - container_of(work, struct fscache_object, work); - - _enter("{OBJ%x}", object->debug_id); - - fscache_object_sm_dispatcher(object); - fscache_put_object(object, fscache_obj_put_work); -} - -/** - * fscache_object_init - Initialise a cache object description - * @object: Object description - * @cookie: Cookie object will be attached to - * @cache: Cache in which backing object will be found - * - * Initialise a cache object description to its basic values. - * - * See Documentation/filesystems/caching/backend-api.rst for a complete - * description. 
- */ -void fscache_object_init(struct fscache_object *object, - struct fscache_cookie *cookie, - struct fscache_cache *cache) -{ - const struct fscache_transition *t; - - atomic_inc(&cache->object_count); - - object->state = STATE(WAIT_FOR_INIT); - object->oob_table = fscache_osm_init_oob; - object->flags = 1 << FSCACHE_OBJECT_IS_LIVE; - spin_lock_init(&object->lock); - INIT_LIST_HEAD(&object->cache_link); - INIT_HLIST_NODE(&object->cookie_link); - INIT_WORK(&object->work, fscache_object_work_func); - INIT_LIST_HEAD(&object->dependents); - INIT_LIST_HEAD(&object->dep_link); - INIT_LIST_HEAD(&object->pending_ops); - object->n_children = 0; - object->n_ops = object->n_in_progress = object->n_exclusive = 0; - object->events = 0; - object->store_limit = 0; - object->store_limit_l = 0; - object->cache = cache; - object->cookie = cookie; - fscache_cookie_get(cookie, fscache_cookie_get_attach_object); - object->parent = NULL; -#ifdef CONFIG_FSCACHE_OBJECT_LIST - RB_CLEAR_NODE(&object->objlist_link); -#endif - - object->oob_event_mask = 0; - for (t = object->oob_table; t->events; t++) - object->oob_event_mask |= t->events; - object->event_mask = object->oob_event_mask; - for (t = object->state->transitions; t->events; t++) - object->event_mask |= t->events; -} -EXPORT_SYMBOL(fscache_object_init); - -/* - * Mark the object as no longer being live, making sure that we synchronise - * against op submission. - */ -static inline void fscache_mark_object_dead(struct fscache_object *object) -{ - spin_lock(&object->lock); - clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); - spin_unlock(&object->lock); -} - -/* - * Abort object initialisation before we start it. - */ -static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, - int event) -{ - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_event_mask = 0; - fscache_dequeue_object(object); - return transit_to(KILL_OBJECT); -} - -/* - * initialise an object - * - check the specified object's parent to see if we can make use of it - * immediately to do a creation - * - we may need to start the process of creating a parent and we need to wait - * for the parent's lookup and creation to complete if it's not there yet - */ -static const struct fscache_state *fscache_initialise_object(struct fscache_object *object, - int event) -{ - struct fscache_object *parent; - bool success; - - _enter("{OBJ%x},%d", object->debug_id, event); - - ASSERT(list_empty(&object->dep_link)); - - parent = object->parent; - if (!parent) { - _leave(" [no parent]"); - return transit_to(DROP_OBJECT); - } - - _debug("parent: %s of:%lx", parent->state->name, parent->flags); - - if (fscache_object_is_dying(parent)) { - _leave(" [bad parent]"); - return transit_to(DROP_OBJECT); - } - - if (fscache_object_is_available(parent)) { - _leave(" [ready]"); - return transit_to(PARENT_READY); - } - - _debug("wait"); - - spin_lock(&parent->lock); - fscache_stat(&fscache_n_cop_grab_object); - success = false; - if (fscache_object_is_live(parent) && - object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) { - list_add(&object->dep_link, &parent->dependents); - success = true; - } - fscache_stat_d(&fscache_n_cop_grab_object); - spin_unlock(&parent->lock); - if (!success) { - _leave(" [grab failed]"); - return transit_to(DROP_OBJECT); - } - - /* fscache_acquire_non_index_cookie() uses this - * to wake the chain up */ - fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD); - _leave(" [wait]"); - return transit_to(WAIT_FOR_PARENT); -} - 
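
The WORK_STATE/WAIT_STATE tables removed above are an instance of a general table-driven design: work states carry a handler that names the next state, wait states carry only event-to-state arcs, and the dispatcher loops between the two. A compilable miniature of the same idea follows; every name in it is invented for illustration and none of it is fscache API.

#include <stdio.h>

struct state;
struct transition {
        unsigned long events;           /* event bits that trigger this arc */
        const struct state *to;
};
struct state {
        const char *name;
        /* Work states have .work set; wait states have transitions only. */
        const struct state *(*work)(int event);
        struct transition transitions[4];
};

static const struct state st_ready;     /* forward (tentative) definition */

static const struct state *do_init(int event)
{
        printf("init (event %d)\n", event);
        return &st_ready;               /* like transit_to(READY) */
}

static const struct state st_init = { "INIT", do_init, { { 0, NULL } } };
static const struct state st_ready = {
        "READY", NULL,
        { { 1UL << 0, &st_init }, { 0, NULL } }, /* event 0 re-runs init */
};

/* Dispatch one pending event against a wait state's transition table;
 * work states simply run (a real dispatcher would loop until it parks
 * in a wait state, as fscache_object_sm_dispatcher() above does). */
static const struct state *dispatch(const struct state *s,
                                    unsigned long *events)
{
        const struct transition *t;

        if (s->work)
                return s->work(-1);
        for (t = s->transitions; t->events; t++)
                if (*events & t->events) {
                        *events &= ~t->events;
                        return t->to->work(0);
                }
        return s;                       /* spurious wakeup: stay put */
}

int main(void)
{
        unsigned long events = 1UL << 0;
        const struct state *s = &st_ready;

        s = dispatch(s, &events);       /* READY consumes event 0, INIT runs */
        printf("now in %s\n", s->name);
        return 0;
}
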
-/* - * Once the parent object is ready, we should kick off our lookup op. - */ -static const struct fscache_state *fscache_parent_ready(struct fscache_object *object, - int event) -{ - struct fscache_object *parent = object->parent; - - _enter("{OBJ%x},%d", object->debug_id, event); - - ASSERT(parent != NULL); - - spin_lock(&parent->lock); - parent->n_ops++; - parent->n_obj_ops++; - spin_unlock(&parent->lock); - - _leave(""); - return transit_to(LOOK_UP_OBJECT); -} - -/* - * look an object up in the cache from which it was allocated - * - we hold an "access lock" on the parent object, so the parent object cannot - * be withdrawn by either party till we've finished - */ -static const struct fscache_state *fscache_look_up_object(struct fscache_object *object, - int event) -{ - struct fscache_cookie *cookie = object->cookie; - struct fscache_object *parent = object->parent; - int ret; - - _enter("{OBJ%x},%d", object->debug_id, event); - - object->oob_table = fscache_osm_lookup_oob; - - ASSERT(parent != NULL); - ASSERTCMP(parent->n_ops, >, 0); - ASSERTCMP(parent->n_obj_ops, >, 0); - - /* make sure the parent is still available */ - ASSERT(fscache_object_is_available(parent)); - - if (fscache_object_is_dying(parent) || - test_bit(FSCACHE_IOERROR, &object->cache->flags) || - !fscache_use_cookie(object)) { - _leave(" [unavailable]"); - return transit_to(LOOKUP_FAILURE); - } - - _debug("LOOKUP \"%s\" in \"%s\"", - cookie->def->name, object->cache->tag->name); - - fscache_stat(&fscache_n_object_lookups); - fscache_stat(&fscache_n_cop_lookup_object); - ret = object->cache->ops->lookup_object(object); - fscache_stat_d(&fscache_n_cop_lookup_object); - - fscache_unuse_cookie(object); - - if (ret == -ETIMEDOUT) { - /* probably stuck behind another object, so move this one to - * the back of the queue */ - fscache_stat(&fscache_n_object_lookups_timed_out); - _leave(" [timeout]"); - return NO_TRANSIT; - } - - if (ret < 0) { - _leave(" [error]"); - return transit_to(LOOKUP_FAILURE); - } - - _leave(" [ok]"); - return transit_to(OBJECT_AVAILABLE); -} - -/** - * fscache_object_lookup_negative - Note negative cookie lookup - * @object: Object pointing to cookie to mark - * - * Note negative lookup, permitting those waiting to read data from an already - * existing backing object to continue as there's no data for them to read. - */ -void fscache_object_lookup_negative(struct fscache_object *object) -{ - struct fscache_cookie *cookie = object->cookie; - - _enter("{OBJ%x,%s}", object->debug_id, object->state->name); - - if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { - fscache_stat(&fscache_n_object_lookups_negative); - - /* Allow write requests to begin stacking up and read requests to begin - * returning ENODATA. - */ - set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); - clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); - - clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); - wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); - } - _leave(""); -} -EXPORT_SYMBOL(fscache_object_lookup_negative); - -/** - * fscache_obtained_object - Note successful object lookup or creation - * @object: Object pointing to cookie to mark - * - * Note successful lookup and/or creation, permitting those waiting to write - * data to a backing object to continue. - * - * Note that after calling this, an object's cookie may be relinquished by the - * netfs, and so must be accessed with object lock held. 
- */
-void fscache_obtained_object(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
-
- /* if we were still looking up, then we must have a positive lookup
- * result, in which case there may be data available */
- if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
- fscache_stat(&fscache_n_object_lookups_positive);
-
- /* We do (presumably) have data */
- clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- /* Allow write requests to begin stacking up and read requests
- * to begin shovelling data.
- */
- clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
- } else {
- fscache_stat(&fscache_n_object_created);
- }
-
- set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
- _leave("");
-}
-EXPORT_SYMBOL(fscache_obtained_object);
-
-/*
- * handle an object that has just become available
- */
-static const struct fscache_state *fscache_object_available(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_table = fscache_osm_run_oob;
-
- spin_lock(&object->lock);
-
- fscache_done_parent_op(object);
- if (object->n_in_progress == 0) {
- if (object->n_ops > 0) {
- ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
- fscache_start_operations(object);
- } else {
- ASSERT(list_empty(&object->pending_ops));
- }
- }
- spin_unlock(&object->lock);
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- fscache_stat(&fscache_n_object_avail);
-
- _leave("");
- return transit_to(JUMPSTART_DEPS);
-}
-
-/*
- * Wake up this object's dependent objects now that we've become available.
- */
-static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
- return NO_TRANSIT; /* Not finished; requeue */
- return transit_to(WAIT_FOR_CMD);
-}
-
-/*
- * Handle lookup or creation failure.
- */
-static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
- int event)
-{
- struct fscache_cookie *cookie;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_event_mask = 0;
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
-
- cookie = object->cookie;
- set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
- fscache_done_parent_op(object);
- return transit_to(KILL_OBJECT);
-}
-
-/*
- * Wait for completion of all active operations on this object and the death of
- * all child objects of this object.
- */
-static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x,%d,%d},%d",
- object->debug_id, object->n_ops, object->n_children, event);
-
- fscache_mark_object_dead(object);
- object->oob_event_mask = 0;
-
- if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
- /* Reject any new read/write ops and abort any that are pending. */
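
A note on the wake-ups used in fscache_object_lookup_negative() and fscache_obtained_object() above: they are the standard bit-wait handshake. In outline, using the real bitops API but with MY_FLAG_LOOKING_UP and obj invented for the sketch:

/* Publisher: lookup has finished, let waiters through.
 * clear_bit_unlock() orders the flag clear after the results it
 * publishes; wake_up_bit() kicks anyone parked in wait_on_bit(). */
clear_bit_unlock(MY_FLAG_LOOKING_UP, &obj->flags);
wake_up_bit(&obj->flags, MY_FLAG_LOOKING_UP);

/* Waiter: block until the bit is cleared (interruptibly here, as
 * fscache_wait_for_deferred_lookup() later in this patch does). */
if (wait_on_bit(&obj->flags, MY_FLAG_LOOKING_UP, TASK_INTERRUPTIBLE))
        return -ERESTARTSYS;
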
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
- }
-
- if (list_empty(&object->dependents) &&
- object->n_ops == 0 &&
- object->n_children == 0)
- return transit_to(DROP_OBJECT);
-
- if (object->n_in_progress == 0) {
- spin_lock(&object->lock);
- if (object->n_ops > 0 && object->n_in_progress == 0)
- fscache_start_operations(object);
- spin_unlock(&object->lock);
- }
-
- if (!list_empty(&object->dependents))
- return transit_to(KILL_DEPENDENTS);
-
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Kill dependent objects.
- */
-static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
- return NO_TRANSIT; /* Not finished */
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Drop an object's attachments
- */
-static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent = object->parent;
- struct fscache_cookie *cookie = object->cookie;
- struct fscache_cache *cache = object->cache;
- bool awaken = false;
-
- _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
-
- ASSERT(cookie != NULL);
- ASSERT(!hlist_unhashed(&object->cookie_link));
-
- if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
- _debug("final update");
- fscache_update_aux_data(object);
- }
-
- /* Make sure the cookie no longer points here and that the netfs isn't
- * waiting for us.
- */
- spin_lock(&cookie->lock);
- hlist_del_init(&object->cookie_link);
- if (hlist_empty(&cookie->backing_objects) &&
- test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- awaken = true;
- spin_unlock(&cookie->lock);
-
- if (awaken)
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
-
- /* Prevent a race with our last child, which has to signal EV_CLEARED
- * before dropping our spinlock.
- */
- spin_lock(&object->lock);
- spin_unlock(&object->lock);
-
- /* Discard from the cache's collection of objects */
- spin_lock(&cache->object_list_lock);
- list_del_init(&object->cache_link);
- spin_unlock(&cache->object_list_lock);
-
- fscache_stat(&fscache_n_cop_drop_object);
- cache->ops->drop_object(object);
- fscache_stat_d(&fscache_n_cop_drop_object);
-
- /* The parent object wants to know when all its dependents have gone */
- if (parent) {
- _debug("release parent OBJ%x {%d}",
- parent->debug_id, parent->n_children);
-
- spin_lock(&parent->lock);
- parent->n_children--;
- if (parent->n_children == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
- object->parent = NULL;
- }
-
- /* this just shifts the object release to the work processor */
- fscache_put_object(object, fscache_obj_put_drop_obj);
- fscache_stat(&fscache_n_object_dead);
-
- _leave("");
- return transit_to(OBJECT_DEAD);
-}
-
-/*
- * get a ref on an object
- */
-static int fscache_get_object(struct fscache_object *object,
- enum fscache_obj_ref_trace why)
-{
- int ret;
-
- fscache_stat(&fscache_n_cop_grab_object);
- ret = object->cache->ops->grab_object(object, why) ?
0 : -EAGAIN; - fscache_stat_d(&fscache_n_cop_grab_object); - return ret; -} - -/* - * Discard a ref on an object - */ -static void fscache_put_object(struct fscache_object *object, - enum fscache_obj_ref_trace why) -{ - fscache_stat(&fscache_n_cop_put_object); - object->cache->ops->put_object(object, why); - fscache_stat_d(&fscache_n_cop_put_object); -} - -/** - * fscache_object_destroy - Note that a cache object is about to be destroyed - * @object: The object to be destroyed - * - * Note the imminent destruction and deallocation of a cache object record. - */ -void fscache_object_destroy(struct fscache_object *object) -{ - /* We can get rid of the cookie now */ - fscache_cookie_put(object->cookie, fscache_cookie_put_object); - object->cookie = NULL; -} -EXPORT_SYMBOL(fscache_object_destroy); - -/* - * enqueue an object for metadata-type processing - */ -void fscache_enqueue_object(struct fscache_object *object) -{ - _enter("{OBJ%x}", object->debug_id); - - if (fscache_get_object(object, fscache_obj_get_queue) >= 0) { - wait_queue_head_t *cong_wq = - &get_cpu_var(fscache_object_cong_wait); - - if (queue_work(fscache_object_wq, &object->work)) { - if (fscache_object_congested()) - wake_up(cong_wq); - } else - fscache_put_object(object, fscache_obj_put_queue); - - put_cpu_var(fscache_object_cong_wait); - } -} - -/** - * fscache_object_sleep_till_congested - Sleep until object wq is congested - * @timeoutp: Scheduler sleep timeout - * - * Allow an object handler to sleep until the object workqueue is congested. - * - * The caller must set up a wake up event before calling this and must have set - * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own - * condition before calling this function as no test is made here. - * - * %true is returned if the object wq is congested, %false otherwise. - */ -bool fscache_object_sleep_till_congested(signed long *timeoutp) -{ - wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait); - DEFINE_WAIT(wait); - - if (fscache_object_congested()) - return true; - - add_wait_queue_exclusive(cong_wq, &wait); - if (!fscache_object_congested()) - *timeoutp = schedule_timeout(*timeoutp); - finish_wait(cong_wq, &wait); - - return fscache_object_congested(); -} -EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested); - -/* - * Enqueue the dependents of an object for metadata-type processing. - * - * If we don't manage to finish the list before the scheduler wants to run - * again then return false immediately. We return true if the list was - * cleared. 
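
fscache_enqueue_object() above follows a common kernel idiom: take a reference on behalf of the work item before queuing, and drop it again if queue_work() reports the item was already pending (the earlier queuer's reference then stands). Reduced to its core, with the example_* names standing in for any refcounted object:

static void example_enqueue(struct example_object *obj)
{
        /* May fail if the object is already being torn down. */
        if (!example_get(obj))
                return;

        /* queue_work() returns false if the work item was already
         * queued, in which case that queuer's reference suffices. */
        if (!queue_work(example_wq, &obj->work))
                example_put(obj);
}
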
- */ -static bool fscache_enqueue_dependents(struct fscache_object *object, int event) -{ - struct fscache_object *dep; - bool ret = true; - - _enter("{OBJ%x}", object->debug_id); - - if (list_empty(&object->dependents)) - return true; - - spin_lock(&object->lock); - - while (!list_empty(&object->dependents)) { - dep = list_entry(object->dependents.next, - struct fscache_object, dep_link); - list_del_init(&dep->dep_link); - - fscache_raise_event(dep, event); - fscache_put_object(dep, fscache_obj_put_enq_dep); - - if (!list_empty(&object->dependents) && need_resched()) { - ret = false; - break; - } - } - - spin_unlock(&object->lock); - return ret; -} - -/* - * remove an object from whatever queue it's waiting on - */ -static void fscache_dequeue_object(struct fscache_object *object) -{ - _enter("{OBJ%x}", object->debug_id); - - if (!list_empty(&object->dep_link)) { - spin_lock(&object->parent->lock); - list_del_init(&object->dep_link); - spin_unlock(&object->parent->lock); - } - - _leave(""); -} - -/** - * fscache_check_aux - Ask the netfs whether an object on disk is still valid - * @object: The object to ask about - * @data: The auxiliary data for the object - * @datalen: The size of the auxiliary data - * @object_size: The size of the object according to the server. - * - * This function consults the netfs about the coherency state of an object. - * The caller must be holding a ref on cookie->n_active (held by - * fscache_look_up_object() on behalf of the cache backend during object lookup - * and creation). - */ -enum fscache_checkaux fscache_check_aux(struct fscache_object *object, - const void *data, uint16_t datalen, - loff_t object_size) -{ - enum fscache_checkaux result; - - if (!object->cookie->def->check_aux) { - fscache_stat(&fscache_n_checkaux_none); - return FSCACHE_CHECKAUX_OKAY; - } - - result = object->cookie->def->check_aux(object->cookie->netfs_data, - data, datalen, object_size); - switch (result) { - /* entry okay as is */ - case FSCACHE_CHECKAUX_OKAY: - fscache_stat(&fscache_n_checkaux_okay); - break; - - /* entry requires update */ - case FSCACHE_CHECKAUX_NEEDS_UPDATE: - fscache_stat(&fscache_n_checkaux_update); - break; - - /* entry requires deletion */ - case FSCACHE_CHECKAUX_OBSOLETE: - fscache_stat(&fscache_n_checkaux_obsolete); - break; - - default: - BUG(); - } - - return result; -} -EXPORT_SYMBOL(fscache_check_aux); - -/* - * Asynchronously invalidate an object. - */ -static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object, - int event) -{ - struct fscache_operation *op; - struct fscache_cookie *cookie = object->cookie; - - _enter("{OBJ%x},%d", object->debug_id, event); - - /* We're going to need the cookie. If the cookie is not available then - * retire the object instead. - */ - if (!fscache_use_cookie(object)) { - ASSERT(radix_tree_empty(&object->cookie->stores)); - set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); - _leave(" [no cookie]"); - return transit_to(KILL_OBJECT); - } - - /* Reject any new read/write ops and abort any that are pending. 
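
fscache_check_aux() above calls back into the netfs through the old fscache_cookie_def. A netfs-side callback of that shape looked roughly like the sketch below; the examplefs structures are invented, but the signature matches the old check_aux hook.

struct examplefs_inode {
        u64     remote_mtime_sec;       /* server's view, kept by the netfs */
        u64     remote_size;
};

struct examplefs_auxdata {
        __be64  mtime_sec;              /* stored alongside the cached object */
        __be64  size;
};

static enum fscache_checkaux examplefs_check_aux(void *cookie_netfs_data,
                                                 const void *data,
                                                 uint16_t datalen,
                                                 loff_t object_size)
{
        struct examplefs_inode *ei = cookie_netfs_data;
        const struct examplefs_auxdata *aux = data;

        if (datalen != sizeof(*aux))
                return FSCACHE_CHECKAUX_OBSOLETE;

        /* Coherency: the cached object must match the server's view. */
        if (be64_to_cpu(aux->mtime_sec) != ei->remote_mtime_sec ||
            be64_to_cpu(aux->size) != ei->remote_size ||
            object_size != ei->remote_size)
                return FSCACHE_CHECKAUX_OBSOLETE;

        return FSCACHE_CHECKAUX_OKAY;
}
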
- */
- fscache_invalidate_writes(cookie);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
-
- /* Now we have to wait for in-progress reads and writes */
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op)
- goto nomem;
-
- fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
- NULL, NULL);
- op->flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_EXCLUSIVE) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);
-
- spin_lock(&cookie->lock);
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto submit_op_failed;
- spin_unlock(&cookie->lock);
- fscache_put_operation(op);
-
- /* Once we've completed the invalidation, we know there will be no data
- * stored in the cache and thus we can reinstate the data-check-skip
- * optimisation.
- */
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
- /* We can allow read and write requests to come in once again. They'll
- * queue up behind our exclusive invalidation operation.
- */
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- _leave(" [ok]");
- return transit_to(UPDATE_OBJECT);
-
-nomem:
- fscache_mark_object_dead(object);
- fscache_unuse_cookie(object);
- _leave(" [ENOMEM]");
- return transit_to(KILL_OBJECT);
-
-submit_op_failed:
- fscache_mark_object_dead(object);
- spin_unlock(&cookie->lock);
- fscache_unuse_cookie(object);
- kfree(op);
- _leave(" [EIO]");
- return transit_to(KILL_OBJECT);
-}
-
-static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
- int event)
-{
- const struct fscache_state *s;
-
- fscache_stat(&fscache_n_invalidates_run);
- fscache_stat(&fscache_n_cop_invalidate_object);
- s = _fscache_invalidate_object(object, event);
- fscache_stat_d(&fscache_n_cop_invalidate_object);
- return s;
-}
-
-/*
- * Update auxiliary data.
- */
-static void fscache_update_aux_data(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_updates_run);
- fscache_stat(&fscache_n_cop_update_object);
- object->cache->ops->update_object(object);
- fscache_stat_d(&fscache_n_cop_update_object);
-}
-
-/*
- * Asynchronously update an object.
- */
-static const struct fscache_state *fscache_update_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- fscache_update_aux_data(object);
-
- _leave("");
- return transit_to(WAIT_FOR_CMD);
-}
-
-/**
- * fscache_object_retrying_stale - Note retrying stale object
- * @object: The object that will be retried
- *
- * Note that an object lookup found an on-disk object that was adjudged to be
- * stale and has been deleted. The lookup will be retried.
- */
-void fscache_object_retrying_stale(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_cache_stale_objects);
-}
-EXPORT_SYMBOL(fscache_object_retrying_stale);
-
-/**
- * fscache_object_mark_killed - Note that an object was killed
- * @object: The object that was culled
- * @why: The reason the object was killed.
- *
- * Note that an object was killed and account the reason. If the cache had
- * already marked the object as killed, an error is logged instead.
- */ -void fscache_object_mark_killed(struct fscache_object *object, - enum fscache_why_object_killed why) -{ - if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) { - pr_err("Error: Object already killed by cache [%s]\n", - object->cache->identifier); - return; - } - - switch (why) { - case FSCACHE_OBJECT_NO_SPACE: - fscache_stat(&fscache_n_cache_no_space_reject); - break; - case FSCACHE_OBJECT_IS_STALE: - fscache_stat(&fscache_n_cache_stale_objects); - break; - case FSCACHE_OBJECT_WAS_RETIRED: - fscache_stat(&fscache_n_cache_retired_objects); - break; - case FSCACHE_OBJECT_WAS_CULLED: - fscache_stat(&fscache_n_cache_culled_objects); - break; - } -} -EXPORT_SYMBOL(fscache_object_mark_killed); - -/* - * The object is dead. We can get here if an object gets queued by an event - * that would lead to its death (such as EV_KILL) when the dispatcher is - * already running (and so can be requeued) but hasn't yet cleared the event - * mask. - */ -static const struct fscache_state *fscache_object_dead(struct fscache_object *object, - int event) -{ - if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, - &object->flags)) - return NO_TRANSIT; - - WARN(true, "FS-Cache object redispatched after death"); - return NO_TRANSIT; -} diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c deleted file mode 100644 index e002cdfaf3cc..000000000000 --- a/fs/fscache/operation.c +++ /dev/null @@ -1,633 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* FS-Cache worker operation management routines - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * See Documentation/filesystems/caching/operations.rst - */ - -#define FSCACHE_DEBUG_LEVEL OPERATION -#include <linux/module.h> -#include <linux/seq_file.h> -#include <linux/slab.h> -#include "internal.h" - -atomic_t fscache_op_debug_id; -EXPORT_SYMBOL(fscache_op_debug_id); - -static void fscache_operation_dummy_cancel(struct fscache_operation *op) -{ -} - -/** - * fscache_operation_init - Do basic initialisation of an operation - * @cookie: The cookie to operate on - * @op: The operation to initialise - * @processor: The function to perform the operation - * @cancel: A function to handle operation cancellation - * @release: The release function to assign - * - * Do basic initialisation of an operation. The caller must still set flags, - * object and processor if needed. - */ -void fscache_operation_init(struct fscache_cookie *cookie, - struct fscache_operation *op, - fscache_operation_processor_t processor, - fscache_operation_cancel_t cancel, - fscache_operation_release_t release) -{ - INIT_WORK(&op->work, fscache_op_work_func); - atomic_set(&op->usage, 1); - op->state = FSCACHE_OP_ST_INITIALISED; - op->debug_id = atomic_inc_return(&fscache_op_debug_id); - op->processor = processor; - op->cancel = cancel ?: fscache_operation_dummy_cancel; - op->release = release; - INIT_LIST_HEAD(&op->pend_link); - fscache_stat(&fscache_n_op_initialised); - trace_fscache_op(cookie, op, fscache_op_init); -} -EXPORT_SYMBOL(fscache_operation_init); - -/** - * fscache_enqueue_operation - Enqueue an operation for processing - * @op: The operation to enqueue - * - * Enqueue an operation for processing by the FS-Cache thread pool. - * - * This will get its own ref on the object. 
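
Pieced together from fscache_operation_init() above and the completion and cancellation paths further down in this file, the operation life cycle the removed code implemented was as follows. The state names track FSCACHE_OP_ST_*; this enum is a reader's summary, not the original definition:

enum {
        OP_ST_INITIALISED,      /* fscache_operation_init() */
        OP_ST_PENDING,          /* submitted; parked on object->pending_ops */
        OP_ST_IN_PROGRESS,      /* fscache_run_op() started it */
        OP_ST_COMPLETE,         /* fscache_op_complete(op, false) */
        OP_ST_CANCELLED,        /* fscache_cancel_op() or op_complete(op, true) */
        OP_ST_DEAD,             /* last ref dropped in fscache_put_operation() */
};
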
- */ -void fscache_enqueue_operation(struct fscache_operation *op) -{ - struct fscache_cookie *cookie = op->object->cookie; - - _enter("{OBJ%x OP%x,%u}", - op->object->debug_id, op->debug_id, atomic_read(&op->usage)); - - ASSERT(list_empty(&op->pend_link)); - ASSERT(op->processor != NULL); - ASSERT(fscache_object_is_available(op->object)); - ASSERTCMP(atomic_read(&op->usage), >, 0); - ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, - op->state, ==, FSCACHE_OP_ST_CANCELLED); - - fscache_stat(&fscache_n_op_enqueue); - switch (op->flags & FSCACHE_OP_TYPE) { - case FSCACHE_OP_ASYNC: - trace_fscache_op(cookie, op, fscache_op_enqueue_async); - _debug("queue async"); - atomic_inc(&op->usage); - if (!queue_work(fscache_op_wq, &op->work)) - fscache_put_operation(op); - break; - case FSCACHE_OP_MYTHREAD: - trace_fscache_op(cookie, op, fscache_op_enqueue_mythread); - _debug("queue for caller's attention"); - break; - default: - pr_err("Unexpected op type %lx", op->flags); - BUG(); - break; - } -} -EXPORT_SYMBOL(fscache_enqueue_operation); - -/* - * start an op running - */ -static void fscache_run_op(struct fscache_object *object, - struct fscache_operation *op) -{ - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); - - op->state = FSCACHE_OP_ST_IN_PROGRESS; - object->n_in_progress++; - if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) - wake_up_bit(&op->flags, FSCACHE_OP_WAITING); - if (op->processor) - fscache_enqueue_operation(op); - else - trace_fscache_op(object->cookie, op, fscache_op_run); - fscache_stat(&fscache_n_op_run); -} - -/* - * report an unexpected submission - */ -static void fscache_report_unexpected_submission(struct fscache_object *object, - struct fscache_operation *op, - const struct fscache_state *ostate) -{ - static bool once_only; - struct fscache_operation *p; - unsigned n; - - if (once_only) - return; - once_only = true; - - kdebug("unexpected submission OP%x [OBJ%x %s]", - op->debug_id, object->debug_id, object->state->name); - kdebug("objstate=%s [%s]", object->state->name, ostate->name); - kdebug("objflags=%lx", object->flags); - kdebug("objevent=%lx [%lx]", object->events, object->event_mask); - kdebug("ops=%u inp=%u exc=%u", - object->n_ops, object->n_in_progress, object->n_exclusive); - - if (!list_empty(&object->pending_ops)) { - n = 0; - list_for_each_entry(p, &object->pending_ops, pend_link) { - ASSERTCMP(p->object, ==, object); - kdebug("%p %p", op->processor, op->release); - n++; - } - - kdebug("n=%u", n); - } - - dump_stack(); -} - -/* - * submit an exclusive operation for an object - * - other ops are excluded from running simultaneously with this one - * - this gets any extra refs it needs on an op - */ -int fscache_submit_exclusive_op(struct fscache_object *object, - struct fscache_operation *op) -{ - const struct fscache_state *ostate; - unsigned long flags; - int ret; - - _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); - - trace_fscache_op(object->cookie, op, fscache_op_submit_ex); - - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED); - ASSERTCMP(atomic_read(&op->usage), >, 0); - - spin_lock(&object->lock); - ASSERTCMP(object->n_ops, >=, object->n_in_progress); - ASSERTCMP(object->n_ops, >=, object->n_exclusive); - ASSERT(list_empty(&op->pend_link)); - - ostate = object->state; - smp_rmb(); - - op->state = FSCACHE_OP_ST_PENDING; - flags = READ_ONCE(object->flags); - if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { - fscache_stat(&fscache_n_op_rejected); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - 
} else if (unlikely(fscache_cache_is_broken(object))) { - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -EIO; - } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { - op->object = object; - object->n_ops++; - object->n_exclusive++; /* reads and writes must wait */ - - if (object->n_in_progress > 0) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); - } else if (!list_empty(&object->pending_ops)) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); - fscache_start_operations(object); - } else { - ASSERTCMP(object->n_in_progress, ==, 0); - fscache_run_op(object, op); - } - - /* need to issue a new write op after this */ - clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); - ret = 0; - } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { - op->object = object; - object->n_ops++; - object->n_exclusive++; /* reads and writes must wait */ - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); - ret = 0; - } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - } else { - fscache_report_unexpected_submission(object, op, ostate); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - } - - spin_unlock(&object->lock); - return ret; -} - -/* - * submit an operation for an object - * - objects may be submitted only in the following states: - * - during object creation (write ops may be submitted) - * - whilst the object is active - * - after an I/O error incurred in one of the two above states (op rejected) - * - this gets any extra refs it needs on an op - */ -int fscache_submit_op(struct fscache_object *object, - struct fscache_operation *op) -{ - const struct fscache_state *ostate; - unsigned long flags; - int ret; - - _enter("{OBJ%x OP%x},{%u}", - object->debug_id, op->debug_id, atomic_read(&op->usage)); - - trace_fscache_op(object->cookie, op, fscache_op_submit); - - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED); - ASSERTCMP(atomic_read(&op->usage), >, 0); - - spin_lock(&object->lock); - ASSERTCMP(object->n_ops, >=, object->n_in_progress); - ASSERTCMP(object->n_ops, >=, object->n_exclusive); - ASSERT(list_empty(&op->pend_link)); - - ostate = object->state; - smp_rmb(); - - op->state = FSCACHE_OP_ST_PENDING; - flags = READ_ONCE(object->flags); - if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { - fscache_stat(&fscache_n_op_rejected); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - } else if (unlikely(fscache_cache_is_broken(object))) { - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -EIO; - } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { - op->object = object; - object->n_ops++; - - if (object->n_exclusive > 0) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); - } else if (!list_empty(&object->pending_ops)) { - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); - fscache_start_operations(object); - } else { - ASSERTCMP(object->n_exclusive, ==, 0); - fscache_run_op(object, op); - } - ret = 0; - } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { - op->object = object; - object->n_ops++; - atomic_inc(&op->usage); - list_add_tail(&op->pend_link, &object->pending_ops); - 
fscache_stat(&fscache_n_op_pend); - ret = 0; - } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - } else { - fscache_report_unexpected_submission(object, op, ostate); - ASSERT(!fscache_object_is_active(object)); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - ret = -ENOBUFS; - } - - spin_unlock(&object->lock); - return ret; -} - -/* - * queue an object for withdrawal on error, aborting all following asynchronous - * operations - */ -void fscache_abort_object(struct fscache_object *object) -{ - _enter("{OBJ%x}", object->debug_id); - - fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); -} - -/* - * Jump start the operation processing on an object. The caller must hold - * object->lock. - */ -void fscache_start_operations(struct fscache_object *object) -{ - struct fscache_operation *op; - bool stop = false; - - while (!list_empty(&object->pending_ops) && !stop) { - op = list_entry(object->pending_ops.next, - struct fscache_operation, pend_link); - - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { - if (object->n_in_progress > 0) - break; - stop = true; - } - list_del_init(&op->pend_link); - fscache_run_op(object, op); - - /* the pending queue was holding a ref on the object */ - fscache_put_operation(op); - } - - ASSERTCMP(object->n_in_progress, <=, object->n_ops); - - _debug("woke %d ops on OBJ%x", - object->n_in_progress, object->debug_id); -} - -/* - * cancel an operation that's pending on an object - */ -int fscache_cancel_op(struct fscache_operation *op, - bool cancel_in_progress_op) -{ - struct fscache_object *object = op->object; - bool put = false; - int ret; - - _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); - - trace_fscache_op(object->cookie, op, fscache_op_cancel); - - ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING); - ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED); - ASSERTCMP(atomic_read(&op->usage), >, 0); - - spin_lock(&object->lock); - - ret = -EBUSY; - if (op->state == FSCACHE_OP_ST_PENDING) { - ASSERT(!list_empty(&op->pend_link)); - list_del_init(&op->pend_link); - put = true; - - fscache_stat(&fscache_n_op_cancelled); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) - object->n_exclusive--; - if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) - wake_up_bit(&op->flags, FSCACHE_OP_WAITING); - ret = 0; - } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) { - ASSERTCMP(object->n_in_progress, >, 0); - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) - object->n_exclusive--; - object->n_in_progress--; - if (object->n_in_progress == 0) - fscache_start_operations(object); - - fscache_stat(&fscache_n_op_cancelled); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) - object->n_exclusive--; - if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) - wake_up_bit(&op->flags, FSCACHE_OP_WAITING); - ret = 0; - } - - if (put) - fscache_put_operation(op); - spin_unlock(&object->lock); - _leave(" = %d", ret); - return ret; -} - -/* - * Cancel all pending operations on an object - */ -void fscache_cancel_all_ops(struct fscache_object *object) -{ - struct fscache_operation *op; - - _enter("OBJ%x", object->debug_id); - - spin_lock(&object->lock); - - while (!list_empty(&object->pending_ops)) { - op = list_entry(object->pending_ops.next, - struct fscache_operation, pend_link); - fscache_stat(&fscache_n_op_cancelled); - 
list_del_init(&op->pend_link); - - trace_fscache_op(object->cookie, op, fscache_op_cancel_all); - - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); - op->cancel(op); - op->state = FSCACHE_OP_ST_CANCELLED; - - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) - object->n_exclusive--; - if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) - wake_up_bit(&op->flags, FSCACHE_OP_WAITING); - fscache_put_operation(op); - cond_resched_lock(&object->lock); - } - - spin_unlock(&object->lock); - _leave(""); -} - -/* - * Record the completion or cancellation of an in-progress operation. - */ -void fscache_op_complete(struct fscache_operation *op, bool cancelled) -{ - struct fscache_object *object = op->object; - - _enter("OBJ%x", object->debug_id); - - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); - ASSERTCMP(object->n_in_progress, >, 0); - ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags), - object->n_exclusive, >, 0); - ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags), - object->n_in_progress, ==, 1); - - spin_lock(&object->lock); - - if (!cancelled) { - trace_fscache_op(object->cookie, op, fscache_op_completed); - op->state = FSCACHE_OP_ST_COMPLETE; - } else { - op->cancel(op); - trace_fscache_op(object->cookie, op, fscache_op_cancelled); - op->state = FSCACHE_OP_ST_CANCELLED; - } - - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) - object->n_exclusive--; - object->n_in_progress--; - if (object->n_in_progress == 0) - fscache_start_operations(object); - - spin_unlock(&object->lock); - _leave(""); -} -EXPORT_SYMBOL(fscache_op_complete); - -/* - * release an operation - * - queues pending ops if this is the last in-progress op - */ -void fscache_put_operation(struct fscache_operation *op) -{ - struct fscache_object *object; - struct fscache_cache *cache; - - _enter("{OBJ%x OP%x,%d}", - op->object ? op->object->debug_id : 0, - op->debug_id, atomic_read(&op->usage)); - - ASSERTCMP(atomic_read(&op->usage), >, 0); - - if (!atomic_dec_and_test(&op->usage)) - return; - - trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put); - - _debug("PUT OP"); - ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED && - op->state != FSCACHE_OP_ST_COMPLETE, - op->state, ==, FSCACHE_OP_ST_CANCELLED); - - fscache_stat(&fscache_n_op_release); - - if (op->release) { - op->release(op); - op->release = NULL; - } - op->state = FSCACHE_OP_ST_DEAD; - - object = op->object; - if (likely(object)) { - if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) - atomic_dec(&object->n_reads); - if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) - fscache_unuse_cookie(object); - - /* now... 
we may get called with the object spinlock held, so we - * complete the cleanup here only if we can immediately acquire the - * lock, and defer it otherwise */ - if (!spin_trylock(&object->lock)) { - _debug("defer put"); - fscache_stat(&fscache_n_op_deferred_release); - - cache = object->cache; - spin_lock(&cache->op_gc_list_lock); - list_add_tail(&op->pend_link, &cache->op_gc_list); - spin_unlock(&cache->op_gc_list_lock); - schedule_work(&cache->op_gc); - _leave(" [defer]"); - return; - } - - ASSERTCMP(object->n_ops, >, 0); - object->n_ops--; - if (object->n_ops == 0) - fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); - - spin_unlock(&object->lock); - } - - kfree(op); - _leave(" [done]"); -} -EXPORT_SYMBOL(fscache_put_operation); - -/* - * garbage collect operations that have had their release deferred - */ -void fscache_operation_gc(struct work_struct *work) -{ - struct fscache_operation *op; - struct fscache_object *object; - struct fscache_cache *cache = - container_of(work, struct fscache_cache, op_gc); - int count = 0; - - _enter(""); - - do { - spin_lock(&cache->op_gc_list_lock); - if (list_empty(&cache->op_gc_list)) { - spin_unlock(&cache->op_gc_list_lock); - break; - } - - op = list_entry(cache->op_gc_list.next, - struct fscache_operation, pend_link); - list_del(&op->pend_link); - spin_unlock(&cache->op_gc_list_lock); - - object = op->object; - trace_fscache_op(object->cookie, op, fscache_op_gc); - - spin_lock(&object->lock); - - _debug("GC DEFERRED REL OBJ%x OP%x", - object->debug_id, op->debug_id); - fscache_stat(&fscache_n_op_gc); - - ASSERTCMP(atomic_read(&op->usage), ==, 0); - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD); - - ASSERTCMP(object->n_ops, >, 0); - object->n_ops--; - if (object->n_ops == 0) - fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); - - spin_unlock(&object->lock); - kfree(op); - - } while (count++ < 20); - - if (!list_empty(&cache->op_gc_list)) - schedule_work(&cache->op_gc); - - _leave(""); -} - -/* - * execute an operation using fs_op_wq to provide processing context - - * the caller holds a ref to this object, so we don't need to hold one - */ -void fscache_op_work_func(struct work_struct *work) -{ - struct fscache_operation *op = - container_of(work, struct fscache_operation, work); - - _enter("{OBJ%x OP%x,%d}", - op->object->debug_id, op->debug_id, atomic_read(&op->usage)); - - trace_fscache_op(op->object->cookie, op, fscache_op_work); - - ASSERT(op->processor != NULL); - op->processor(op); - fscache_put_operation(op); - - _leave(""); -} diff --git a/fs/fscache/page.c b/fs/fscache/page.c deleted file mode 100644 index 27df94ef0e0b..000000000000 --- a/fs/fscache/page.c +++ /dev/null @@ -1,1242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Cache page management and data I/O routines - * - * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - */ - -#define FSCACHE_DEBUG_LEVEL PAGE -#include <linux/module.h> -#include <linux/fscache-cache.h> -#include <linux/buffer_head.h> -#include <linux/pagevec.h> -#include <linux/slab.h> -#include "internal.h" - -/* - * check to see if a page is being written to the cache - */ -bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page) -{ - void *val; - - rcu_read_lock(); - val = radix_tree_lookup(&cookie->stores, page->index); - rcu_read_unlock(); - trace_fscache_check_page(cookie, page, val, 0); - - return val != NULL; -} -EXPORT_SYMBOL(__fscache_check_page_write); - -/* - * wait for a page to finish being written to the cache - */ -void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page) -{ - wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); - - trace_fscache_page(cookie, page, fscache_page_write_wait); - - wait_event(*wq, !__fscache_check_page_write(cookie, page)); -} -EXPORT_SYMBOL(__fscache_wait_on_page_write); - -/* - * wait for a page to finish being written to the cache. Put a timeout here - * since we might be called recursively via parent fs. - */ -static -bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page) -{ - wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); - - return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), - HZ); -} - -/* - * decide whether a page can be released, possibly by cancelling a store to it - * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged - */ -bool __fscache_maybe_release_page(struct fscache_cookie *cookie, - struct page *page, - gfp_t gfp) -{ - struct page *xpage; - void *val; - - _enter("%p,%p,%x", cookie, page, gfp); - - trace_fscache_page(cookie, page, fscache_page_maybe_release); - -try_again: - rcu_read_lock(); - val = radix_tree_lookup(&cookie->stores, page->index); - if (!val) { - rcu_read_unlock(); - fscache_stat(&fscache_n_store_vmscan_not_storing); - __fscache_uncache_page(cookie, page); - return true; - } - - /* see if the page is actually undergoing storage - if so we can't get - * rid of it till the cache has finished with it */ - if (radix_tree_tag_get(&cookie->stores, page->index, - FSCACHE_COOKIE_STORING_TAG)) { - rcu_read_unlock(); - goto page_busy; - } - - /* the page is pending storage, so we attempt to cancel the store and - * discard the store request so that the page can be reclaimed */ - spin_lock(&cookie->stores_lock); - rcu_read_unlock(); - - if (radix_tree_tag_get(&cookie->stores, page->index, - FSCACHE_COOKIE_STORING_TAG)) { - /* the page started to undergo storage whilst we were looking, - * so now we can only wait or return */ - spin_unlock(&cookie->stores_lock); - goto page_busy; - } - - xpage = radix_tree_delete(&cookie->stores, page->index); - trace_fscache_page(cookie, page, fscache_page_radix_delete); - spin_unlock(&cookie->stores_lock); - - if (xpage) { - fscache_stat(&fscache_n_store_vmscan_cancelled); - fscache_stat(&fscache_n_store_radix_deletes); - ASSERTCMP(xpage, ==, page); - } else { - fscache_stat(&fscache_n_store_vmscan_gone); - } - - wake_up_bit(&cookie->flags, 0); - trace_fscache_wake_cookie(cookie); - if (xpage) - put_page(xpage); - __fscache_uncache_page(cookie, page); - return true; - -page_busy: - /* We will wait here if we're allowed to, but that could deadlock the - * allocator as the work threads writing to the cache may all end up - * sleeping on memory allocation, so we may need to impose a timeout - * too. 
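
__fscache_maybe_release_page() below was reached from a netfs's ->releasepage() hook. The caller side looked approximately like this sketch; the examplefs names are invented, while fscache_maybe_release_page() and PageFsCache() are the real old API:

static int examplefs_release_page(struct page *page, gfp_t gfp)
{
        if (PageFsCache(page)) {
                struct fscache_cookie *cookie =
                        examplefs_page_cookie(page);    /* hypothetical lookup */

                /* false means the page is still being written to the cache
                 * (or we may not sleep): tell the VM to keep the page. */
                if (!fscache_maybe_release_page(cookie, page, gfp))
                        return 0;
        }
        return 1;       /* page can be released */
}
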
*/ - if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { - fscache_stat(&fscache_n_store_vmscan_busy); - return false; - } - - fscache_stat(&fscache_n_store_vmscan_wait); - if (!release_page_wait_timeout(cookie, page)) - _debug("fscache writeout timeout page: %p{%lx}", - page, page->index); - - gfp &= ~__GFP_DIRECT_RECLAIM; - goto try_again; -} -EXPORT_SYMBOL(__fscache_maybe_release_page); - -/* - * note that a page has finished being written to the cache - */ -static void fscache_end_page_write(struct fscache_object *object, - struct page *page) -{ - struct fscache_cookie *cookie; - struct page *xpage = NULL, *val; - - spin_lock(&object->lock); - cookie = object->cookie; - if (cookie) { - /* delete the page from the tree if it is now no longer - * pending */ - spin_lock(&cookie->stores_lock); - radix_tree_tag_clear(&cookie->stores, page->index, - FSCACHE_COOKIE_STORING_TAG); - trace_fscache_page(cookie, page, fscache_page_radix_clear_store); - if (!radix_tree_tag_get(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG)) { - fscache_stat(&fscache_n_store_radix_deletes); - xpage = radix_tree_delete(&cookie->stores, page->index); - trace_fscache_page(cookie, page, fscache_page_radix_delete); - trace_fscache_page(cookie, page, fscache_page_write_end); - - val = radix_tree_lookup(&cookie->stores, page->index); - trace_fscache_check_page(cookie, page, val, 1); - } else { - trace_fscache_page(cookie, page, fscache_page_write_end_pend); - } - spin_unlock(&cookie->stores_lock); - wake_up_bit(&cookie->flags, 0); - trace_fscache_wake_cookie(cookie); - } else { - trace_fscache_page(cookie, page, fscache_page_write_end_noc); - } - spin_unlock(&object->lock); - if (xpage) - put_page(xpage); -} - -/* - * actually apply the changed attributes to a cache object - */ -static void fscache_attr_changed_op(struct fscache_operation *op) -{ - struct fscache_object *object = op->object; - int ret; - - _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); - - fscache_stat(&fscache_n_attr_changed_calls); - - if (fscache_object_is_active(object)) { - fscache_stat(&fscache_n_cop_attr_changed); - ret = object->cache->ops->attr_changed(object); - fscache_stat_d(&fscache_n_cop_attr_changed); - if (ret < 0) - fscache_abort_object(object); - fscache_op_complete(op, ret < 0); - } else { - fscache_op_complete(op, true); - } - - _leave(""); -} - -/* - * notification that the attributes on an object have changed - */ -int __fscache_attr_changed(struct fscache_cookie *cookie) -{ - struct fscache_operation *op; - struct fscache_object *object; - bool wake_cookie = false; - - _enter("%p", cookie); - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - - fscache_stat(&fscache_n_attr_changed); - - op = kzalloc(sizeof(*op), GFP_KERNEL); - if (!op) { - fscache_stat(&fscache_n_attr_changed_nomem); - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL); - trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed); - op->flags = FSCACHE_OP_ASYNC | - (1 << FSCACHE_OP_EXCLUSIVE) | - (1 << FSCACHE_OP_UNUSE_COOKIE); - - spin_lock(&cookie->lock); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - - __fscache_use_cookie(cookie); - if (fscache_submit_exclusive_op(object, op) < 0) - goto nobufs_dec; - spin_unlock(&cookie->lock); - fscache_stat(&fscache_n_attr_changed_ok); - 
fscache_put_operation(op); - _leave(" = 0"); - return 0; - -nobufs_dec: - wake_cookie = __fscache_unuse_cookie(cookie); -nobufs: - spin_unlock(&cookie->lock); - fscache_put_operation(op); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); - fscache_stat(&fscache_n_attr_changed_nobufs); - _leave(" = %d", -ENOBUFS); - return -ENOBUFS; -} -EXPORT_SYMBOL(__fscache_attr_changed); - -/* - * Handle cancellation of a pending retrieval op - */ -static void fscache_do_cancel_retrieval(struct fscache_operation *_op) -{ - struct fscache_retrieval *op = - container_of(_op, struct fscache_retrieval, op); - - atomic_set(&op->n_pages, 0); -} - -/* - * release a retrieval op reference - */ -static void fscache_release_retrieval_op(struct fscache_operation *_op) -{ - struct fscache_retrieval *op = - container_of(_op, struct fscache_retrieval, op); - - _enter("{OP%x}", op->op.debug_id); - - ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED, - atomic_read(&op->n_pages), ==, 0); - - if (op->context) - fscache_put_context(op->cookie, op->context); - - _leave(""); -} - -/* - * allocate a retrieval op - */ -struct fscache_retrieval *fscache_alloc_retrieval( - struct fscache_cookie *cookie, - struct address_space *mapping, - fscache_rw_complete_t end_io_func, - void *context) -{ - struct fscache_retrieval *op; - - /* allocate a retrieval operation and attempt to submit it */ - op = kzalloc(sizeof(*op), GFP_NOIO); - if (!op) { - fscache_stat(&fscache_n_retrievals_nomem); - return NULL; - } - - fscache_operation_init(cookie, &op->op, NULL, - fscache_do_cancel_retrieval, - fscache_release_retrieval_op); - op->op.flags = FSCACHE_OP_MYTHREAD | - (1UL << FSCACHE_OP_WAITING) | - (1UL << FSCACHE_OP_UNUSE_COOKIE); - op->cookie = cookie; - op->mapping = mapping; - op->end_io_func = end_io_func; - op->context = context; - INIT_LIST_HEAD(&op->to_do); - - /* Pin the netfs read context in case we need to do the actual netfs - * read because we've encountered a cache read failure. 
- */ - if (context) - fscache_get_context(op->cookie, context); - return op; -} - -/* - * wait for a deferred lookup to complete - */ -int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) -{ - _enter(""); - - if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) { - _leave(" = 0 [imm]"); - return 0; - } - - fscache_stat(&fscache_n_retrievals_wait); - - if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, - TASK_INTERRUPTIBLE) != 0) { - fscache_stat(&fscache_n_retrievals_intr); - _leave(" = -ERESTARTSYS"); - return -ERESTARTSYS; - } - - ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)); - - smp_rmb(); - _leave(" = 0 [dly]"); - return 0; -} - -/* - * wait for an object to become active (or dead) - */ -int fscache_wait_for_operation_activation(struct fscache_object *object, - struct fscache_operation *op, - atomic_t *stat_op_waits, - atomic_t *stat_object_dead) -{ - int ret; - - if (!test_bit(FSCACHE_OP_WAITING, &op->flags)) - goto check_if_dead; - - _debug(">>> WT"); - if (stat_op_waits) - fscache_stat(stat_op_waits); - if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, - TASK_INTERRUPTIBLE) != 0) { - trace_fscache_op(object->cookie, op, fscache_op_signal); - ret = fscache_cancel_op(op, false); - if (ret == 0) - return -ERESTARTSYS; - - /* it's been removed from the pending queue by another party, - * so we should get to run shortly */ - wait_on_bit(&op->flags, FSCACHE_OP_WAITING, - TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - -check_if_dead: - if (op->state == FSCACHE_OP_ST_CANCELLED) { - if (stat_object_dead) - fscache_stat(stat_object_dead); - _leave(" = -ENOBUFS [cancelled]"); - return -ENOBUFS; - } - if (unlikely(fscache_object_is_dying(object) || - fscache_cache_is_broken(object))) { - enum fscache_operation_state state = op->state; - trace_fscache_op(object->cookie, op, fscache_op_signal); - fscache_cancel_op(op, true); - if (stat_object_dead) - fscache_stat(stat_object_dead); - _leave(" = -ENOBUFS [obj dead %d]", state); - return -ENOBUFS; - } - return 0; -} - -/* - * read a page from the cache or allocate a block in which to store it - * - we return: - * -ENOMEM - out of memory, nothing done - * -ERESTARTSYS - interrupted - * -ENOBUFS - no backing object available in which to cache the block - * -ENODATA - no data available in the backing object for this block - * 0 - dispatched a read - it'll call end_io_func() when finished - */ -int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, - struct page *page, - fscache_rw_complete_t end_io_func, - void *context, - gfp_t gfp) -{ - struct fscache_retrieval *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; - - _enter("%p,%p,,,", cookie, page); - - fscache_stat(&fscache_n_retrievals); - - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; - - if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { - _leave(" = -ENOBUFS [invalidating]"); - return -ENOBUFS; - } - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERTCMP(page, !=, NULL); - - if (fscache_wait_for_deferred_lookup(cookie) < 0) - return -ERESTARTSYS; - - op = fscache_alloc_retrieval(cookie, page->mapping, - end_io_func, context); - if (!op) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - atomic_set(&op->n_pages, 1); - trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one); - - spin_lock(&cookie->lock); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs_unlock; - object = hlist_entry(cookie->backing_objects.first, - 
struct fscache_object, cookie_link); - - ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)); - - __fscache_use_cookie(cookie); - atomic_inc(&object->n_reads); - __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); - - if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock_dec; - spin_unlock(&cookie->lock); - - fscache_stat(&fscache_n_retrieval_ops); - - /* we wait for the operation to become active, and then process it - * *here*, in this thread, and not in the thread pool */ - ret = fscache_wait_for_operation_activation( - object, &op->op, - __fscache_stat(&fscache_n_retrieval_op_waits), - __fscache_stat(&fscache_n_retrievals_object_dead)); - if (ret < 0) - goto error; - - /* ask the cache to honour the operation */ - if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { - fscache_stat(&fscache_n_cop_allocate_page); - ret = object->cache->ops->allocate_page(op, page, gfp); - fscache_stat_d(&fscache_n_cop_allocate_page); - if (ret == 0) - ret = -ENODATA; - } else { - fscache_stat(&fscache_n_cop_read_or_alloc_page); - ret = object->cache->ops->read_or_alloc_page(op, page, gfp); - fscache_stat_d(&fscache_n_cop_read_or_alloc_page); - } - -error: - if (ret == -ENOMEM) - fscache_stat(&fscache_n_retrievals_nomem); - else if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_retrievals_intr); - else if (ret == -ENODATA) - fscache_stat(&fscache_n_retrievals_nodata); - else if (ret < 0) - fscache_stat(&fscache_n_retrievals_nobufs); - else - fscache_stat(&fscache_n_retrievals_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); - return ret; - -nobufs_unlock_dec: - atomic_dec(&object->n_reads); - wake_cookie = __fscache_unuse_cookie(cookie); -nobufs_unlock: - spin_unlock(&cookie->lock); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); - fscache_put_retrieval(op); -nobufs: - fscache_stat(&fscache_n_retrievals_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; -} -EXPORT_SYMBOL(__fscache_read_or_alloc_page); - -/* - * read a list of page from the cache or allocate a block in which to store - * them - * - we return: - * -ENOMEM - out of memory, some pages may be being read - * -ERESTARTSYS - interrupted, some pages may be being read - * -ENOBUFS - no backing object or space available in which to cache any - * pages not being read - * -ENODATA - no data available in the backing object for some or all of - * the pages - * 0 - dispatched a read on all pages - * - * end_io_func() will be called for each page read from the cache as it is - * finishes being read - * - * any pages for which a read is dispatched will be removed from pages and - * nr_pages - */ -int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages, - fscache_rw_complete_t end_io_func, - void *context, - gfp_t gfp) -{ - struct fscache_retrieval *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; - - _enter("%p,,%d,,,", cookie, *nr_pages); - - fscache_stat(&fscache_n_retrievals); - - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; - - if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { - _leave(" = -ENOBUFS [invalidating]"); - return -ENOBUFS; - } - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERTCMP(*nr_pages, >, 0); - ASSERT(!list_empty(pages)); - - if (fscache_wait_for_deferred_lookup(cookie) < 0) - return -ERESTARTSYS; - - op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); - if (!op) - return -ENOMEM; - 
atomic_set(&op->n_pages, *nr_pages); - trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi); - - spin_lock(&cookie->lock); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs_unlock; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - - __fscache_use_cookie(cookie); - atomic_inc(&object->n_reads); - __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); - - if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock_dec; - spin_unlock(&cookie->lock); - - fscache_stat(&fscache_n_retrieval_ops); - - /* we wait for the operation to become active, and then process it - * *here*, in this thread, and not in the thread pool */ - ret = fscache_wait_for_operation_activation( - object, &op->op, - __fscache_stat(&fscache_n_retrieval_op_waits), - __fscache_stat(&fscache_n_retrievals_object_dead)); - if (ret < 0) - goto error; - - /* ask the cache to honour the operation */ - if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { - fscache_stat(&fscache_n_cop_allocate_pages); - ret = object->cache->ops->allocate_pages( - op, pages, nr_pages, gfp); - fscache_stat_d(&fscache_n_cop_allocate_pages); - } else { - fscache_stat(&fscache_n_cop_read_or_alloc_pages); - ret = object->cache->ops->read_or_alloc_pages( - op, pages, nr_pages, gfp); - fscache_stat_d(&fscache_n_cop_read_or_alloc_pages); - } - -error: - if (ret == -ENOMEM) - fscache_stat(&fscache_n_retrievals_nomem); - else if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_retrievals_intr); - else if (ret == -ENODATA) - fscache_stat(&fscache_n_retrievals_nodata); - else if (ret < 0) - fscache_stat(&fscache_n_retrievals_nobufs); - else - fscache_stat(&fscache_n_retrievals_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); - return ret; - -nobufs_unlock_dec: - atomic_dec(&object->n_reads); - wake_cookie = __fscache_unuse_cookie(cookie); -nobufs_unlock: - spin_unlock(&cookie->lock); - fscache_put_retrieval(op); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); -nobufs: - fscache_stat(&fscache_n_retrievals_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; -} -EXPORT_SYMBOL(__fscache_read_or_alloc_pages); - -/* - * allocate a block in the cache on which to store a page - * - we return: - * -ENOMEM - out of memory, nothing done - * -ERESTARTSYS - interrupted - * -ENOBUFS - no backing object available in which to cache the block - * 0 - block allocated - */ -int __fscache_alloc_page(struct fscache_cookie *cookie, - struct page *page, - gfp_t gfp) -{ - struct fscache_retrieval *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; - - _enter("%p,%p,,,", cookie, page); - - fscache_stat(&fscache_n_allocs); - - if (hlist_empty(&cookie->backing_objects)) - goto nobufs; - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERTCMP(page, !=, NULL); - - if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { - _leave(" = -ENOBUFS [invalidating]"); - return -ENOBUFS; - } - - if (fscache_wait_for_deferred_lookup(cookie) < 0) - return -ERESTARTSYS; - - op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); - if (!op) - return -ENOMEM; - atomic_set(&op->n_pages, 1); - trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one); - - spin_lock(&cookie->lock); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs_unlock; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - - 
__fscache_use_cookie(cookie); - if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock_dec; - spin_unlock(&cookie->lock); - - fscache_stat(&fscache_n_alloc_ops); - - ret = fscache_wait_for_operation_activation( - object, &op->op, - __fscache_stat(&fscache_n_alloc_op_waits), - __fscache_stat(&fscache_n_allocs_object_dead)); - if (ret < 0) - goto error; - - /* ask the cache to honour the operation */ - fscache_stat(&fscache_n_cop_allocate_page); - ret = object->cache->ops->allocate_page(op, page, gfp); - fscache_stat_d(&fscache_n_cop_allocate_page); - -error: - if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_allocs_intr); - else if (ret < 0) - fscache_stat(&fscache_n_allocs_nobufs); - else - fscache_stat(&fscache_n_allocs_ok); - - fscache_put_retrieval(op); - _leave(" = %d", ret); - return ret; - -nobufs_unlock_dec: - wake_cookie = __fscache_unuse_cookie(cookie); -nobufs_unlock: - spin_unlock(&cookie->lock); - fscache_put_retrieval(op); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); -nobufs: - fscache_stat(&fscache_n_allocs_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; -} -EXPORT_SYMBOL(__fscache_alloc_page); - -/* - * Unmark pages allocate in the readahead code path (via: - * fscache_readpages_or_alloc) after delegating to the base filesystem - */ -void __fscache_readpages_cancel(struct fscache_cookie *cookie, - struct list_head *pages) -{ - struct page *page; - - list_for_each_entry(page, pages, lru) { - if (PageFsCache(page)) - __fscache_uncache_page(cookie, page); - } -} -EXPORT_SYMBOL(__fscache_readpages_cancel); - -/* - * release a write op reference - */ -static void fscache_release_write_op(struct fscache_operation *_op) -{ - _enter("{OP%x}", _op->debug_id); -} - -/* - * perform the background storage of a page into the cache - */ -static void fscache_write_op(struct fscache_operation *_op) -{ - struct fscache_storage *op = - container_of(_op, struct fscache_storage, op); - struct fscache_object *object = op->op.object; - struct fscache_cookie *cookie; - struct page *page; - unsigned n; - void *results[1]; - int ret; - - _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); - -again: - spin_lock(&object->lock); - cookie = object->cookie; - - if (!fscache_object_is_active(object)) { - /* If we get here, then the on-disk cache object likely no - * longer exists, so we should just cancel this write - * operation. - */ - spin_unlock(&object->lock); - fscache_op_complete(&op->op, true); - _leave(" [inactive]"); - return; - } - - if (!cookie) { - /* If we get here, then the cookie belonging to the object was - * detached, probably by the cookie being withdrawn due to - * memory pressure, which means that the pages we might write - * to the cache from no longer exist - therefore, we can just - * cancel this write operation. 
- */ - spin_unlock(&object->lock); - fscache_op_complete(&op->op, true); - _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}", - _op->flags, _op->state, object->state->short_name, - object->flags); - return; - } - - spin_lock(&cookie->stores_lock); - - fscache_stat(&fscache_n_store_calls); - - /* find a page to store */ - results[0] = NULL; - page = NULL; - n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1, - FSCACHE_COOKIE_PENDING_TAG); - trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit); - if (n != 1) - goto superseded; - page = results[0]; - _debug("gang %d [%lx]", n, page->index); - - radix_tree_tag_set(&cookie->stores, page->index, - FSCACHE_COOKIE_STORING_TAG); - radix_tree_tag_clear(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG); - trace_fscache_page(cookie, page, fscache_page_radix_pend2store); - - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - - if (page->index >= op->store_limit) - goto discard_page; - - fscache_stat(&fscache_n_store_pages); - fscache_stat(&fscache_n_cop_write_page); - ret = object->cache->ops->write_page(op, page); - fscache_stat_d(&fscache_n_cop_write_page); - trace_fscache_wrote_page(cookie, page, &op->op, ret); - fscache_end_page_write(object, page); - if (ret < 0) { - fscache_abort_object(object); - fscache_op_complete(&op->op, true); - } else { - fscache_enqueue_operation(&op->op); - } - - _leave(""); - return; - -discard_page: - fscache_stat(&fscache_n_store_pages_over_limit); - trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS); - fscache_end_page_write(object, page); - goto again; - -superseded: - /* this writer is going away and there aren't any more things to - * write */ - _debug("cease"); - spin_unlock(&cookie->stores_lock); - clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); - spin_unlock(&object->lock); - fscache_op_complete(&op->op, false); - _leave(""); -} - -/* - * Clear the pages pending writing for invalidation - */ -void fscache_invalidate_writes(struct fscache_cookie *cookie) -{ - struct page *page; - void *results[16]; - int n, i; - - _enter(""); - - for (;;) { - spin_lock(&cookie->stores_lock); - n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, - ARRAY_SIZE(results), - FSCACHE_COOKIE_PENDING_TAG); - if (n == 0) { - spin_unlock(&cookie->stores_lock); - break; - } - - for (i = n - 1; i >= 0; i--) { - page = results[i]; - radix_tree_delete(&cookie->stores, page->index); - trace_fscache_page(cookie, page, fscache_page_radix_delete); - trace_fscache_page(cookie, page, fscache_page_inval); - } - - spin_unlock(&cookie->stores_lock); - - for (i = n - 1; i >= 0; i--) - put_page(results[i]); - } - - wake_up_bit(&cookie->flags, 0); - trace_fscache_wake_cookie(cookie); - - _leave(""); -} - -/* - * request a page be stored in the cache - * - returns: - * -ENOMEM - out of memory, nothing done - * -ENOBUFS - no backing object available in which to cache the page - * 0 - dispatched a write - it'll call end_io_func() when finished - * - * if the cookie still has a backing object at this point, that object can be - * in one of a few states with respect to storage processing: - * - * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is - * set) - * - * (a) no writes yet - * - * (b) writes deferred till post-creation (mark page for writing and - * return immediately) - * - * (2) negative lookup, object created, initial fill being made from netfs - * - * (a) fill point not yet reached this page (mark page for writing and - * return) - * - * (b) fill 
point passed this page (queue op to store this page) - * - * (3) object extant (queue op to store this page) - * - * any other state is invalid - */ -int __fscache_write_page(struct fscache_cookie *cookie, - struct page *page, - loff_t object_size, - gfp_t gfp) -{ - struct fscache_storage *op; - struct fscache_object *object; - bool wake_cookie = false; - int ret; - - _enter("%p,%x,", cookie, (u32) page->flags); - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERT(PageFsCache(page)); - - fscache_stat(&fscache_n_stores); - - if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { - _leave(" = -ENOBUFS [invalidating]"); - return -ENOBUFS; - } - - op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY); - if (!op) - goto nomem; - - fscache_operation_init(cookie, &op->op, fscache_write_op, NULL, - fscache_release_write_op); - op->op.flags = FSCACHE_OP_ASYNC | - (1 << FSCACHE_OP_WAITING) | - (1 << FSCACHE_OP_UNUSE_COOKIE); - - ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM); - if (ret < 0) - goto nomem_free; - - trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one); - - ret = -ENOBUFS; - spin_lock(&cookie->lock); - - if (!fscache_cookie_enabled(cookie) || - hlist_empty(&cookie->backing_objects)) - goto nobufs; - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - if (test_bit(FSCACHE_IOERROR, &object->cache->flags)) - goto nobufs; - - trace_fscache_page(cookie, page, fscache_page_write); - - /* add the page to the pending-storage radix tree on the backing - * object */ - spin_lock(&object->lock); - - if (object->store_limit_l != object_size) - fscache_set_store_limit(object, object_size); - - spin_lock(&cookie->stores_lock); - - _debug("store limit %llx", (unsigned long long) object->store_limit); - - ret = radix_tree_insert(&cookie->stores, page->index, page); - if (ret < 0) { - if (ret == -EEXIST) - goto already_queued; - _debug("insert failed %d", ret); - goto nobufs_unlock_obj; - } - - trace_fscache_page(cookie, page, fscache_page_radix_insert); - radix_tree_tag_set(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG); - trace_fscache_page(cookie, page, fscache_page_radix_set_pend); - get_page(page); - - /* we only want one writer at a time, but we do need to queue new - * writers after exclusive ops */ - if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) - goto already_pending; - - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); - op->store_limit = object->store_limit; - - __fscache_use_cookie(cookie); - if (fscache_submit_op(object, &op->op) < 0) - goto submit_failed; - - spin_unlock(&cookie->lock); - radix_tree_preload_end(); - fscache_stat(&fscache_n_store_ops); - fscache_stat(&fscache_n_stores_ok); - - /* the work queue now carries its own ref on the object */ - fscache_put_operation(&op->op); - _leave(" = 0"); - return 0; - -already_queued: - fscache_stat(&fscache_n_stores_again); -already_pending: - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); - spin_unlock(&cookie->lock); - radix_tree_preload_end(); - fscache_put_operation(&op->op); - fscache_stat(&fscache_n_stores_ok); - _leave(" = 0"); - return 0; - -submit_failed: - spin_lock(&cookie->stores_lock); - radix_tree_delete(&cookie->stores, page->index); - trace_fscache_page(cookie, page, fscache_page_radix_delete); - spin_unlock(&cookie->stores_lock); - wake_cookie = __fscache_unuse_cookie(cookie); - 
put_page(page); - ret = -ENOBUFS; - goto nobufs; - -nobufs_unlock_obj: - spin_unlock(&cookie->stores_lock); - spin_unlock(&object->lock); -nobufs: - spin_unlock(&cookie->lock); - radix_tree_preload_end(); - fscache_put_operation(&op->op); - if (wake_cookie) - __fscache_wake_unused_cookie(cookie); - fscache_stat(&fscache_n_stores_nobufs); - _leave(" = -ENOBUFS"); - return -ENOBUFS; - -nomem_free: - fscache_put_operation(&op->op); -nomem: - fscache_stat(&fscache_n_stores_oom); - _leave(" = -ENOMEM"); - return -ENOMEM; -} -EXPORT_SYMBOL(__fscache_write_page); - -/* - * remove a page from the cache - */ -void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) -{ - struct fscache_object *object; - - _enter(",%p", page); - - ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - ASSERTCMP(page, !=, NULL); - - fscache_stat(&fscache_n_uncaches); - - /* cache withdrawal may beat us to it */ - if (!PageFsCache(page)) - goto done; - - trace_fscache_page(cookie, page, fscache_page_uncache); - - /* get the object */ - spin_lock(&cookie->lock); - - if (hlist_empty(&cookie->backing_objects)) { - ClearPageFsCache(page); - goto done_unlock; - } - - object = hlist_entry(cookie->backing_objects.first, - struct fscache_object, cookie_link); - - /* there might now be stuff on disk we could read */ - clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); - - /* only invoke the cache backend if we managed to mark the page - * uncached here; this deals with synchronisation vs withdrawal */ - if (TestClearPageFsCache(page) && - object->cache->ops->uncache_page) { - /* the cache backend releases the cookie lock */ - fscache_stat(&fscache_n_cop_uncache_page); - object->cache->ops->uncache_page(object, page); - fscache_stat_d(&fscache_n_cop_uncache_page); - goto done; - } - -done_unlock: - spin_unlock(&cookie->lock); -done: - _leave(""); -} -EXPORT_SYMBOL(__fscache_uncache_page); - -/** - * fscache_mark_page_cached - Mark a page as being cached - * @op: The retrieval op pages are being marked for - * @page: The page to be marked - * - * Mark a netfs page as being cached. After this is called, the netfs - * must call fscache_uncache_page() to remove the mark. - */ -void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page) -{ - struct fscache_cookie *cookie = op->op.object->cookie; - -#ifdef CONFIG_FSCACHE_STATS - atomic_inc(&fscache_n_marks); -#endif - - trace_fscache_page(cookie, page, fscache_page_cached); - - _debug("- mark %p{%lx}", page, page->index); - if (TestSetPageFsCache(page)) { - static bool once_only; - if (!once_only) { - once_only = true; - pr_warn("Cookie type %s marked page %lx multiple times\n", - cookie->def->name, page->index); - } - } - - if (cookie->def->mark_page_cached) - cookie->def->mark_page_cached(cookie->netfs_data, - op->mapping, page); -} -EXPORT_SYMBOL(fscache_mark_page_cached); - -/** - * fscache_mark_pages_cached - Mark pages as being cached - * @op: The retrieval op pages are being marked for - * @pagevec: The pages to be marked - * - * Mark a bunch of netfs pages as being cached. After this is called, - * the netfs must call fscache_uncache_page() to remove the mark. 
- */ -void fscache_mark_pages_cached(struct fscache_retrieval *op, - struct pagevec *pagevec) -{ - unsigned long loop; - - for (loop = 0; loop < pagevec->nr; loop++) - fscache_mark_page_cached(op, pagevec->pages[loop]); - - pagevec_reinit(pagevec); -} -EXPORT_SYMBOL(fscache_mark_pages_cached); - -/* - * Uncache all the pages in an inode that are marked PG_fscache, assuming them - * to be associated with the given cookie. - */ -void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, - struct inode *inode) -{ - struct address_space *mapping = inode->i_mapping; - struct pagevec pvec; - pgoff_t next; - int i; - - _enter("%p,%p", cookie, inode); - - if (!mapping || mapping->nrpages == 0) { - _leave(" [no pages]"); - return; - } - - pagevec_init(&pvec); - next = 0; - do { - if (!pagevec_lookup(&pvec, mapping, &next)) - break; - for (i = 0; i < pagevec_count(&pvec); i++) { - struct page *page = pvec.pages[i]; - if (PageFsCache(page)) { - __fscache_wait_on_page_write(cookie, page); - __fscache_uncache_page(cookie, page); - } - } - pagevec_release(&pvec); - cond_resched(); - } while (next); - - _leave(""); -} -EXPORT_SYMBOL(__fscache_uncache_all_inode_pages); diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c index 061df8f61ffc..dc3b0e9c8cce 100644 --- a/fs/fscache/proc.c +++ b/fs/fscache/proc.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* FS-Cache statistics viewing interface * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ -#define FSCACHE_DEBUG_LEVEL OPERATION +#define FSCACHE_DEBUG_LEVEL CACHE #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> @@ -16,42 +16,32 @@ */ int __init fscache_proc_init(void) { - _enter(""); - if (!proc_mkdir("fs/fscache", NULL)) goto error_dir; + if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL, + &fscache_caches_seq_ops)) + goto error; + + if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL, + &fscache_volumes_seq_ops)) + goto error; + if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL, &fscache_cookies_seq_ops)) - goto error_cookies; + goto error; #ifdef CONFIG_FSCACHE_STATS if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL, - fscache_stats_show)) - goto error_stats; + fscache_stats_show)) + goto error; #endif -#ifdef CONFIG_FSCACHE_OBJECT_LIST - if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL, - &fscache_objlist_proc_ops)) - goto error_objects; -#endif - - _leave(" = 0"); return 0; -#ifdef CONFIG_FSCACHE_OBJECT_LIST -error_objects: -#endif -#ifdef CONFIG_FSCACHE_STATS - remove_proc_entry("fs/fscache/stats", NULL); -error_stats: -#endif - remove_proc_entry("fs/fscache/cookies", NULL); -error_cookies: +error: remove_proc_entry("fs/fscache", NULL); error_dir: - _leave(" = -ENOMEM"); return -ENOMEM; } @@ -60,12 +50,5 @@ error_dir: */ void fscache_proc_cleanup(void) { -#ifdef CONFIG_FSCACHE_OBJECT_LIST - remove_proc_entry("fs/fscache/objects", NULL); -#endif -#ifdef CONFIG_FSCACHE_STATS - remove_proc_entry("fs/fscache/stats", NULL); -#endif - remove_proc_entry("fs/fscache/cookies", NULL); - remove_proc_entry("fs/fscache", NULL); + remove_proc_subtree("fs/fscache", NULL); } diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index a7c3ed89a3e0..fc94e5e79f1c 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* FS-Cache statistics * - * Copyright (C) 2007 
Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ -#define FSCACHE_DEBUG_LEVEL THREAD -#include <linux/module.h> +#define FSCACHE_DEBUG_LEVEL CACHE #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" @@ -14,122 +13,41 @@ /* * operation counters */ -atomic_t fscache_n_op_pend; -atomic_t fscache_n_op_run; -atomic_t fscache_n_op_enqueue; -atomic_t fscache_n_op_deferred_release; -atomic_t fscache_n_op_initialised; -atomic_t fscache_n_op_release; -atomic_t fscache_n_op_gc; -atomic_t fscache_n_op_cancelled; -atomic_t fscache_n_op_rejected; - -atomic_t fscache_n_attr_changed; -atomic_t fscache_n_attr_changed_ok; -atomic_t fscache_n_attr_changed_nobufs; -atomic_t fscache_n_attr_changed_nomem; -atomic_t fscache_n_attr_changed_calls; - -atomic_t fscache_n_allocs; -atomic_t fscache_n_allocs_ok; -atomic_t fscache_n_allocs_wait; -atomic_t fscache_n_allocs_nobufs; -atomic_t fscache_n_allocs_intr; -atomic_t fscache_n_allocs_object_dead; -atomic_t fscache_n_alloc_ops; -atomic_t fscache_n_alloc_op_waits; - -atomic_t fscache_n_retrievals; -atomic_t fscache_n_retrievals_ok; -atomic_t fscache_n_retrievals_wait; -atomic_t fscache_n_retrievals_nodata; -atomic_t fscache_n_retrievals_nobufs; -atomic_t fscache_n_retrievals_intr; -atomic_t fscache_n_retrievals_nomem; -atomic_t fscache_n_retrievals_object_dead; -atomic_t fscache_n_retrieval_ops; -atomic_t fscache_n_retrieval_op_waits; - -atomic_t fscache_n_stores; -atomic_t fscache_n_stores_ok; -atomic_t fscache_n_stores_again; -atomic_t fscache_n_stores_nobufs; -atomic_t fscache_n_stores_oom; -atomic_t fscache_n_store_ops; -atomic_t fscache_n_store_calls; -atomic_t fscache_n_store_pages; -atomic_t fscache_n_store_radix_deletes; -atomic_t fscache_n_store_pages_over_limit; - -atomic_t fscache_n_store_vmscan_not_storing; -atomic_t fscache_n_store_vmscan_gone; -atomic_t fscache_n_store_vmscan_busy; -atomic_t fscache_n_store_vmscan_cancelled; -atomic_t fscache_n_store_vmscan_wait; - -atomic_t fscache_n_marks; -atomic_t fscache_n_uncaches; +atomic_t fscache_n_volumes; +atomic_t fscache_n_volumes_collision; +atomic_t fscache_n_volumes_nomem; +atomic_t fscache_n_cookies; +atomic_t fscache_n_cookies_lru; +atomic_t fscache_n_cookies_lru_expired; +atomic_t fscache_n_cookies_lru_removed; +atomic_t fscache_n_cookies_lru_dropped; atomic_t fscache_n_acquires; -atomic_t fscache_n_acquires_null; -atomic_t fscache_n_acquires_no_cache; atomic_t fscache_n_acquires_ok; -atomic_t fscache_n_acquires_nobufs; atomic_t fscache_n_acquires_oom; atomic_t fscache_n_invalidates; -atomic_t fscache_n_invalidates_run; atomic_t fscache_n_updates; -atomic_t fscache_n_updates_null; -atomic_t fscache_n_updates_run; +EXPORT_SYMBOL(fscache_n_updates); atomic_t fscache_n_relinquishes; -atomic_t fscache_n_relinquishes_null; -atomic_t fscache_n_relinquishes_waitcrt; atomic_t fscache_n_relinquishes_retire; - -atomic_t fscache_n_cookie_index; -atomic_t fscache_n_cookie_data; -atomic_t fscache_n_cookie_special; - -atomic_t fscache_n_object_alloc; -atomic_t fscache_n_object_no_alloc; -atomic_t fscache_n_object_lookups; -atomic_t fscache_n_object_lookups_negative; -atomic_t fscache_n_object_lookups_positive; -atomic_t fscache_n_object_lookups_timed_out; -atomic_t fscache_n_object_created; -atomic_t fscache_n_object_avail; -atomic_t fscache_n_object_dead; - -atomic_t fscache_n_checkaux_none; -atomic_t fscache_n_checkaux_okay; -atomic_t fscache_n_checkaux_update; -atomic_t 
fscache_n_checkaux_obsolete; - -atomic_t fscache_n_cop_alloc_object; -atomic_t fscache_n_cop_lookup_object; -atomic_t fscache_n_cop_lookup_complete; -atomic_t fscache_n_cop_grab_object; -atomic_t fscache_n_cop_invalidate_object; -atomic_t fscache_n_cop_update_object; -atomic_t fscache_n_cop_drop_object; -atomic_t fscache_n_cop_put_object; -atomic_t fscache_n_cop_sync_cache; -atomic_t fscache_n_cop_attr_changed; -atomic_t fscache_n_cop_read_or_alloc_page; -atomic_t fscache_n_cop_read_or_alloc_pages; -atomic_t fscache_n_cop_allocate_page; -atomic_t fscache_n_cop_allocate_pages; -atomic_t fscache_n_cop_write_page; -atomic_t fscache_n_cop_uncache_page; -atomic_t fscache_n_cop_dissociate_pages; - -atomic_t fscache_n_cache_no_space_reject; -atomic_t fscache_n_cache_stale_objects; -atomic_t fscache_n_cache_retired_objects; -atomic_t fscache_n_cache_culled_objects; +atomic_t fscache_n_relinquishes_dropped; + +atomic_t fscache_n_resizes; +atomic_t fscache_n_resizes_null; + +atomic_t fscache_n_read; +EXPORT_SYMBOL(fscache_n_read); +atomic_t fscache_n_write; +EXPORT_SYMBOL(fscache_n_write); +atomic_t fscache_n_no_write_space; +EXPORT_SYMBOL(fscache_n_no_write_space); +atomic_t fscache_n_no_create_space; +EXPORT_SYMBOL(fscache_n_no_create_space); +atomic_t fscache_n_culled; +EXPORT_SYMBOL(fscache_n_culled); /* * display the general statistics @@ -137,147 +55,48 @@ atomic_t fscache_n_cache_culled_objects; int fscache_stats_show(struct seq_file *m, void *v) { seq_puts(m, "FS-Cache statistics\n"); - - seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", - atomic_read(&fscache_n_cookie_index), - atomic_read(&fscache_n_cookie_data), - atomic_read(&fscache_n_cookie_special)); - - seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", - atomic_read(&fscache_n_object_alloc), - atomic_read(&fscache_n_object_no_alloc), - atomic_read(&fscache_n_object_avail), - atomic_read(&fscache_n_object_dead)); - seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", - atomic_read(&fscache_n_checkaux_none), - atomic_read(&fscache_n_checkaux_okay), - atomic_read(&fscache_n_checkaux_update), - atomic_read(&fscache_n_checkaux_obsolete)); - - seq_printf(m, "Pages : mrk=%u unc=%u\n", - atomic_read(&fscache_n_marks), - atomic_read(&fscache_n_uncaches)); - - seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" - " oom=%u\n", + seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n", + atomic_read(&fscache_n_cookies), + atomic_read(&fscache_n_volumes), + atomic_read(&fscache_n_volumes_collision), + atomic_read(&fscache_n_volumes_nomem) + ); + + seq_printf(m, "Acquire: n=%u ok=%u oom=%u\n", atomic_read(&fscache_n_acquires), - atomic_read(&fscache_n_acquires_null), - atomic_read(&fscache_n_acquires_no_cache), atomic_read(&fscache_n_acquires_ok), - atomic_read(&fscache_n_acquires_nobufs), atomic_read(&fscache_n_acquires_oom)); - seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", - atomic_read(&fscache_n_object_lookups), - atomic_read(&fscache_n_object_lookups_negative), - atomic_read(&fscache_n_object_lookups_positive), - atomic_read(&fscache_n_object_created), - atomic_read(&fscache_n_object_lookups_timed_out)); + seq_printf(m, "LRU : n=%u exp=%u rmv=%u drp=%u at=%ld\n", + atomic_read(&fscache_n_cookies_lru), + atomic_read(&fscache_n_cookies_lru_expired), + atomic_read(&fscache_n_cookies_lru_removed), + atomic_read(&fscache_n_cookies_lru_dropped), + timer_pending(&fscache_cookie_lru_timer) ? 
+ fscache_cookie_lru_timer.expires - jiffies : 0); - seq_printf(m, "Invals : n=%u run=%u\n", - atomic_read(&fscache_n_invalidates), - atomic_read(&fscache_n_invalidates_run)); + seq_printf(m, "Invals : n=%u\n", + atomic_read(&fscache_n_invalidates)); - seq_printf(m, "Updates: n=%u nul=%u run=%u\n", + seq_printf(m, "Updates: n=%u rsz=%u rsn=%u\n", atomic_read(&fscache_n_updates), - atomic_read(&fscache_n_updates_null), - atomic_read(&fscache_n_updates_run)); + atomic_read(&fscache_n_resizes), + atomic_read(&fscache_n_resizes_null)); - seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", + seq_printf(m, "Relinqs: n=%u rtr=%u drop=%u\n", atomic_read(&fscache_n_relinquishes), - atomic_read(&fscache_n_relinquishes_null), - atomic_read(&fscache_n_relinquishes_waitcrt), - atomic_read(&fscache_n_relinquishes_retire)); - - seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", - atomic_read(&fscache_n_attr_changed), - atomic_read(&fscache_n_attr_changed_ok), - atomic_read(&fscache_n_attr_changed_nobufs), - atomic_read(&fscache_n_attr_changed_nomem), - atomic_read(&fscache_n_attr_changed_calls)); - - seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", - atomic_read(&fscache_n_allocs), - atomic_read(&fscache_n_allocs_ok), - atomic_read(&fscache_n_allocs_wait), - atomic_read(&fscache_n_allocs_nobufs), - atomic_read(&fscache_n_allocs_intr)); - seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", - atomic_read(&fscache_n_alloc_ops), - atomic_read(&fscache_n_alloc_op_waits), - atomic_read(&fscache_n_allocs_object_dead)); - - seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" - " int=%u oom=%u\n", - atomic_read(&fscache_n_retrievals), - atomic_read(&fscache_n_retrievals_ok), - atomic_read(&fscache_n_retrievals_wait), - atomic_read(&fscache_n_retrievals_nodata), - atomic_read(&fscache_n_retrievals_nobufs), - atomic_read(&fscache_n_retrievals_intr), - atomic_read(&fscache_n_retrievals_nomem)); - seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", - atomic_read(&fscache_n_retrieval_ops), - atomic_read(&fscache_n_retrieval_op_waits), - atomic_read(&fscache_n_retrievals_object_dead)); - - seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", - atomic_read(&fscache_n_stores), - atomic_read(&fscache_n_stores_ok), - atomic_read(&fscache_n_stores_again), - atomic_read(&fscache_n_stores_nobufs), - atomic_read(&fscache_n_stores_oom)); - seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", - atomic_read(&fscache_n_store_ops), - atomic_read(&fscache_n_store_calls), - atomic_read(&fscache_n_store_pages), - atomic_read(&fscache_n_store_radix_deletes), - atomic_read(&fscache_n_store_pages_over_limit)); + atomic_read(&fscache_n_relinquishes_retire), + atomic_read(&fscache_n_relinquishes_dropped)); - seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n", - atomic_read(&fscache_n_store_vmscan_not_storing), - atomic_read(&fscache_n_store_vmscan_gone), - atomic_read(&fscache_n_store_vmscan_busy), - atomic_read(&fscache_n_store_vmscan_cancelled), - atomic_read(&fscache_n_store_vmscan_wait)); + seq_printf(m, "NoSpace: nwr=%u ncr=%u cull=%u\n", + atomic_read(&fscache_n_no_write_space), + atomic_read(&fscache_n_no_create_space), + atomic_read(&fscache_n_culled)); - seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", - atomic_read(&fscache_n_op_pend), - atomic_read(&fscache_n_op_run), - atomic_read(&fscache_n_op_enqueue), - atomic_read(&fscache_n_op_cancelled), - atomic_read(&fscache_n_op_rejected)); - seq_printf(m, "Ops : ini=%u dfr=%u rel=%u gc=%u\n", - 
atomic_read(&fscache_n_op_initialised), - atomic_read(&fscache_n_op_deferred_release), - atomic_read(&fscache_n_op_release), - atomic_read(&fscache_n_op_gc)); + seq_printf(m, "IO : rd=%u wr=%u\n", + atomic_read(&fscache_n_read), + atomic_read(&fscache_n_write)); - seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", - atomic_read(&fscache_n_cop_alloc_object), - atomic_read(&fscache_n_cop_lookup_object), - atomic_read(&fscache_n_cop_lookup_complete), - atomic_read(&fscache_n_cop_grab_object)); - seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n", - atomic_read(&fscache_n_cop_invalidate_object), - atomic_read(&fscache_n_cop_update_object), - atomic_read(&fscache_n_cop_drop_object), - atomic_read(&fscache_n_cop_put_object), - atomic_read(&fscache_n_cop_attr_changed), - atomic_read(&fscache_n_cop_sync_cache)); - seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n", - atomic_read(&fscache_n_cop_read_or_alloc_page), - atomic_read(&fscache_n_cop_read_or_alloc_pages), - atomic_read(&fscache_n_cop_allocate_page), - atomic_read(&fscache_n_cop_allocate_pages), - atomic_read(&fscache_n_cop_write_page), - atomic_read(&fscache_n_cop_uncache_page), - atomic_read(&fscache_n_cop_dissociate_pages)); - seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n", - atomic_read(&fscache_n_cache_no_space_reject), - atomic_read(&fscache_n_cache_stale_objects), - atomic_read(&fscache_n_cache_retired_objects), - atomic_read(&fscache_n_cache_culled_objects)); netfs_stats_show(m); return 0; } diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c new file mode 100644 index 000000000000..a57c6cbee858 --- /dev/null +++ b/fs/fscache/volume.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Volume-level cache cookie handling. + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define FSCACHE_DEBUG_LEVEL COOKIE +#include <linux/export.h> +#include <linux/slab.h> +#include "internal.h" + +#define fscache_volume_hash_shift 10 +static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift]; +static atomic_t fscache_volume_debug_id; +static LIST_HEAD(fscache_volumes); + +static void fscache_create_volume_work(struct work_struct *work); + +struct fscache_volume *fscache_get_volume(struct fscache_volume *volume, + enum fscache_volume_trace where) +{ + int ref; + + __refcount_inc(&volume->ref, &ref); + trace_fscache_volume(volume->debug_id, ref + 1, where); + return volume; +} + +static void fscache_see_volume(struct fscache_volume *volume, + enum fscache_volume_trace where) +{ + int ref = refcount_read(&volume->ref); + + trace_fscache_volume(volume->debug_id, ref, where); +} + +/* + * Pin the cache behind a volume so that we can access it. + */ +static void __fscache_begin_volume_access(struct fscache_volume *volume, + struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + int n_accesses; + + n_accesses = atomic_inc_return(&volume->n_accesses); + smp_mb__after_atomic(); + trace_fscache_access_volume(volume->debug_id, cookie ? 
cookie->debug_id : 0, + refcount_read(&volume->ref), + n_accesses, why); +} + +/** + * fscache_begin_volume_access - Pin a cache so a volume can be accessed + * @volume: The volume cookie + * @cookie: A datafile cookie for a tracing reference (or NULL) + * @why: An indication of the circumstances of the access for tracing + * + * Attempt to pin the cache to prevent it from going away whilst we're + * accessing a volume and returns true if successful. This works as follows: + * + * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE), + * then we return false to indicate access was not permitted. + * + * (2) If the cache tests as live, then we increment the volume's n_accesses + * count and then recheck the cache liveness, ending the access if it + * ceased to be live. + * + * (3) When we end the access, we decrement the volume's n_accesses and wake + * up the any waiters if it reaches 0. + * + * (4) Whilst the cache is caching, the volume's n_accesses is kept + * artificially incremented to prevent wakeups from happening. + * + * (5) When the cache is taken offline, the state is changed to prevent new + * accesses, the volume's n_accesses is decremented and we wait for it to + * become 0. + * + * The datafile @cookie and the @why indicator are merely provided for tracing + * purposes. + */ +bool fscache_begin_volume_access(struct fscache_volume *volume, + struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + if (!fscache_cache_is_live(volume->cache)) + return false; + __fscache_begin_volume_access(volume, cookie, why); + if (!fscache_cache_is_live(volume->cache)) { + fscache_end_volume_access(volume, cookie, fscache_access_unlive); + return false; + } + return true; +} + +/** + * fscache_end_volume_access - Unpin a cache at the end of an access. + * @volume: The volume cookie + * @cookie: A datafile cookie for a tracing reference (or NULL) + * @why: An indication of the circumstances of the access for tracing + * + * Unpin a cache volume after we've accessed it. The datafile @cookie and the + * @why indicator are merely provided for tracing purposes. + */ +void fscache_end_volume_access(struct fscache_volume *volume, + struct fscache_cookie *cookie, + enum fscache_access_trace why) +{ + int n_accesses; + + smp_mb__before_atomic(); + n_accesses = atomic_dec_return(&volume->n_accesses); + trace_fscache_access_volume(volume->debug_id, cookie ? 
cookie->debug_id : 0, + refcount_read(&volume->ref), + n_accesses, why); + if (n_accesses == 0) + wake_up_var(&volume->n_accesses); +} +EXPORT_SYMBOL(fscache_end_volume_access); + +static bool fscache_volume_same(const struct fscache_volume *a, + const struct fscache_volume *b) +{ + size_t klen; + + if (a->key_hash != b->key_hash || + a->cache != b->cache || + a->key[0] != b->key[0]) + return false; + + klen = round_up(a->key[0] + 1, sizeof(__le32)); + return memcmp(a->key, b->key, klen) == 0; +} + +static bool fscache_is_acquire_pending(struct fscache_volume *volume) +{ + return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags); +} + +static void fscache_wait_on_volume_collision(struct fscache_volume *candidate, + unsigned int collidee_debug_id) +{ + wait_var_event_timeout(&candidate->flags, + fscache_is_acquire_pending(candidate), 20 * HZ); + if (!fscache_is_acquire_pending(candidate)) { + pr_notice("Potential volume collision new=%08x old=%08x", + candidate->debug_id, collidee_debug_id); + fscache_stat(&fscache_n_volumes_collision); + wait_var_event(&candidate->flags, fscache_is_acquire_pending(candidate)); + } +} + +/* + * Attempt to insert the new volume into the hash. If there's a collision, we + * wait for the old volume to complete if it's being relinquished and an error + * otherwise. + */ +static bool fscache_hash_volume(struct fscache_volume *candidate) +{ + struct fscache_volume *cursor; + struct hlist_bl_head *h; + struct hlist_bl_node *p; + unsigned int bucket, collidee_debug_id = 0; + + bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1); + h = &fscache_volume_hash[bucket]; + + hlist_bl_lock(h); + hlist_bl_for_each_entry(cursor, p, h, hash_link) { + if (fscache_volume_same(candidate, cursor)) { + if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags)) + goto collision; + fscache_see_volume(cursor, fscache_volume_get_hash_collision); + set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags); + set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags); + collidee_debug_id = cursor->debug_id; + break; + } + } + + hlist_bl_add_head(&candidate->hash_link, h); + hlist_bl_unlock(h); + + if (test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags)) + fscache_wait_on_volume_collision(candidate, collidee_debug_id); + return true; + +collision: + fscache_see_volume(cursor, fscache_volume_collision); + hlist_bl_unlock(h); + return false; +} + +/* + * Allocate and initialise a volume representation cookie. + */ +static struct fscache_volume *fscache_alloc_volume(const char *volume_key, + const char *cache_name, + const void *coherency_data, + size_t coherency_len) +{ + struct fscache_volume *volume; + struct fscache_cache *cache; + size_t klen, hlen; + char *key; + + if (!coherency_data) + coherency_len = 0; + + cache = fscache_lookup_cache(cache_name, false); + if (IS_ERR(cache)) + return NULL; + + volume = kzalloc(struct_size(volume, coherency, coherency_len), + GFP_KERNEL); + if (!volume) + goto err_cache; + + volume->cache = cache; + volume->coherency_len = coherency_len; + if (coherency_data) + memcpy(volume->coherency, coherency_data, coherency_len); + INIT_LIST_HEAD(&volume->proc_link); + INIT_WORK(&volume->work, fscache_create_volume_work); + refcount_set(&volume->ref, 1); + spin_lock_init(&volume->lock); + + /* Stick the length on the front of the key and pad it out to make + * hashing easier. 
+ */ + klen = strlen(volume_key); + hlen = round_up(1 + klen + 1, sizeof(__le32)); + key = kzalloc(hlen, GFP_KERNEL); + if (!key) + goto err_vol; + key[0] = klen; + memcpy(key + 1, volume_key, klen); + + volume->key = key; + volume->key_hash = fscache_hash(0, key, hlen); + + volume->debug_id = atomic_inc_return(&fscache_volume_debug_id); + down_write(&fscache_addremove_sem); + atomic_inc(&cache->n_volumes); + list_add_tail(&volume->proc_link, &fscache_volumes); + fscache_see_volume(volume, fscache_volume_new_acquire); + fscache_stat(&fscache_n_volumes); + up_write(&fscache_addremove_sem); + _leave(" = v=%x", volume->debug_id); + return volume; + +err_vol: + kfree(volume); +err_cache: + fscache_put_cache(cache, fscache_cache_put_alloc_volume); + fscache_stat(&fscache_n_volumes_nomem); + return NULL; +} + +/* + * Create a volume's representation on disk. Have a volume ref and a cache + * access we have to release. + */ +static void fscache_create_volume_work(struct work_struct *work) +{ + const struct fscache_cache_ops *ops; + struct fscache_volume *volume = + container_of(work, struct fscache_volume, work); + + fscache_see_volume(volume, fscache_volume_see_create_work); + + ops = volume->cache->ops; + if (ops->acquire_volume) + ops->acquire_volume(volume); + fscache_end_cache_access(volume->cache, + fscache_access_acquire_volume_end); + + clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags); + wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING); + fscache_put_volume(volume, fscache_volume_put_create_work); +} + +/* + * Dispatch a worker thread to create a volume's representation on disk. + */ +void fscache_create_volume(struct fscache_volume *volume, bool wait) +{ + if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags)) + goto maybe_wait; + if (volume->cache_priv) + goto no_wait; /* We raced */ + if (!fscache_begin_cache_access(volume->cache, + fscache_access_acquire_volume)) + goto no_wait; + + fscache_get_volume(volume, fscache_volume_get_create_work); + if (!schedule_work(&volume->work)) + fscache_put_volume(volume, fscache_volume_put_create_work); + +maybe_wait: + if (wait) { + fscache_see_volume(volume, fscache_volume_wait_create_work); + wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING, + TASK_UNINTERRUPTIBLE); + } + return; +no_wait: + clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags); + wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING); +} + +/* + * Acquire a volume representation cookie and link it to a (proposed) cache. 
+ */ +struct fscache_volume *__fscache_acquire_volume(const char *volume_key, + const char *cache_name, + const void *coherency_data, + size_t coherency_len) +{ + struct fscache_volume *volume; + + volume = fscache_alloc_volume(volume_key, cache_name, + coherency_data, coherency_len); + if (!volume) + return ERR_PTR(-ENOMEM); + + if (!fscache_hash_volume(volume)) { + fscache_put_volume(volume, fscache_volume_put_hash_collision); + return ERR_PTR(-EBUSY); + } + + fscache_create_volume(volume, false); + return volume; +} +EXPORT_SYMBOL(__fscache_acquire_volume); + +static void fscache_wake_pending_volume(struct fscache_volume *volume, + struct hlist_bl_head *h) +{ + struct fscache_volume *cursor; + struct hlist_bl_node *p; + + hlist_bl_for_each_entry(cursor, p, h, hash_link) { + if (fscache_volume_same(cursor, volume)) { + fscache_see_volume(cursor, fscache_volume_see_hash_wake); + clear_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &cursor->flags); + wake_up_bit(&cursor->flags, FSCACHE_VOLUME_ACQUIRE_PENDING); + return; + } + } +} + +/* + * Remove a volume cookie from the hash table. + */ +static void fscache_unhash_volume(struct fscache_volume *volume) +{ + struct hlist_bl_head *h; + unsigned int bucket; + + bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1); + h = &fscache_volume_hash[bucket]; + + hlist_bl_lock(h); + hlist_bl_del(&volume->hash_link); + if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags)) + fscache_wake_pending_volume(volume, h); + hlist_bl_unlock(h); +} + +/* + * Drop a cache's volume attachments. + */ +static void fscache_free_volume(struct fscache_volume *volume) +{ + struct fscache_cache *cache = volume->cache; + + if (volume->cache_priv) { + __fscache_begin_volume_access(volume, NULL, + fscache_access_relinquish_volume); + if (volume->cache_priv) + cache->ops->free_volume(volume); + fscache_end_volume_access(volume, NULL, + fscache_access_relinquish_volume_end); + } + + down_write(&fscache_addremove_sem); + list_del_init(&volume->proc_link); + atomic_dec(&volume->cache->n_volumes); + up_write(&fscache_addremove_sem); + + if (!hlist_bl_unhashed(&volume->hash_link)) + fscache_unhash_volume(volume); + + trace_fscache_volume(volume->debug_id, 0, fscache_volume_free); + kfree(volume->key); + kfree(volume); + fscache_stat_d(&fscache_n_volumes); + fscache_put_cache(cache, fscache_cache_put_volume); +} + +/* + * Drop a reference to a volume cookie. + */ +void fscache_put_volume(struct fscache_volume *volume, + enum fscache_volume_trace where) +{ + if (volume) { + unsigned int debug_id = volume->debug_id; + bool zero; + int ref; + + zero = __refcount_dec_and_test(&volume->ref, &ref); + trace_fscache_volume(debug_id, ref - 1, where); + if (zero) + fscache_free_volume(volume); + } +} + +/* + * Relinquish a volume representation cookie. + */ +void __fscache_relinquish_volume(struct fscache_volume *volume, + const void *coherency_data, + bool invalidate) +{ + if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags))) + return; + + if (invalidate) { + set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags); + } else if (coherency_data) { + memcpy(volume->coherency, coherency_data, volume->coherency_len); + } + + fscache_put_volume(volume, fscache_volume_put_relinquish); +} +EXPORT_SYMBOL(__fscache_relinquish_volume); + +/** + * fscache_withdraw_volume - Withdraw a volume from being cached + * @volume: Volume cookie + * + * Withdraw a cache volume from service, waiting for all accesses to complete + * before returning. 
+ */
+void fscache_withdraw_volume(struct fscache_volume *volume)
+{
+	int n_accesses;
+
+	_debug("withdraw V=%x", volume->debug_id);
+
+	/* Allow wakeups on dec-to-0 */
+	n_accesses = atomic_dec_return(&volume->n_accesses);
+	trace_fscache_access_volume(volume->debug_id, 0,
+				    refcount_read(&volume->ref),
+				    n_accesses, fscache_access_cache_unpin);
+
+	wait_var_event(&volume->n_accesses,
+		       atomic_read(&volume->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_volume);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of volumes in /proc/fs/fscache/volumes
+ */
+static int fscache_volumes_seq_show(struct seq_file *m, void *v)
+{
+	struct fscache_volume *volume;
+
+	if (v == &fscache_volumes) {
+		seq_puts(m,
+			 "VOLUME   REF   nCOOK ACC FL CACHE           KEY\n"
+			 "======== ===== ===== === == =============== ================\n");
+		return 0;
+	}
+
+	volume = list_entry(v, struct fscache_volume, proc_link);
+	seq_printf(m,
+		   "%08x %5d %5d %3d %02lx %-15.15s %s\n",
+		   volume->debug_id,
+		   refcount_read(&volume->ref),
+		   atomic_read(&volume->n_cookies),
+		   atomic_read(&volume->n_accesses),
+		   volume->flags,
+		   volume->cache->name ?: "-",
+		   volume->key + 1);
+	return 0;
+}
+
+static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
+	__acquires(&fscache_addremove_sem)
+{
+	down_read(&fscache_addremove_sem);
+	return seq_list_start_head(&fscache_volumes, *_pos);
+}
+
+static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+	return seq_list_next(v, &fscache_volumes, _pos);
+}
+
+static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
+	__releases(&fscache_addremove_sem)
+{
+	up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_volumes_seq_ops = {
+	.start  = fscache_volumes_seq_start,
+	.next   = fscache_volumes_seq_next,
+	.stop   = fscache_volumes_seq_stop,
+	.show   = fscache_volumes_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
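
Editor's note: the sketch below is illustrative and is not part of the patch above. It shows, under stated assumptions, how a network filesystem might drive the volume API this commit introduces. The names "mynetfs", struct mynetfs_super and the volume key string are hypothetical; fscache_acquire_volume(), fscache_relinquish_volume(), fscache_begin_volume_access() and fscache_end_volume_access() are assumed to be the inline wrappers and exports that accompany fs/fscache/volume.c in this series, and fscache_access_io_read is assumed to be one of the tracing enum values the series defines.

/* Hypothetical netfs glue: acquire a volume cookie at mount time and
 * relinquish it at unmount.  Illustrative sketch only.
 */
#include <linux/err.h>
#include <linux/fscache.h>

struct mynetfs_super {			/* hypothetical per-superblock state */
	struct fscache_volume *volume;
};

static int mynetfs_begin_caching(struct mynetfs_super *sb)
{
	struct fscache_volume *volume;

	/* The key should uniquely identify the remote store and, by
	 * convention, begins with the filesystem's name.  A NULL cache name
	 * selects the default cache; no coherency data is supplied here.
	 */
	volume = fscache_acquire_volume("mynetfs,example.com,share0",
					NULL, NULL, 0);
	if (IS_ERR(volume))
		return PTR_ERR(volume);	/* e.g. -EBUSY on a key collision */
	sb->volume = volume;	/* may be NULL if caching is unavailable */
	return 0;
}

static void mynetfs_end_caching(struct mynetfs_super *sb, bool invalidate)
{
	/* Dropping the cookie unhashes the volume and detaches it from the
	 * cache; invalidate=true asks the backend to discard stored data.
	 */
	fscache_relinquish_volume(sb->volume, NULL, invalidate);
	sb->volume = NULL;
}

The access-pinning protocol documented on fscache_begin_volume_access() above would then bracket any I/O that touches cache state, for example:

	if (fscache_begin_volume_access(volume, cookie, fscache_access_io_read)) {
		/* ... safe to touch the cache here ... */
		fscache_end_volume_access(volume, cookie, fscache_access_io_read);
	} else {
		/* cache not live: fall back to reading from the server */
	}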