author	Kent Overstreet <kent.overstreet@linux.dev>	2024-06-29 21:02:17 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2024-07-01 22:56:28 -0400
commit	a2d23f3d916bf9abd77944882cba131af1085bcc (patch)
tree	0aa90255a9354617ddf44bb6ab828210ea283253 /fs
parent	b5cbb42dc59f519fa3cf49b9afbd5ee4805be01b (diff)
bcachefs: io clock: run timer fns under clock lock
We don't have a way to flush a timer that's executing the callback, and this
is simple and limited enough in scope that we can just use the lock instead.

Needed for the next patch that adds direct wakeups from the allocator to
copygc, where we're now more frequently calling io_timer_del() on an expiring
timer.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
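The invariant this buys, sketched below: because timer->fn() is now invoked with clock->timer_lock held (see the __bch2_increment_clock() hunk), any path that takes the same lock, such as a delete, cannot return while a callback is still mid-flight. The function below is only an illustrative sketch, not the actual bcachefs del implementation; the heap removal is elided.

/*
 * Sketch only: illustrates why holding timer_lock over timer->fn() lets a
 * deleter synchronize against a running callback. Not the bcachefs code.
 */
static void io_timer_del_sketch(struct io_clock *clock, struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);
	/*
	 * Remove @timer from clock->timers here if it is still queued
	 * (heap walk elided). If it already expired and its callback was
	 * running in __bch2_increment_clock(), that callback finished
	 * before we could take the lock, so once we return the caller may
	 * safely free or re-arm @timer.
	 */
	spin_unlock(&clock->timer_lock);
}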
Diffstat (limited to 'fs')
-rw-r--r--	fs/bcachefs/clock.c	7
1 file changed, 2 insertions, 5 deletions
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 363644451106..0f40b585ce2b 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -132,14 +132,9 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
 {
 	struct io_timer *ret = NULL;
 
-	spin_lock(&clock->timer_lock);
-
 	if (clock->timers.used &&
 	    time_after_eq(now, clock->timers.data[0]->expire))
 		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
-
-	spin_unlock(&clock->timer_lock);
-
 	return ret;
 }
 
@@ -148,8 +143,10 @@ void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
 	struct io_timer *timer;
 	unsigned long now = atomic64_add_return(sectors, &clock->now);
 
+	spin_lock(&clock->timer_lock);
 	while ((timer = get_expired_timer(clock, now)))
 		timer->fn(timer);
+	spin_unlock(&clock->timer_lock);
 }
 
 void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)