-rw-r--r-- | rust/kernel/sync/lock.rs          | 29 ++++++++++++++++++++++++++++-
-rw-r--r-- | rust/kernel/sync/lock/spinlock.rs |  3 ++-
2 files changed, 30 insertions, 2 deletions
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 46e3328b67a6..802465d1fa54 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -6,7 +6,7 @@
 //! spinlocks, raw spinlocks) to be provided with minimal effort.
 
 use super::LockClassKey;
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
 use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
 use macros::pin_data;
 
@@ -22,6 +22,8 @@ pub mod spinlock;
 ///
 /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
 ///   is owned, that is, between calls to `lock` and `unlock`.
+/// - Implementers must also ensure that `relock` uses the same locking method as the original
+///   lock operation.
 pub unsafe trait Backend {
     /// The state required by the lock.
     type State;
@@ -55,6 +57,17 @@ pub unsafe trait Backend {
     ///
     /// It must only be called by the current owner of the lock.
     unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);
+
+    /// Reacquires the lock, making the caller its owner.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
+    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
+    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
+        // SAFETY: The safety requirements ensure that the lock is initialised.
+        *guard_state = unsafe { Self::lock(ptr) };
+    }
 }
 
 /// A mutual exclusion primitive.
@@ -126,6 +139,20 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
 // SAFETY: `Guard` is sync when the data protected by the lock is also sync.
 unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
 
+impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
+    #[allow(dead_code)]
+    pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+        // SAFETY: The caller owns the lock, so it is safe to unlock it.
+        unsafe { B::unlock(self.lock.state.get(), &self.state) };
+
+        // SAFETY: The lock was just unlocked above and is being relocked now.
+        let _relock =
+            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+
+        cb();
+    }
+}
+
 impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
     type Target = T;
 
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index a52d20fc9755..979b56464a4e 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -87,7 +87,8 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
 /// A kernel `spinlock_t` lock backend.
 pub struct SpinLockBackend;
 
-// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion.
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
+// default implementation that always calls the same locking method.
 unsafe impl super::Backend for SpinLockBackend {
     type State = bindings::spinlock_t;
     type GuardState = ();
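The new `do_unlocked` method is crate-private and marked `#[allow(dead_code)]`, so nothing in this patch calls it yet: it is groundwork for users that need to run code (for example, sleeping in a condition-variable-style primitive) with the lock temporarily released and then reliably reacquired. Below is a minimal userspace sketch of the same pattern, assuming only the Rust standard library; `ToySpinLock`, `Relock`, and this `do_unlocked` are illustrative inventions that mirror the structure of the kernel code, not kernel API.

use std::sync::atomic::{AtomicBool, Ordering};

struct ToySpinLock {
    locked: AtomicBool,
}

impl ToySpinLock {
    const fn new() -> Self {
        Self { locked: AtomicBool::new(false) }
    }

    fn lock(&self) {
        // Spin until we flip `locked` from false to true.
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            std::hint::spin_loop();
        }
    }

    fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }

    // Analogue of the patch's `Guard::do_unlocked`: release the lock, run
    // `cb` without it held, then reacquire before returning.
    fn do_unlocked(&self, cb: impl FnOnce()) {
        self.unlock();

        // Drop guard standing in for the kernel's `ScopeGuard`: the lock is
        // reacquired even if `cb` unwinds.
        struct Relock<'a>(&'a ToySpinLock);
        impl Drop for Relock<'_> {
            fn drop(&mut self) {
                // Relock with the same locking method as the original
                // acquisition, as the new `Backend` requirement demands.
                self.0.lock();
            }
        }
        let _relock = Relock(self);

        cb();
    }
}

fn main() {
    let lock = ToySpinLock::new();
    lock.lock();
    lock.do_unlocked(|| println!("running with the lock released"));
    lock.unlock();
}

The `Relock` drop guard plays the role `ScopeGuard` plays in the patch: reacquisition happens on scope exit, on both the normal and the unwinding path. The kernel version reacquires through `Backend::relock`, whose provided default simply calls `Backend::lock` again; that is why the updated SAFETY comment on `SpinLockBackend` can justify relying on the default implementation.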