author     Linus Torvalds <torvalds@linux-foundation.org>  2024-03-11 12:31:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-03-11 12:31:28 -0700
commit     8ede842f669b6f78812349bbef4d1efd0fbdafce (patch)
tree       40ddd87520e029396801e7ca068f638ef3e3a2b5 /rust/kernel/sync
parent     5a2a15cd7f91c4c065a8acaa36afc9fcdcdd4dcd (diff)
parent     768409cff6cc89fe1194da880537a09857b6e4db (diff)
Merge tag 'rust-6.9' of https://github.com/Rust-for-Linux/linux
Pull Rust updates from Miguel Ojeda:
 "Another routine one in terms of features. We got two version upgrades
  this time, but in terms of lines, 'alloc' changes are not very large.

  Toolchain and infrastructure:

   - Upgrade to Rust 1.76.0

     This time around, due to how the kernel and Rust schedules have
     aligned, there are two upgrades in fact. These allow us to remove
     two more unstable features ('const_maybe_uninit_zeroed' and
     'ptr_metadata') from the list, among other improvements

   - Mark 'rustc' (and others) invocations as recursive, which fixes a
     new warning and prepares us for the future in case we eventually
     take advantage of the Make jobserver

  'kernel' crate:

   - Add the 'container_of!' macro

   - Stop using the unstable 'ptr_metadata' feature by employing the
     now stable 'byte_sub' method to implement 'Arc::from_raw()'

   - Add the 'time' module with a 'msecs_to_jiffies()' conversion
     function to begin with, to be used by Rust Binder

   - Add 'notify_sync()' and 'wait_interruptible_timeout()' methods to
     'CondVar', to be used by Rust Binder

   - Update integer types for 'CondVar'

   - Rename 'wait_list' field to 'wait_queue_head' in 'CondVar'

   - Implement 'Display' and 'Debug' for 'BStr'

   - Add the 'try_from_foreign()' method to the 'ForeignOwnable' trait

   - Add reexports for macros so that they can be used from the right
     module (in addition to the root)

   - A series of code documentation improvements, including adding
     intra-doc links, consistency improvements, typo fixes...

  'macros' crate:

   - Place generated 'init_module()' function in '.init.text'

  Documentation:

   - Add documentation on Rust doctests and how they work"

* tag 'rust-6.9' of https://github.com/Rust-for-Linux/linux: (29 commits)
  rust: upgrade to Rust 1.76.0
  kbuild: mark `rustc` (and others) invocations as recursive
  rust: add `container_of!` macro
  rust: str: implement `Display` and `Debug` for `BStr`
  rust: module: place generated init_module() function in .init.text
  rust: types: add `try_from_foreign()` method
  docs: rust: Add description of Rust documentation test as KUnit ones
  docs: rust: Move testing to a separate page
  rust: kernel: stop using ptr_metadata feature
  rust: kernel: add reexports for macros
  rust: locked_by: shorten doclink preview
  rust: kernel: remove unneeded doclink targets
  rust: kernel: add doclinks
  rust: kernel: add blank lines in front of code blocks
  rust: kernel: mark code fragments in docs with backticks
  rust: kernel: unify spelling of refcount in docs
  rust: str: move SAFETY comment in front of unsafe block
  rust: str: use `NUL` instead of 0 in doc comments
  rust: kernel: add srctree-relative doclinks
  rust: ioctl: end top-level module docs with full stop
  ...
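As a quick orientation for one of the items above, here is a sketch of the new 'container_of!' macro in use, modeled on its in-tree doctest. The `Test` struct and `check_container_of` function are illustrative; see the macro's own documentation for its exact safety requirements (if the macro version in your tree requires an `unsafe` block, wrap the call accordingly):

```rust
use kernel::container_of;

struct Test {
    a: u64,
    b: u32,
}

fn check_container_of() {
    let test = Test { a: 10, b: 20 };
    let b_ptr = &test.b;
    // The pointer really points at the `b` field of a live `Test`, which is
    // what the macro's documented requirements ask for.
    let test_alias = container_of!(b_ptr, Test, b);
    assert!(core::ptr::eq(&test, test_alias));
}
```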
Diffstat (limited to 'rust/kernel/sync')
-rw-r--r--   rust/kernel/sync/arc.rs             30
-rw-r--r--   rust/kernel/sync/condvar.rs        110
-rw-r--r--   rust/kernel/sync/lock.rs            19
-rw-r--r--   rust/kernel/sync/lock/mutex.rs       3
-rw-r--r--   rust/kernel/sync/lock/spinlock.rs    5
-rw-r--r--   rust/kernel/sync/locked_by.rs        7
6 files changed, 125 insertions, 49 deletions
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 77cdbcf7bd2e..7d4c4bf58388 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -30,7 +30,7 @@ use core::{
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
- ptr::{NonNull, Pointee},
+ ptr::NonNull,
};
use macros::pin_data;
@@ -56,7 +56,7 @@ mod std_vendor;
/// b: u32,
/// }
///
-/// // Create a ref-counted instance of `Example`.
+/// // Create a refcounted instance of `Example`.
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
///
/// // Get a new pointer to `obj` and increment the refcount.
@@ -239,22 +239,20 @@ impl<T: ?Sized> Arc<T> {
// binary, so its layout is not so large that it can trigger arithmetic overflow.
let val_offset = unsafe { refcount_layout.extend(val_layout).unwrap_unchecked().1 };
- let metadata: <T as Pointee>::Metadata = core::ptr::metadata(ptr);
- // SAFETY: The metadata of `T` and `ArcInner<T>` is the same because `ArcInner` is a struct
- // with `T` as its last field.
+ // Pointer casts leave the metadata unchanged. This is okay because the metadata of `T` and
+ // `ArcInner<T>` is the same since `ArcInner` is a struct with `T` as its last field.
//
// This is documented at:
// <https://doc.rust-lang.org/std/ptr/trait.Pointee.html>.
- let metadata: <ArcInner<T> as Pointee>::Metadata =
- unsafe { core::mem::transmute_copy(&metadata) };
+ let ptr = ptr as *const ArcInner<T>;
+
// SAFETY: The pointer is in-bounds of an allocation both before and after offsetting the
// pointer, since it originates from a previous call to `Arc::into_raw` and is still valid.
- let ptr = unsafe { (ptr as *mut u8).sub(val_offset) as *mut () };
- let ptr = core::ptr::from_raw_parts_mut(ptr, metadata);
+ let ptr = unsafe { ptr.byte_sub(val_offset) };
// SAFETY: By the safety requirements we know that `ptr` came from `Arc::into_raw`, so the
// reference count held then will be owned by the new `Arc` object.
- unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr.cast_mut())) }
}
/// Returns an [`ArcBorrow`] from the given [`Arc`].
@@ -365,12 +363,12 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// A borrowed reference to an [`Arc`] instance.
///
/// For cases when one doesn't ever need to increment the refcount on the allocation, it is simpler
-/// to use just `&T`, which we can trivially get from an `Arc<T>` instance.
+/// to use just `&T`, which we can trivially get from an [`Arc<T>`] instance.
///
/// However, when one may need to increment the refcount, it is preferable to use an `ArcBorrow<T>`
/// over `&Arc<T>` because the latter results in a double-indirection: a pointer (shared reference)
-/// to a pointer (`Arc<T>`) to the object (`T`). An [`ArcBorrow`] eliminates this double
-/// indirection while still allowing one to increment the refcount and getting an `Arc<T>` when/if
+/// to a pointer ([`Arc<T>`]) to the object (`T`). An [`ArcBorrow`] eliminates this double
+/// indirection while still allowing one to increment the refcount and getting an [`Arc<T>`] when/if
/// needed.
///
/// # Invariants
@@ -510,7 +508,7 @@ impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
/// # test().unwrap();
/// ```
///
-/// In the following example we first allocate memory for a ref-counted `Example` but we don't
+/// In the following example we first allocate memory for a refcounted `Example` but we don't
/// initialise it on allocation. We do initialise it later with a call to [`UniqueArc::write`],
/// followed by a conversion to `Arc<Example>`. This is particularly useful when allocation happens
/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic):
@@ -560,7 +558,7 @@ impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
pub fn try_new(value: T) -> Result<Self, AllocError> {
Ok(Self {
- // INVARIANT: The newly-created object has a ref-count of 1.
+ // INVARIANT: The newly-created object has a refcount of 1.
inner: Arc::try_new(value)?,
})
}
@@ -574,7 +572,7 @@ impl<T> UniqueArc<T> {
data <- init::uninit::<T, AllocError>(),
}? AllocError))?;
Ok(UniqueArc {
- // INVARIANT: The newly-created object has a ref-count of 1.
+ // INVARIANT: The newly-created object has a refcount of 1.
// SAFETY: The pointer from the `Box` is valid.
inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
})
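The `Arc::from_raw` hunk above swaps the unstable `ptr_metadata` machinery for a plain pointer cast plus the now-stable `byte_sub`. A standalone sketch of why that works, using a stand-in type of our own rather than the kernel's `ArcInner` (the names and the `u64` refcount are assumptions for illustration):

```rust
use core::alloc::Layout;

// Stand-in for `ArcInner<T>`: a refcount followed by the value.
#[repr(C)]
struct Inner<T: ?Sized> {
    refcount: u64,
    data: T,
}

/// Recovers a pointer to the containing `Inner<[u8]>` from a pointer to its
/// `data` field.
///
/// # Safety
///
/// `ptr` must point at the `data` field of a live `Inner<[u8]>`.
unsafe fn inner_from_data(ptr: *const [u8]) -> *const Inner<[u8]> {
    // Compute the offset of `data`, the same way the kernel code does.
    let refcount_layout = Layout::new::<u64>();
    // SAFETY: by this function's contract, `ptr` is valid for reads.
    let val_layout = Layout::for_value(unsafe { &*ptr });
    let val_offset = refcount_layout.extend(val_layout).unwrap().1;

    // The cast keeps the slice-length metadata; only the address still needs
    // adjusting, which `byte_sub` does without touching the metadata.
    let inner = ptr as *const Inner<[u8]>;
    // SAFETY: the result stays within the same allocation as `ptr`.
    unsafe { inner.byte_sub(val_offset) }
}
```

This is the same shape as the new `from_raw`: cast first (metadata unchanged), then walk back by the value's offset.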
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index f65e19d5a37c..0c3671caffeb 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -6,8 +6,18 @@
//! variable.
use super::{lock::Backend, lock::Guard, LockClassKey};
-use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use crate::{
+ bindings,
+ init::PinInit,
+ pin_init,
+ str::CStr,
+ task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE},
+ time::Jiffies,
+ types::Opaque,
+};
+use core::ffi::{c_int, c_long};
use core::marker::PhantomPinned;
+use core::ptr;
use macros::pin_data;
/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
@@ -17,6 +27,7 @@ macro_rules! new_condvar {
$crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_condvar;
/// A conditional variable.
///
@@ -34,8 +45,7 @@ macro_rules! new_condvar {
/// The following is an example of using a condvar with a mutex:
///
/// ```
-/// use kernel::sync::{CondVar, Mutex};
-/// use kernel::{new_condvar, new_mutex};
+/// use kernel::sync::{new_condvar, new_mutex, CondVar, Mutex};
///
/// #[pin_data]
/// pub struct Example {
@@ -73,10 +83,12 @@ macro_rules! new_condvar {
#[pin_data]
pub struct CondVar {
#[pin]
- pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+ pub(crate) wait_queue_head: Opaque<bindings::wait_queue_head>,
/// A condvar needs to be pinned because it contains a [`struct list_head`] that is
/// self-referential, so it cannot be safely moved once it is initialised.
+ ///
+ /// [`struct list_head`]: srctree/include/linux/types.h
#[pin]
_pin: PhantomPinned,
}
@@ -96,28 +108,35 @@ impl CondVar {
_pin: PhantomPinned,
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
// static lifetimes so they live indefinitely.
- wait_list <- Opaque::ffi_init(|slot| unsafe {
+ wait_queue_head <- Opaque::ffi_init(|slot| unsafe {
bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr())
}),
})
}
- fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {
+ fn wait_internal<T: ?Sized, B: Backend>(
+ &self,
+ wait_state: c_int,
+ guard: &mut Guard<'_, T, B>,
+ timeout_in_jiffies: c_long,
+ ) -> c_long {
let wait = Opaque::<bindings::wait_queue_entry>::uninit();
// SAFETY: `wait` points to valid memory.
unsafe { bindings::init_wait(wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
unsafe {
- bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _)
+ bindings::prepare_to_wait_exclusive(self.wait_queue_head.get(), wait.get(), wait_state)
};
- // SAFETY: No arguments, switches to another thread.
- guard.do_unlocked(|| unsafe { bindings::schedule() });
+ // SAFETY: Switches to another thread. The timeout can be any number.
+ let ret = guard.do_unlocked(|| unsafe { bindings::schedule_timeout(timeout_in_jiffies) });
+
+ // SAFETY: Both `wait` and `wait_queue_head` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_queue_head.get(), wait.get()) };
- // SAFETY: Both `wait` and `wait_list` point to valid memory.
- unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+ ret
}
/// Releases the lock and waits for a notification in uninterruptible mode.
@@ -127,7 +146,7 @@ impl CondVar {
/// [`CondVar::notify_one`] or [`CondVar::notify_all`]. Note that it may also wake up
/// spuriously.
pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
- self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard);
+ self.wait_internal(TASK_UNINTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
}
/// Releases the lock and waits for a notification in interruptible mode.
@@ -138,29 +157,60 @@ impl CondVar {
/// Returns whether there is a signal pending.
#[must_use = "wait_interruptible returns if a signal is pending, so the caller must check the return value"]
pub fn wait_interruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
- self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);
+ self.wait_internal(TASK_INTERRUPTIBLE, guard, MAX_SCHEDULE_TIMEOUT);
crate::current!().signal_pending()
}
- /// Calls the kernel function to notify the appropriate number of threads with the given flags.
- fn notify(&self, count: i32, flags: u32) {
- // SAFETY: `wait_list` points to valid memory.
+ /// Releases the lock and waits for a notification in interruptible mode.
+ ///
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when a timeout occurs, or when the thread receives a signal.
+ #[must_use = "wait_interruptible_timeout returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait_interruptible_timeout<T: ?Sized, B: Backend>(
+ &self,
+ guard: &mut Guard<'_, T, B>,
+ jiffies: Jiffies,
+ ) -> CondVarTimeoutResult {
+ let jiffies = jiffies.try_into().unwrap_or(MAX_SCHEDULE_TIMEOUT);
+ let res = self.wait_internal(TASK_INTERRUPTIBLE, guard, jiffies);
+
+ match (res as Jiffies, crate::current!().signal_pending()) {
+ (jiffies, true) => CondVarTimeoutResult::Signal { jiffies },
+ (0, false) => CondVarTimeoutResult::Timeout,
+ (jiffies, false) => CondVarTimeoutResult::Woken { jiffies },
+ }
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads.
+ fn notify(&self, count: c_int) {
+ // SAFETY: `wait_queue_head` points to valid memory.
unsafe {
bindings::__wake_up(
- self.wait_list.get(),
- bindings::TASK_NORMAL,
+ self.wait_queue_head.get(),
+ TASK_NORMAL,
count,
- flags as _,
+ ptr::null_mut(),
)
};
}
+ /// Calls the kernel function to notify one thread synchronously.
+ ///
+ /// This method behaves like `notify_one`, except that it hints to the scheduler that the
+ /// current thread is about to go to sleep, so it should schedule the target thread on the same
+ /// CPU.
+ pub fn notify_sync(&self) {
+ // SAFETY: `wait_queue_head` points to valid memory.
+ unsafe { bindings::__wake_up_sync(self.wait_queue_head.get(), TASK_NORMAL) };
+ }
+
/// Wakes a single waiter up, if any.
///
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_one(&self) {
- self.notify(1, 0);
+ self.notify(1);
}
/// Wakes all waiters up, if any.
@@ -168,6 +218,22 @@ impl CondVar {
/// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
/// completely (as opposed to automatically waking up the next waiter).
pub fn notify_all(&self) {
- self.notify(0, 0);
+ self.notify(0);
}
}
+
+/// The return type of `wait_timeout`.
+pub enum CondVarTimeoutResult {
+ /// The timeout was reached.
+ Timeout,
+ /// Somebody woke us up.
+ Woken {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+ /// A signal occurred.
+ Signal {
+ /// Remaining sleep duration.
+ jiffies: Jiffies,
+ },
+}
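A usage sketch for the timeout API added above. The function names are ours, and the `kernel::sync` / `kernel::time` import paths are assumed reexports; `wait_interruptible_timeout`, `CondVarTimeoutResult`, `notify_one` and `msecs_to_jiffies` are the items this series touches:

```rust
use kernel::sync::{CondVar, CondVarTimeoutResult, Mutex};
use kernel::time::msecs_to_jiffies;

/// Waits up to one second for `value` to become nonzero. Returns `false` if
/// the wait timed out or a signal arrived first.
fn wait_nonzero(value: &Mutex<u32>, changed: &CondVar) -> bool {
    let mut guard = value.lock();
    while *guard == 0 {
        match changed.wait_interruptible_timeout(&mut guard, msecs_to_jiffies(1000)) {
            // Notified: loop around and re-check the condition, since
            // wakeups may be spurious. `Woken { jiffies }` carries the
            // remaining sleep time if the caller wants to shrink the budget.
            CondVarTimeoutResult::Woken { .. } => {}
            CondVarTimeoutResult::Timeout => return false,
            CondVarTimeoutResult::Signal { .. } => return false,
        }
    }
    true
}

/// The writer side: update the value, then wake one waiter.
fn set_nonzero(value: &Mutex<u32>, changed: &CondVar, v: u32) {
    *value.lock() = v;
    changed.notify_one();
}
```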
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index f12a684bc957..5b5c8efe427a 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -21,14 +21,21 @@ pub mod spinlock;
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
-/// is owned, that is, between calls to `lock` and `unlock`.
-/// - Implementers must also ensure that `relock` uses the same locking method as the original
-/// lock operation.
+/// is owned, that is, between calls to [`lock`] and [`unlock`].
+/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
+/// lock operation.
+///
+/// [`lock`]: Backend::lock
+/// [`unlock`]: Backend::unlock
+/// [`relock`]: Backend::relock
pub unsafe trait Backend {
/// The state required by the lock.
type State;
- /// The state required to be kept between lock and unlock.
+ /// The state required to be kept between [`lock`] and [`unlock`].
+ ///
+ /// [`lock`]: Backend::lock
+ /// [`unlock`]: Backend::unlock
type GuardState;
/// Initialises the lock.
@@ -139,7 +146,7 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
- pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -147,7 +154,7 @@ impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
let _relock =
ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
- cb();
+ cb()
}
}
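The `do_unlocked` change above is what lets `wait_internal` recover the remaining-jiffies value that `schedule_timeout` returns: the callback's result is now threaded through to the caller. A standalone sketch of the pattern, with a do-nothing `Lock` stand-in of our own in place of the kernel's `Backend`:

```rust
struct Lock; // stand-in with no real locking

impl Lock {
    fn unlock(&mut self) {}
    fn relock(&mut self) {}
}

/// Runs `cb` with the lock temporarily released and forwards its return
/// value, re-taking the lock on every exit path.
fn do_unlocked<U>(lock: &mut Lock, cb: impl FnOnce() -> U) -> U {
    lock.unlock();

    // Re-take the lock when this guard is dropped, even if `cb` panics
    // (the kernel code uses `ScopeGuard` for the same purpose).
    struct Relock<'a>(&'a mut Lock);
    impl Drop for Relock<'_> {
        fn drop(&mut self) {
            self.0.relock();
        }
    }
    let _relock = Relock(lock);

    cb()
}
```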
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 8c524a3ec45a..ef4c4634d294 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -17,6 +17,7 @@ macro_rules! new_mutex {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_mutex;
/// A mutual exclusion primitive.
///
@@ -35,7 +36,7 @@ macro_rules! new_mutex {
/// contains an inner struct (`Inner`) that is protected by a mutex.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
+/// use kernel::sync::{new_mutex, Mutex};
///
/// struct Inner {
/// a: u32,
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 068535ce1b29..0b22c635634f 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -17,6 +17,7 @@ macro_rules! new_spinlock {
$inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
};
}
+pub use new_spinlock;
/// A spinlock.
///
@@ -33,7 +34,7 @@ macro_rules! new_spinlock {
/// contains an inner struct (`Inner`) that is protected by a spinlock.
///
/// ```
-/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock};
+/// use kernel::sync::{new_spinlock, SpinLock};
///
/// struct Inner {
/// a: u32,
@@ -112,7 +113,7 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
- // caller is the owner of the mutex.
+ // caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
}
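With the `pub use new_spinlock;` reexport above, the doctest no longer needs to import the macro from the crate root. The full shape of the updated example, following the in-tree doctest (assuming the usual kernel prelude supplies `pin_data`, `pin_init!` and `PinInit`):

```rust
use kernel::prelude::*; // `pin_data`, `pin_init!`, `PinInit`, ...
use kernel::sync::{new_spinlock, SpinLock};

struct Inner {
    a: u32,
    b: u32,
}

#[pin_data]
struct Example {
    c: u32,
    #[pin]
    d: SpinLock<Inner>,
}

impl Example {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            c: 10,
            // The spinlock must be pinned, so it is initialised in place.
            d <- new_spinlock!(Inner { a: 20, b: 30 }),
        })
    }
}
```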
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
index b17ee5cd98f3..babc731bd5f6 100644
--- a/rust/kernel/sync/locked_by.rs
+++ b/rust/kernel/sync/locked_by.rs
@@ -9,14 +9,17 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// Allows access to some data to be serialised by a lock that does not wrap it.
///
/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
-/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
-/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// [`Mutex`] or [`SpinLock`]. [`LockedBy`] is meant for cases when this is not possible.
+/// For example, if a container has a lock and some data in the contained elements needs
/// to be protected by the same lock.
///
/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence
/// refers to the wrong instance of the lock.
///
+/// [`Mutex`]: super::Mutex
+/// [`SpinLock`]: super::SpinLock
+///
/// # Examples
///
/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
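The example is truncated here, but the access pattern it describes can be sketched as follows; the types and field names below are ours, not the file's doctest:

```rust
use kernel::sync::{LockedBy, Mutex};

struct InnerDirectory {
    // Total across all files, protected by the directory's mutex.
    bytes_used: u64,
}

struct Directory {
    inner: Mutex<InnerDirectory>,
}

struct File {
    // Also protected by the *directory's* mutex rather than a lock of its
    // own, which is exactly the case `LockedBy` exists for.
    bytes_used: LockedBy<u64, InnerDirectory>,
}

/// Reading requires proof that the directory's lock is held: the guard
/// dereferences to `InnerDirectory`, which is the evidence `access` wants.
fn file_bytes_used(dir: &Directory, file: &File) -> u64 {
    let inner = dir.inner.lock();
    *file.bytes_used.access(&inner)
}
```

As the documentation above notes, `access` panics if the `LockedBy` was constructed against a different lock instance, so the evidence really must come from the right directory.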