Diffstat (limited to 'rust/kernel/sync')
-rw-r--r--  rust/kernel/sync/arc.rs             |  31
-rw-r--r--  rust/kernel/sync/arc/std_vendor.rs  |   2
-rw-r--r--  rust/kernel/sync/condvar.rs         |   7
-rw-r--r--  rust/kernel/sync/lock.rs            |  40
-rw-r--r--  rust/kernel/sync/lock/global.rs     | 301
-rw-r--r--  rust/kernel/sync/lock/mutex.rs      |  15
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs   |  15
-rw-r--r--  rust/kernel/sync/locked_by.rs       |   2
-rw-r--r--  rust/kernel/sync/poll.rs            | 121
9 files changed, 498 insertions, 36 deletions
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3021f30fd822..fa4509406ee9 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -17,13 +17,12 @@
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
- alloc::{box_ext::BoxExt, AllocError, Flags},
+ alloc::{AllocError, Flags, KBox},
bindings,
init::{self, InPlaceInit, Init, PinInit},
try_init,
types::{ForeignOwnable, Opaque},
};
-use alloc::boxed::Box;
use core::{
alloc::Layout,
fmt,
@@ -171,9 +170,6 @@ impl<T: ?Sized> ArcInner<T> {
}
}
-// This is to allow [`Arc`] (and variants) to be used as the type of `self`.
-impl<T: ?Sized> core::ops::Receiver for Arc<T> {}
-
// This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the
// dynamically-sized type (DST) `U`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
@@ -204,11 +200,11 @@ impl<T> Arc<T> {
data: contents,
};
- let inner = <Box<_> as BoxExt<_>>::new(value, flags)?;
+ let inner = KBox::new(value, flags)?;
// SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
// `Arc` object.
- Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
+ Ok(unsafe { Self::from_inner(KBox::leak(inner).into()) })
}
}
@@ -336,12 +332,12 @@ impl<T: ?Sized> Arc<T> {
impl<T: 'static> ForeignOwnable for Arc<T> {
type Borrowed<'a> = ArcBorrow<'a, T>;
- fn into_foreign(self) -> *const core::ffi::c_void {
+ fn into_foreign(self) -> *const crate::ffi::c_void {
ManuallyDrop::new(self).ptr.as_ptr() as _
}
- unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> {
- // SAFETY: By the safety requirement of this function, we know that `ptr` came from
+ unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> ArcBorrow<'a, T> {
+ // By the safety requirement of this function, we know that `ptr` came from
// a previous call to `Arc::into_foreign`.
let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap();
@@ -350,7 +346,7 @@ impl<T: 'static> ForeignOwnable for Arc<T> {
unsafe { ArcBorrow::new(inner) }
}
- unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self {
// SAFETY: By the safety requirement of this function, we know that `ptr` came from
// a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
// holds a reference count increment that is transferrable to us.
@@ -401,8 +397,8 @@ impl<T: ?Sized> Drop for Arc<T> {
if is_zero {
// The count reached zero, we must free the memory.
//
- // SAFETY: The pointer was initialised from the result of `Box::leak`.
- unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
+ // SAFETY: The pointer was initialised from the result of `KBox::leak`.
+ unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) };
}
}
}
@@ -480,9 +476,6 @@ pub struct ArcBorrow<'a, T: ?Sized + 'a> {
_p: PhantomData<&'a ()>,
}
-// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`.
-impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {}
-
// This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into
// `ArcBorrow<U>`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>>
@@ -647,7 +640,7 @@ impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
- let inner = Box::try_init::<AllocError>(
+ let inner = KBox::try_init::<AllocError>(
try_init!(ArcInner {
// SAFETY: There are no safety requirements for this FFI call.
refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
@@ -657,8 +650,8 @@ impl<T> UniqueArc<T> {
)?;
Ok(UniqueArc {
// INVARIANT: The newly-created object has a refcount of 1.
- // SAFETY: The pointer from the `Box` is valid.
- inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
+ // SAFETY: The pointer from the `KBox` is valid.
+ inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) },
})
}
}
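The arc.rs changes above replace `alloc::boxed::Box` (plus the `BoxExt` extension trait) with the kernel's own `KBox`, so every allocation takes explicit flags. A minimal usage sketch, not part of the patch, of the resulting `Arc` API:

```rust
use kernel::prelude::*;
use kernel::sync::Arc;

/// Allocates a reference-counted value; the allocation flags are passed
/// explicitly, matching the `KBox::new(value, flags)` call in the patch.
fn make_shared() -> Result<Arc<u32>> {
    // `GFP_KERNEL` may sleep; atomic contexts would pass `GFP_ATOMIC`.
    let shared = Arc::new(42, GFP_KERNEL)?;
    Ok(shared)
}
```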
diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs
index a66a0c2831b3..11b3f4ecca5f 100644
--- a/rust/kernel/sync/arc/std_vendor.rs
+++ b/rust/kernel/sync/arc/std_vendor.rs
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+//! Rust standard library vendored code.
+//!
//! The contents of this file come from the Rust standard library, hosted in
//! the <https://github.com/rust-lang/rust> repository, licensed under
//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index 2b306afbe56d..7df565038d7d 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -7,6 +7,7 @@
use super::{lock::Backend, lock::Guard, LockClassKey};
use crate::{
+ ffi::{c_int, c_long},
init::PinInit,
pin_init,
str::CStr,
@@ -14,7 +15,6 @@ use crate::{
time::Jiffies,
types::Opaque,
};
-use core::ffi::{c_int, c_long};
use core::marker::PhantomPinned;
use core::ptr;
use macros::pin_data;
@@ -70,8 +70,8 @@ pub use new_condvar;
/// }
///
/// /// Allocates a new boxed `Example`.
-/// fn new_example() -> Result<Pin<Box<Example>>> {
-/// Box::pin_init(pin_init!(Example {
+/// fn new_example() -> Result<Pin<KBox<Example>>> {
+/// KBox::pin_init(pin_init!(Example {
/// value <- new_mutex!(0),
/// value_changed <- new_condvar!(),
/// }), GFP_KERNEL)
@@ -93,7 +93,6 @@ pub struct CondVar {
}
// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
-#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for CondVar {}
// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
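The doc example this hunk updates boils down to the classic wait/notify pattern. A condensed, self-contained sketch using the `KBox`-era APIs (identifiers mine):

```rust
use kernel::prelude::*;
use kernel::sync::{CondVar, Mutex};

#[pin_data]
struct Example {
    #[pin]
    value: Mutex<u32>,
    #[pin]
    value_changed: CondVar,
}

/// Sleeps until `value` becomes non-zero.
fn wait_for_value(e: &Example) -> u32 {
    let mut guard = e.value.lock();
    while *guard == 0 {
        // Releases the mutex while sleeping, reacquires it on wakeup.
        e.value_changed.wait(&mut guard);
    }
    *guard
}

/// Updates `value` and wakes all waiters.
fn set_value(e: &Example, v: u32) {
    *e.value.lock() = v;
    e.value_changed.notify_all();
}
```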
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index f6c34ca4d819..41dcddac69e2 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -6,13 +6,21 @@
//! spinlocks, raw spinlocks) to be provided with minimal effort.
use super::LockClassKey;
-use crate::{init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
-use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
+use crate::{
+ init::PinInit,
+ pin_init,
+ str::CStr,
+ types::{NotThreadSafe, Opaque, ScopeGuard},
+};
+use core::{cell::UnsafeCell, marker::PhantomPinned};
use macros::pin_data;
pub mod mutex;
pub mod spinlock;
+pub(super) mod global;
+pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
+
/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
@@ -46,7 +54,7 @@ pub unsafe trait Backend {
/// remain valid for read indefinitely.
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
);
@@ -58,6 +66,13 @@ pub unsafe trait Backend {
#[must_use]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+ /// Tries to acquire the lock.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;
+
/// Releases the lock, giving up its ownership.
///
/// # Safety
@@ -128,6 +143,15 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
// SAFETY: The lock was just acquired.
unsafe { Guard::new(self, state) }
}
+
+ /// Tries to acquire the lock.
+ ///
+ /// Returns a guard that can be used to access the data protected by the lock if successful.
+ pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
+ // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
+ // that `init` was called.
+ unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
+ }
}
/// A lock guard.
@@ -139,7 +163,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
pub struct Guard<'a, T: ?Sized, B: Backend> {
pub(crate) lock: &'a Lock<T, B>,
pub(crate) state: B::GuardState,
- _not_send: PhantomData<*mut ()>,
+ _not_send: NotThreadSafe,
}
// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
@@ -150,9 +174,9 @@ impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
- // SAFETY: The lock was just unlocked above and is being relocked now.
- let _relock =
- ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+ let _relock = ScopeGuard::new(||
+ // SAFETY: The lock was just unlocked above and is being relocked now.
+ unsafe { B::relock(self.lock.state.get(), &mut self.state) });
cb()
}
@@ -191,7 +215,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
Self {
lock,
state,
- _not_send: PhantomData,
+ _not_send: NotThreadSafe,
}
}
}
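The new `Backend::try_lock` surfaces as `Lock::try_lock`, which returns `None` instead of blocking. A short sketch of the non-blocking path (function name mine):

```rust
use kernel::sync::Mutex;

/// Increments the counter only if the mutex is free right now;
/// returns `false` instead of sleeping when it is contended.
fn try_bump(counter: &Mutex<u32>) -> bool {
    match counter.try_lock() {
        Some(mut guard) => {
            *guard += 1;
            true
        }
        None => false,
    }
}
```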
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
new file mode 100644
index 000000000000..480ee724e3cc
--- /dev/null
+++ b/rust/kernel/sync/lock/global.rs
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Support for defining statics containing locks.
+
+use crate::{
+ str::CStr,
+ sync::lock::{Backend, Guard, Lock},
+ sync::{LockClassKey, LockedBy},
+ types::Opaque,
+};
+use core::{
+ cell::UnsafeCell,
+ marker::{PhantomData, PhantomPinned},
+};
+
+/// Trait implemented for marker types for global locks.
+///
+/// See [`global_lock!`] for examples.
+pub trait GlobalLockBackend {
+ /// The name for this global lock.
+ const NAME: &'static CStr;
+ /// Item type stored in this global lock.
+ type Item: 'static;
+ /// The backend used for this global lock.
+ type Backend: Backend + 'static;
+ /// The class for this global lock.
+ fn get_lock_class() -> &'static LockClassKey;
+}
+
+/// Type used for global locks.
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalLock<B: GlobalLockBackend> {
+ inner: Lock<B::Item, B::Backend>,
+}
+
+impl<B: GlobalLockBackend> GlobalLock<B> {
+ /// Creates a global lock.
+ ///
+ /// # Safety
+ ///
+ /// * Before any other method on this lock is called, [`Self::init`] must be called.
+ /// * The type `B` must not be used with any other lock.
+ pub const unsafe fn new(data: B::Item) -> Self {
+ Self {
+ inner: Lock {
+ state: Opaque::uninit(),
+ data: UnsafeCell::new(data),
+ _pin: PhantomPinned,
+ },
+ }
+ }
+
+ /// Initializes a global lock.
+ ///
+ /// # Safety
+ ///
+ /// Must not be called more than once on a given lock.
+ pub unsafe fn init(&'static self) {
+ // SAFETY: The pointer to `state` is valid for the duration of this call, and both `name`
+ // and `key` are valid indefinitely. The `state` is pinned since we have a `'static`
+ // reference to `self`.
+ //
+ // We have exclusive access to the `state` since the caller of `new` promised to call
+ // `init` before using any other methods. As `init` can only be called once, all other
+ // uses of this lock must happen after this call.
+ unsafe {
+ B::Backend::init(
+ self.inner.state.get(),
+ B::NAME.as_char_ptr(),
+ B::get_lock_class().as_ptr(),
+ )
+ }
+ }
+
+ /// Lock this global lock.
+ pub fn lock(&'static self) -> GlobalGuard<B> {
+ GlobalGuard {
+ inner: self.inner.lock(),
+ }
+ }
+
+ /// Try to lock this global lock.
+ pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
+ Some(GlobalGuard {
+ inner: self.inner.try_lock()?,
+ })
+ }
+}
+
+/// A guard for a [`GlobalLock`].
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalGuard<B: GlobalLockBackend> {
+ inner: Guard<'static, B::Item, B::Backend>,
+}
+
+impl<B: GlobalLockBackend> core::ops::Deref for GlobalGuard<B> {
+ type Target = B::Item;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+/// A version of [`LockedBy`] for a [`GlobalLock`].
+///
+/// See [`global_lock!`] for examples.
+pub struct GlobalLockedBy<T: ?Sized, B: GlobalLockBackend> {
+ _backend: PhantomData<B>,
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`.
+unsafe impl<T, B> Send for GlobalLockedBy<T, B>
+where
+ T: ?Sized,
+ B: GlobalLockBackend,
+ LockedBy<T, B::Item>: Send,
+{
+}
+
+// SAFETY: The same thread-safety rules as `LockedBy` apply to `GlobalLockedBy`.
+unsafe impl<T, B> Sync for GlobalLockedBy<T, B>
+where
+ T: ?Sized,
+ B: GlobalLockBackend,
+ LockedBy<T, B::Item>: Sync,
+{
+}
+
+impl<T, B: GlobalLockBackend> GlobalLockedBy<T, B> {
+ /// Create a new [`GlobalLockedBy`].
+ ///
+ /// The provided value will be protected by the global lock indicated by `B`.
+ pub fn new(val: T) -> Self {
+ Self {
+ value: UnsafeCell::new(val),
+ _backend: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized, B: GlobalLockBackend> GlobalLockedBy<T, B> {
+ /// Access the value immutably.
+ ///
+ /// The caller must prove shared access to the lock.
+ pub fn as_ref<'a>(&'a self, _guard: &'a GlobalGuard<B>) -> &'a T {
+ // SAFETY: The lock is globally unique, so there can only be one guard.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Access the value mutably.
+ ///
+ /// The caller must prove exclusive access to the lock.
+ pub fn as_mut<'a>(&'a self, _guard: &'a mut GlobalGuard<B>) -> &'a mut T {
+ // SAFETY: The lock is globally unique, so there can only be one guard.
+ unsafe { &mut *self.value.get() }
+ }
+
+ /// Access the value mutably directly.
+ ///
+ /// The caller has exclusive access to this `GlobalLockedBy`, so they do not need to hold the
+ /// lock.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+}
+
+/// Defines a global lock.
+///
+/// The global lock must be initialized before first use. Usually this is done by calling
+/// [`GlobalLock::init`] in the module initializer.
+///
+/// # Examples
+///
+/// A global counter:
+///
+/// ```
+/// # mod ex {
+/// # use kernel::prelude::*;
+/// kernel::sync::global_lock! {
+/// // SAFETY: Initialized in module initializer before first use.
+/// unsafe(uninit) static MY_COUNTER: Mutex<u32> = 0;
+/// }
+///
+/// fn increment_counter() -> u32 {
+/// let mut guard = MY_COUNTER.lock();
+/// *guard += 1;
+/// *guard
+/// }
+///
+/// impl kernel::Module for MyModule {
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
+/// // SAFETY: Called exactly once.
+/// unsafe { MY_COUNTER.init() };
+///
+/// Ok(MyModule {})
+/// }
+/// }
+/// # struct MyModule {}
+/// # }
+/// ```
+///
+/// A global mutex used to protect all instances of a given struct:
+///
+/// ```
+/// # mod ex {
+/// # use kernel::prelude::*;
+/// use kernel::sync::{GlobalGuard, GlobalLockedBy};
+///
+/// kernel::sync::global_lock! {
+/// // SAFETY: Initialized in module initializer before first use.
+/// unsafe(uninit) static MY_MUTEX: Mutex<()> = ();
+/// }
+///
+/// /// All instances of this struct are protected by `MY_MUTEX`.
+/// struct MyStruct {
+/// my_counter: GlobalLockedBy<u32, MY_MUTEX>,
+/// }
+///
+/// impl MyStruct {
+/// /// Increment the counter in this instance.
+/// ///
+/// /// The caller must hold the `MY_MUTEX` mutex.
+/// fn increment(&self, guard: &mut GlobalGuard<MY_MUTEX>) -> u32 {
+/// let my_counter = self.my_counter.as_mut(guard);
+/// *my_counter += 1;
+/// *my_counter
+/// }
+/// }
+///
+/// impl kernel::Module for MyModule {
+/// fn init(_module: &'static ThisModule) -> Result<Self> {
+/// // SAFETY: Called exactly once.
+/// unsafe { MY_MUTEX.init() };
+///
+/// Ok(MyModule {})
+/// }
+/// }
+/// # struct MyModule {}
+/// # }
+/// ```
+#[macro_export]
+macro_rules! global_lock {
+ {
+ $(#[$meta:meta])* $pub:vis
+ unsafe(uninit) static $name:ident: $kind:ident<$valuety:ty> = $value:expr;
+ } => {
+ #[doc = ::core::concat!(
+ "Backend type used by [`",
+ ::core::stringify!($name),
+ "`](static@",
+ ::core::stringify!($name),
+ ")."
+ )]
+ #[allow(non_camel_case_types, unreachable_pub)]
+ $pub enum $name {}
+
+ impl $crate::sync::lock::GlobalLockBackend for $name {
+ const NAME: &'static $crate::str::CStr = $crate::c_str!(::core::stringify!($name));
+ type Item = $valuety;
+ type Backend = $crate::global_lock_inner!(backend $kind);
+
+ fn get_lock_class() -> &'static $crate::sync::LockClassKey {
+ $crate::static_lock_class!()
+ }
+ }
+
+ $(#[$meta])*
+ $pub static $name: $crate::sync::lock::GlobalLock<$name> = {
+ // Defined here to be outside the unsafe scope.
+ let init: $valuety = $value;
+
+ // SAFETY:
+ // * The user of this macro promises to initialize the lock before use.
+ // * We are only generating one static with this backend type.
+ unsafe { $crate::sync::lock::GlobalLock::new(init) }
+ };
+ };
+}
+pub use global_lock;
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! global_lock_inner {
+ (backend Mutex) => {
+ $crate::sync::lock::mutex::MutexBackend
+ };
+ (backend SpinLock) => {
+ $crate::sync::lock::spinlock::SpinLockBackend
+ };
+}
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 30632070ee67..0e946ebefce1 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -58,7 +58,7 @@ pub use new_mutex;
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
+/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -96,7 +96,7 @@ unsafe impl super::Backend for MutexBackend {
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
) {
// SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
@@ -115,4 +115,15 @@ unsafe impl super::Backend for MutexBackend {
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ let result = unsafe { bindings::mutex_trylock(ptr) };
+
+ if result != 0 {
+ Some(())
+ } else {
+ None
+ }
+ }
}
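`mutex_trylock` returns nonzero when the mutex was acquired, which the new backend method maps to `Option<()>`. As a standalone style note (not part of the patch), the `if`/`else` is equivalent to this more compact mapping:

```rust
/// Maps a C-style "nonzero means success" return to `Option<()>`,
/// as the `try_lock` backend above does with `mutex_trylock`.
fn trylock_to_option(result: i32) -> Option<()> {
    (result != 0).then_some(())
}
```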
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index ea5c5bc1ce12..9f4d128bed98 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -56,7 +56,7 @@ pub use new_spinlock;
/// }
///
/// // Allocate a boxed `Example`.
-/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?;
+/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?;
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
@@ -95,7 +95,7 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe fn init(
ptr: *mut Self::State,
- name: *const core::ffi::c_char,
+ name: *const crate::ffi::c_char,
key: *mut bindings::lock_class_key,
) {
// SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
@@ -114,4 +114,15 @@ unsafe impl super::Backend for SpinLockBackend {
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+
+ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
+ // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+ let result = unsafe { bindings::spin_trylock(ptr) };
+
+ if result != 0 {
+ Some(())
+ } else {
+ None
+ }
+ }
}
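The spinlock backend mirrors the mutex one; `spin_trylock` returns immediately rather than spinning. A usage sketch (identifiers mine) for a path that must not busy-wait:

```rust
use kernel::sync::SpinLock;

/// Reads the flag only if the spinlock can be taken without waiting.
fn peek(flag: &SpinLock<bool>) -> Option<bool> {
    let guard = flag.try_lock()?;
    Some(*guard)
}
```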
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
index ce2ee8d87865..a7b244675c2b 100644
--- a/rust/kernel/sync/locked_by.rs
+++ b/rust/kernel/sync/locked_by.rs
@@ -43,7 +43,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr};
/// struct InnerDirectory {
/// /// The sum of the bytes used by all files.
/// bytes_used: u64,
-/// _files: Vec<File>,
+/// _files: KVec<File>,
/// }
///
/// struct Directory {
diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs
new file mode 100644
index 000000000000..d5f17153b424
--- /dev/null
+++ b/rust/kernel/sync/poll.rs
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Utilities for working with `struct poll_table`.
+
+use crate::{
+ bindings,
+ fs::File,
+ prelude::*,
+ sync::{CondVar, LockClassKey},
+ types::Opaque,
+};
+use core::ops::Deref;
+
+/// Creates a [`PollCondVar`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_poll_condvar {
+ ($($name:literal)?) => {
+ $crate::sync::poll::PollCondVar::new(
+ $crate::optional_name!($($name)?), $crate::static_lock_class!()
+ )
+ };
+}
+
+/// Wraps the kernel's `struct poll_table`.
+///
+/// # Invariants
+///
+/// This struct contains a valid `struct poll_table`.
+///
+/// For a `struct poll_table` to be valid, its `_qproc` function must follow the safety
+/// requirements of `_qproc` functions:
+///
+/// * The `_qproc` function is given permission to enqueue a waiter to the provided `poll_table`
+/// during the call. Once the waiter is removed and an rcu grace period has passed, it must no
+/// longer access the `wait_queue_head`.
+#[repr(transparent)]
+pub struct PollTable(Opaque<bindings::poll_table>);
+
+impl PollTable {
+ /// Creates a reference to a [`PollTable`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that for the duration of 'a, the pointer will point at a valid poll
+ /// table (as defined in the type invariants).
+ ///
+ /// The caller must also ensure that the `poll_table` is only accessed via the returned
+ /// reference for the duration of 'a.
+ pub unsafe fn from_ptr<'a>(ptr: *mut bindings::poll_table) -> &'a mut PollTable {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `PollTable` type being transparent makes the cast ok.
+ unsafe { &mut *ptr.cast() }
+ }
+
+ fn get_qproc(&self) -> bindings::poll_queue_proc {
+ let ptr = self.0.get();
+ // SAFETY: The `ptr` is valid because it originates from a reference, and the `_qproc`
+ // field is not modified concurrently with this call since we have an immutable reference.
+ unsafe { (*ptr)._qproc }
+ }
+
+ /// Register this [`PollTable`] with the provided [`PollCondVar`], so that it can be notified
+ /// using the condition variable.
+ pub fn register_wait(&mut self, file: &File, cv: &PollCondVar) {
+ if let Some(qproc) = self.get_qproc() {
+ // SAFETY: The pointers to `file` and `self` need to be valid for the duration of this
+ // call to `qproc`, which they are because they are references.
+ //
+ // The `cv.wait_queue_head` pointer must be valid until an rcu grace period after the
+ // waiter is removed. The `PollCondVar` is pinned, so before `cv.wait_queue_head` can
+ // be destroyed, the destructor must run. That destructor first removes all waiters,
+ // and then waits for an rcu grace period. Therefore, `cv.wait_queue_head` is valid for
+ // long enough.
+ unsafe { qproc(file.as_ptr() as _, cv.wait_queue_head.get(), self.0.get()) };
+ }
+ }
+}
+
+/// A wrapper around [`CondVar`] that makes it usable with [`PollTable`].
+///
+/// [`CondVar`]: crate::sync::CondVar
+#[pin_data(PinnedDrop)]
+pub struct PollCondVar {
+ #[pin]
+ inner: CondVar,
+}
+
+impl PollCondVar {
+ /// Constructs a new condvar initialiser.
+ pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner <- CondVar::new(name, key),
+ })
+ }
+}
+
+// Make the `CondVar` methods callable on `PollCondVar`.
+impl Deref for PollCondVar {
+ type Target = CondVar;
+
+ fn deref(&self) -> &CondVar {
+ &self.inner
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for PollCondVar {
+ fn drop(self: Pin<&mut Self>) {
+ // Clear anything registered using `register_wait`.
+ //
+ // SAFETY: The pointer points at a valid `wait_queue_head`.
+ unsafe { bindings::__wake_up_pollfree(self.inner.wait_queue_head.get()) };
+
+ // Wait for epoll items to be properly removed.
+ //
+ // SAFETY: Just an FFI call.
+ unsafe { bindings::synchronize_rcu() };
+ }
+}