// tokio/sync/rwlock.rs
1use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
2use crate::sync::mutex::TryLockError;
3#[cfg(all(tokio_unstable, feature = "tracing"))]
4use crate::util::trace;
5use std::cell::UnsafeCell;
6use std::marker;
7use std::marker::PhantomData;
8use std::sync::Arc;
9
10pub(crate) mod owned_read_guard;
11pub(crate) mod owned_write_guard;
12pub(crate) mod owned_write_guard_mapped;
13pub(crate) mod read_guard;
14pub(crate) mod write_guard;
15pub(crate) mod write_guard_mapped;
16pub(crate) use owned_read_guard::OwnedRwLockReadGuard;
17pub(crate) use owned_write_guard::OwnedRwLockWriteGuard;
18pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
19pub(crate) use read_guard::RwLockReadGuard;
20pub(crate) use write_guard::RwLockWriteGuard;
21pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;
22
// Maximum number of concurrent readers; this is also the number of semaphore
// permits a writer must acquire to gain exclusive access. The top bits of the
// permit count are reserved by the batch semaphore, hence `>> 3`.
#[cfg(not(loom))]
const MAX_READS: u32 = u32::MAX >> 3;

// Under loom, keep the permit count tiny so model checking stays tractable.
#[cfg(loom)]
const MAX_READS: u32 = 10;
28
29/// An asynchronous reader-writer lock.
30///
31/// This type of lock allows a number of readers or at most one writer at any
32/// point in time. The write portion of this lock typically allows modification
33/// of the underlying data (exclusive access) and the read portion of this lock
34/// typically allows for read-only access (shared access).
35///
36/// In comparison, a [`Mutex`] does not distinguish between readers or writers
37/// that acquire the lock, therefore causing any tasks waiting for the lock to
38/// become available to yield. An `RwLock` will allow any number of readers to
39/// acquire the lock as long as a writer is not holding the lock.
40///
41/// The priority policy of Tokio's read-write lock is _fair_ (or
42/// [_write-preferring_]), in order to ensure that readers cannot starve
43/// writers. Fairness is ensured using a first-in, first-out queue for the tasks
44/// awaiting the lock; if a task that wishes to acquire the write lock is at the
45/// head of the queue, read locks will not be given out until the write lock has
46/// been released. This is in contrast to the Rust standard library's
47/// `std::sync::RwLock`, where the priority policy is dependent on the
48/// operating system's implementation.
49///
50/// The type parameter `T` represents the data that this lock protects. It is
51/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards
52/// returned from the locking methods implement [`Deref`](trait@std::ops::Deref)
53/// (and [`DerefMut`](trait@std::ops::DerefMut)
54/// for the `write` methods) to allow access to the content of the lock.
55///
56/// # Examples
57///
58/// ```
59/// use tokio::sync::RwLock;
60///
61/// # #[tokio::main(flavor = "current_thread")]
62/// # async fn main() {
63/// let lock = RwLock::new(5);
64///
65/// // many reader locks can be held at once
66/// {
67/// let r1 = lock.read().await;
68/// let r2 = lock.read().await;
69/// assert_eq!(*r1, 5);
70/// assert_eq!(*r2, 5);
71/// } // read locks are dropped at this point
72///
73/// // only one write lock may be held, however
74/// {
75/// let mut w = lock.write().await;
76/// *w += 1;
77/// assert_eq!(*w, 6);
78/// } // write lock is dropped here
79/// # }
80/// ```
81///
82/// [`Mutex`]: struct@super::Mutex
83/// [`RwLock`]: struct@RwLock
84/// [`RwLockReadGuard`]: struct@RwLockReadGuard
85/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
86/// [`Send`]: trait@std::marker::Send
87/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
pub struct RwLock<T: ?Sized> {
    // Span used to instrument this lock when the unstable `tracing` support
    // is enabled (e.g. for `tokio-console`).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,

    // maximum number of concurrent readers; a writer acquires all `mr`
    // permits at once to obtain exclusive access
    mr: u32,

    // semaphore to coordinate read and write access to T: each reader holds
    // one permit, a writer holds all `mr` permits
    s: Semaphore,

    // inner data T, protected by the permits handed out by `s`
    c: UnsafeCell<T>,
}
101
#[test]
#[cfg(not(loom))]
fn bounds() {
    // Compile-time assertions that the lock and all of its guard types
    // implement the expected auto traits.
    fn assert_send<T: Send>() {}
    fn assert_sync<T: Sync>() {}
    fn assert_unpin<T: Unpin>() {}
    // This has to take a value, since the async fn's return type is unnameable.
    fn assert_send_sync_val<T: Send + Sync>(_t: T) {}

    assert_send::<RwLock<u32>>();
    assert_sync::<RwLock<u32>>();
    assert_unpin::<RwLock<u32>>();

    assert_send::<RwLockReadGuard<'_, u32>>();
    assert_sync::<RwLockReadGuard<'_, u32>>();
    assert_unpin::<RwLockReadGuard<'_, u32>>();

    assert_send::<OwnedRwLockReadGuard<u32, i32>>();
    assert_sync::<OwnedRwLockReadGuard<u32, i32>>();
    assert_unpin::<OwnedRwLockReadGuard<u32, i32>>();

    assert_send::<RwLockWriteGuard<'_, u32>>();
    assert_sync::<RwLockWriteGuard<'_, u32>>();
    assert_unpin::<RwLockWriteGuard<'_, u32>>();

    assert_send::<RwLockMappedWriteGuard<'_, u32>>();
    assert_sync::<RwLockMappedWriteGuard<'_, u32>>();
    assert_unpin::<RwLockMappedWriteGuard<'_, u32>>();

    assert_send::<OwnedRwLockWriteGuard<u32>>();
    assert_sync::<OwnedRwLockWriteGuard<u32>>();
    assert_unpin::<OwnedRwLockWriteGuard<u32>>();

    assert_send::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    assert_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    assert_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>();

    // The lock futures themselves must also be Send + Sync so they can be
    // awaited from spawned tasks.
    let rwlock = Arc::new(RwLock::new(0));
    check_futures(&rwlock);

    fn check_futures(rwlock: &Arc<RwLock<i32>>) {
        fn assert_send_sync_val<T: Send + Sync>(_t: T) {}
        assert_send_sync_val(rwlock.read());
        assert_send_sync_val(Arc::clone(rwlock).read_owned());
        assert_send_sync_val(rwlock.write());
        assert_send_sync_val(Arc::clone(rwlock).write_owned());
    }
}
145
// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
// RwLock<T>.
unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
// NB: These impls need to be explicit since we're storing a raw pointer.
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send`.
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in
// the RwLock, unlike RwLockReadGuard.
unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Sync,
{
}
unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
// Safety: a shared reference to a write guard only permits reading `T`, so
// `Sync` requires `T: Sync` (plus `Send` since dropping the guard releases
// the lock, allowing the value to be observed from another thread).
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
192
193impl<T: ?Sized> RwLock<T> {
194 /// Creates a new instance of an `RwLock<T>` which is unlocked.
195 ///
196 /// # Examples
197 ///
198 /// ```
199 /// use tokio::sync::RwLock;
200 ///
201 /// let lock = RwLock::new(5);
202 /// ```
    #[track_caller]
    pub fn new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        // When instrumented, create a resource span tagged with the caller's
        // location and emit the initial lock state for tokio-console.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();
            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = MAX_READS,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        // The semaphore starts with MAX_READS permits: one per reader, all of
        // them for a writer.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(MAX_READS as usize);

        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }
255
256 /// Creates a new instance of an `RwLock<T>` which is unlocked
257 /// and allows a maximum of `max_reads` concurrent readers.
258 ///
259 /// # Examples
260 ///
261 /// ```
262 /// use tokio::sync::RwLock;
263 ///
264 /// let lock = RwLock::with_max_readers(5, 1024);
265 /// ```
266 ///
267 /// # Panics
268 ///
269 /// Panics if `max_reads` is more than `u32::MAX >> 3`.
    #[track_caller]
    pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T>
    where
        T: Sized,
    {
        // The batch semaphore cannot represent more than `u32::MAX >> 3`
        // permits, so larger reader counts are rejected up front.
        assert!(
            max_reads <= MAX_READS,
            "a RwLock may not be created with more than {MAX_READS} readers"
        );

        // When instrumented, create a resource span tagged with the caller's
        // location and emit the initial lock state for tokio-console.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();

            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = max_reads,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        // One permit per reader; a writer must take all `max_reads` of them.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(max_reads as usize);

        RwLock {
            mr: max_reads,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }
328
329 /// Creates a new instance of an `RwLock<T>` which is unlocked.
330 ///
331 /// When using the `tracing` [unstable feature], a `RwLock` created with
332 /// `const_new` will not be instrumented. As such, it will not be visible
333 /// in [`tokio-console`]. Instead, [`RwLock::new`] should be used to create
334 /// an instrumented object if that is needed.
335 ///
336 /// # Examples
337 ///
338 /// ```
339 /// use tokio::sync::RwLock;
340 ///
341 /// static LOCK: RwLock<i32> = RwLock::const_new(5);
342 /// ```
343 ///
344 /// [`tokio-console`]: https://github.com/tokio-rs/console
345 /// [unstable feature]: crate#unstable-features
    // Not available under loom tests: loom's primitives are not const-constructible.
    #[cfg(not(all(loom, test)))]
    pub const fn const_new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s: Semaphore::const_new(MAX_READS as usize),
            // A disabled span: const-created locks are never instrumented.
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }
359
360 /// Creates a new instance of an `RwLock<T>` which is unlocked
361 /// and allows a maximum of `max_reads` concurrent readers.
362 ///
363 /// # Examples
364 ///
365 /// ```
366 /// use tokio::sync::RwLock;
367 ///
368 /// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024);
369 /// ```
370 #[cfg(not(all(loom, test)))]
371 pub const fn const_with_max_readers(value: T, max_reads: u32) -> RwLock<T>
372 where
373 T: Sized,
374 {
375 assert!(max_reads <= MAX_READS);
376
377 RwLock {
378 mr: max_reads,
379 c: UnsafeCell::new(value),
380 s: Semaphore::const_new(max_reads as usize),
381 #[cfg(all(tokio_unstable, feature = "tracing"))]
382 resource_span: tracing::Span::none(),
383 }
384 }
385
386 /// Locks this `RwLock` with shared read access, causing the current task
387 /// to yield until the lock has been acquired.
388 ///
389 /// The calling task will yield until there are no writers which hold the
390 /// lock. There may be other readers inside the lock when the task resumes.
391 ///
392 /// Note that under the priority policy of [`RwLock`], read locks are not
393 /// granted until prior write locks, to prevent starvation. Therefore
394 /// deadlock may occur if a read lock is held by the current task, a write
395 /// lock attempt is made, and then a subsequent read lock attempt is made
396 /// by the current task.
397 ///
398 /// Returns an RAII guard which will drop this read access of the `RwLock`
399 /// when dropped.
400 ///
401 /// # Cancel safety
402 ///
403 /// This method uses a queue to fairly distribute locks in the order they
404 /// were requested. Cancelling a call to `read` makes you lose your place in
405 /// the queue.
406 ///
407 /// # Examples
408 ///
409 /// ```
410 /// use std::sync::Arc;
411 /// use tokio::sync::RwLock;
412 ///
413 /// # #[tokio::main(flavor = "current_thread")]
414 /// # async fn main() {
415 /// let lock = Arc::new(RwLock::new(1));
416 /// let c_lock = lock.clone();
417 ///
418 /// let n = lock.read().await;
419 /// assert_eq!(*n, 1);
420 ///
421 /// tokio::spawn(async move {
422 /// // While main has an active read lock, we acquire one too.
423 /// let r = c_lock.read().await;
424 /// assert_eq!(*r, 1);
425 /// }).await.expect("The spawned task has panicked");
426 ///
427 /// // Drop the guard after the spawned task finishes.
428 /// drop(n);
429 /// # }
430 /// ```
    pub async fn read(&self) -> RwLockReadGuard<'_, T> {
        // Each reader holds exactly one semaphore permit; the permit is
        // returned to the semaphore when the guard is dropped.
        let acquire_fut = async {
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore was closed. But we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            RwLockReadGuard {
                s: &self.s,
                data: self.c.get(),
                marker: PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        // When instrumented, wrap the acquire future so each poll is recorded
        // against this lock's resource span.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::read",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
471
472 /// Blockingly locks this `RwLock` with shared read access.
473 ///
474 /// This method is intended for use cases where you
475 /// need to use this rwlock in asynchronous code as well as in synchronous code.
476 ///
477 /// Returns an RAII guard which will drop the read access of this `RwLock` when dropped.
478 ///
479 /// # Panics
480 ///
481 /// This function panics if called within an asynchronous execution context.
482 ///
483 /// - If you find yourself in an asynchronous execution context and needing
484 /// to call some (synchronous) function which performs one of these
485 /// `blocking_` operations, then consider wrapping that call inside
486 /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
487 /// (or [`block_in_place()`][crate::task::block_in_place]).
488 ///
489 /// # Examples
490 ///
491 /// ```
492 /// # #[cfg(not(target_family = "wasm"))]
493 /// # {
494 /// use std::sync::Arc;
495 /// use tokio::sync::RwLock;
496 ///
497 /// #[tokio::main]
498 /// async fn main() {
499 /// let rwlock = Arc::new(RwLock::new(1));
500 /// let mut write_lock = rwlock.write().await;
501 ///
502 /// let blocking_task = tokio::task::spawn_blocking({
503 /// let rwlock = Arc::clone(&rwlock);
504 /// move || {
505 /// // This shall block until the `write_lock` is released.
506 /// let read_lock = rwlock.blocking_read();
507 /// assert_eq!(*read_lock, 0);
508 /// }
509 /// });
510 ///
511 /// *write_lock -= 1;
512 /// drop(write_lock); // release the lock.
513 ///
514 /// // Await the completion of the blocking task.
515 /// blocking_task.await.unwrap();
516 ///
517 /// // Assert uncontended.
518 /// assert!(rwlock.try_write().is_ok());
519 /// }
520 /// # }
521 /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> {
        // Drive the async `read` future to completion on the current thread;
        // panics if called from within an async context.
        crate::future::block_on(self.read())
    }
527
528 /// Locks this `RwLock` with shared read access, causing the current task
529 /// to yield until the lock has been acquired.
530 ///
531 /// The calling task will yield until there are no writers which hold the
532 /// lock. There may be other readers inside the lock when the task resumes.
533 ///
534 /// This method is identical to [`RwLock::read`], except that the returned
535 /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
536 /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
537 /// method, and the guard will live for the `'static` lifetime, as it keeps
538 /// the `RwLock` alive by holding an `Arc`.
539 ///
540 /// Note that under the priority policy of [`RwLock`], read locks are not
541 /// granted until prior write locks, to prevent starvation. Therefore
542 /// deadlock may occur if a read lock is held by the current task, a write
543 /// lock attempt is made, and then a subsequent read lock attempt is made
544 /// by the current task.
545 ///
546 /// Returns an RAII guard which will drop this read access of the `RwLock`
547 /// when dropped.
548 ///
549 /// # Cancel safety
550 ///
551 /// This method uses a queue to fairly distribute locks in the order they
552 /// were requested. Cancelling a call to `read_owned` makes you lose your
553 /// place in the queue.
554 ///
555 /// # Examples
556 ///
557 /// ```
558 /// use std::sync::Arc;
559 /// use tokio::sync::RwLock;
560 ///
561 /// # #[tokio::main(flavor = "current_thread")]
562 /// # async fn main() {
563 /// let lock = Arc::new(RwLock::new(1));
564 /// let c_lock = lock.clone();
565 ///
566 /// let n = lock.read_owned().await;
567 /// assert_eq!(*n, 1);
568 ///
569 /// tokio::spawn(async move {
570 /// // While main has an active read lock, we acquire one too.
571 /// let r = c_lock.read_owned().await;
572 /// assert_eq!(*r, 1);
573 /// }).await.expect("The spawned task has panicked");
574 ///
575 /// // Drop the guard after the spawned task finishes.
576 /// drop(n);
577 ///}
578 /// ```
    pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> {
        // Clone the span up front: `self` is moved into the acquire future below.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        // Each reader holds exactly one semaphore permit; the permit is
        // returned to the semaphore when the guard is dropped.
        let acquire_fut = async {
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore was closed. But we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard takes ownership of the Arc, keeping the lock alive
            // for the guard's (potentially 'static) lifetime.
            OwnedRwLockReadGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::read_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
622
623 /// Attempts to acquire this `RwLock` with shared read access.
624 ///
625 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
626 /// Otherwise, an RAII guard is returned which will release read access
627 /// when dropped.
628 ///
629 /// [`TryLockError`]: TryLockError
630 ///
631 /// # Examples
632 ///
633 /// ```
634 /// use std::sync::Arc;
635 /// use tokio::sync::RwLock;
636 ///
637 /// # #[tokio::main(flavor = "current_thread")]
638 /// # async fn main() {
639 /// let lock = Arc::new(RwLock::new(1));
640 /// let c_lock = lock.clone();
641 ///
642 /// let v = lock.try_read().unwrap();
643 /// assert_eq!(*v, 1);
644 ///
645 /// tokio::spawn(async move {
646 /// // While main has an active read lock, we acquire one too.
647 /// let n = c_lock.read().await;
648 /// assert_eq!(*n, 1);
649 /// }).await.expect("The spawned task has panicked");
650 ///
651 /// // Drop the guard when spawned task finishes.
652 /// drop(v);
653 /// # }
654 /// ```
655 pub fn try_read(&self) -> Result<RwLockReadGuard<'_, T>, TryLockError> {
656 match self.s.try_acquire(1) {
657 Ok(permit) => permit,
658 Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
659 Err(TryAcquireError::Closed) => unreachable!(),
660 }
661
662 let guard = RwLockReadGuard {
663 s: &self.s,
664 data: self.c.get(),
665 marker: marker::PhantomData,
666 #[cfg(all(tokio_unstable, feature = "tracing"))]
667 resource_span: self.resource_span.clone(),
668 };
669
670 #[cfg(all(tokio_unstable, feature = "tracing"))]
671 self.resource_span.in_scope(|| {
672 tracing::trace!(
673 target: "runtime::resource::state_update",
674 current_readers = 1,
675 current_readers.op = "add",
676 )
677 });
678
679 Ok(guard)
680 }
681
682 /// Attempts to acquire this `RwLock` with shared read access.
683 ///
684 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
685 /// Otherwise, an RAII guard is returned which will release read access
686 /// when dropped.
687 ///
688 /// This method is identical to [`RwLock::try_read`], except that the
689 /// returned guard references the `RwLock` with an [`Arc`] rather than by
690 /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
691 /// call this method, and the guard will live for the `'static` lifetime,
692 /// as it keeps the `RwLock` alive by holding an `Arc`.
693 ///
694 /// [`TryLockError`]: TryLockError
695 ///
696 /// # Examples
697 ///
698 /// ```
699 /// use std::sync::Arc;
700 /// use tokio::sync::RwLock;
701 ///
702 /// # #[tokio::main(flavor = "current_thread")]
703 /// # async fn main() {
704 /// let lock = Arc::new(RwLock::new(1));
705 /// let c_lock = lock.clone();
706 ///
707 /// let v = lock.try_read_owned().unwrap();
708 /// assert_eq!(*v, 1);
709 ///
710 /// tokio::spawn(async move {
711 /// // While main has an active read lock, we acquire one too.
712 /// let n = c_lock.read_owned().await;
713 /// assert_eq!(*n, 1);
714 /// }).await.expect("The spawned task has panicked");
715 ///
716 /// // Drop the guard when spawned task finishes.
717 /// drop(v);
718 /// # }
719 /// ```
    pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> {
        // Attempt to take a single reader permit without waiting.
        match self.s.try_acquire(1) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            // We never close the semaphore, so this cannot happen.
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard takes ownership of the Arc, keeping the lock alive for
        // the guard's (potentially 'static) lifetime.
        let guard = OwnedRwLockReadGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }
746
747 /// Locks this `RwLock` with exclusive write access, causing the current
748 /// task to yield until the lock has been acquired.
749 ///
750 /// The calling task will yield while other writers or readers currently
751 /// have access to the lock.
752 ///
753 /// Returns an RAII guard which will drop the write access of this `RwLock`
754 /// when dropped.
755 ///
756 /// # Cancel safety
757 ///
758 /// This method uses a queue to fairly distribute locks in the order they
759 /// were requested. Cancelling a call to `write` makes you lose your place
760 /// in the queue.
761 ///
762 /// # Examples
763 ///
764 /// ```
765 /// use tokio::sync::RwLock;
766 ///
767 /// # #[tokio::main(flavor = "current_thread")]
768 /// # async fn main() {
769 /// let lock = RwLock::new(1);
770 ///
771 /// let mut n = lock.write().await;
772 /// *n = 2;
773 /// # }
774 /// ```
    pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
        // A writer takes all `mr` permits at once, excluding every reader and
        // any other writer; the permits are returned when the guard drops.
        let acquire_fut = async {
            self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
                // The semaphore was closed. But we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            RwLockWriteGuard {
                permits_acquired: self.mr,
                s: &self.s,
                data: self.c.get(),
                marker: marker::PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        // When instrumented, wrap the acquire future so each poll is recorded
        // against this lock's resource span.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::write",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }
816
817 /// Blockingly locks this `RwLock` with exclusive write access.
818 ///
819 /// This method is intended for use cases where you
820 /// need to use this rwlock in asynchronous code as well as in synchronous code.
821 ///
822 /// Returns an RAII guard which will drop the write access of this `RwLock` when dropped.
823 ///
824 /// # Panics
825 ///
826 /// This function panics if called within an asynchronous execution context.
827 ///
828 /// - If you find yourself in an asynchronous execution context and needing
829 /// to call some (synchronous) function which performs one of these
830 /// `blocking_` operations, then consider wrapping that call inside
831 /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
832 /// (or [`block_in_place()`][crate::task::block_in_place]).
833 ///
834 /// # Examples
835 ///
836 /// ```
837 /// # #[cfg(not(target_family = "wasm"))]
838 /// # {
839 /// use std::sync::Arc;
840 /// use tokio::{sync::RwLock};
841 ///
842 /// #[tokio::main]
843 /// async fn main() {
844 /// let rwlock = Arc::new(RwLock::new(1));
845 /// let read_lock = rwlock.read().await;
846 ///
847 /// let blocking_task = tokio::task::spawn_blocking({
848 /// let rwlock = Arc::clone(&rwlock);
849 /// move || {
850 /// // This shall block until the `read_lock` is released.
851 /// let mut write_lock = rwlock.blocking_write();
852 /// *write_lock = 2;
853 /// }
854 /// });
855 ///
856 /// assert_eq!(*read_lock, 1);
857 /// // Release the last outstanding read lock.
858 /// drop(read_lock);
859 ///
860 /// // Await the completion of the blocking task.
861 /// blocking_task.await.unwrap();
862 ///
863 /// // Assert uncontended.
864 /// let read_lock = rwlock.try_read().unwrap();
865 /// assert_eq!(*read_lock, 2);
866 /// }
867 /// # }
868 /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> {
        // Drive the async `write` future to completion on the current thread;
        // panics if called from within an async context.
        crate::future::block_on(self.write())
    }
874
875 /// Locks this `RwLock` with exclusive write access, causing the current
876 /// task to yield until the lock has been acquired.
877 ///
878 /// The calling task will yield while other writers or readers currently
879 /// have access to the lock.
880 ///
881 /// This method is identical to [`RwLock::write`], except that the returned
882 /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
883 /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
884 /// method, and the guard will live for the `'static` lifetime, as it keeps
885 /// the `RwLock` alive by holding an `Arc`.
886 ///
887 /// Returns an RAII guard which will drop the write access of this `RwLock`
888 /// when dropped.
889 ///
890 /// # Cancel safety
891 ///
892 /// This method uses a queue to fairly distribute locks in the order they
893 /// were requested. Cancelling a call to `write_owned` makes you lose your
894 /// place in the queue.
895 ///
896 /// # Examples
897 ///
898 /// ```
899 /// use std::sync::Arc;
900 /// use tokio::sync::RwLock;
901 ///
902 /// # #[tokio::main(flavor = "current_thread")]
903 /// # async fn main() {
904 /// let lock = Arc::new(RwLock::new(1));
905 ///
906 /// let mut n = lock.write_owned().await;
907 /// *n = 2;
908 ///}
909 /// ```
    pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> {
        // Clone the span up front: `self` is moved into the acquire future below.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        // A writer takes all `mr` permits at once, excluding every reader and
        // any other writer; the permits are returned when the guard drops.
        let acquire_fut = async {
            self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
                // The semaphore was closed. But we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard takes ownership of the Arc, keeping the lock alive
            // for the guard's (potentially 'static) lifetime.
            OwnedRwLockWriteGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                permits_acquired: self.mr,
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::write_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }
954
955 /// Attempts to acquire this `RwLock` with exclusive write access.
956 ///
957 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
958 /// Otherwise, an RAII guard is returned which will release write access
959 /// when dropped.
960 ///
961 /// [`TryLockError`]: TryLockError
962 ///
963 /// # Examples
964 ///
965 /// ```
966 /// use tokio::sync::RwLock;
967 ///
968 /// # #[tokio::main(flavor = "current_thread")]
969 /// # async fn main() {
970 /// let rw = RwLock::new(1);
971 ///
972 /// let v = rw.read().await;
973 /// assert_eq!(*v, 1);
974 ///
975 /// assert!(rw.try_write().is_err());
976 /// # }
977 /// ```
978 pub fn try_write(&self) -> Result<RwLockWriteGuard<'_, T>, TryLockError> {
979 match self.s.try_acquire(self.mr as usize) {
980 Ok(permit) => permit,
981 Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
982 Err(TryAcquireError::Closed) => unreachable!(),
983 }
984
985 let guard = RwLockWriteGuard {
986 permits_acquired: self.mr,
987 s: &self.s,
988 data: self.c.get(),
989 marker: marker::PhantomData,
990 #[cfg(all(tokio_unstable, feature = "tracing"))]
991 resource_span: self.resource_span.clone(),
992 };
993
994 #[cfg(all(tokio_unstable, feature = "tracing"))]
995 self.resource_span.in_scope(|| {
996 tracing::trace!(
997 target: "runtime::resource::state_update",
998 write_locked = true,
999 write_locked.op = "override",
1000 )
1001 });
1002
1003 Ok(guard)
1004 }
1005
1006 /// Attempts to acquire this `RwLock` with exclusive write access.
1007 ///
1008 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
1009 /// Otherwise, an RAII guard is returned which will release write access
1010 /// when dropped.
1011 ///
1012 /// This method is identical to [`RwLock::try_write`], except that the
1013 /// returned guard references the `RwLock` with an [`Arc`] rather than by
1014 /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
1015 /// call this method, and the guard will live for the `'static` lifetime,
1016 /// as it keeps the `RwLock` alive by holding an `Arc`.
1017 ///
1018 /// [`TryLockError`]: TryLockError
1019 ///
1020 /// # Examples
1021 ///
1022 /// ```
1023 /// use std::sync::Arc;
1024 /// use tokio::sync::RwLock;
1025 ///
1026 /// # #[tokio::main(flavor = "current_thread")]
1027 /// # async fn main() {
1028 /// let rw = Arc::new(RwLock::new(1));
1029 ///
1030 /// let v = Arc::clone(&rw).read_owned().await;
1031 /// assert_eq!(*v, 1);
1032 ///
1033 /// assert!(rw.try_write_owned().is_err());
1034 /// # }
1035 /// ```
    pub fn try_write_owned(self: Arc<Self>) -> Result<OwnedRwLockWriteGuard<T>, TryLockError> {
        // A writer needs every permit at once; if any reader or writer holds
        // one, this fails immediately instead of waiting.
        match self.s.try_acquire(self.mr as usize) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            // We never close the semaphore, so this cannot happen.
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard takes ownership of the Arc, keeping the lock alive for
        // the guard's (potentially 'static) lifetime.
        let guard = OwnedRwLockWriteGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            permits_acquired: self.mr,
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        Ok(guard)
    }
1063
1064 /// Returns a mutable reference to the underlying data.
1065 ///
1066 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
1067 /// take place -- the mutable borrow statically guarantees no locks exist.
1068 ///
1069 /// # Examples
1070 ///
1071 /// ```
1072 /// use tokio::sync::RwLock;
1073 ///
1074 /// fn main() {
1075 /// let mut lock = RwLock::new(1);
1076 ///
1077 /// let n = lock.get_mut();
1078 /// *n = 2;
1079 /// }
1080 /// ```
    pub fn get_mut(&mut self) -> &mut T {
        // `&mut self` statically guarantees exclusive access, so no semaphore
        // permits need to be acquired.
        self.c.get_mut()
    }
1084
    /// Consumes the lock, returning the underlying data.
    ///
    /// Since this call takes the `RwLock` by value, no locking needs to take
    /// place -- ownership statically guarantees no guards exist.
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        self.c.into_inner()
    }
1092}
1093
1094impl<T> From<T> for RwLock<T> {
1095 fn from(s: T) -> Self {
1096 Self::new(s)
1097 }
1098}
1099
impl<T> Default for RwLock<T>
where
    T: Default,
{
    /// Creates an unlocked `RwLock<T>` containing `T::default()`.
    fn default() -> Self {
        Self::new(T::default())
    }
}
1108
1109impl<T: ?Sized> std::fmt::Debug for RwLock<T>
1110where
1111 T: std::fmt::Debug,
1112{
1113 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1114 let mut d = f.debug_struct("RwLock");
1115 match self.try_read() {
1116 Ok(inner) => d.field("data", &&*inner),
1117 Err(_) => d.field("data", &format_args!("<locked>")),
1118 };
1119 d.finish()
1120 }
1121}