tokio/runtime/
mod.rs

//! The Tokio runtime.
//!
//! Unlike other Rust programs, asynchronous applications require runtime
//! support. In particular, the following runtime services are necessary:
//!
//! * An **I/O event loop**, called the driver, which drives I/O resources and
//!   dispatches I/O events to tasks that depend on them.
//! * A **scheduler** to execute [tasks] that use these I/O resources.
//! * A **timer** for scheduling work to run after a set period of time.
//!
//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing
//! them to be started, shut down, and configured together. However, it is often
//! not necessary to configure a [`Runtime`] manually; most users can simply use
//! the [`tokio::main`] attribute macro, which creates a [`Runtime`] under the hood.
//!
//! # Choose your runtime
//!
//! Here are some rules of thumb for choosing the right runtime for your application.
//!
//! ```plaintext
//!    +------------------------------------------------------+
//!    | Do you want work-stealing or multi-thread scheduler? |
//!    +------------------------------------------------------+
//!                    | Yes              | No
//!                    |                  |
//!                    |                  |
//!                    v                  |
//!      +------------------------+       |
//!      | Multi-threaded Runtime |       |
//!      +------------------------+       |
//!                                       |
//!                                       v
//!                      +--------------------------------+
//!                      | Do you execute `!Send` Future? |
//!                      +--------------------------------+
//!                            | Yes                 | No
//!                            |                     |
//!                            v                     |
//!              +--------------------------+        |
//!              | Local Runtime (unstable) |        |
//!              +--------------------------+        |
//!                                                  |
//!                                                  v
//!                                      +------------------------+
//!                                      | Current-thread Runtime |
//!                                      +------------------------+
//! ```
//!
//! The above decision tree is not exhaustive. There are other factors that
//! may influence your decision.
//!
//! ## Bridging with sync code
//!
//! See <https://tokio.rs/tokio/topics/bridging> for details.
//!
//! ## NUMA awareness
//!
//! The Tokio runtime is not NUMA (Non-Uniform Memory Access) aware.
//! You may want to start multiple runtimes instead of a single runtime
//! for better performance on NUMA systems, as sketched below.
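//!
//! As a rough sketch, assuming a machine with two NUMA nodes, you could run one
//! independent runtime per node, each on its own thread. (Pinning each runtime's
//! worker threads to a node requires platform-specific affinity APIs that are
//! not shown here.)
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::runtime;
//!
//! // Hypothetical setup: one runtime per NUMA node, two nodes assumed.
//! let handles: Vec<_> = (0..2)
//!     .map(|node| {
//!         std::thread::spawn(move || {
//!             let rt = runtime::Builder::new_multi_thread()
//!                 .worker_threads(4) // workers per node; adjust to your topology
//!                 .enable_all()
//!                 .build()
//!                 .unwrap();
//!             rt.block_on(async move {
//!                 // Run the portion of the workload assigned to `node`.
//!                 println!("runtime for node {node} is running");
//!             });
//!         })
//!     })
//!     .collect();
//!
//! for handle in handles {
//!     handle.join().unwrap();
//! }
//! # }
//! ```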
//!
//! # Usage
//!
//! When no fine tuning is required, the [`tokio::main`] attribute macro can be
//! used.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::net::TcpListener;
//! use tokio::io::{AsyncReadExt, AsyncWriteExt};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let listener = TcpListener::bind("127.0.0.1:8080").await?;
//!
//!     loop {
//!         let (mut socket, _) = listener.accept().await?;
//!
//!         tokio::spawn(async move {
//!             let mut buf = [0; 1024];
//!
//!             // In a loop, read data from the socket and write the data back.
//!             loop {
//!                 let n = match socket.read(&mut buf).await {
//!                     // socket closed
//!                     Ok(0) => return,
//!                     Ok(n) => n,
//!                     Err(e) => {
//!                         println!("failed to read from socket; err = {:?}", e);
//!                         return;
//!                     }
//!                 };
//!
//!                 // Write the data back
//!                 if let Err(e) = socket.write_all(&buf[0..n]).await {
//!                     println!("failed to write to socket; err = {:?}", e);
//!                     return;
//!                 }
//!             }
//!         });
//!     }
//! }
//! # }
//! ```
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::net::TcpListener;
//! use tokio::io::{AsyncReadExt, AsyncWriteExt};
//! use tokio::runtime::Runtime;
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create the runtime
//!     let rt = Runtime::new()?;
//!
//!     // Spawn the root task
//!     rt.block_on(async {
//!         let listener = TcpListener::bind("127.0.0.1:8080").await?;
//!
//!         loop {
//!             let (mut socket, _) = listener.accept().await?;
//!
//!             tokio::spawn(async move {
//!                 let mut buf = [0; 1024];
//!
//!                 // In a loop, read data from the socket and write the data back.
//!                 loop {
//!                     let n = match socket.read(&mut buf).await {
//!                         // socket closed
//!                         Ok(0) => return,
//!                         Ok(n) => n,
//!                         Err(e) => {
//!                             println!("failed to read from socket; err = {:?}", e);
//!                             return;
//!                         }
//!                     };
//!
//!                     // Write the data back
//!                     if let Err(e) = socket.write_all(&buf[0..n]).await {
//!                         println!("failed to write to socket; err = {:?}", e);
//!                         return;
//!                     }
//!                 }
//!             });
//!         }
//!     })
//! }
//! # }
//! ```
//!
//! ## Runtime Configurations
//!
//! Tokio provides multiple task scheduling strategies, suitable for different
//! applications. The [runtime builder] or `#[tokio::main]` attribute may be
//! used to select which scheduler to use.
//!
//! #### Multi-Thread Scheduler
//!
//! The multi-thread scheduler executes futures on a _thread pool_, using a
//! work-stealing strategy. By default, it will start a worker thread for each
//! CPU core available on the system. This tends to be the ideal configuration
//! for most applications. The multi-thread scheduler requires the `rt-multi-thread`
//! feature flag, and is selected by default:
//! ```
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let threaded_rt = runtime::Runtime::new()?;
//! # Ok(()) }
//! # }
//! ```
//!
//! Most applications should use the multi-thread scheduler, except in some
//! niche use-cases, such as when running only a single thread is required.
//!
//! #### Current-Thread Scheduler
//!
//! The current-thread scheduler provides a _single-threaded_ future executor.
//! All tasks will be created and executed on the current thread. This requires
//! the `rt` feature flag.
//! ```
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = runtime::Builder::new_current_thread()
//!     .build()?;
//! # Ok(()) }
//! ```
//!
//! #### Resource drivers
//!
//! When configuring a runtime by hand, no resource drivers are enabled by
//! default. In this case, attempting to use networking types or time types will
//! fail. In order to enable these types, the resource drivers must be enabled.
//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a
//! shorthand, [`Builder::enable_all`] enables both resource drivers.
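//!
//! For example, a minimal sketch of a hand-configured runtime with both
//! resource drivers enabled:
//!
//! ```
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = runtime::Builder::new_current_thread()
//!     .enable_io()   // enable the I/O driver
//!     .enable_time() // enable the time driver; `.enable_all()` does both
//!     .build()?;
//! # Ok(()) }
//! ```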
//!
//! ## Lifetime of spawned threads
//!
//! The runtime may spawn threads depending on its configuration and usage. The
//! multi-thread scheduler spawns threads to schedule tasks and for `spawn_blocking`
//! calls.
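//!
//! For example, a minimal sketch of a `spawn_blocking` call, which runs its
//! closure on a dedicated blocking thread managed by the runtime:
//!
//! ```
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = tokio::runtime::Runtime::new()?;
//!
//! rt.block_on(async {
//!     // Runs on a blocking thread, not on a scheduler worker thread.
//!     let sum = tokio::task::spawn_blocking(|| (1..=100u32).sum::<u32>()).await?;
//!     assert_eq!(sum, 5050);
//!     Ok::<_, tokio::task::JoinError>(())
//! })?;
//! # Ok(()) }
//! # }
//! ```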
//!
//! While the `Runtime` is active, threads may shut down after periods of being
//! idle. Once the `Runtime` is dropped, all runtime threads have usually been
//! terminated, but in the presence of unstoppable spawned work they are not
//! guaranteed to have been terminated. See the
//! [struct level documentation](Runtime#shutdown) for more details.
//!
//! [tasks]: crate::task
//! [`Runtime`]: Runtime
//! [`tokio::spawn`]: crate::spawn
//! [`tokio::main`]: ../attr.main.html
//! [runtime builder]: crate::runtime::Builder
//! [`Runtime::new`]: crate::runtime::Runtime::new
//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io
//! [`Builder::enable_time`]: crate::runtime::Builder::enable_time
//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all
//!
//! # Detailed runtime behavior
//!
//! This section goes into more detail about how the Tokio runtime schedules
//! tasks for execution.
//!
//! At its most basic level, a runtime has a collection of tasks that need to be
//! scheduled. It will repeatedly remove a task from that collection and
//! schedule it (by calling [`poll`]). When the collection is empty, the thread
//! will go to sleep until a task is added to the collection.
//!
//! However, the above is not sufficient to guarantee a well-behaved runtime.
//! For example, the runtime might have a single task that is always ready to be
//! scheduled, and schedule that task every time. This is a problem because it
//! starves other tasks by not scheduling them. To solve this, Tokio provides
//! the following fairness guarantee:
//!
//! > If the total number of tasks does not grow without bound, and no task is
//! > [blocking the thread], then it is guaranteed that tasks are scheduled
//! > fairly.
//!
//! Or, more formally:
//!
//! > Under the following two assumptions:
//! >
//! > * There is some number `MAX_TASKS` such that the total number of tasks on
//! >   the runtime at any specific point in time never exceeds `MAX_TASKS`.
//! > * There is some number `MAX_SCHEDULE` such that calling [`poll`] on any
//! >   task spawned on the runtime returns within `MAX_SCHEDULE` time units.
//! >
//! > Then, there is some number `MAX_DELAY` such that when a task is woken, it
//! > will be scheduled by the runtime within `MAX_DELAY` time units.
//!
//! (Here, `MAX_TASKS` and `MAX_SCHEDULE` can be any number and the user of
//! the runtime may choose them. The `MAX_DELAY` number is controlled by the
//! runtime, and depends on the value of `MAX_TASKS` and `MAX_SCHEDULE`.)
//!
//! Other than the above fairness guarantee, there is no guarantee about the
//! order in which tasks are scheduled. There is also no guarantee that the
//! runtime is equally fair to all tasks. For example, if the runtime has two
//! tasks A and B that are both ready, then the runtime may schedule A five
//! times before it schedules B. This is the case even if A yields using
//! [`yield_now`]. All that is guaranteed is that it will schedule B eventually.
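//!
//! As a minimal sketch, a task can still hand control back to the scheduler
//! explicitly with [`yield_now`]; this ends the current poll, but as noted
//! above it does not guarantee that other ready tasks run before the yielding
//! task is polled again:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = tokio::runtime::Builder::new_current_thread().build()?;
//!
//! rt.block_on(async {
//!     for i in 0..3 {
//!         // Do a small unit of work, then yield back to the scheduler.
//!         println!("step {i}");
//!         tokio::task::yield_now().await;
//!     }
//! });
//! # Ok(()) }
//! ```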
//!
//! Normally, tasks are scheduled only if they have been woken by calling
//! [`wake`] on their waker. However, this is not guaranteed, and Tokio may
//! schedule tasks that have not been woken under some circumstances. This is
//! called a spurious wakeup.
//!
//! ## IO and timers
//!
//! Beyond just scheduling tasks, the runtime must also manage IO resources and
//! timers. It does this by periodically checking whether there are any IO
//! resources or timers that are ready, and waking the relevant task so that
//! it will be scheduled.
//!
//! These checks are performed periodically between scheduling tasks. Under the
//! same assumptions as the previous fairness guarantee, Tokio guarantees that
//! it will wake tasks with an IO or timer event within some maximum number of
//! time units.
//!
//! ## Current thread runtime (behavior at the time of writing)
//!
//! This section describes how the [current thread runtime] behaves today. This
//! behavior may change in future versions of Tokio.
//!
//! The current thread runtime maintains two FIFO queues of tasks that are ready
//! to be scheduled: the global queue and the local queue. The runtime will prefer
//! to choose the next task to schedule from the local queue, and will only pick a
//! task from the global queue if the local queue is empty, or if it has picked
//! a task from the local queue 31 times in a row. The number 31 can be
//! changed using the [`global_queue_interval`] setting.
//!
//! The runtime will check for new IO or timer events whenever there are no
//! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
//! number 61 may be changed using the [`event_interval`] setting.
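//!
//! As a sketch of tuning both knobs on a current-thread runtime (the values
//! shown are simply the defaults described above, not recommendations):
//!
//! ```
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = runtime::Builder::new_current_thread()
//!     // Pick from the global queue after this many local-queue polls in a row.
//!     .global_queue_interval(31)
//!     // Check for IO/timer events after this many scheduled tasks in a row.
//!     .event_interval(61)
//!     .build()?;
//! # Ok(()) }
//! ```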
//!
//! When a task is woken from within a task running on the runtime, then the
//! woken task is added directly to the local queue. Otherwise, the task is
//! added to the global queue. The current thread runtime does not use [the lifo
//! slot optimization].
//!
//! ## Multi threaded runtime (behavior at the time of writing)
//!
//! This section describes how the [multi thread runtime] behaves today. This
//! behavior may change in future versions of Tokio.
//!
//! A multi thread runtime has a fixed number of worker threads, which are all
//! created on startup. The multi thread runtime maintains one global queue, and
//! a local queue for each worker thread. The local queue of a worker thread can
//! fit at most 256 tasks. If more than 256 tasks are added to the local queue,
//! then half of them are moved to the global queue to make space.
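//!
//! As a sketch, the number of worker threads can be fixed explicitly at build
//! time (four threads here is an arbitrary choice for illustration; the default
//! is the number of CPU cores):
//!
//! ```
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::runtime;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let rt = runtime::Builder::new_multi_thread()
//!     .worker_threads(4) // all worker threads are created on startup
//!     .enable_all()
//!     .build()?;
//! # Ok(()) }
//! # }
//! ```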
//!
//! The runtime will prefer to choose the next task to schedule from the local
//! queue, and will only pick a task from the global queue if the local queue is
//! empty, or if it has picked a task from the local queue
//! [`global_queue_interval`] times in a row. If the value of
//! [`global_queue_interval`] is not explicitly set using the runtime builder,
//! then the runtime will dynamically compute it using a heuristic that targets
//! 10ms intervals between each check of the global queue (based on the
//! [`worker_mean_poll_time`] metric).
//!
//! If both the local queue and the global queue are empty, then the worker thread
//! will attempt to steal tasks from the local queue of another worker thread.
//! Stealing is done by moving half of the tasks in one local queue to another
//! local queue.
//!
//! The runtime will check for new IO or timer events whenever there are no
//! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
//! number 61 may be changed using the [`event_interval`] setting.
//!
//! The multi thread runtime uses [the lifo slot optimization]: Whenever a task
//! wakes up another task, the other task is added to the worker thread's lifo
//! slot instead of being added to a queue. If there was already a task in the
//! lifo slot when this happened, then the lifo slot is replaced, and the task
//! that used to be in the lifo slot is placed in the thread's local queue.
//! When the runtime finishes scheduling a task, it will schedule the task in
//! the lifo slot immediately, if any. When the lifo slot is used, the [coop
//! budget] is not reset. Furthermore, if a worker thread uses the lifo slot
//! three times in a row, it is temporarily disabled until the worker thread has
//! scheduled a task that didn't come from the lifo slot. The lifo slot can be
//! disabled using the [`disable_lifo_slot`] setting. The lifo slot is separate
//! from the local queue, so other worker threads cannot steal the task in the
//! lifo slot.
//!
//! When a task is woken from a thread that is not a worker thread, then the
//! task is placed in the global queue.
//!
//! [`poll`]: std::future::Future::poll
//! [`wake`]: std::task::Waker::wake
//! [`yield_now`]: crate::task::yield_now
//! [blocking the thread]: https://ryhl.io/blog/async-what-is-blocking/
//! [current thread runtime]: crate::runtime::Builder::new_current_thread
//! [multi thread runtime]: crate::runtime::Builder::new_multi_thread
//! [`global_queue_interval`]: crate::runtime::Builder::global_queue_interval
//! [`event_interval`]: crate::runtime::Builder::event_interval
//! [`disable_lifo_slot`]: crate::runtime::Builder::disable_lifo_slot
//! [the lifo slot optimization]: crate::runtime::Builder::disable_lifo_slot
//! [coop budget]: crate::task::coop#cooperative-scheduling
//! [`worker_mean_poll_time`]: crate::runtime::RuntimeMetrics::worker_mean_poll_time

// At the top due to macros
#[cfg(test)]
#[cfg(not(target_family = "wasm"))]
#[macro_use]
mod tests;

pub(crate) mod context;

pub(crate) mod park;

pub(crate) mod driver;

pub(crate) mod scheduler;

cfg_io_driver_impl! {
    pub(crate) mod io;
}

cfg_process_driver! {
    mod process;
}

cfg_time! {
    pub(crate) mod time;
}

cfg_signal_internal_and_unix! {
    pub(crate) mod signal;
}

cfg_rt! {
    pub(crate) mod task;

    mod config;
    use config::Config;

    mod blocking;
    #[cfg_attr(target_os = "wasi", allow(unused_imports))]
    pub(crate) use blocking::spawn_blocking;

    cfg_trace! {
        pub(crate) use blocking::Mandatory;
    }

    cfg_fs! {
        pub(crate) use blocking::spawn_mandatory_blocking;
    }

    mod builder;
    pub use self::builder::Builder;
    cfg_unstable! {
        mod id;
        #[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
        pub use id::Id;

        pub use self::builder::UnhandledPanic;
        pub use crate::util::rand::RngSeed;

        mod local_runtime;
        pub use local_runtime::{LocalRuntime, LocalOptions};
    }

    cfg_taskdump! {
        pub mod dump;
        pub use dump::Dump;
    }

    mod task_hooks;
    pub(crate) use task_hooks::{TaskHooks, TaskCallback};
    cfg_unstable! {
        pub use task_hooks::TaskMeta;
    }
    #[cfg(not(tokio_unstable))]
    pub(crate) use task_hooks::TaskMeta;

    mod handle;
    pub use handle::{EnterGuard, Handle, TryCurrentError};

    mod runtime;
    pub use runtime::{Runtime, RuntimeFlavor};

    /// Boundary value to prevent stack overflow caused by a large-sized
    /// Future being placed on the stack.
    pub(crate) const BOX_FUTURE_THRESHOLD: usize = if cfg!(debug_assertions) {
        2048
    } else {
        16384
    };

    mod thread_id;
    pub(crate) use thread_id::ThreadId;

    pub(crate) mod metrics;
    pub use metrics::RuntimeMetrics;

    cfg_unstable_metrics! {
        pub use metrics::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration};

        cfg_net! {
            pub(crate) use metrics::IoDriverMetrics;
        }
    }

    pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder};

    /// Callback invoked after a runtime thread starts / before it stops.
    type Callback = std::sync::Arc<dyn Fn() + Send + Sync>;
}