// io_uring/lib.rs

//! The `io_uring` library for Rust.
//!
//! This crate's documentation only summarizes each parameter.
//! For more detailed documentation, see the io_uring man pages.
5#![warn(rust_2018_idioms, unused_qualifications)]
6
7#[macro_use]
8mod util;
9pub mod cqueue;
10pub mod opcode;
11pub mod register;
12pub mod squeue;
13mod submit;
14mod sys;
15pub mod types;
16
17use std::marker::PhantomData;
18use std::mem::ManuallyDrop;
19use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
20use std::{cmp, io, mem};
21
22#[cfg(feature = "io_safety")]
23use std::os::unix::io::{AsFd, BorrowedFd};
24
25pub use cqueue::CompletionQueue;
26pub use register::Probe;
27pub use squeue::SubmissionQueue;
28pub use submit::Submitter;
29use util::{Mmap, OwnedFd};
30
31/// IoUring instance
32///
33/// - `S`: The ring's submission queue entry (SQE) type, either [`squeue::Entry`] or
34///   [`squeue::Entry128`];
35/// - `C`: The ring's completion queue entry (CQE) type, either [`cqueue::Entry`] or
36///   [`cqueue::Entry32`].
/// IoUring instance
///
/// - `S`: The ring's submission queue entry (SQE) type, either [`squeue::Entry`] or
///   [`squeue::Entry128`];
/// - `C`: The ring's completion queue entry (CQE) type, either [`cqueue::Entry`] or
///   [`cqueue::Entry32`].
pub struct IoUring<S = squeue::Entry, C = cqueue::Entry>
where
    S: squeue::EntryMarker,
    C: cqueue::EntryMarker,
{
    // Submission-queue state; built over `memory`, so it must not outlive it.
    sq: squeue::Inner<S>,
    // Completion-queue state; likewise built over `memory`.
    cq: cqueue::Inner<C>,
    // The io_uring file descriptor returned by `io_uring_setup`.
    fd: OwnedFd,
    // The parameters the kernel filled in at setup time.
    params: Parameters,
    // Wrapped in `ManuallyDrop` so the `Drop` impl below can release the
    // mappings explicitly before `fd` is closed.
    memory: ManuallyDrop<MemoryMap>,
}
48
/// The mmap regions backing an [`IoUring`] instance.
///
/// Held only to keep the mappings alive for the ring's lifetime; the queues
/// access the mapped memory through pointers taken at construction time,
/// hence `#[allow(dead_code)]` — the fields are never read directly.
#[allow(dead_code)]
struct MemoryMap {
    // Submission-queue ring mapping. When the kernel reports
    // `IORING_FEAT_SINGLE_MMAP`, this single mapping covers the CQ ring too.
    sq_mmap: Mmap,
    // The submission queue entry (SQE) array mapping.
    sqe_mmap: Mmap,
    // Separate completion-queue ring mapping; `None` when it shares `sq_mmap`.
    cq_mmap: Option<Mmap>,
}
55
56/// IoUring build params
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder<S = squeue::Entry, C = cqueue::Entry>
where
    S: squeue::EntryMarker,
    C: cqueue::EntryMarker,
{
    // Whether to mark the ring's memory mappings as unavailable to child
    // processes after a fork (see `Builder::dontfork`).
    dontfork: bool,
    // The setup parameters passed to `io_uring_setup` by `build`.
    params: sys::io_uring_params,
    // Ties the builder to its SQE/CQE entry types without storing a value.
    phantom: PhantomData<(S, C)>,
}
67
/// The parameters that were used to construct an [`IoUring`].
///
/// Wraps the `io_uring_params` structure as filled in by the kernel during
/// setup; the accessor methods below interpret its flag and feature bits.
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
71
// SAFETY: NOTE(review) — not provable from this file alone. Soundness relies
// on `squeue::Inner`/`cqueue::Inner` performing all shared ring accesses with
// the required atomic orderings; confirm those invariants in `squeue`/`cqueue`
// before relying on or changing these impls.
unsafe impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> Send for IoUring<S, C> {}
// SAFETY: see the note on `Send` above; the `&self` accessors that are not
// safe to call concurrently are themselves declared `unsafe`
// (e.g. `submission_shared` / `completion_shared`).
unsafe impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> Sync for IoUring<S, C> {}
74
impl IoUring<squeue::Entry, cqueue::Entry> {
    /// Create a new `IoUring` instance with default configuration parameters. See [`Builder`] to
    /// customize it further.
    ///
    /// `entries` sets the size of the submission queue,
    /// and its value should be a power of two.
    pub fn new(entries: u32) -> io::Result<Self> {
        Self::builder().build(entries)
    }
}
85
impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> IoUring<S, C> {
    /// Create a [`Builder`] for an `IoUring` instance.
    ///
    /// This allows for further customization than [`new`](Self::new).
    ///
    /// Unlike [`IoUring::new`], this function is available for any combination of submission
    /// queue entry (SQE) and completion queue entry (CQE) types.
    #[must_use]
    pub fn builder() -> Builder<S, C> {
        Builder {
            dontfork: false,
            params: sys::io_uring_params {
                // The chosen entry types may require specific setup flags
                // (e.g. for larger SQE/CQE layouts), so seed the flags from
                // both markers up front.
                flags: S::BUILD_FLAGS | C::BUILD_FLAGS,
                ..Default::default()
            },
            phantom: PhantomData,
        }
    }

    /// Set up the ring with `entries` submission-queue slots using the setup
    /// parameters `p`, which the kernel completes in place.
    fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<Self> {
        // NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
        // and their lifetime can never exceed `MemoryMap`.
        //
        // The memory mapped regions of `MemoryMap` never move,
        // so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
        //
        // I really hope that Rust can safely use self-reference types.

        // Maps the rings and builds the queue handles over those mappings.
        #[inline]
        unsafe fn setup_queue<S: squeue::EntryMarker, C: cqueue::EntryMarker>(
            fd: &OwnedFd,
            p: &sys::io_uring_params,
        ) -> io::Result<(MemoryMap, squeue::Inner<S>, cqueue::Inner<C>)> {
            // Mapping sizes, derived from the offsets the kernel wrote into
            // `p` plus the space for the entry arrays themselves.
            let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
            let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::<C>();
            let sqe_len = p.sq_entries as usize * mem::size_of::<S>();
            let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;

            if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
                // The kernel maps the SQ and CQ rings with one mmap sized to
                // cover the larger of the two.
                let scq_mmap =
                    Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;

                let sq = squeue::Inner::new(&scq_mmap, &sqe_mmap, p);
                let cq = cqueue::Inner::new(&scq_mmap, p);
                let mm = MemoryMap {
                    sq_mmap: scq_mmap,
                    cq_mmap: None,
                    sqe_mmap,
                };

                Ok((mm, sq, cq))
            } else {
                // Without IORING_FEAT_SINGLE_MMAP, the two rings are mapped
                // separately.
                let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
                let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;

                let sq = squeue::Inner::new(&sq_mmap, &sqe_mmap, p);
                let cq = cqueue::Inner::new(&cq_mmap, p);
                let mm = MemoryMap {
                    cq_mmap: Some(cq_mmap),
                    sq_mmap,
                    sqe_mmap,
                };

                Ok((mm, sq, cq))
            }
        }

        // SAFETY: on success `io_uring_setup` returns a fresh file descriptor
        // that nothing else owns, so taking ownership of it here is sound.
        let fd: OwnedFd = unsafe { OwnedFd::from_raw_fd(sys::io_uring_setup(entries, &mut p)?) };

        // SAFETY: `fd` is a valid io_uring descriptor and `p` was filled in
        // by the kernel in the setup call directly above.
        let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };

        Ok(IoUring {
            sq,
            cq,
            fd,
            params: Parameters(p),
            memory: ManuallyDrop::new(mm),
        })
    }

    /// Get the submitter of this io_uring instance, which can be used to submit submission queue
    /// events to the kernel for execution and to register files or buffers with it.
    #[inline]
    pub fn submitter(&self) -> Submitter<'_> {
        Submitter::new(
            &self.fd,
            &self.params,
            self.sq.head,
            self.sq.tail,
            self.sq.flags,
        )
    }

    /// Get the parameters that were used to construct this instance.
    #[inline]
    pub fn params(&self) -> &Parameters {
        &self.params
    }

    /// Initiate asynchronous I/O. See [`Submitter::submit`] for more details.
    #[inline]
    pub fn submit(&self) -> io::Result<usize> {
        self.submitter().submit()
    }

    /// Initiate and/or complete asynchronous I/O. See [`Submitter::submit_and_wait`] for more
    /// details.
    #[inline]
    pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
        self.submitter().submit_and_wait(want)
    }

    /// Get the submitter, submission queue and completion queue of the io_uring instance. This can
    /// be used to operate on the different parts of the io_uring instance independently.
    ///
    /// If you use this method to obtain `sq` and `cq`,
    /// please note that you need to `drop` or `sync` the queue before and after submit,
    /// otherwise the queue will not be updated.
    #[inline]
    pub fn split(
        &mut self,
    ) -> (
        Submitter<'_>,
        SubmissionQueue<'_, S>,
        CompletionQueue<'_, C>,
    ) {
        let submit = Submitter::new(
            &self.fd,
            &self.params,
            self.sq.head,
            self.sq.tail,
            self.sq.flags,
        );
        // `&mut self` guarantees the three borrows cannot alias mutably.
        (submit, self.sq.borrow(), self.cq.borrow())
    }

    /// Get the submission queue of the io_uring instance. This is used to send I/O requests to the
    /// kernel.
    #[inline]
    pub fn submission(&mut self) -> SubmissionQueue<'_, S> {
        self.sq.borrow()
    }

    /// Get the submission queue of the io_uring instance from a shared reference.
    ///
    /// # Safety
    ///
    /// No other [`SubmissionQueue`]s may exist when calling this function.
    #[inline]
    pub unsafe fn submission_shared(&self) -> SubmissionQueue<'_, S> {
        self.sq.borrow_shared()
    }

    /// Get completion queue of the io_uring instance. This is used to receive I/O completion
    /// events from the kernel.
    #[inline]
    pub fn completion(&mut self) -> CompletionQueue<'_, C> {
        self.cq.borrow()
    }

    /// Get the completion queue of the io_uring instance from a shared reference.
    ///
    /// # Safety
    ///
    /// No other [`CompletionQueue`]s may exist when calling this function.
    #[inline]
    pub unsafe fn completion_shared(&self) -> CompletionQueue<'_, C> {
        self.cq.borrow_shared()
    }
}
255
impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> Drop for IoUring<S, C> {
    fn drop(&mut self) {
        // Ensure that `MemoryMap` is released before `fd`.
        // SAFETY: `memory` is dropped exactly once, here, and is never
        // accessed afterwards because `self` is being destroyed.
        unsafe {
            ManuallyDrop::drop(&mut self.memory);
        }
    }
}
264
impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> Builder<S, C> {
    /// Do not make this io_uring instance accessible by child processes after a fork.
    pub fn dontfork(&mut self) -> &mut Self {
        self.dontfork = true;
        self
    }

    /// Perform busy-waiting for I/O completion events, as opposed to getting notifications via an
    /// asynchronous IRQ (Interrupt Request). This will reduce latency, but increases CPU usage.
    ///
    /// This is only usable on file systems that support polling and files opened with `O_DIRECT`.
    pub fn setup_iopoll(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_IOPOLL;
        self
    }

    /// Use a kernel thread to perform submission queue polling. This allows your application to
    /// issue I/O without ever context switching into the kernel, however it does use up a lot more
    /// CPU. You should use it when you are expecting very large amounts of I/O.
    ///
    /// After `idle` milliseconds, the kernel thread will go to sleep and you will have to wake it up
    /// again with a system call (this is handled by [`Submitter::submit`] and
    /// [`Submitter::submit_and_wait`] automatically).
    ///
    /// Before version 5.11 of the Linux kernel, to successfully use this feature, the application
    /// must register a set of files to be used for IO through io_uring_register(2) using the
    /// IORING_REGISTER_FILES opcode. Failure to do so will result in submitted IO being errored
    /// with EBADF. The presence of this feature can be detected by the IORING_FEAT_SQPOLL_NONFIXED
    /// feature flag. In version 5.11 and later, it is no longer necessary to register files to use
    /// this feature. 5.11 also allows using this as non-root, if the user has the CAP_SYS_NICE
    /// capability. In 5.13 this requirement was also relaxed, and no special privileges are needed
    /// for SQPOLL in newer kernels. Certain stable kernels older than 5.13 may also support
    /// unprivileged SQPOLL.
    pub fn setup_sqpoll(&mut self, idle: u32) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_SQPOLL;
        self.params.sq_thread_idle = idle;
        self
    }

    /// Bind the kernel's poll thread to the specified cpu. This flag is only meaningful when
    /// [`Builder::setup_sqpoll`] is enabled.
    pub fn setup_sqpoll_cpu(&mut self, cpu: u32) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_SQ_AFF;
        self.params.sq_thread_cpu = cpu;
        self
    }

    /// Create the completion queue with the specified number of entries. The value must be greater
    /// than `entries`, and may be rounded up to the next power-of-two.
    pub fn setup_cqsize(&mut self, entries: u32) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_CQSIZE;
        self.params.cq_entries = entries;
        self
    }

    /// Clamp the sizes of the submission queue and completion queue at their maximum values instead
    /// of returning an error when you attempt to resize them beyond their maximum values.
    pub fn setup_clamp(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_CLAMP;
        self
    }

    /// Share the asynchronous worker thread backend of this io_uring with the specified io_uring
    /// file descriptor instead of creating a new thread pool.
    pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
        self.params.wq_fd = fd as _;
        self
    }

    /// Start the io_uring instance with all its rings disabled. This allows you to register
    /// restrictions, buffers and files before the kernel starts processing submission queue
    /// events. You are only able to [register restrictions](Submitter::register_restrictions) when
    /// the rings are disabled due to concurrency issues. You can enable the rings with
    /// [`Submitter::register_enable_rings`]. Available since 5.10.
    pub fn setup_r_disabled(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_R_DISABLED;
        self
    }

    /// Normally io_uring stops submitting a batch of requests, if one of these requests results in
    /// an error. This can cause submission of less than what is expected, if a request ends in
    /// error while being submitted. If the ring is created with this flag, io_uring_enter(2) will
    /// continue submitting requests even if it encounters an error submitting a request. CQEs are
    /// still posted for errored request regardless of whether or not this flag is set at ring
    /// creation time, the only difference is if the submit sequence is halted or continued when an
    /// error is observed. Available since 5.18.
    pub fn setup_submit_all(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_SUBMIT_ALL;
        self
    }

    /// By default, io_uring will interrupt a task running in userspace when a completion event
    /// comes in. This is to ensure that completions run in a timely manner. For a lot of use
    /// cases, this is overkill and can cause reduced performance from both the inter-processor
    /// interrupt used to do this, the kernel/user transition, the needless interruption of the
    /// tasks userspace activities, and reduced batching if completions come in at a rapid rate.
    /// Most applications don't need the forceful interruption, as the events are processed at any
    /// kernel/user transition. The exception are setups where the application uses multiple
    /// threads operating on the same ring, where the application waiting on completions isn't the
    /// one that submitted them. For most other use cases, setting this flag will improve
    /// performance. Available since 5.19.
    pub fn setup_coop_taskrun(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_COOP_TASKRUN;
        self
    }

    /// Used in conjunction with IORING_SETUP_COOP_TASKRUN, this provides a flag,
    /// IORING_SQ_TASKRUN, which is set in the SQ ring flags whenever completions are pending that
    /// should be processed. As an example, liburing will check for this flag even when doing
    /// io_uring_peek_cqe(3) and enter the kernel to process them, and applications can do the
    /// same. This makes IORING_SETUP_TASKRUN_FLAG safe to use even when applications rely on a
    /// peek style operation on the CQ ring to see if anything might be pending to reap. Available
    /// since 5.19.
    pub fn setup_taskrun_flag(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_TASKRUN_FLAG;
        self
    }

    /// By default, io_uring will process all outstanding work at the end of any system call or
    /// thread interrupt. This can delay the application from making other progress. Setting this
    /// flag will hint to io_uring that it should defer work until an io_uring_enter(2) call with
    /// the IORING_ENTER_GETEVENTS flag set. This allows the application to request work to run
    /// just before it wants to process completions. This flag requires the
    /// IORING_SETUP_SINGLE_ISSUER flag to be set, and also enforces that the call to
    /// io_uring_enter(2) is called from the same thread that submitted requests. Note that if this
    /// flag is set then it is the application's responsibility to periodically trigger work (for
    /// example via any of the CQE waiting functions) or else completions may not be delivered.
    /// Available since 6.1.
    pub fn setup_defer_taskrun(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_DEFER_TASKRUN;
        self
    }

    /// Hint the kernel that a single task will submit requests. Used for optimizations. This is
    /// enforced by the kernel, and requests that don't respect that will fail with -EEXIST.
    /// If [`Builder::setup_sqpoll`] is enabled, the polling task is doing the submissions and multiple
    /// userspace tasks can call [`Submitter::enter`] and higher level APIs. Available since 6.0.
    pub fn setup_single_issuer(&mut self) -> &mut Self {
        self.params.flags |= sys::IORING_SETUP_SINGLE_ISSUER;
        self
    }

    /// Build an [IoUring], with the specified number of entries in the submission queue and
    /// completion queue unless [`setup_cqsize`](Self::setup_cqsize) has been called.
    pub fn build(&self, entries: u32) -> io::Result<IoUring<S, C>> {
        let ring = IoUring::with_params(entries, self.params)?;

        if self.dontfork {
            // Apply dontfork to every mapping the ring owns, including the
            // CQ ring when it was mapped separately.
            ring.memory.sq_mmap.dontfork()?;
            ring.memory.sqe_mmap.dontfork()?;
            if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
                cq_mmap.dontfork()?;
            }
        }

        Ok(ring)
    }
}
425
impl Parameters {
    /// Whether a kernel thread is performing queue polling. Enabled with [`Builder::setup_sqpoll`].
    pub fn is_setup_sqpoll(&self) -> bool {
        self.0.flags & sys::IORING_SETUP_SQPOLL != 0
    }

    /// Whether waiting for completion events is done with a busy loop instead of using IRQs.
    /// Enabled with [`Builder::setup_iopoll`].
    pub fn is_setup_iopoll(&self) -> bool {
        self.0.flags & sys::IORING_SETUP_IOPOLL != 0
    }

    /// Whether the single issuer hint is enabled. Enabled with [`Builder::setup_single_issuer`].
    pub fn is_setup_single_issuer(&self) -> bool {
        self.0.flags & sys::IORING_SETUP_SINGLE_ISSUER != 0
    }

    /// If this flag is set, the SQ and CQ rings were mapped with a single `mmap(2)` call. This
    /// means that only two syscalls were used instead of three.
    pub fn is_feature_single_mmap(&self) -> bool {
        self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
    }

    /// If this flag is set, io_uring supports never dropping completion events. If a completion
    /// event occurs and the CQ ring is full, the kernel stores the event internally until such a
    /// time that the CQ ring has room for more entries.
    pub fn is_feature_nodrop(&self) -> bool {
        self.0.features & sys::IORING_FEAT_NODROP != 0
    }

    /// If this flag is set, applications can be certain that any data for async offload has been
    /// consumed when the kernel has consumed the SQE.
    pub fn is_feature_submit_stable(&self) -> bool {
        self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
    }

    /// If this flag is set, applications can specify offset == -1 with [`Readv`](opcode::Readv),
    /// [`Writev`](opcode::Writev), [`ReadFixed`](opcode::ReadFixed),
    /// [`WriteFixed`](opcode::WriteFixed), [`Read`](opcode::Read) and [`Write`](opcode::Write),
    /// which behaves exactly like setting offset == -1 in `preadv2(2)` and `pwritev2(2)`: it’ll use
    /// (and update) the current file position.
    ///
    /// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
    /// then the end result will not be as expected.
    /// This is similar to threads sharing a file descriptor and doing IO using the current file position.
    pub fn is_feature_rw_cur_pos(&self) -> bool {
        self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
    }

    /// If this flag is set, then io_uring guarantees that both sync and async execution of
    /// a request assumes the credentials of the task that called [`Submitter::enter`] to queue the requests.
    /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
    /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
    ///
    /// Note that this is the default behavior, tasks can still register different personalities
    /// through [`Submitter::register_personality`].
    pub fn is_feature_cur_personality(&self) -> bool {
        self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
    }

    /// Whether async pollable I/O is fast.
    ///
    /// See [the commit message that introduced
    /// it](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d7718a9d25a61442da8ee8aeeff6a0097f0ccfd6)
    /// for more details.
    ///
    /// If this flag is set, then io_uring supports using an internal poll mechanism to drive
    /// data/space readiness. This means that requests that cannot read or write data to a file no
    /// longer need to be punted to an async thread for handling, instead they will begin operation
    /// when the file is ready. This is similar to doing poll + read/write in userspace, but
    /// eliminates the need to do so. If this flag is set, requests waiting on space/data consume a
    /// lot less resources doing so as they are not blocking a thread. Available since kernel 5.7.
    pub fn is_feature_fast_poll(&self) -> bool {
        self.0.features & sys::IORING_FEAT_FAST_POLL != 0
    }

    /// Whether poll events are stored using 32 bits instead of 16. This allows the user to use
    /// `EPOLLEXCLUSIVE`.
    ///
    /// If this flag is set, the IORING_OP_POLL_ADD command accepts the full 32-bit range of epoll
    /// based flags. Most notably EPOLLEXCLUSIVE which allows exclusive (waking single waiters)
    /// behavior. Available since kernel 5.9.
    pub fn is_feature_poll_32bits(&self) -> bool {
        self.0.features & sys::IORING_FEAT_POLL_32BITS != 0
    }

    /// If this flag is set, the IORING_SETUP_SQPOLL feature no longer requires the use of fixed
    /// files. Any normal file descriptor can be used for IO commands without needing registration.
    /// Available since kernel 5.11.
    pub fn is_feature_sqpoll_nonfixed(&self) -> bool {
        self.0.features & sys::IORING_FEAT_SQPOLL_NONFIXED != 0
    }

    /// If this flag is set, then the io_uring_enter(2) system call supports passing in an extended
    /// argument instead of just the sigset_t of earlier kernels. This extended argument is of type
    /// struct io_uring_getevents_arg and allows the caller to pass in both a sigset_t and a
    /// timeout argument for waiting on events. The struct layout is as follows:
    ///
    /// ```text
    /// struct io_uring_getevents_arg {
    ///     __u64 sigmask;
    ///     __u32 sigmask_sz;
    ///     __u32 pad;
    ///     __u64 ts;
    /// };
    /// ```
    ///
    /// and a pointer to this struct must be passed in if IORING_ENTER_EXT_ARG is set in the flags
    /// for the enter system call. Available since kernel 5.11.
    pub fn is_feature_ext_arg(&self) -> bool {
        self.0.features & sys::IORING_FEAT_EXT_ARG != 0
    }

    /// If this flag is set, io_uring is using native workers for its async helpers. Previous
    /// kernels used kernel threads that assumed the identity of the original io_uring owning task,
    /// but later kernels will actively create what looks more like regular process threads
    /// instead. Available since kernel 5.12.
    pub fn is_feature_native_workers(&self) -> bool {
        self.0.features & sys::IORING_FEAT_NATIVE_WORKERS != 0
    }

    /// Whether the kernel supports tagging resources.
    ///
    /// If this flag is set, then io_uring supports a variety of features related to fixed files
    /// and buffers. In particular, it indicates that registered buffers can be updated in-place,
    /// whereas before the full set would have to be unregistered first. Available since kernel
    /// 5.13.
    pub fn is_feature_resource_tagging(&self) -> bool {
        self.0.features & sys::IORING_FEAT_RSRC_TAGS != 0
    }

    /// Whether the kernel supports `IOSQE_CQE_SKIP_SUCCESS`.
    ///
    /// This feature allows skipping the generation of a CQE if a SQE executes normally. Available
    /// since kernel 5.17.
    pub fn is_feature_skip_cqe_on_success(&self) -> bool {
        self.0.features & sys::IORING_FEAT_CQE_SKIP != 0
    }

    /// Whether the kernel supports deferred file assignment.
    ///
    /// If this flag is set, then io_uring supports sane assignment of files for SQEs that have
    /// dependencies. For example, if a chain of SQEs are submitted with IOSQE_IO_LINK, then
    /// kernels without this flag will prepare the file for each link upfront. If a previous link
    /// opens a file with a known index, eg if direct descriptors are used with open or accept,
    /// then file assignment needs to happen post execution of that SQE. If this flag is set, then
    /// the kernel will defer file assignment until execution of a given request is started.
    /// Available since kernel 5.17.
    pub fn is_feature_linked_file(&self) -> bool {
        self.0.features & sys::IORING_FEAT_LINKED_FILE != 0
    }

    /// The number of submission queue entries allocated.
    pub fn sq_entries(&self) -> u32 {
        self.0.sq_entries
    }

    /// The number of completion queue entries allocated.
    pub fn cq_entries(&self) -> u32 {
        self.0.cq_entries
    }
}
586
587impl std::fmt::Debug for Parameters {
588    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
589        f.debug_struct("Parameters")
590            .field("is_setup_sqpoll", &self.is_setup_sqpoll())
591            .field("is_setup_iopoll", &self.is_setup_iopoll())
592            .field("is_setup_single_issuer", &self.is_setup_single_issuer())
593            .field("is_feature_single_mmap", &self.is_feature_single_mmap())
594            .field("is_feature_nodrop", &self.is_feature_nodrop())
595            .field("is_feature_submit_stable", &self.is_feature_submit_stable())
596            .field("is_feature_rw_cur_pos", &self.is_feature_rw_cur_pos())
597            .field(
598                "is_feature_cur_personality",
599                &self.is_feature_cur_personality(),
600            )
601            .field("is_feature_poll_32bits", &self.is_feature_poll_32bits())
602            .field("sq_entries", &self.0.sq_entries)
603            .field("cq_entries", &self.0.cq_entries)
604            .finish()
605    }
606}
607
608impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> AsRawFd for IoUring<S, C> {
609    fn as_raw_fd(&self) -> RawFd {
610        self.fd.as_raw_fd()
611    }
612}
613
#[cfg(feature = "io_safety")]
impl<S: squeue::EntryMarker, C: cqueue::EntryMarker> AsFd for IoUring<S, C> {
    /// Borrow the io_uring file descriptor; the borrow is tied to `self`,
    /// so it cannot outlive the ring.
    fn as_fd(&self) -> BorrowedFd<'_> {
        AsFd::as_fd(&self.fd)
    }
}