#![allow(clippy::new_without_default)]
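
//! Operation codes that can be used to construct [`squeue::Entry`](crate::squeue::Entry)s.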

use std::convert::TryInto;
use std::mem;
use std::os::unix::io::RawFd;

use crate::squeue::Entry;
use crate::squeue::Entry128;
use crate::sys;
use crate::types::{self, sealed};
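
// Assigns either a raw fd or a registered (fixed) file index to `sqe.fd`,
// setting the `FIXED_FILE` flag in the latter case.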
macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}

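// Generates an opcode struct along with a `new` constructor for its required
// fields, chainable setters for the optional fields listed after `;;`, a
// `CODE` constant, and a `build` method that produces the submission queue entry.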
macro_rules! opcode {
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            pub const CODE: u8 = $opcode as _;

            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}

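// Returns a fully zeroed SQE; each opcode's `build` fills in only the fields it needs.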
#[inline(always)]
fn sqe_zeroed() -> sys::io_uring_sqe {
    unsafe { mem::zeroed() }
}

opcode! {
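    /// Do not perform any I/O. Useful for testing the performance of the
    /// io_uring implementation itself.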
    #[derive(Debug)]
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        Entry(sqe)
    }
}

opcode! {
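    /// Vectored read, equivalent to `preadv2(2)`.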
    #[derive(Debug)]
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
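    /// Vectored write, equivalent to `pwritev2(2)`.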
    #[derive(Debug)]
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
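    /// File sync, equivalent to `fsync(2)`.
    ///
    /// Note that completions are unordered with respect to other submissions
    /// unless the operations are explicitly linked, so an fsync is not
    /// guaranteed to cover a write submitted just before it.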
    #[derive(Debug)]
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
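    /// Read from a file into a pre-registered (fixed) buffer identified by
    /// `buf_index`, otherwise equivalent to `pread(2)`.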
    #[derive(Debug)]
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
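    /// Write to a file from a pre-registered (fixed) buffer identified by
    /// `buf_index`, otherwise equivalent to `pwrite(2)`.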
    #[derive(Debug)]
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
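    /// Poll the specified fd for the events given in `flags`.
    ///
    /// By default this operates in one-shot mode; set `multi(true)` to keep
    /// the poll armed and receive a completion for every event.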
    #[derive(Debug)]
    pub struct PollAdd {
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}

opcode! {
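    /// Remove an existing poll request, identified by the `user_data` of the
    /// submission that created it.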
    #[derive(Debug)]
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        offset: u64 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}

opcode! {
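    /// Send a message on a socket, equivalent to `sendmsg(2)`.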
    #[derive(Debug)]
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
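    /// Receive a message from a socket, equivalent to `recvmsg(2)`.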
    #[derive(Debug)]
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= 1 << sys::IOSQE_BUFFER_SELECT_BIT;
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}

opcode! {
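    /// A timeout operation.
    ///
    /// The completion is posted when the given timespec expires, or after
    /// `count` other requests complete, whichever happens first.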
    #[derive(Debug)]
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        count: u32 = 0,

        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}

opcode! {
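    /// Accept a new connection on a socket, equivalent to `accept4(2)`.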
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
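    /// Attempt to cancel an already issued request, identified by its
    /// `user_data`.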
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;

    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
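    /// A timeout linked to the preceding request: if that request does not
    /// complete before the given timespec expires, it is cancelled.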
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}

opcode! {
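    /// Open a file, equivalent to `openat(2)`.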
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
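    /// Close a file descriptor, equivalent to `close(2)`; a fixed (direct)
    /// descriptor slot can be closed as well.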
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}

opcode! {
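    /// Get extended file status, equivalent to `statx(2)`.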
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
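    /// Read from a file descriptor into a buffer, equivalent to `pread(2)`.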
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
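    /// Write to a file descriptor from a buffer, equivalent to `pwrite(2)`.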
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
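    /// Send data on a socket, equivalent to `send(2)`.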
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
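    /// Receive data from a socket, equivalent to `recv(2)`.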
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
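    /// Multishot variant of [`Recv`]: keeps posting completions as data
    /// arrives, filling buffers taken from the provided buffer group
    /// `buf_group`.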
    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= 1 << sys::IOSQE_BUFFER_SELECT_BIT;
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}

opcode! {
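    /// Splice data between two file descriptors, equivalent to `splice(2)`.
    /// Either end may be a registered (fixed) file.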
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}

opcode! {
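    /// Register `nbufs` buffers for buffer selection, each `len` bytes long and
    /// laid out contiguously starting at `addr`, under buffer group `bgid` with
    /// buffer ids starting at `bid`.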
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}

opcode! {
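    /// Shut down all or part of a full-duplex connection, equivalent to
    /// `shutdown(2)`.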
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = how as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        Entry(sqe)
    }
}

opcode! {
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe { sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS };
        }
        Entry(sqe)
    }
}

opcode! {
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}

opcode! {
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 16] = [0u8; 16]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 80] = [0u8; 80]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;

        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry128(Entry(sqe), cmd2)
    }
}

opcode! {
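    /// Create a socket, equivalent to `socket(2)`; the new descriptor can
    /// optionally be installed directly into a fixed file slot.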
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: types::RwFlags = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
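    /// Multishot variant of [`Accept`]: keeps accepting connections and posts
    /// a completion for each one.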
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        allocate_file_index: bool = false,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}

opcode! {
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd;
        sqe.__bindgen_anon_1.off = user_data;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}

opcode! {
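    /// Zero-copy send. The request completes in two steps: a first CQE carries
    /// the result of the send, and a second notification CQE signals that the
    /// kernel no longer needs the buffer.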
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        buf_index: Option<u16> = None,
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        flags: i32 = 0,
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
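    /// Wait on a futex word, analogous to the `FUTEX_WAIT` family of
    /// `futex(2)` operations.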
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}