#![allow(clippy::new_without_default)]

use std::convert::TryInto;
use std::mem;
use std::os::unix::io::RawFd;

use crate::squeue::Entry;
use crate::squeue::Entry128;
use crate::sys;
use crate::types::{self, sealed};

// Assigns either a raw fd or a fixed (registered) file index to the SQE,
// setting the FIXED_FILE flag in the latter case.
macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}

// Generates an opcode type: the struct itself, a `new` constructor taking the
// required fields (listed before `;;`), chainable setters for the optional
// fields (listed after `;;`, with their defaults), the `CODE` constant, and
// the `build` method that produces a submission queue entry.
macro_rules! opcode {
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            pub const CODE: u8 = $opcode as _;

            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}
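
// A sketch of the API that the `opcode!` macro generates for every operation
// below: required fields (before `;;`) become arguments of `new`, optional
// fields (after `;;`) become chainable setters, and `build` produces a
// submission queue entry. The `fd` and `iovecs` values in this example are
// assumptions, not something this module provides:
//
//     let entry = Readv::new(types::Fd(fd), iovecs.as_ptr(), iovecs.len() as u32)
//         .offset(0)
//         .build()
//         .user_data(0x01);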

#[inline(always)]
fn sqe_zeroed() -> sys::io_uring_sqe {
    unsafe { mem::zeroed() }
}

opcode! {
    #[derive(Debug)]
    /// Do not perform any I/O, useful for testing the performance of the
    /// io_uring implementation itself.
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        Entry(sqe)
    }
}
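
// Minimal usage sketch for `Nop` (illustrative only; the `ring` value and its
// construction are assumptions about the caller, not part of this module):
//
//     let nop = Nop::new().build().user_data(0x42);
//     unsafe { ring.submission().push(&nop).expect("submission queue is full") };
//     ring.submit_and_wait(1)?;
//     let cqe = ring.completion().next().expect("completion queue is empty");
//     assert_eq!(cqe.user_data(), 0x42);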

opcode! {
    #[derive(Debug)]
    /// Vectored read, equivalent to `preadv2(2)`.
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
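
// Illustrative sketch for `Readv` (the `fd` and the ring handling are assumed;
// only the builder calls below come from this module). The iovec array and the
// buffers it points to must stay alive until the corresponding CQE is reaped:
//
//     let mut buf = vec![0u8; 1024];
//     let iov = libc::iovec { iov_base: buf.as_mut_ptr() as _, iov_len: buf.len() };
//     let entry = Readv::new(types::Fd(fd), &iov, 1)
//         .offset(0)
//         .build()
//         .user_data(0x02);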

opcode! {
    #[derive(Debug)]
    /// Vectored write, equivalent to `pwritev2(2)`.
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// File sync, equivalent to `fsync(2)`.
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Read into a pre-registered (fixed) buffer, equivalent to `pread(2)`.
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Write from a pre-registered (fixed) buffer, equivalent to `pwrite(2)`.
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Poll the specified fd for the events given in `flags`.
    pub struct PollAdd {
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}
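
// Illustrative sketch for `PollAdd` (the socket fd is assumed). With
// `multi(true)` this becomes a multishot poll on kernels that support it, so a
// single SQE can keep producing CQEs until it is cancelled or errors out:
//
//     let poll = PollAdd::new(types::Fd(socket_fd), libc::POLLIN as u32)
//         .multi(true)
//         .build()
//         .user_data(0x03);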

opcode! {
    #[derive(Debug)]
    /// Remove an existing poll request identified by its `user_data`.
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Sync a file range with disk, equivalent to `sync_file_range(2)`.
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        offset: u64 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Send a message on a socket, equivalent to `sendmsg(2)`.
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Receive a message from a socket, equivalent to `recvmsg(2)`.
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Multishot variant of `RecvMsg`; completions are delivered into provided
    /// buffers taken from `buf_group`.
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// A timeout operation that completes when the given timespec elapses or
    /// after `count` other submissions have completed.
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        count: u32 = 0,

        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}
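
// Illustrative sketch for `Timeout`. The timespec is only referenced by
// pointer, so it must outlive the operation; the `types::Timespec` builder
// calls below are assumed to be available from this crate:
//
//     let ts = types::Timespec::new().sec(1).nsec(500_000_000);
//     let timeout = Timeout::new(&ts)
//         .build()
//         .user_data(0x04);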

opcode! {
    /// Attempt to remove an existing timeout operation identified by its
    /// `user_data`.
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    /// Update the timespec of an existing timeout operation identified by its
    /// `user_data`.
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}

opcode! {
    /// Accept a connection on a socket, equivalent to `accept4(2)`.
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
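
// Illustrative sketch for `Accept` (the listening socket fd is assumed).
// Passing null pointers is fine when the peer address is not needed; the CQE
// result is the accepted fd, or a fixed slot when `file_index` is set:
//
//     let accept = Accept::new(
//         types::Fd(listen_fd),
//         std::ptr::null_mut(),
//         std::ptr::null_mut(),
//     )
//     .flags(libc::SOCK_CLOEXEC)
//     .build()
//     .user_data(0x05);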

opcode! {
    /// Attempt to cancel an already issued request identified by its
    /// `user_data`.
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    /// A timeout linked to the previous operation in the submission chain.
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    /// Connect a socket, equivalent to `connect(2)`.
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}

opcode! {
    /// Preallocate or deallocate file space, equivalent to `fallocate(2)`.
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}

opcode! {
    /// Open a file relative to `dirfd`, equivalent to `openat(2)`.
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
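
// Illustrative sketch for `OpenAt`. The path must be a NUL-terminated C string
// that outlives the operation; the `CString` handling below is an assumption
// about the caller, only the builder calls come from this module:
//
//     let path = std::ffi::CString::new("/tmp/example.txt").unwrap();
//     let open = OpenAt::new(types::Fd(libc::AT_FDCWD), path.as_ptr())
//         .flags(libc::O_CREAT | libc::O_WRONLY)
//         .mode(0o644)
//         .build()
//         .user_data(0x06);
//     // The CQE result is the opened fd, or a fixed slot when `file_index` is set.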

opcode! {
    /// Close a file descriptor, or a fixed file slot, equivalent to `close(2)`.
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    /// Update registered (fixed) files in place, starting at `offset`.
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}

opcode! {
    /// Get extended file status, equivalent to `statx(2)`.
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Read into a buffer, equivalent to `pread(2)`.
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
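
// Illustrative sketch for `Read` (the fd is assumed). Unlike `ReadFixed`, the
// buffer does not have to be registered in advance, but it must stay valid
// until the CQE for this operation has been reaped:
//
//     let mut buf = vec![0u8; 4096];
//     let read = Read::new(types::Fd(fd), buf.as_mut_ptr(), buf.len() as u32)
//         .offset(0)
//         .build()
//         .user_data(0x07);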

opcode! {
    /// Write from a buffer, equivalent to `pwrite(2)`.
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
    /// Predeclare an access pattern for file data, equivalent to
    /// `posix_fadvise(2)`.
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
    /// Give advice about the use of memory, equivalent to `madvise(2)`.
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
    /// Send data on a socket, equivalent to `send(2)`.
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,

        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Receive data from a socket, equivalent to `recv(2)`.
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Multishot variant of `Recv`; repeatedly receives into provided buffers
    /// taken from `buf_group` until cancelled or an error occurs.
    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}
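
// Illustrative sketch for `RecvMulti`. It requires a group of provided buffers
// (for example registered via `ProvideBuffers` below, or through a buffer
// ring); the kernel picks a buffer per message and reports its id in the CQE
// flags. The socket fd and the group id constant are assumptions here:
//
//     let recv_multi = RecvMulti::new(types::Fd(socket_fd), BUF_GROUP_ID)
//         .build()
//         .user_data(0x08);
//     // Each CQE keeps IORING_CQE_F_MORE set while the request remains armed.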

opcode! {
    /// Open a file with extended options, equivalent to `openat2(2)`.
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Modify an epoll interest list, equivalent to `epoll_ctl(2)`.
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}

opcode! {
    /// Splice data between two file descriptors, equivalent to `splice(2)`;
    /// at least one side must be a pipe.
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Provide `nbufs` buffers of `len` bytes each to the kernel for buffer
    /// selection, registered under group `bgid` starting at buffer id `bid`.
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}
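
// Illustrative sketch for `ProvideBuffers`: hand eight 4 KiB buffers, carved
// out of one contiguous allocation, to the kernel under a caller-chosen group
// id. Buffer-select operations such as `Recv`/`RecvMulti` with the matching
// `buf_group` then consume them. The pool and `BUF_GROUP_ID` are assumptions
// of this example:
//
//     const NBUFS: u16 = 8;
//     const BUF_LEN: i32 = 4096;
//     let mut pool = vec![0u8; NBUFS as usize * BUF_LEN as usize];
//     let provide = ProvideBuffers::new(pool.as_mut_ptr(), BUF_LEN, NBUFS, BUF_GROUP_ID, 0)
//         .build()
//         .user_data(0x09);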

opcode! {
    /// Remove `nbufs` previously provided buffers from group `bgid`.
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
    /// Duplicate pipe content without consuming it, equivalent to `tee(2)`.
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}

opcode! {
    /// Shut down part of a full-duplex connection, equivalent to `shutdown(2)`.
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = how as _;
        Entry(sqe)
    }
}

opcode! {
    /// Rename a file, equivalent to `renameat2(2)`.
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Remove a directory entry, equivalent to `unlinkat(2)`.
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Create a directory, equivalent to `mkdirat(2)`.
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        Entry(sqe)
    }
}

opcode! {
    /// Create a symbolic link, equivalent to `symlinkat(2)`.
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}

opcode! {
    /// Create a hard link, equivalent to `linkat(2)`.
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Send a message (a CQE carrying `result` and `user_data`) to the ring
    /// identified by `ring_fd`.
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe { sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS };
        }
        Entry(sqe)
    }
}

opcode! {
    /// Cancel in-flight requests matching the criteria described by the given
    /// `CancelBuilder`.
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}

opcode! {
    /// Pass a 16-byte command payload through to the target file or device
    /// driver.
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 16] = [0u8; 16]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    /// Pass an 80-byte command payload through to the target file or device
    /// driver; builds a 128-byte (big) submission queue entry.
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 80] = [0u8; 80]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;

        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry128(Entry(sqe), cmd2)
    }
}

opcode! {
    /// Create a socket, equivalent to `socket(2)`.
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: types::RwFlags = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Multishot variant of `Accept`; keeps accepting connections from one SQE
    /// until it is cancelled or an error occurs.
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        allocate_file_index: bool = false,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}

opcode! {
    /// Send a registered (fixed) file descriptor from this ring to the ring
    /// identified by `ring_fd`.
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd;
        sqe.__bindgen_anon_1.off = user_data;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}

opcode! {
    /// Zero-copy variant of `Send`; the buffer must stay valid until the
    /// notification completion for this request arrives.
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        buf_index: Option<u16> = None,
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        flags: i32 = 0,
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}
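
// Usage note for `SendZc` (sketch; the socket fd and `data` buffer are
// assumed). A zero-copy send normally produces two completions: the first
// carries the send result with IORING_CQE_F_MORE set, and a later
// notification CQE signals that the kernel no longer references the buffer,
// which is the point where the buffer may be reused or freed:
//
//     let send_zc = SendZc::new(types::Fd(socket_fd), data.as_ptr(), data.len() as u32)
//         .build()
//         .user_data(0x0a);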

opcode! {
    #[derive(Debug)]
    /// Zero-copy variant of `SendMsg`, equivalent to `sendmsg(2)`.
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Wait on a futex, comparable to `futex(2)` with `FUTEX_WAIT_BITSET`.
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Wake waiters on a futex, comparable to `futex(2)` with `FUTEX_WAKE_BITSET`.
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Wait on multiple futexes at once, comparable to `futex_waitv(2)`.
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Install a fixed (direct) file descriptor into the process's regular
    /// file descriptor table.
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}

opcode! {
    #[derive(Debug)]
    /// Truncate a file to `len` bytes, equivalent to `ftruncate(2)`.
    pub struct Ftruncate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FTRUNCATE;

    pub fn build(self) -> Entry {
        let Ftruncate { fd, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = len;
        Entry(sqe)
    }
}

opcode! {
    /// Bundle variant of `Send`; sends from one or more provided buffers taken
    /// from `buf_group` in a single operation.
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Bundle variant of `Recv`; a single receive may fill more than one
    /// provided buffer from `buf_group`.
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}

opcode! {
    /// Multishot bundle variant of `Recv`; repeatedly receives into provided
    /// buffers from `buf_group`, and each completion may cover more than one
    /// buffer.
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}