1#![allow(clippy::new_without_default)]
4
5use std::convert::TryInto;
6use std::mem;
7use std::os::unix::io::RawFd;
8
9use crate::squeue::Entry;
10use crate::squeue::Entry128;
11use crate::sys;
12use crate::types::{self, sealed};
13
// Writes either a raw file descriptor or a registered (fixed) file-table
// index into `sqe.fd`. In the fixed case the slot index is stored in the
// `fd` field and the FIXED_FILE submission flag is set so the kernel
// interprets the value as a registered-file slot, not a descriptor.
macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                // Slot index is a u32; stored into the i32 `fd` field as-is.
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}
25
// Generates one submission-queue opcode type.
//
// Grammar of an invocation:
//   - fields before `;;` are required constructor arguments of `new()`;
//   - fields after `;;` are optional: they receive the given default in
//     `new()` and get a builder-style setter method each;
//   - `pub const CODE = ...;` becomes the associated opcode constant;
//   - `pub fn build(self) -> Entry { ... }` lowers the struct into a raw SQE.
macro_rules! opcode {
    // A field typed `impl sealed::UseFixed` is stored as `sealed::Target`
    // (either a raw fd or a fixed file-table slot)...
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    // ...while `impl sealed::UseFd` is stored as a plain `RawFd`.
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    // Any other type is stored verbatim.
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    // `.into()` converts e.g. `Fd`/`Fixed` into `sealed::Target`.
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            pub const CODE: u8 = $opcode as _;

            // One consuming setter per optional field.
            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}
96
97#[inline(always)]
99fn sqe_zeroed() -> sys::io_uring_sqe {
100 unsafe { mem::zeroed() }
101}
102
// IORING_OP_NOP: does nothing; useful for testing the ring itself.
opcode! {
    #[derive(Debug)]
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // -1: this SQE is not associated with any file.
        sqe.fd = -1;
        Entry(sqe)
    }
}
121
// IORING_OP_READV: vectored read from `fd` into `len` iovec entries at
// `offset`. `buf_group` selects a provided-buffer group when buffer
// selection is enabled on the entry.
opcode! {
    #[derive(Debug)]
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        // addr = iovec array pointer, len = number of iovecs.
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
160
// IORING_OP_WRITEV: vectored write to `fd` from `len` iovec entries at
// `offset`. Mirror image of `Readv`, minus buffer selection.
opcode! {
    #[derive(Debug)]
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        // addr = iovec array pointer, len = number of iovecs.
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}
196
// IORING_OP_FSYNC: file sync on `fd`; `FsyncFlags` selects e.g. datasync
// behaviour via the `fsync_flags` SQE field.
opcode! {
    #[derive(Debug)]
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}
228
// IORING_OP_READ_FIXED: read into a pre-registered buffer. `buf` must point
// inside the registered buffer identified by `buf_index`.
opcode! {
    #[derive(Debug)]
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        // Index of the registered buffer this read targets.
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}
271
// IORING_OP_WRITE_FIXED: write from a pre-registered buffer. Mirror image
// of `ReadFixed` with a const buffer pointer.
opcode! {
    #[derive(Debug)]
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        // Index of the registered buffer this write sources from.
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}
314
// IORING_OP_POLL_ADD: poll `fd` for the events in `flags`. With
// `multi = true` the poll is multishot (IORING_POLL_ADD_MULTI): it keeps
// posting completions instead of completing once.
opcode! {
    #[derive(Debug)]
    pub struct PollAdd {
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        // poll32_events is stored little-endian-halfword order; on
        // big-endian targets swap the two 16-bit halves of the mask.
        // NOTE(review): mirrors liburing's __io_uring_prep_poll_mask —
        // confirm against the kernel headers if this is touched.
        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}
363
// IORING_OP_POLL_REMOVE: cancel an earlier PollAdd, matched by the
// `user_data` value of the poll entry (carried in the `addr` field).
opcode! {
    #[derive(Debug)]
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
387
// IORING_OP_SYNC_FILE_RANGE: sync `len` bytes of `fd` starting at `offset`;
// `flags` is passed through in `sync_range_flags`.
opcode! {
    #[derive(Debug)]
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        offset: u64 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}
419
// IORING_OP_SENDMSG: sendmsg-style write of one msghdr on `fd`.
opcode! {
    #[derive(Debug)]
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        // len is the msghdr count; always exactly one.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}
449
// IORING_OP_RECVMSG: recvmsg-style read of one msghdr on `fd`; `buf_group`
// selects a provided-buffer group when buffer selection is enabled.
opcode! {
    #[derive(Debug)]
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        // len is the msghdr count; always exactly one.
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
480
// Multishot variant of RecvMsg (same IORING_OP_RECVMSG opcode): forces
// buffer selection from `buf_group` and sets IORING_RECV_MULTISHOT in
// `ioprio` so the request keeps posting completions.
opcode! {
    #[derive(Debug)]
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        // Multishot recvmsg requires kernel-provided buffers.
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}
531
// IORING_OP_TIMEOUT: complete after the given timespec elapses, or earlier
// once `count` other completions have been posted (carried in `off`).
opcode! {
    #[derive(Debug)]
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        count: u32 = 0,

        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        // addr points at exactly one timespec (len = 1).
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}
567
// IORING_OP_TIMEOUT_REMOVE: cancel a pending Timeout, matched by the
// timeout entry's `user_data` (carried in `addr`).
opcode! {
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
589
// Update a pending Timeout's timespec. Same opcode as TimeoutRemove, but
// with IORING_TIMEOUT_UPDATE OR'd into the timeout flags; the new timespec
// pointer travels in `off` and the target entry's user_data in `addr`.
opcode! {
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}
614
// IORING_OP_ACCEPT: accept a connection on `fd`. `file_index` optionally
// installs the accepted fd directly into a fixed file-table slot.
opcode! {
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        // addrlen pointer goes into the second address slot (addr2).
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
643
// setsockopt via IORING_OP_URING_CMD with the SOCKET_URING_OP_SETSOCKOPT
// sub-command; level/optname/optlen/optval are spread across the
// command-specific SQE fields.
opcode! {
    pub struct SetSockOpt {
        fd: { impl sealed::UseFixed },
        level: { u32 },
        optname: { u32 },
        optval: { *const libc::c_void },
        optlen: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;

        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
        sqe.__bindgen_anon_5.optlen = optlen;
        // optval pointer is written into the 16-byte inline cmd area.
        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
        Entry(sqe)
    }
}
673
// IORING_OP_ASYNC_CANCEL: cancel an in-flight request matched by its
// `user_data` (carried in `addr`).
opcode! {
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;

    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}
695
// IORING_OP_LINK_TIMEOUT: timeout that applies to the previous, linked SQE
// rather than standing alone. Field layout matches `Timeout` (one timespec
// in `addr`, len = 1).
opcode! {
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}
720
// IORING_OP_CONNECT: connect `fd` to `addr`. The socklen value (not a
// pointer) is carried in the `off` field.
opcode! {
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}
743
// IORING_OP_FALLOCATE: fallocate on `fd`. Note the unusual mapping:
// the byte length rides in `addr` and the fallocate mode in `len`.
opcode! {
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}
770
// IORING_OP_OPENAT: openat relative to `dirfd`. `mode` rides in `len`,
// open flags in `open_flags`; `file_index` optionally installs the new fd
// into a fixed file-table slot instead of returning it.
opcode! {
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
799
// IORING_OP_CLOSE: close a raw fd or release a fixed file-table slot.
// The fixed case does NOT use assign_fd!/FIXED_FILE: the slot is passed
// one-based via `file_index` while `fd` is zeroed.
opcode! {
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                // Kernel convention: slot index is stored offset by one.
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}
826
// IORING_OP_FILES_UPDATE: replace `len` entries of the registered file
// table, starting at `offset`, with the descriptors in `fds`.
opcode! {
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}
852
// IORING_OP_STATX: statx relative to `dirfd`. Mapping: mask in `len`,
// output statx buffer pointer in `off`, AT_* flags in `statx_flags`.
opcode! {
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}
882
// IORING_OP_READ: plain (non-vectored) read of `len` bytes into `buf` at
// `offset`; `buf_group` selects a provided-buffer group when buffer
// selection is enabled on the entry.
opcode! {
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
932
// IORING_OP_WRITE: plain (non-vectored) write of `len` bytes from `buf`
// at `offset`. Mirror image of `Read`, minus buffer selection.
opcode! {
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}
979
// IORING_OP_FADVISE: posix_fadvise-style hint for a byte range of `fd`;
// the advice value rides in `fadvise_advice`.
opcode! {
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // off_t length is narrowed into the u32 len field.
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}
1004
// IORING_OP_MADVISE: madvise-style hint for the memory range at `addr`;
// reuses the `fadvise_advice` SQE field for the advice value.
opcode! {
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}
1028
// IORING_OP_SEND: send `len` bytes from `buf` on `fd`. An optional
// destination address (sendto-style) goes into addr2/addr_len.
opcode! {
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,

        // Null pointer + zero length means "no destination" (plain send).
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}
1062
// IORING_OP_RECV: receive up to `len` bytes into `buf` from `fd`;
// `buf_group` selects a provided-buffer group when buffer selection is
// enabled on the entry.
opcode! {
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
1089
// Multishot variant of Recv (same IORING_OP_RECV opcode): no caller
// buffer; data lands in kernel-provided buffers from `buf_group`, and
// IORING_RECV_MULTISHOT keeps the request posting completions.
opcode! {
    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        // Multishot recv requires kernel-provided buffers.
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}
1128
// IORING_OP_OPENAT2: openat2 relative to `dirfd`. The `open_how` struct
// pointer goes into `off` and its size into `len`; `file_index` optionally
// installs the new fd into a fixed file-table slot.
opcode! {
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
1156
// IORING_OP_EPOLL_CTL: epoll_ctl on `epfd`. The epoll op rides in `len`,
// the target fd in `off`, and the event struct pointer in `addr`.
opcode! {
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}
1181
// IORING_OP_SPLICE: move `len` bytes from `fd_in` to `fd_out`. The SQE's
// main fd slot holds fd_out; fd_in goes into `splice_fd_in`, and a fixed
// fd_in is signalled via the SPLICE_F_FD_IN_FIXED flag rather than the
// FIXED_FILE submission flag.
opcode! {
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}
1224
// IORING_OP_PROVIDE_BUFFERS: hand `nbufs` buffers of `len` bytes each
// (contiguous at `addr`) to the kernel under group `bgid`, with buffer IDs
// starting at `bid`. Note the mapping: nbufs rides in `fd`, bid in `off`.
opcode! {
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}
1253
// IORING_OP_REMOVE_BUFFERS: take back `nbufs` provided buffers from group
// `bgid`. As with ProvideBuffers, nbufs rides in the `fd` field.
opcode! {
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}
1275
// IORING_OP_TEE: duplicate up to `len` bytes from `fd_in` to `fd_out`
// without consuming input. Same fd routing as Splice: fd_out in the main
// fd slot, fd_in in `splice_fd_in` with SPLICE_F_FD_IN_FIXED when fixed.
opcode! {
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}
1312
// IORING_OP_SHUTDOWN: shut down a socket; the `how` value (SHUT_RD /
// SHUT_WR / SHUT_RDWR semantics) rides in the `len` field.
opcode! {
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = how as _;
        Entry(sqe)
    }
}
1336
// IORING_OP_RENAMEAT: renameat2-style rename. Mapping: old path in `addr`,
// new dirfd in `len`, new path in `off`, flags in `rename_flags`.
opcode! {
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}
1368
// IORING_OP_UNLINKAT: unlinkat relative to `dirfd`; AT_* flags ride in
// `unlink_flags`.
opcode! {
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}
1392
// IORING_OP_MKDIRAT: mkdirat relative to `dirfd`; the mode rides in `len`.
opcode! {
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        Entry(sqe)
    }
}
1417
// IORING_OP_SYMLINKAT: create a symlink at `linkpath` (relative to
// `newdirfd`) pointing to `target`. Target in `addr`, link path in `addr2`.
opcode! {
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}
1440
// IORING_OP_LINKAT: linkat-style hard link. Mapping: old path in `addr`,
// new dirfd in `len`, new path in `addr2`, AT_* flags in `hardlink_flags`.
opcode! {
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}
1467
// IORING_OP_MSG_RING (IORING_MSG_DATA sub-op): post a completion with
// `result` and `user_data` onto another ring (`ring_fd`). The sub-op is
// carried in `addr`; optional user CQE flags go through `file_index`
// together with IORING_MSG_RING_FLAGS_PASS.
opcode! {
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS};
        }
        Entry(sqe)
    }
}
1500
// Flexible cancellation (same IORING_OP_ASYNC_CANCEL opcode as
// AsyncCancel): the CancelBuilder supplies the match criteria — fd,
// user_data (0 when unset), and cancel flags.
opcode! {
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}
1525
// IORING_OP_URING_CMD with a 16-byte command payload that fits entirely
// in the SQE's inline cmd area. `buf_index` opts into a registered buffer
// via IORING_URING_CMD_FIXED.
opcode! {
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 16] = [0u8; 16]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        // Copy the 16-byte payload into the inline cmd area of the SQE.
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry(sqe)
    }
}
1558
// IORING_OP_URING_CMD with an 80-byte command payload: the first 16 bytes
// go into the SQE's inline cmd area and the remaining 64 into the second
// half of a big (128-byte) submission entry — hence Entry128.
opcode! {
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        buf_index: Option<u16> = None,
        cmd: [u8; 80] = [0u8; 80]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;

        // Split: first 16 bytes inline, trailing 64 bytes for the second SQE half.
        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry128(Entry(sqe), cmd2)
    }
}
1594
// IORING_OP_SOCKET: create a socket. Mapping: domain in `fd`, type in
// `off`, protocol in `len`; `file_index` optionally installs the new
// socket into a fixed file-table slot instead of returning an fd.
opcode! {
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: types::RwFlags = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}
1631
opcode! {
    /// Multishot accept (`IORING_OP_ACCEPT` with `IORING_ACCEPT_MULTISHOT`
    /// set in the SQE's `ioprio` field).
    ///
    /// With `allocate_file_index`, accepted sockets are placed into
    /// kernel-chosen free slots of the fixed-file table
    /// (`IORING_FILE_INDEX_ALLOC`) instead of producing plain fds.
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        /// Let the kernel pick a free fixed-file slot for each accepted
        /// connection.
        allocate_file_index: bool = false,
        /// `accept4(2)`-style flags, written to the SQE's `accept_flags`.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        // Multishot mode is signalled through `ioprio`, not a flags field.
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}
1663
opcode! {
    /// Send a fixed-table file descriptor from this ring to another ring
    /// (`IORING_OP_MSG_RING` with the `IORING_MSG_SEND_FD` sub-command).
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        /// Extra `msg_ring_flags` for the operation.
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        // The sub-command selector travels in `addr`.
        sqe.__bindgen_anon_1.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd; // target ring's fd
        // Opaque value forwarded via the offset field (named `user_data`;
        // presumably surfaced in the destination ring's CQE — confirm
        // against kernel msg_ring documentation).
        sqe.__bindgen_anon_1.off = user_data;
        // Source fixed slot rides in the extended `addr3` tail area.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}
1693
opcode! {
    /// Zero-copy send (`IORING_OP_SEND_ZC`) of `len` bytes starting at `buf`.
    ///
    /// `zc_flags` seeds the SQE's `ioprio` field; selecting a registered
    /// buffer via `buf_index` additionally ORs in
    /// `IORING_RECVSEND_FIXED_BUF`. A non-null `dest_addr` provides a
    /// `sendto(2)`-style destination through `addr2`.
    pub struct SendZc {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        /// Registered-buffer index to send from (fixed-buffer mode).
        buf_index: Option<u16> = None,
        /// Optional destination address; null means connected-socket send.
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
        /// Flags written to the SQE's `msg_flags` field.
        flags: i32 = 0,
        /// Zero-copy flag bits carried in `ioprio`.
        zc_flags: u16 = 0,
    }

    pub const CODE = sys::IORING_OP_SEND_ZC;

    pub fn build(self) -> Entry {
        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio = zc_flags;
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            // Fixed-buffer mode must be flagged alongside the index.
            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
        }
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        Entry(sqe)
    }
}
1749
opcode! {
    /// Zero-copy `sendmsg(2)` analogue (`IORING_OP_SENDMSG_ZC`).
    ///
    /// `msg` must point to a `libc::msghdr` that remains valid until the
    /// operation completes.
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        /// Raw value for the SQE's `ioprio` field.
        ioprio: u16 = 0,
        /// Flags written to the SQE's `msg_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1; // exactly one msghdr
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}
1781
opcode! {
    /// Wait on a futex word (`IORING_OP_FUTEX_WAIT`).
    ///
    /// Note the unusual field packing used by this opcode: `futex_flags`
    /// travels in the SQE's `fd` slot, `val` in `off`, `mask` in the
    /// extended `addr3` tail, while the optional `flags` land in the
    /// SQE field actually named `futex_flags`.
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        /// Operation flags written to the SQE's `futex_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _; // futex2 flags ride in `fd`
        sqe.__bindgen_anon_2.addr = futex as usize as _; // user-space futex word
        sqe.__bindgen_anon_1.off = val; // value operand carried in `off`
        // `mask` is written through the big-SQE tail union.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
1818
opcode! {
    /// Wake waiters on a futex word (`IORING_OP_FUTEX_WAKE`).
    ///
    /// Uses the same field packing as `FutexWait`: `futex_flags` in the
    /// SQE's `fd` slot, `val` in `off`, `mask` in the extended `addr3`
    /// tail, and the optional `flags` in the field named `futex_flags`.
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        /// Operation flags written to the SQE's `futex_flags` field.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _; // futex2 flags ride in `fd`
        sqe.__bindgen_anon_2.addr = futex as usize as _; // user-space futex word
        sqe.__bindgen_anon_1.off = val; // value operand carried in `off`
        // `mask` is written through the big-SQE tail union.
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
1852
1853opcode! {
1854 #[derive(Debug)]
1860 pub struct FutexWaitV {
1861 futexv: { *const types::FutexWaitV },
1862 nr_futex: { u32 },
1863 ;;
1864 flags: u32 = 0
1865 }
1866
1867 pub const CODE = sys::IORING_OP_FUTEX_WAITV;
1868
1869 pub fn build(self) -> Entry {
1870 let FutexWaitV { futexv, nr_futex, flags } = self;
1871
1872 let mut sqe = sqe_zeroed();
1873 sqe.opcode = Self::CODE;
1874 sqe.__bindgen_anon_2.addr = futexv as usize as _;
1875 sqe.len = nr_futex;
1876 sqe.__bindgen_anon_3.futex_flags = flags;
1877 Entry(sqe)
1878 }
1879}
1880
opcode! {
    /// Install a file from the fixed-file table into the process fd table
    /// (`IORING_OP_FIXED_FD_INSTALL`).
    #[derive(Debug)]
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        // `fd` is always a fixed-table slot here, so FIXED_FILE is set
        // unconditionally rather than via assign_fd!.
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}
1908
1909opcode! {
1912 #[derive(Debug)]
1914 pub struct Ftruncate {
1915 fd: { impl sealed::UseFixed },
1916 len: { u64 },
1917 ;;
1918 }
1919
1920 pub const CODE = sys::IORING_OP_FTRUNCATE;
1921
1922 pub fn build(self) -> Entry {
1923 let Ftruncate { fd, len } = self;
1924
1925 let mut sqe = sqe_zeroed();
1926 sqe.opcode = Self::CODE;
1927 assign_fd!(sqe.fd = fd);
1928 sqe.__bindgen_anon_1.off = len;
1929 Entry(sqe)
1930 }
1931}
1932
opcode! {
    /// Bundled send (`IORING_OP_SEND` with `IORING_RECVSEND_BUNDLE`),
    /// sending from buffers of the provided-buffer group `buf_group`.
    ///
    /// Buffer selection is mandatory: the entry always carries the
    /// `BUFFER_SELECT` SQE flag.
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// Flags written to the SQE's `msg_flags` field.
        flags: i32 = 0,
        /// Byte limit for the send; 0 leaves it to the kernel default.
        /// NOTE(review): 0-means-unlimited is inferred from the default —
        /// confirm against kernel send-bundle documentation.
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        // Bundle mode is signalled through `ioprio`; buffer selection
        // requires the BUFFER_SELECT SQE flag plus the group id.
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
1961
opcode! {
    /// Bundled receive (`IORING_OP_RECV` with `IORING_RECVSEND_BUNDLE`),
    /// filling buffers from the provided-buffer group `buf_group`.
    ///
    /// Buffer selection is mandatory: the entry always carries the
    /// `BUFFER_SELECT` SQE flag.
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// Flags written to the SQE's `msg_flags` field.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        // Buffer selection plus bundle mode, mirroring SendBundle.
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}
1994
opcode! {
    /// Multishot bundled receive (`IORING_OP_RECV` with both
    /// `IORING_RECV_MULTISHOT` and `IORING_RECVSEND_BUNDLE` set in
    /// `ioprio`), using provided-buffer group `buf_group`.
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        /// Flags written to the SQE's `msg_flags` field.
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        // Plain assignment first (sqe is zeroed, so nothing is lost),
        // then OR in the bundle bit.
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}