io_uring/opcode.rs

//! Operation codes that can be used to construct [`squeue::Entry`](crate::squeue::Entry)s.

#![allow(clippy::new_without_default)]

use std::convert::TryInto;
use std::mem;
use std::os::unix::io::RawFd;

use crate::squeue::Entry;
use crate::squeue::Entry128;
use crate::sys;
use crate::types::{self, sealed};

macro_rules! assign_fd {
    ( $sqe:ident . fd = $opfd:expr ) => {
        match $opfd {
            sealed::Target::Fd(fd) => $sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                $sqe.fd = idx as _;
                $sqe.flags |= crate::squeue::Flags::FIXED_FILE.bits();
            }
        }
    };
}

macro_rules! opcode {
    (@type impl sealed::UseFixed ) => {
        sealed::Target
    };
    (@type impl sealed::UseFd ) => {
        RawFd
    };
    (@type $name:ty ) => {
        $name
    };
    (
        $( #[$outer:meta] )*
        pub struct $name:ident {
            $( #[$new_meta:meta] )*

            $( $field:ident : { $( $tnt:tt )+ } ),*

            $(,)?

            ;;

            $(
                $( #[$opt_meta:meta] )*
                $opt_field:ident : $opt_tname:ty = $default:expr
            ),*

            $(,)?
        }

        pub const CODE = $opcode:expr;

        $( #[$build_meta:meta] )*
        pub fn build($self:ident) -> $entry:ty $build_block:block
    ) => {
        $( #[$outer] )*
        pub struct $name {
            $( $field : opcode!(@type $( $tnt )*), )*
            $( $opt_field : $opt_tname, )*
        }

        impl $name {
            $( #[$new_meta] )*
            #[inline]
            pub fn new($( $field : $( $tnt )* ),*) -> Self {
                $name {
                    $( $field: $field.into(), )*
                    $( $opt_field: $default, )*
                }
            }

            /// The opcode of the operation. This can be passed to
            /// [`Probe::is_supported`](crate::Probe::is_supported) to check if this operation is
            /// supported with the current kernel.
            pub const CODE: u8 = $opcode as _;

            $(
                $( #[$opt_meta] )*
                #[inline]
                pub const fn $opt_field(mut self, $opt_field: $opt_tname) -> Self {
                    self.$opt_field = $opt_field;
                    self
                }
            )*

            $( #[$build_meta] )*
            #[inline]
            pub fn build($self) -> $entry $build_block
        }
    }
}

/// inline zeroed to improve codegen
#[inline(always)]
fn sqe_zeroed() -> sys::io_uring_sqe {
    unsafe { mem::zeroed() }
}

opcode! {
    /// Do not perform any I/O.
    ///
    /// This is useful for testing the performance of the io_uring implementation itself.
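    ///
    /// A minimal usage sketch (error handling via `unwrap` for brevity; assumes io_uring is
    /// available on the running kernel):
    ///
    /// ```no_run
    /// use io_uring::{opcode, IoUring};
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let nop = opcode::Nop::new().build().user_data(0x01);
    /// // Nop references no external resources, so nothing has to be kept alive here.
    /// unsafe { ring.submission().push(&nop).expect("submission queue is full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().expect("completion queue is empty");
    /// assert_eq!(cqe.user_data(), 0x01);
    /// ```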
    #[derive(Debug)]
    pub struct Nop { ;; }

    pub const CODE = sys::IORING_OP_NOP;

    pub fn build(self) -> Entry {
        let Nop {} = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        Entry(sqe)
    }
}

opcode! {
    /// Vectored read, equivalent to `preadv2(2)`.
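    ///
    /// A usage sketch (illustrative path; the iovec array and buffers must stay alive until the
    /// operation completes):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let file = std::fs::File::open("README.md").unwrap();
    /// let mut buf = vec![0u8; 1024];
    /// let iov = libc::iovec { iov_base: buf.as_mut_ptr() as *mut _, iov_len: buf.len() };
    /// let readv = opcode::Readv::new(types::Fd(file.as_raw_fd()), &iov, 1)
    ///     .offset(0)
    ///     .build()
    ///     .user_data(0x02);
    /// unsafe { ring.submission().push(&readv).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// assert!(cqe.result() >= 0, "readv failed: {}", cqe.result());
    /// ```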
    #[derive(Debug)]
    pub struct Readv {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        /// specified for read operations, contains a bitwise OR of per-I/O flags,
        /// as described in the `preadv2(2)` man page.
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READV;

    pub fn build(self) -> Entry {
        let Readv {
            fd,
            iovec, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Vectored write, equivalent to `pwritev2(2)`.
    #[derive(Debug)]
    pub struct Writev {
        fd: { impl sealed::UseFixed },
        iovec: { *const libc::iovec },
        len: { u32 },
        ;;
        ioprio: u16 = 0,
        offset: u64 = 0,
        /// specified for write operations, contains a bitwise OR of per-I/O flags,
        /// as described in the `pwritev2(2)` man page.
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITEV;

    pub fn build(self) -> Entry {
        let Writev {
            fd,
            iovec, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = iovec as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
    /// File sync, equivalent to `fsync(2)`.
    ///
    /// Note that, while I/O is initiated in the order in which it appears in the submission queue,
    /// completions are unordered. For example, an application which places a write I/O followed by
    /// an fsync in the submission queue cannot expect the fsync to apply to the write. The two
    /// operations execute in parallel, so the fsync may complete before the write is issued to the
    /// storage. The same is also true for previously issued writes that have not completed prior to
    /// the fsync.
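    ///
    /// A usage sketch (illustrative path; `unwrap` for brevity):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let file = std::fs::OpenOptions::new().write(true).open("data.bin").unwrap();
    /// let fsync = opcode::Fsync::new(types::Fd(file.as_raw_fd()))
    ///     .flags(types::FsyncFlags::DATASYNC)
    ///     .build()
    ///     .user_data(0x03);
    /// unsafe { ring.submission().push(&fsync).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// assert!(ring.completion().next().unwrap().result() >= 0);
    /// ```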
    #[derive(Debug)]
    pub struct Fsync {
        fd: { impl sealed::UseFixed },
        ;;
        /// The `flags` bit mask may contain either 0, for a normal file integrity sync,
        /// or [types::FsyncFlags::DATASYNC] to provide data sync only semantics.
        /// See the descriptions of `O_SYNC` and `O_DSYNC` in the `open(2)` manual page for more information.
        flags: types::FsyncFlags = types::FsyncFlags::empty()
    }

    pub const CODE = sys::IORING_OP_FSYNC;

    pub fn build(self) -> Entry {
        let Fsync { fd, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.fsync_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    /// Read from a file into a fixed buffer that has been previously registered with
    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
    ///
    /// The return values match those documented in the `preadv2(2)` man pages.
    #[derive(Debug)]
    pub struct ReadFixed {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        /// The offset of the file to read from.
        offset: u64 = 0,
        /// Specified for read operations, contains a bitwise OR of per-I/O flags, as described in
        /// the `preadv2(2)` man page.
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_READ_FIXED;

    pub fn build(self) -> Entry {
        let ReadFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
    /// Write to a file from a fixed buffer that has been previously registered with
    /// [`Submitter::register_buffers`](crate::Submitter::register_buffers).
    ///
    /// The return values match those documented in the `pwritev2(2)` man pages.
    #[derive(Debug)]
    pub struct WriteFixed {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        buf_index: { u16 },
        ;;
        ioprio: u16 = 0,
        /// The offset of the file to write to.
        offset: u64 = 0,
        /// Specified for write operations, contains a bitwise OR of per-I/O flags, as described in
        /// the `pwritev2(2)` man page.
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE_FIXED;

    pub fn build(self) -> Entry {
        let WriteFixed {
            fd,
            buf, len, offset,
            buf_index,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_index = buf_index;
        Entry(sqe)
    }
}

opcode! {
    /// Poll the specified fd.
    ///
    /// Unlike poll or epoll without `EPOLLONESHOT`, this interface defaults to working in one shot mode.
    /// That is, once the poll operation is completed, it will have to be resubmitted.
    ///
    /// If multi is set, the poll will work in multi shot mode instead. That means it will
    /// repeatedly trigger when the requested event becomes true, and hence multiple CQEs can be
    /// generated from this single submission. The CQE flags field will have IORING_CQE_F_MORE set
    /// on completion if the application should expect further CQE entries from the original
    /// request. If this flag isn't set on completion, then the poll request has been terminated
    /// and no further events will be generated. This mode is available since 5.13.
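    ///
    /// A single-shot usage sketch (illustrative address; the poll flag constants come from libc):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpStream;
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let socket = TcpStream::connect("127.0.0.1:8080").unwrap();
    /// let poll = opcode::PollAdd::new(types::Fd(socket.as_raw_fd()), libc::POLLIN as _)
    ///     .build()
    ///     .user_data(0x04);
    /// unsafe { ring.submission().push(&poll).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// // On success, the result contains the returned poll events.
    /// assert!(cqe.result() >= 0);
    /// ```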
    #[derive(Debug)]
    pub struct PollAdd {
        /// The bits that may be set in `flags` are defined in `<poll.h>`,
        /// and documented in `poll(2)`.
        fd: { impl sealed::UseFixed },
        flags: { u32 },
        ;;
        multi: bool = false
    }

    pub const CODE = sys::IORING_OP_POLL_ADD;

    pub fn build(self) -> Entry {
        let PollAdd { fd, flags, multi } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        if multi {
            sqe.len = sys::IORING_POLL_ADD_MULTI;
        }

        #[cfg(target_endian = "little")] {
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        #[cfg(target_endian = "big")] {
            let x = flags << 16;
            let y = flags >> 16;
            let flags = x | y;
            sqe.__bindgen_anon_3.poll32_events = flags;
        }

        Entry(sqe)
    }
}

opcode! {
    /// Remove an existing [poll](PollAdd) request.
    ///
    /// If found, the `result` method of the `cqueue::Entry` will return 0.
    /// If not found, `result` will return `-libc::ENOENT`.
    #[derive(Debug)]
    pub struct PollRemove {
        user_data: { u64 }
        ;;
    }

    pub const CODE = sys::IORING_OP_POLL_REMOVE;

    pub fn build(self) -> Entry {
        let PollRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    /// Sync a file segment with disk, equivalent to `sync_file_range(2)`.
    #[derive(Debug)]
    pub struct SyncFileRange {
        fd: { impl sealed::UseFixed },
        len: { u32 },
        ;;
        /// The `offset` method holds the offset in bytes.
        offset: u64 = 0,
        /// The `flags` method holds the flags for the command.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SYNC_FILE_RANGE;

    pub fn build(self) -> Entry {
        let SyncFileRange {
            fd,
            len, offset,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.sync_range_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Send a message on a socket, equivalent to `sendmsg(2)`.
    ///
    /// `fd` must be set to the socket file descriptor, `addr` must contain a pointer to the
    /// msghdr structure, and `flags` holds the flags associated with the system call.
    #[derive(Debug)]
    pub struct SendMsg {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG;

    pub fn build(self) -> Entry {
        let SendMsg { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Receive a message on a socket, equivalent to `recvmsg(2)`.
    ///
    /// See also the description of [`SendMsg`].
    #[derive(Debug)]
    pub struct RecvMsg {
        fd: { impl sealed::UseFixed },
        msg: { *mut libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsg { fd, msg, ioprio, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Receive multiple messages on a socket, equivalent to `recvmsg(2)`.
    ///
    /// Parameters:
    ///     msg:       For this multishot variant of RecvMsg, only the msg_namelen and msg_controllen
    ///                fields are relevant.
    ///     buf_group: The id of the provided buffer pool to use for each received message.
    ///
    /// See also the description of [`SendMsg`] and [`types::RecvMsgOut`].
    ///
    /// The multishot version allows the application to issue a single receive request, which
    /// repeatedly posts a CQE when data is available. It requires that the MSG_WAITALL flag is not set.
    /// Each CQE will take a buffer out of a provided buffer pool for receiving. The application
    /// should check the flags of each CQE, regardless of its result. If a posted CQE does not have
    /// the IORING_CQE_F_MORE flag set then the multishot receive will be done and the application
    /// should issue a new request.
    ///
    /// Unlike [`RecvMsg`], this multishot recvmsg will prepend a struct which describes the layout
    /// of the rest of the buffer in combination with the initial msghdr structure submitted with
    /// the request. Use [`types::RecvMsgOut`] to parse the data received and access its
    /// components.
    ///
    /// The recvmsg multishot variant is available since kernel 6.0.
    #[derive(Debug)]
    pub struct RecvMsgMulti {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        buf_group: { u16 },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RECVMSG;

    pub fn build(self) -> Entry {
        let RecvMsgMulti { fd, msg, buf_group, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = ioprio | (sys::IORING_RECV_MULTISHOT as u16);
        Entry(sqe)
    }
}

opcode! {
    /// Register a timeout operation.
    ///
    /// A timeout will trigger a wakeup event on the completion ring for anyone waiting for events.
    /// A timeout condition is met when either the specified timeout expires, or the specified number of events have completed.
    /// Either condition will trigger the event.
    /// The request will complete with `-ETIME` if the timeout got completed through expiration of the timer,
    /// or 0 if the timeout got completed through requests completing on their own.
    /// If the timeout was cancelled before it expired, the request will complete with `-ECANCELED`.
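    ///
    /// A usage sketch (the `Timespec` must stay alive until the timeout completes; here the
    /// timer is expected to expire, so the result is `-ETIME`):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let ts = types::Timespec::new().sec(1);
    /// let timeout = opcode::Timeout::new(&ts).build().user_data(0x05);
    /// unsafe { ring.submission().push(&timeout).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// assert_eq!(cqe.result(), -libc::ETIME);
    /// ```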
    #[derive(Debug)]
    pub struct Timeout {
        timespec: { *const types::Timespec },
        ;;
        /// `count` may contain a completion event count.
        count: u32 = 0,

        /// `flags` may contain [types::TimeoutFlags::ABS] for an absolute timeout value, or 0 for a relative timeout.
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT;

    pub fn build(self) -> Entry {
        let Timeout { timespec, count, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_1.off = count as _;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

// === 5.5 ===

opcode! {
    /// Attempt to remove an existing [timeout operation](Timeout).
    pub struct TimeoutRemove {
        user_data: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutRemove { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    /// Attempt to update an existing [timeout operation](Timeout) with a new timespec.
    /// The optional `count` value of the original timeout cannot be updated.
    pub struct TimeoutUpdate {
        user_data: { u64 },
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_TIMEOUT_REMOVE;

    pub fn build(self) -> Entry {
        let TimeoutUpdate { user_data, timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_1.off = timespec as _;
        sqe.__bindgen_anon_2.addr = user_data;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits() | sys::IORING_TIMEOUT_UPDATE;
        Entry(sqe)
    }
}

opcode! {
    /// Accept a new connection on a socket, equivalent to `accept4(2)`.
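    ///
    /// A usage sketch (illustrative listener; the peer address is not captured here, so null
    /// pointers are passed for `addr` and `addrlen`):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpListener;
    /// use std::os::unix::io::AsRawFd;
    /// use std::ptr;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    /// let accept = opcode::Accept::new(types::Fd(listener.as_raw_fd()), ptr::null_mut(), ptr::null_mut())
    ///     .build()
    ///     .user_data(0x06);
    /// unsafe { ring.submission().push(&accept).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// // On success the result is the new connection's file descriptor.
    /// assert!(cqe.result() >= 0);
    /// ```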
    pub struct Accept {
        fd: { impl sealed::UseFixed },
        addr: { *mut libc::sockaddr },
        addrlen: { *mut libc::socklen_t },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let Accept { fd, addr, addrlen, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.addr2 = addrlen as _;
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Set a socket option.
    pub struct SetSockOpt {
        fd: { impl sealed::UseFixed },
        level: { u32 },
        optname: { u32 },
        optval: { *const libc::c_void },
        optlen: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let SetSockOpt { fd, level, optname, optval, optlen, flags } = self;
        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = sys::SOCKET_URING_OP_SETSOCKOPT;

        sqe.__bindgen_anon_2.__bindgen_anon_1.level = level;
        sqe.__bindgen_anon_2.__bindgen_anon_1.optname = optname;
        sqe.__bindgen_anon_3.uring_cmd_flags = flags;
        sqe.__bindgen_anon_5.optlen = optlen;
        unsafe { *sqe.__bindgen_anon_6.optval.as_mut() = optval as u64 };
        Entry(sqe)
    }
}

opcode! {
    /// Attempt to cancel an already issued request.
    pub struct AsyncCancel {
        user_data: { u64 }
        ;;

        // TODO flags
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel { user_data } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = user_data;
        Entry(sqe)
    }
}

opcode! {
    /// This request must be linked with another request through
    /// [`Flags::IO_LINK`](crate::squeue::Flags::IO_LINK).
    /// Unlike [`Timeout`], [`LinkTimeout`] acts on the linked request, not the completion queue.
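    ///
    /// A sketch linking a read with a one-second timeout (illustrative path; the linked entry
    /// carries `IO_LINK` and is pushed immediately before the timeout):
    ///
    /// ```no_run
    /// use io_uring::{opcode, squeue, types, IoUring};
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let file = std::fs::File::open("README.md").unwrap();
    /// let mut buf = vec![0u8; 1024];
    /// let ts = types::Timespec::new().sec(1);
    ///
    /// let read = opcode::Read::new(types::Fd(file.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _)
    ///     .build()
    ///     .flags(squeue::Flags::IO_LINK)
    ///     .user_data(0x07);
    /// let timeout = opcode::LinkTimeout::new(&ts).build().user_data(0x08);
    ///
    /// unsafe {
    ///     let mut sq = ring.submission();
    ///     sq.push(&read).expect("queue full");
    ///     sq.push(&timeout).expect("queue full");
    /// }
    /// ring.submit_and_wait(2).unwrap();
    /// ```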
    pub struct LinkTimeout {
        timespec: { *const types::Timespec },
        ;;
        flags: types::TimeoutFlags = types::TimeoutFlags::empty()
    }

    pub const CODE = sys::IORING_OP_LINK_TIMEOUT;

    pub fn build(self) -> Entry {
        let LinkTimeout { timespec, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = timespec as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.timeout_flags = flags.bits();
        Entry(sqe)
    }
}

opcode! {
    /// Connect a socket, equivalent to `connect(2)`.
    pub struct Connect {
        fd: { impl sealed::UseFixed },
        addr: { *const libc::sockaddr },
        addrlen: { libc::socklen_t }
        ;;
    }

    pub const CODE = sys::IORING_OP_CONNECT;

    pub fn build(self) -> Entry {
        let Connect { fd, addr, addrlen } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.__bindgen_anon_1.off = addrlen as _;
        Entry(sqe)
    }
}

// === 5.6 ===

opcode! {
    /// Preallocate or deallocate space to a file, equivalent to `fallocate(2)`.
    pub struct Fallocate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
        offset: u64 = 0,
        mode: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FALLOCATE;

    pub fn build(self) -> Entry {
        let Fallocate { fd, len, offset, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = len;
        sqe.len = mode as _;
        sqe.__bindgen_anon_1.off = offset;
        Entry(sqe)
    }
}

opcode! {
    /// Open a file, equivalent to `openat(2)`.
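    ///
    /// A usage sketch (illustrative path; the C string must stay alive until completion):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::ffi::CString;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let path = CString::new("README.md").unwrap();
    /// let openat = opcode::OpenAt::new(types::Fd(libc::AT_FDCWD), path.as_ptr())
    ///     .flags(libc::O_RDONLY)
    ///     .build()
    ///     .user_data(0x09);
    /// unsafe { ring.submission().push(&openat).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// // On success the result is the newly opened file descriptor.
    /// assert!(cqe.result() >= 0);
    /// ```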
    pub struct OpenAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: i32 = 0,
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_OPENAT;

    pub fn build(self) -> Entry {
        let OpenAt { dirfd, pathname, file_index, flags, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        sqe.__bindgen_anon_3.open_flags = flags as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Close a file descriptor, equivalent to `close(2)`.
    ///
    /// Use a `types::Fixed(fd)` argument to close an io_uring direct descriptor.
    pub struct Close {
        fd: { impl sealed::UseFixed },
        ;;
    }

    pub const CODE = sys::IORING_OP_CLOSE;

    pub fn build(self) -> Entry {
        let Close { fd } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        match fd {
            sealed::Target::Fd(fd) => sqe.fd = fd,
            sealed::Target::Fixed(idx) => {
                sqe.fd = 0;
                sqe.__bindgen_anon_5.file_index = idx + 1;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    /// This command is an alternative to using
    /// [`Submitter::register_files_update`](crate::Submitter::register_files_update) which then
    /// works in an async fashion, like the rest of the io_uring commands.
    pub struct FilesUpdate {
        fds: { *const RawFd },
        len: { u32 },
        ;;
        offset: i32 = 0
    }

    pub const CODE = sys::IORING_OP_FILES_UPDATE;

    pub fn build(self) -> Entry {
        let FilesUpdate { fds, len, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = fds as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset as _;
        Entry(sqe)
    }
}

opcode! {
    /// Get file status, equivalent to `statx(2)`.
    pub struct Statx {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        statxbuf: { *mut types::statx },
        ;;
        flags: i32 = 0,
        mask: u32 = 0
    }

    pub const CODE = sys::IORING_OP_STATX;

    pub fn build(self) -> Entry {
        let Statx {
            dirfd, pathname, statxbuf,
            flags, mask
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mask;
        sqe.__bindgen_anon_1.off = statxbuf as _;
        sqe.__bindgen_anon_3.statx_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
    ///
    /// * `fd` is the file descriptor to be operated on,
    /// * `addr` contains the buffer in question,
    /// * `len` contains the length of the IO operation,
    ///
    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
    /// See also `read(2)` and `write(2)` for the general description of the related system call.
    ///
    /// Available since 5.6.
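    ///
    /// A usage sketch, following the crate's README pattern (illustrative path; the buffer must
    /// outlive the operation):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let file = std::fs::File::open("README.md").unwrap();
    /// let mut buf = vec![0u8; 1024];
    /// let read = opcode::Read::new(types::Fd(file.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _)
    ///     .build()
    ///     .user_data(0x42);
    /// unsafe { ring.submission().push(&read).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// assert!(cqe.result() >= 0, "read error: {}", cqe.result());
    /// ```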
    pub struct Read {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        /// `offset` contains the read or write offset.
        ///
        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
        /// If `offset` is set to `-1`, the offset will use (and advance) the file position,
        /// like the `read(2)` and `write(2)` system calls.
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_READ;

    pub fn build(self) -> Entry {
        let Read {
            fd,
            buf, len, offset,
            ioprio, rw_flags,
            buf_group
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Issue the equivalent of a `pread(2)` or `pwrite(2)` system call
    ///
    /// * `fd` is the file descriptor to be operated on,
    /// * `addr` contains the buffer in question,
    /// * `len` contains the length of the IO operation,
    ///
    /// These are non-vectored versions of the `IORING_OP_READV` and `IORING_OP_WRITEV` opcodes.
    /// See also `read(2)` and `write(2)` for the general description of the related system call.
    ///
    /// Available since 5.6.
    pub struct Write {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        /// `offset` contains the read or write offset.
        ///
        /// If `fd` does not refer to a seekable file, `offset` must be set to zero.
        /// If `offset` is set to `-1`, the offset will use (and advance) the file position,
        /// like the `read(2)` and `write(2)` system calls.
        offset: u64 = 0,
        ioprio: u16 = 0,
        rw_flags: types::RwFlags = 0
    }

    pub const CODE = sys::IORING_OP_WRITE;

    pub fn build(self) -> Entry {
        let Write {
            fd,
            buf, len, offset,
            ioprio, rw_flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.rw_flags = rw_flags;
        Entry(sqe)
    }
}

opcode! {
    /// Predeclare an access pattern for file data, equivalent to `posix_fadvise(2)`.
    pub struct Fadvise {
        fd: { impl sealed::UseFixed },
        len: { libc::off_t },
        advice: { i32 },
        ;;
        offset: u64 = 0,
    }

    pub const CODE = sys::IORING_OP_FADVISE;

    pub fn build(self) -> Entry {
        let Fadvise { fd, len, advice, offset } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = offset;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
    /// Give advice about use of memory, equivalent to `madvise(2)`.
    pub struct Madvise {
        addr: { *const libc::c_void },
        len: { libc::off_t },
        advice: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_MADVISE;

    pub fn build(self) -> Entry {
        let Madvise { addr, len, advice } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = -1;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_3.fadvise_advice = advice as _;
        Entry(sqe)
    }
}

opcode! {
    /// Send a message on a socket, equivalent to `send(2)`.
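    ///
    /// A usage sketch (illustrative address; the buffer must stay alive until completion):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpStream;
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let stream = TcpStream::connect("127.0.0.1:8080").unwrap();
    /// let buf = b"hello";
    /// let send = opcode::Send::new(types::Fd(stream.as_raw_fd()), buf.as_ptr(), buf.len() as _)
    ///     .build()
    ///     .user_data(0x10);
    /// unsafe { ring.submission().push(&send).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// // On success the result is the number of bytes sent.
    /// assert!(ring.completion().next().unwrap().result() >= 0);
    /// ```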
    pub struct Send {
        fd: { impl sealed::UseFixed },
        buf: { *const u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,

        /// Set the destination address, for sending from an unconnected socket.
        ///
        /// When set, `dest_addr_len` must be set as well.
        /// See also `man 3 io_uring_prep_send_set_addr`.
        dest_addr: *const libc::sockaddr = core::ptr::null(),
        dest_addr_len: libc::socklen_t = 0,
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let Send { fd, buf, len, flags, dest_addr, dest_addr_len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        Entry(sqe)
    }
}

opcode! {
    /// Receive a message from a socket, equivalent to `recv(2)`.
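    ///
    /// A usage sketch (illustrative address; the buffer must stay alive until completion):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpStream;
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let stream = TcpStream::connect("127.0.0.1:8080").unwrap();
    /// let mut buf = vec![0u8; 1024];
    /// let recv = opcode::Recv::new(types::Fd(stream.as_raw_fd()), buf.as_mut_ptr(), buf.len() as _)
    ///     .build()
    ///     .user_data(0x11);
    /// unsafe { ring.submission().push(&recv).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// // The result is the number of bytes received (0 means the peer closed the connection).
    /// assert!(ring.completion().next().unwrap().result() >= 0);
    /// ```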
    pub struct Recv {
        fd: { impl sealed::UseFixed },
        buf: { *mut u8 },
        len: { u32 },
        ;;
        flags: i32 = 0,
        buf_group: u16 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let Recv { fd, buf, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_2.addr = buf as _;
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}

opcode! {
    /// Receive multiple messages from a socket, equivalent to `recv(2)`.
    ///
    /// Parameter:
    ///     buf_group: The id of the provided buffer pool to use for each received message.
    ///
    /// MSG_WAITALL should not be set in flags.
    ///
    /// The multishot version allows the application to issue a single receive request, which
    /// repeatedly posts a CQE when data is available. Each CQE will take a buffer out of a
    /// provided buffer pool for receiving. The application should check the flags of each CQE,
    /// regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE flag set then
    /// the multishot receive will be done and the application should issue a new request.
    ///
    /// Multishot variants are available since kernel 6.0.

    pub struct RecvMulti {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMulti { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        Entry(sqe)
    }
}

opcode! {
    /// Open a file, equivalent to `openat2(2)`.
    pub struct OpenAt2 {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        how: { *const types::OpenHow }
        ;;
        file_index: Option<types::DestinationSlot> = None,
    }

    pub const CODE = sys::IORING_OP_OPENAT2;

    pub fn build(self) -> Entry {
        let OpenAt2 { dirfd, pathname, how, file_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mem::size_of::<sys::open_how>() as _;
        sqe.__bindgen_anon_1.off = how as _;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Modify an epoll file descriptor, equivalent to `epoll_ctl(2)`.
    pub struct EpollCtl {
        epfd: { impl sealed::UseFixed },
        fd: { impl sealed::UseFd },
        op: { i32 },
        ev: { *const types::epoll_event },
        ;;
    }

    pub const CODE = sys::IORING_OP_EPOLL_CTL;

    pub fn build(self) -> Entry {
        let EpollCtl { epfd, fd, op, ev } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = epfd);
        sqe.__bindgen_anon_2.addr = ev as _;
        sqe.len = op as _;
        sqe.__bindgen_anon_1.off = fd as _;
        Entry(sqe)
    }
}

// === 5.7 ===

opcode! {
    /// Splice data to/from a pipe, equivalent to `splice(2)`.
    ///
    /// If `fd_in` refers to a pipe, `off_in` must be `-1`.
    /// The description of `off_in` also applies to `off_out`.
    pub struct Splice {
        fd_in: { impl sealed::UseFixed },
        off_in: { i64 },
        fd_out: { impl sealed::UseFixed },
        off_out: { i64 },
        len: { u32 },
        ;;
        /// see man `splice(2)` for description of flags.
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SPLICE;

    pub fn build(self) -> Entry {
        let Splice { fd_in, off_in, fd_out, off_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;
        sqe.__bindgen_anon_1.off = off_out as _;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_2.splice_off_in = off_in as _;
        sqe.__bindgen_anon_3.splice_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Register `nbufs` buffers that each have the length `len` with ids starting from `bid` in the
    /// group `bgid` that can be used for any request. See
    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
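    ///
    /// A sketch registering 4 buffers of 1024 bytes in group 0, starting at buffer id 0 (the
    /// backing memory must stay alive while the kernel owns the buffers):
    ///
    /// ```no_run
    /// use io_uring::{opcode, IoUring};
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let mut backing = vec![0u8; 4 * 1024];
    /// let provide = opcode::ProvideBuffers::new(backing.as_mut_ptr(), 1024, 4, 0, 0)
    ///     .build()
    ///     .user_data(0x12);
    /// unsafe { ring.submission().push(&provide).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// assert!(ring.completion().next().unwrap().result() >= 0);
    /// ```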
    pub struct ProvideBuffers {
        addr: { *mut u8 },
        len: { i32 },
        nbufs: { u16 },
        bgid: { u16 },
        bid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_PROVIDE_BUFFERS;

    pub fn build(self) -> Entry {
        let ProvideBuffers { addr, len, nbufs, bgid, bid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_2.addr = addr as _;
        sqe.len = len as _;
        sqe.__bindgen_anon_1.off = bid as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

opcode! {
    /// Remove some number of buffers from a buffer group. See
    /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info.
    pub struct RemoveBuffers {
        nbufs: { u16 },
        bgid: { u16 }
        ;;
    }

    pub const CODE = sys::IORING_OP_REMOVE_BUFFERS;

    pub fn build(self) -> Entry {
        let RemoveBuffers { nbufs, bgid } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = nbufs as _;
        sqe.__bindgen_anon_4.buf_group = bgid;
        Entry(sqe)
    }
}

// === 5.8 ===

opcode! {
    /// Duplicate pipe content, equivalent to `tee(2)`.
    pub struct Tee {
        fd_in: { impl sealed::UseFixed },
        fd_out: { impl sealed::UseFixed },
        len: { u32 }
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_TEE;

    pub fn build(self) -> Entry {
        let Tee { fd_in, fd_out, len, mut flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;

        assign_fd!(sqe.fd = fd_out);
        sqe.len = len;

        sqe.__bindgen_anon_5.splice_fd_in = match fd_in {
            sealed::Target::Fd(fd) => fd,
            sealed::Target::Fixed(idx) => {
                flags |= sys::SPLICE_F_FD_IN_FIXED;
                idx as _
            }
        };

        sqe.__bindgen_anon_3.splice_flags = flags;

        Entry(sqe)
    }
}

// === 5.11 ===

opcode! {
    /// Shut down all or part of a full duplex connection on a socket, equivalent to `shutdown(2)`.
    /// Available since kernel 5.11.
    pub struct Shutdown {
        fd: { impl sealed::UseFixed },
        how: { i32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_SHUTDOWN;

    pub fn build(self) -> Entry {
        let Shutdown { fd, how } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = how as _;
        Entry(sqe)
    }
}

opcode! {
    /// Change the name or location of a file, equivalent to `renameat2(2)`.
    /// Available since kernel 5.11.
    pub struct RenameAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_RENAMEAT;

    pub fn build(self) -> Entry {
        let RenameAt {
            olddirfd, oldpath,
            newdirfd, newpath,
            flags
        } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.off = newpath as _;
        sqe.__bindgen_anon_3.rename_flags = flags;
        Entry(sqe)
    }
}

opcode! {
    /// Delete a name and possibly the file it refers to, equivalent to `unlinkat(2)`.
    /// Available since kernel 5.11.
    pub struct UnlinkAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_UNLINKAT;

    pub fn build(self) -> Entry {
        let UnlinkAt { dirfd, pathname, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.__bindgen_anon_3.unlink_flags = flags as _;
        Entry(sqe)
    }
}

// === 5.15 ===

opcode! {
    /// Make a directory, equivalent to `mkdirat(2)`.
    pub struct MkDirAt {
        dirfd: { impl sealed::UseFd },
        pathname: { *const libc::c_char },
        ;;
        mode: libc::mode_t = 0
    }

    pub const CODE = sys::IORING_OP_MKDIRAT;

    pub fn build(self) -> Entry {
        let MkDirAt { dirfd, pathname, mode } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = dirfd;
        sqe.__bindgen_anon_2.addr = pathname as _;
        sqe.len = mode;
        Entry(sqe)
    }
}

opcode! {
    /// Create a symlink, equivalent to `symlinkat(2)`.
    pub struct SymlinkAt {
        newdirfd: { impl sealed::UseFd },
        target: { *const libc::c_char },
        linkpath: { *const libc::c_char },
        ;;
    }

    pub const CODE = sys::IORING_OP_SYMLINKAT;

    pub fn build(self) -> Entry {
        let SymlinkAt { newdirfd, target, linkpath } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = newdirfd;
        sqe.__bindgen_anon_2.addr = target as _;
        sqe.__bindgen_anon_1.addr2 = linkpath as _;
        Entry(sqe)
    }
}

opcode! {
    /// Create a hard link, equivalent to `linkat(2)`.
    pub struct LinkAt {
        olddirfd: { impl sealed::UseFd },
        oldpath: { *const libc::c_char },
        newdirfd: { impl sealed::UseFd },
        newpath: { *const libc::c_char },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_LINKAT;

    pub fn build(self) -> Entry {
        let LinkAt { olddirfd, oldpath, newdirfd, newpath, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = olddirfd as _;
        sqe.__bindgen_anon_2.addr = oldpath as _;
        sqe.len = newdirfd as _;
        sqe.__bindgen_anon_1.addr2 = newpath as _;
        sqe.__bindgen_anon_3.hardlink_flags = flags as _;
        Entry(sqe)
    }
}

// === 5.18 ===

opcode! {
    /// Send a message (with data) to a target ring.
    pub struct MsgRingData {
        ring_fd: { impl sealed::UseFd },
        result: { i32 },
        user_data: { u64 },
        user_flags: { Option<u32> },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingData { ring_fd, result, user_data, user_flags, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_DATA.into();
        sqe.fd = ring_fd;
        sqe.len = result as u32;
        sqe.__bindgen_anon_1.off = user_data;
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        if let Some(flags) = user_flags {
            sqe.__bindgen_anon_5.file_index = flags;
            unsafe {sqe.__bindgen_anon_3.msg_ring_flags |= sys::IORING_MSG_RING_FLAGS_PASS};
        }
        Entry(sqe)
    }
}

// === 5.19 ===

opcode! {
    /// Attempt to cancel an already issued request, receiving a cancellation
    /// builder, which allows for the new cancel criteria introduced in
    /// 5.19.
    pub struct AsyncCancel2 {
        builder: { types::CancelBuilder }
        ;;
    }

    pub const CODE = sys::IORING_OP_ASYNC_CANCEL;

    pub fn build(self) -> Entry {
        let AsyncCancel2 { builder } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = builder.to_fd();
        sqe.__bindgen_anon_2.addr = builder.user_data.unwrap_or(0);
        sqe.__bindgen_anon_3.cancel_flags = builder.flags.bits();
        Entry(sqe)
    }
}

opcode! {
    /// A file/device-specific 16-byte command, akin (but not equivalent) to `ioctl(2)`.
    pub struct UringCmd16 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        /// The `buf_index` is an index into an array of fixed buffers,
        /// and is only valid if fixed buffers were registered.
        buf_index: Option<u16> = None,
        /// Arbitrary command data.
        cmd: [u8; 16] = [0u8; 16]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry {
        let UringCmd16 { fd, cmd_op, cmd, buf_index } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry(sqe)
    }
}

opcode! {
    /// A file/device-specific 80-byte command, akin (but not equivalent) to `ioctl(2)`.
    pub struct UringCmd80 {
        fd: { impl sealed::UseFixed },
        cmd_op: { u32 },
        ;;
        /// The `buf_index` is an index into an array of fixed buffers,
        /// and is only valid if fixed buffers were registered.
        buf_index: Option<u16> = None,
        /// Arbitrary command data.
        cmd: [u8; 80] = [0u8; 80]
    }

    pub const CODE = sys::IORING_OP_URING_CMD;

    pub fn build(self) -> Entry128 {
        let UringCmd80 { fd, cmd_op, cmd, buf_index } = self;

        let cmd1 = cmd[..16].try_into().unwrap();
        let cmd2 = cmd[16..].try_into().unwrap();

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.__bindgen_anon_1.cmd_op = cmd_op;
        unsafe { *sqe.__bindgen_anon_6.cmd.as_mut().as_mut_ptr().cast::<[u8; 16]>() = cmd1 };
        if let Some(buf_index) = buf_index {
            sqe.__bindgen_anon_4.buf_index = buf_index;
            unsafe {
                sqe.__bindgen_anon_3.uring_cmd_flags |= sys::IORING_URING_CMD_FIXED;
            }
        }
        Entry128(Entry(sqe), cmd2)
    }
}

opcode! {
    /// Create an endpoint for communication, equivalent to `socket(2)`.
    ///
    /// If the `file_index` argument is set, the resulting socket is
    /// directly mapped to the given fixed-file slot instead of being
    /// returned as a normal file descriptor. The application must first
    /// have registered a file table, and the target slot should fit into
    /// it.
    ///
    /// Available since 5.19.
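    ///
    /// A usage sketch creating a TCP socket (constants from libc; on success the result is the
    /// new file descriptor):
    ///
    /// ```no_run
    /// use io_uring::{opcode, IoUring};
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let socket = opcode::Socket::new(libc::AF_INET, libc::SOCK_STREAM, 0)
    ///     .build()
    ///     .user_data(0x13);
    /// unsafe { ring.submission().push(&socket).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// assert!(cqe.result() >= 0, "socket error: {}", cqe.result());
    /// ```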
    pub struct Socket {
        domain: { i32 },
        socket_type: { i32 },
        protocol: { i32 },
        ;;
        file_index: Option<types::DestinationSlot> = None,
        flags: types::RwFlags = 0,
    }

    pub const CODE = sys::IORING_OP_SOCKET;

    pub fn build(self) -> Entry {
        let Socket { domain, socket_type, protocol, file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = domain as _;
        sqe.__bindgen_anon_1.off = socket_type as _;
        sqe.len = protocol as _;
        sqe.__bindgen_anon_3.rw_flags = flags;
        if let Some(dest) = file_index {
            sqe.__bindgen_anon_5.file_index = dest.kernel_index_arg();
        }
        Entry(sqe)
    }
}

opcode! {
    /// Accept multiple new connections on a socket.
    ///
    /// Set the `allocate_file_index` property if fixed file table entries should be used.
    ///
    /// Available since 5.19.
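    ///
    /// A multishot sketch (assuming `listener` is a listening socket; one CQE is posted per
    /// accepted connection for as long as `IORING_CQE_F_MORE` remains set in the CQE flags):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpListener;
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    /// let accept = opcode::AcceptMulti::new(types::Fd(listener.as_raw_fd()))
    ///     .build()
    ///     .user_data(0x14);
    /// unsafe { ring.submission().push(&accept).expect("queue full"); }
    /// ring.submit().unwrap();
    /// // Each incoming connection now produces a completion carrying the new fd in `result()`.
    /// ```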
    pub struct AcceptMulti {
        fd: { impl sealed::UseFixed },
        ;;
        allocate_file_index: bool = false,
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_ACCEPT;

    pub fn build(self) -> Entry {
        let AcceptMulti { fd, allocate_file_index, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = sys::IORING_ACCEPT_MULTISHOT as u16;
        // No out SockAddr is passed for the multishot accept case.
        // The user should perform a syscall to get any resulting connection's remote address.
        sqe.__bindgen_anon_3.accept_flags = flags as _;
        if allocate_file_index {
            sqe.__bindgen_anon_5.file_index = sys::IORING_FILE_INDEX_ALLOC as u32;
        }
        Entry(sqe)
    }
}

// === 6.0 ===

opcode! {
    /// Send a message (with fixed FD) to a target ring.
    pub struct MsgRingSendFd {
        ring_fd: { impl sealed::UseFd },
        fixed_slot_src: { types::Fixed },
        dest_slot_index: { types::DestinationSlot },
        user_data: { u64 },
        ;;
        opcode_flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_MSG_RING;

    pub fn build(self) -> Entry {
        let MsgRingSendFd { ring_fd, fixed_slot_src, dest_slot_index, user_data, opcode_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = sys::IORING_MSG_SEND_FD.into();
        sqe.fd = ring_fd;
        sqe.__bindgen_anon_1.off = user_data;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = fixed_slot_src.0 as u64 };
        sqe.__bindgen_anon_5.file_index = dest_slot_index.kernel_index_arg();
        sqe.__bindgen_anon_3.msg_ring_flags = opcode_flags;
        Entry(sqe)
    }
}

// === 6.0 ===

opcode! {
    /// Send a zerocopy message on a socket, equivalent to `send(2)`.
    ///
    /// When `dest_addr` is non-zero it points to the address of the target with `dest_addr_len`
    /// specifying its size, turning the request into a `sendto(2)`.
    ///
    /// A fixed (pre-mapped) buffer can optionally be used, taken from buffers that have been
    /// previously registered with [`Submitter::register_buffers`](crate::Submitter::register_buffers).
    ///
    /// This operation might result in two completion queue entries.
    /// See the `IORING_OP_SEND_ZC` section at [io_uring_enter][] for the exact semantics.
    /// Notifications posted by this operation can be checked with [notif](crate::cqueue::notif).
    ///
    /// [io_uring_enter]: https://man7.org/linux/man-pages/man2/io_uring_enter.2.html
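    ///
    /// A usage sketch (illustrative address; expect a second, notification CQE before the
    /// buffer may be reused):
    ///
    /// ```no_run
    /// use io_uring::{opcode, types, IoUring};
    /// use std::net::TcpStream;
    /// use std::os::unix::io::AsRawFd;
    ///
    /// let mut ring = IoUring::new(8).unwrap();
    /// let stream = TcpStream::connect("127.0.0.1:8080").unwrap();
    /// let buf = b"hello";
    /// let send = opcode::SendZc::new(types::Fd(stream.as_raw_fd()), buf.as_ptr(), buf.len() as _)
    ///     .build()
    ///     .user_data(0x15);
    /// unsafe { ring.submission().push(&send).expect("queue full"); }
    /// ring.submit_and_wait(1).unwrap();
    /// let cqe = ring.completion().next().unwrap();
    /// // While IORING_CQE_F_MORE is set in the CQE flags, a notification CQE will follow;
    /// // only after that may the buffer be reused.
    /// assert!(cqe.result() >= 0);
    /// ```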
1710    pub struct SendZc {
1711        fd: { impl sealed::UseFixed },
1712        buf: { *const u8 },
1713        len: { u32 },
1714        ;;
1715        /// The `buf_index` is an index into an array of fixed buffers, and is only valid if fixed
1716        /// buffers were registered.
1717        ///
1718        /// The buf and len arguments must fall within a region specified by buf_index in the
1719        /// previously registered buffer. The buffer need not be aligned with the start of the
1720        /// registered buffer.
1721        buf_index: Option<u16> = None,
1722        dest_addr: *const libc::sockaddr = core::ptr::null(),
1723        dest_addr_len: libc::socklen_t = 0,
1724        flags: i32 = 0,
1725        zc_flags: u16 = 0,
1726    }
1727
1728    pub const CODE = sys::IORING_OP_SEND_ZC;
1729
1730    pub fn build(self) -> Entry {
1731        let SendZc { fd, buf, len, buf_index, dest_addr, dest_addr_len, flags, zc_flags } = self;
1732
1733        let mut sqe = sqe_zeroed();
1734        sqe.opcode = Self::CODE;
1735        assign_fd!(sqe.fd = fd);
1736        sqe.__bindgen_anon_2.addr = buf as _;
1737        sqe.len = len;
1738        sqe.__bindgen_anon_3.msg_flags = flags as _;
1739        sqe.ioprio = zc_flags;
1740        if let Some(buf_index) = buf_index {
1741            sqe.__bindgen_anon_4.buf_index = buf_index;
1742            sqe.ioprio |= sys::IORING_RECVSEND_FIXED_BUF as u16;
1743        }
1744        sqe.__bindgen_anon_1.addr2 = dest_addr as _;
1745        sqe.__bindgen_anon_5.__bindgen_anon_1.addr_len = dest_addr_len as _;
1746        Entry(sqe)
1747    }
1748}
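
// Illustrative sketch: zero-copy send of a caller-owned byte slice on a connected
// socket. `sock_fd` and `buf` are hypothetical; the buffer must stay alive until
// the notification CQE for this request has been seen.
#[allow(dead_code)]
fn send_zc_example(sock_fd: RawFd, buf: &[u8]) -> Entry {
    SendZc::new(types::Fd(sock_fd), buf.as_ptr(), buf.len() as u32)
        .flags(libc::MSG_NOSIGNAL) // suppress SIGPIPE if the peer has closed
        .build()
}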

// === 6.1 ===

opcode! {
    /// Send a zerocopy message on a socket, equivalent to `sendmsg(2)`.
    ///
    /// `fd` must be set to the socket file descriptor, `msg` must contain a pointer to the msghdr
    /// structure, and `flags` holds the flags associated with the system call.
    #[derive(Debug)]
    pub struct SendMsgZc {
        fd: { impl sealed::UseFixed },
        msg: { *const libc::msghdr },
        ;;
        ioprio: u16 = 0,
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SENDMSG_ZC;

    pub fn build(self) -> Entry {
        let SendMsgZc { fd, msg, ioprio, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.ioprio = ioprio;
        sqe.__bindgen_anon_2.addr = msg as _;
        sqe.len = 1;
        sqe.__bindgen_anon_3.msg_flags = flags;
        Entry(sqe)
    }
}
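
// Illustrative sketch: zero-copy sendmsg of a caller-owned msghdr. Both the msghdr
// and the buffers it references must remain valid until the final notification CQE
// for this request arrives. `sock_fd` and `msg` are hypothetical inputs.
#[allow(dead_code)]
fn sendmsg_zc_example(sock_fd: RawFd, msg: &libc::msghdr) -> Entry {
    SendMsgZc::new(types::Fd(sock_fd), msg as *const libc::msghdr).build()
}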

// === 6.7 ===

opcode! {
    /// Wait on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAIT_BITSET`.
    ///
    /// Wait on the futex at address `futex`, as long as it still has the value `val`, with the
    /// `futex2(2)` flags given in `futex_flags`. `mask` can be set to a specific bitset mask,
    /// which will be matched by the waking side to decide who to wake up. To always get woken, an
    /// application may use `FUTEX_BITSET_MATCH_ANY` (truncated to futex bits). `futex_flags`
    /// follows the `futex2(2)` flags, not the `futex(2)` v1 interface flags. `flags` are currently
    /// unused and hence `0` must be passed.
    #[derive(Debug)]
    pub struct FutexWait {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAIT;

    pub fn build(self) -> Entry {
        let FutexWait { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
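
// Illustrative sketch: wait on a 32-bit futex word while it still holds `expected`.
// The numeric constants are assumptions: 0xffff_ffff stands in for
// FUTEX_BITSET_MATCH_ANY (truncated to futex bits) and 0x02 for the futex2(2)
// FUTEX2_SIZE_U32 flag, to avoid relying on libc exposing those constants.
#[allow(dead_code)]
fn futex_wait_example(futex: *const u32, expected: u32) -> Entry {
    FutexWait::new(futex, u64::from(expected), 0xffff_ffff, 0x02).build()
}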

opcode! {
    /// Wake up waiters on a futex, like but not equivalent to `futex(2)`'s `FUTEX_WAKE_BITSET`.
    ///
    /// Wake at most `val` waiters on the futex indicated by `futex`. `futex_flags` indicates the
    /// `futex2(2)` modifier flags. If only waiters matching a given bitset should be woken, that
    /// bitset must be set in `mask`. Use `FUTEX_BITSET_MATCH_ANY` (truncated to futex bits) to
    /// match any waiter on the given futex. `flags` are currently unused and hence `0` must be
    /// passed.
    #[derive(Debug)]
    pub struct FutexWake {
        futex: { *const u32 },
        val: { u64 },
        mask: { u64 },
        futex_flags: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAKE;

    pub fn build(self) -> Entry {
        let FutexWake { futex, val, mask, futex_flags, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = futex_flags as _;
        sqe.__bindgen_anon_2.addr = futex as usize as _;
        sqe.__bindgen_anon_1.off = val;
        unsafe { sqe.__bindgen_anon_6.__bindgen_anon_1.as_mut().addr3 = mask };
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
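
// Illustrative sketch: wake at most one waiter on the futex word, matching any
// bitset. The same assumed numeric constants as in the wait sketch above are used.
#[allow(dead_code)]
fn futex_wake_example(futex: *const u32) -> Entry {
    FutexWake::new(futex, 1, 0xffff_ffff, 0x02).build()
}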

opcode! {
    /// Wait on multiple futexes.
    ///
    /// Wait on multiple futexes at the same time. Futexes are given by `futexv` and `nr_futex` is
    /// the number of futexes in that array. Unlike `FutexWait`, the desired bitset mask and values
    /// are passed in `futexv`. `flags` are currently unused and hence `0` must be passed.
    #[derive(Debug)]
    pub struct FutexWaitV {
        futexv: { *const types::FutexWaitV },
        nr_futex: { u32 },
        ;;
        flags: u32 = 0
    }

    pub const CODE = sys::IORING_OP_FUTEX_WAITV;

    pub fn build(self) -> Entry {
        let FutexWaitV { futexv, nr_futex, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.__bindgen_anon_2.addr = futexv as usize as _;
        sqe.len = nr_futex;
        sqe.__bindgen_anon_3.futex_flags = flags;
        Entry(sqe)
    }
}
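
// Illustrative sketch: wait on a caller-prepared vector of futexes. The `futexv`
// slice (and the futex words it refers to) must outlive the request.
#[allow(dead_code)]
fn futex_waitv_example(futexv: &[types::FutexWaitV]) -> Entry {
    FutexWaitV::new(futexv.as_ptr(), futexv.len() as u32).build()
}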

// === 6.8 ===

opcode! {
    /// Install a fixed file descriptor.
    ///
    /// Turns a direct descriptor into a regular file descriptor that can later be used by regular
    /// system calls that take a normal raw file descriptor.
    #[derive(Debug)]
    pub struct FixedFdInstall {
        fd: { types::Fixed },
        file_flags: { u32 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FIXED_FD_INSTALL;

    pub fn build(self) -> Entry {
        let FixedFdInstall { fd, file_flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        sqe.fd = fd.0 as _;
        sqe.flags = crate::squeue::Flags::FIXED_FILE.bits();
        sqe.__bindgen_anon_3.install_fd_flags = file_flags;
        Entry(sqe)
    }
}
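
// Illustrative sketch: ask the kernel to install direct-descriptor slot 0 as a
// regular file descriptor; the new raw fd is returned in the CQE result. A
// file_flags value of 0 keeps the default install behaviour.
#[allow(dead_code)]
fn fixed_fd_install_example() -> Entry {
    FixedFdInstall::new(types::Fixed(0), 0).build()
}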

// === 6.9 ===

opcode! {
    /// Perform file truncation, equivalent to `ftruncate(2)`.
    #[derive(Debug)]
    pub struct Ftruncate {
        fd: { impl sealed::UseFixed },
        len: { u64 },
        ;;
    }

    pub const CODE = sys::IORING_OP_FTRUNCATE;

    pub fn build(self) -> Entry {
        let Ftruncate { fd, len } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_1.off = len;
        Entry(sqe)
    }
}
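
// Illustrative sketch: truncate (or extend) the file behind a hypothetical raw fd
// to exactly 4 KiB.
#[allow(dead_code)]
fn ftruncate_example(file_fd: RawFd) -> Entry {
    Ftruncate::new(types::Fd(file_fd), 4096).build()
}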

// === 6.10 ===

opcode! {
    /// Send a bundle of messages on a socket in a single request.
    pub struct SendBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0,
        len: u32 = 0
    }

    pub const CODE = sys::IORING_OP_SEND;

    pub fn build(self) -> Entry {
        let SendBundle { fd, len, flags, buf_group } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.len = len;
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.__bindgen_anon_4.buf_group = buf_group;
        Entry(sqe)
    }
}
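
// Illustrative sketch: send a bundle out of provided-buffer group 7 (an arbitrary
// group id assumed to have been populated beforehand via a registered buffer ring).
// `len` is left at its default of 0 here.
#[allow(dead_code)]
fn send_bundle_example(sock_fd: RawFd) -> Entry {
    SendBundle::new(types::Fd(sock_fd), 7).build()
}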

opcode! {
    /// Receive a bundle of buffers from a socket.
    ///
    /// Parameter:
    ///     buf_group: The id of the provided buffer pool to use for the bundle.
    ///
    /// Note that, as of kernel 6.10, the first recv always gets a single buffer, while the second
    /// obtains the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The bundle variant is available since kernel 6.10.
    pub struct RecvBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}
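
// Illustrative sketch: receive a bundle of buffers from provided-buffer group 7
// (an arbitrary group id assumed to be backed by a registered buffer ring).
#[allow(dead_code)]
fn recv_bundle_example(sock_fd: RawFd) -> Entry {
    RecvBundle::new(types::Fd(sock_fd), 7).build()
}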

opcode! {
    /// Receive multiple messages from a socket as a bundle.
    ///
    /// Parameter:
    ///     buf_group: The id of the provided buffer pool to use for each received message.
    ///
    /// MSG_WAITALL should not be set in flags.
    ///
    /// The multishot version allows the application to issue a single receive request, which
    /// repeatedly posts a CQE when data is available. Each CQE will take a bundle of buffers
    /// out of a provided buffer pool for receiving. The application should check the flags of each CQE,
    /// regardless of its result. If a posted CQE does not have the IORING_CQE_F_MORE flag set then
    /// the multishot receive will be done and the application should issue a new request.
    ///
    /// Note that, as of kernel 6.10, the first CQE always gets a single buffer, while the second
    /// obtains the bundle of remaining buffers. This behavior may change in the future.
    ///
    /// The multishot bundle variant is available since kernel 6.10.
    pub struct RecvMultiBundle {
        fd: { impl sealed::UseFixed },
        buf_group: { u16 },
        ;;
        flags: i32 = 0
    }

    pub const CODE = sys::IORING_OP_RECV;

    pub fn build(self) -> Entry {
        let RecvMultiBundle { fd, buf_group, flags } = self;

        let mut sqe = sqe_zeroed();
        sqe.opcode = Self::CODE;
        assign_fd!(sqe.fd = fd);
        sqe.__bindgen_anon_3.msg_flags = flags as _;
        sqe.__bindgen_anon_4.buf_group = buf_group;
        sqe.flags |= crate::squeue::Flags::BUFFER_SELECT.bits();
        sqe.ioprio = sys::IORING_RECV_MULTISHOT as _;
        sqe.ioprio |= sys::IORING_RECVSEND_BUNDLE as u16;
        Entry(sqe)
    }
}
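
// Illustrative sketch: multishot bundle receive from provided-buffer group 7.
// The request only needs to be reissued once a CQE arrives without
// IORING_CQE_F_MORE set, as described in the doc comment above.
#[allow(dead_code)]
fn recv_multi_bundle_example(sock_fd: RawFd) -> Entry {
    RecvMultiBundle::new(types::Fd(sock_fd), 7)
        .build()
        .user_data(0x20) // arbitrary tag shared by all CQEs of this multishot request
}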