io_uring/submit.rs

use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::atomic;
use std::{io, mem, ptr};

use crate::register::{execute, Probe};
use crate::sys;
use crate::types::{CancelBuilder, Timespec};
use crate::util::{cast_ptr, OwnedFd};
use crate::Parameters;

use crate::register::Restriction;

use crate::types;

/// Interface for submitting submission queue events in an io_uring instance to the kernel for
/// execution, and for registering files or buffers with the instance.
///
/// io_uring supports both directly performing I/O on buffers and file descriptors and registering
/// them beforehand. Registering is slow, but it makes performing the actual I/O much faster.
pub struct Submitter<'a> {
    fd: &'a OwnedFd,
    params: &'a Parameters,

    sq_head: *const atomic::AtomicU32,
    sq_tail: *const atomic::AtomicU32,
    sq_flags: *const atomic::AtomicU32,
}

impl<'a> Submitter<'a> {
    #[inline]
    pub(crate) const fn new(
        fd: &'a OwnedFd,
        params: &'a Parameters,
        sq_head: *const atomic::AtomicU32,
        sq_tail: *const atomic::AtomicU32,
        sq_flags: *const atomic::AtomicU32,
    ) -> Submitter<'a> {
        Submitter {
            fd,
            params,
            sq_head,
            sq_tail,
            sq_flags,
        }
    }

    #[inline]
    fn sq_len(&self) -> usize {
        unsafe {
            let head = (*self.sq_head).load(atomic::Ordering::Acquire);
            let tail = (*self.sq_tail).load(atomic::Ordering::Acquire);

            tail.wrapping_sub(head) as usize
        }
    }

    /// Whether the kernel thread has gone to sleep because it waited for too long without
    /// submission queue entries.
    #[inline]
    fn sq_need_wakeup(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Relaxed) & sys::IORING_SQ_NEED_WAKEUP != 0
        }
    }

    /// Whether the CQ ring has overflowed.
    fn sq_cq_overflow(&self) -> bool {
        unsafe {
            (*self.sq_flags).load(atomic::Ordering::Relaxed) & sys::IORING_SQ_CQ_OVERFLOW != 0
        }
    }

    /// Initiate and/or complete asynchronous I/O. This is a low-level wrapper around
    /// `io_uring_enter` - see `man io_uring_enter` (or [its online
    /// version](https://manpages.debian.org/unstable/liburing-dev/io_uring_enter.2.en.html)) for
    /// more details.
    ///
    /// You will probably want to use a more high-level API such as
    /// [`submit`](Self::submit) or [`submit_and_wait`](Self::submit_and_wait).
    ///
    /// # Safety
    ///
    /// This provides a raw interface, so the developer must ensure that the parameters are correct.
    pub unsafe fn enter<T: Sized>(
        &self,
        to_submit: u32,
        min_complete: u32,
        flag: u32,
        arg: Option<&T>,
    ) -> io::Result<usize> {
        let arg = arg
            .map(|arg| cast_ptr(arg).cast())
            .unwrap_or_else(ptr::null);
        let size = mem::size_of::<T>();
        sys::io_uring_enter(
            self.fd.as_raw_fd(),
            to_submit,
            min_complete,
            flag,
            arg,
            size,
        )
        .map(|res| res as _)
    }

    /// Submit all queued submission queue events to the kernel.
    #[inline]
    pub fn submit(&self) -> io::Result<usize> {
        self.submit_and_wait(0)
    }

    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete.
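    ///
    /// # Examples
    ///
    /// A minimal sketch (not taken from the upstream docs); it queues a single `Nop` entry so
    /// that there is something to submit, with an arbitrary illustrative `user_data` tag:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use io_uring::{opcode, IoUring};
    ///
    /// let mut ring = IoUring::new(8)?;
    ///
    /// // Queue a no-op request so the submission queue is not empty.
    /// unsafe {
    ///     ring.submission()
    ///         .push(&opcode::Nop::new().build().user_data(0x42))?;
    /// }
    ///
    /// // Submit it and block until at least one completion arrives.
    /// let submitted = ring.submitter().submit_and_wait(1)?;
    /// assert_eq!(submitted, 1);
    /// # Ok(())
    /// # }
    /// ```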
    pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = 0;

        // This logic suffers from the fact that sq_cq_overflow and sq_need_wakeup
        // each cause an atomic load of the same variable, self.sq_flags.
        // In the hottest paths, when a server is running with sqpoll,
        // this is going to be hit twice, when once would be sufficient.
        // However, consider that the `SeqCst` barrier required for interpreting
        // the IORING_ENTER_SQ_WAKEUP bit is required in all paths where sqpoll
        // is setup when consolidating the reads.

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IORING_ENTER_GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            // See discussion in [`SubmissionQueue::need_wakeup`].
            atomic::fence(atomic::Ordering::SeqCst);
            if self.sq_need_wakeup() {
                flags |= sys::IORING_ENTER_SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up.
                return Ok(len);
            }
        }

        unsafe { self.enter::<libc::sigset_t>(len as _, want as _, flags, None) }
    }

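    /// Submit all queued submission queue events to the kernel and wait for at least `want`
    /// completion events to complete, passing extended arguments to `io_uring_enter` (such as a
    /// timeout) via [`types::SubmitArgs`].
    ///
    /// # Examples
    ///
    /// A rough sketch of waiting up to 10 ms for a completion; it assumes a kernel with
    /// `IORING_ENTER_EXT_ARG` support (Linux 5.11+) and treats the timeout as non-fatal:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use io_uring::{types, IoUring};
    ///
    /// let ring = IoUring::new(8)?;
    /// let timeout = types::Timespec::new().nsec(10_000_000);
    /// let args = types::SubmitArgs::new().timespec(&timeout);
    ///
    /// match ring.submitter().submit_with_args(1, &args) {
    ///     Ok(n) => println!("submitted {n} entries"),
    ///     // ETIME (os error 62): the timeout elapsed before a completion arrived.
    ///     Err(e) if e.raw_os_error() == Some(libc::ETIME) => println!("timed out"),
    ///     Err(e) => return Err(e.into()),
    /// }
    /// # Ok(())
    /// # }
    /// ```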
    pub fn submit_with_args(
        &self,
        want: usize,
        args: &types::SubmitArgs<'_, '_>,
    ) -> io::Result<usize> {
        let len = self.sq_len();
        let mut flags = sys::IORING_ENTER_EXT_ARG;

        if want > 0 || self.params.is_setup_iopoll() || self.sq_cq_overflow() {
            flags |= sys::IORING_ENTER_GETEVENTS;
        }

        if self.params.is_setup_sqpoll() {
            // See discussion in [`SubmissionQueue::need_wakeup`].
            atomic::fence(atomic::Ordering::SeqCst);
            if self.sq_need_wakeup() {
                flags |= sys::IORING_ENTER_SQ_WAKEUP;
            } else if want == 0 {
                // The kernel thread is polling and hasn't fallen asleep, so we don't need to tell
                // it to process events or wake it up.
                return Ok(len);
            }
        }

        unsafe { self.enter(len as _, want as _, flags, Some(&args.args)) }
    }

    /// Wait for the submission queue to have free entries.
    pub fn squeue_wait(&self) -> io::Result<usize> {
        unsafe { self.enter::<libc::sigset_t>(0, 0, sys::IORING_ENTER_SQ_WAIT, None) }
    }

    /// Register in-memory fixed buffers for I/O with the kernel. You can use these buffers with the
    /// [`ReadFixed`](crate::opcode::ReadFixed) and [`WriteFixed`](crate::opcode::WriteFixed)
    /// operations.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
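    ///
    /// # Examples
    ///
    /// A minimal sketch registering one fixed buffer backed by a `Vec<u8>`; the buffer size and
    /// variable names are illustrative only:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// // The backing storage must remain valid (not moved or dropped) until the buffers are
    /// // unregistered or the ring is dropped.
    /// let mut buf = vec![0u8; 4096];
    /// let iovec = libc::iovec {
    ///     iov_base: buf.as_mut_ptr().cast(),
    ///     iov_len: buf.len(),
    /// };
    ///
    /// unsafe { ring.submitter().register_buffers(&[iovec])? };
    /// # Ok(())
    /// # }
    /// ```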
    pub unsafe fn register_buffers(&self, bufs: &[libc::iovec]) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_BUFFERS,
            bufs.as_ptr().cast(),
            bufs.len() as _,
        )
        .map(drop)
    }

    /// Update a range of fixed buffers starting at `offset`.
    ///
    /// This is required to use buffers registered using
    /// [`register_buffers_sparse`](Self::register_buffers_sparse),
    /// although it can also be used with [`register_buffers`](Self::register_buffers).
    ///
    /// See [`register_buffers2`](Self::register_buffers2)
    /// for more information about resource tagging.
    ///
    /// Available since Linux 5.13.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
    pub unsafe fn register_buffers_update(
        &self,
        offset: u32,
        bufs: &[libc::iovec],
        tags: Option<&[u64]>,
    ) -> io::Result<()> {
        let nr = tags
            .as_ref()
            .map_or(bufs.len(), |tags| bufs.len().min(tags.len()));

        let rr = sys::io_uring_rsrc_update2 {
            nr: nr as _,
            data: bufs.as_ptr() as _,
            tags: tags.map(|tags| tags.as_ptr() as _).unwrap_or(0),
            offset,
            ..Default::default()
        };

        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_BUFFERS_UPDATE,
            cast_ptr::<sys::io_uring_rsrc_update2>(&rr).cast(),
            std::mem::size_of::<sys::io_uring_rsrc_update2>() as _,
        )
        .map(drop)
    }

    /// Variant of [`register_buffers`](Self::register_buffers)
    /// with resource tagging.
    ///
    /// `tags` should be the same length as `bufs` and contain the
    /// tag value corresponding to the buffer at the same index.
    ///
    /// If a tag is zero, then tagging for this particular resource
    /// (a buffer in this case) is disabled. Otherwise, after the
    /// resource has been unregistered and is no longer in use,
    /// a CQE will be posted with `user_data` set to the specified
    /// tag and all other fields zeroed.
    ///
    /// Available since Linux 5.13.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `iov_base` and `iov_len` values are valid and will
    /// be valid until buffers are unregistered or the ring destroyed, otherwise undefined
    /// behaviour may occur.
    pub unsafe fn register_buffers2(&self, bufs: &[libc::iovec], tags: &[u64]) -> io::Result<()> {
        let rr = sys::io_uring_rsrc_register {
            nr: bufs.len().min(tags.len()) as _,
            data: bufs.as_ptr() as _,
            tags: tags.as_ptr() as _,
            ..Default::default()
        };
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_BUFFERS2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            std::mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
        .map(drop)
    }

    /// Registers an empty table of `nr` fixed buffers.
    ///
    /// These must be updated before use, using e.g.
    /// [`register_buffers_update`](Self::register_buffers_update).
    ///
    /// See [`register_buffers`](Self::register_buffers)
    /// for more information about fixed buffers.
    ///
    /// Available since Linux 5.13.
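    ///
    /// # Examples
    ///
    /// A brief sketch (the slot count is illustrative) of reserving empty slots to be filled in
    /// later, e.g. with [`register_buffers_update`](Self::register_buffers_update):
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// // Reserve four empty buffer slots now; they can be populated later.
    /// ring.submitter().register_buffers_sparse(4)?;
    /// # Ok(())
    /// # }
    /// ```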
    pub fn register_buffers_sparse(&self, nr: u32) -> io::Result<()> {
        let rr = sys::io_uring_rsrc_register {
            nr,
            flags: sys::IORING_RSRC_REGISTER_SPARSE,
            ..Default::default()
        };
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_BUFFERS2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            std::mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
        .map(drop)
    }

    /// Registers an empty file table with room for `nr` file descriptors. The sparse variant is
    /// available in kernels 5.19 and later.
    ///
    /// Registering a file table is a prerequisite for using any request that
    /// uses direct descriptors.
    pub fn register_files_sparse(&self, nr: u32) -> io::Result<()> {
        let rr = sys::io_uring_rsrc_register {
            nr,
            flags: sys::IORING_RSRC_REGISTER_SPARSE,
            resv2: 0,
            data: 0,
            tags: 0,
        };
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_FILES2,
            cast_ptr::<sys::io_uring_rsrc_register>(&rr).cast(),
            mem::size_of::<sys::io_uring_rsrc_register>() as _,
        )
        .map(drop)
    }

    /// Register files for I/O. You can use the registered files with
    /// [`Fixed`](crate::types::Fixed).
    ///
    /// Each fd may be -1, in which case it is considered "sparse", and can be filled in later with
    /// [`register_files_update`](Self::register_files_update).
    ///
    /// Note that this will wait for the ring to idle; it will only return once all active requests
    /// are complete. Use [`register_files_update`](Self::register_files_update) to avoid this.
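    ///
    /// # Examples
    ///
    /// A small sketch registering the standard input descriptor so it can be addressed as
    /// [`Fixed`](crate::types::Fixed)`(0)`; purely illustrative:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// // Slot 0 now refers to fd 0 (stdin); a -1 entry would reserve a sparse slot instead.
    /// ring.submitter().register_files(&[0])?;
    /// # Ok(())
    /// # }
    /// ```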
    pub fn register_files(&self, fds: &[RawFd]) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_FILES,
            fds.as_ptr().cast(),
            fds.len() as _,
        )
        .map(drop)
    }

    /// This operation replaces existing files in the registered file set with new ones,
    /// either turning a sparse entry (one where fd is equal to -1) into a real one, removing an existing entry (new one is set to -1),
    /// or replacing an existing entry with a new existing entry. The `offset` parameter specifies
    /// the offset into the list of registered files at which to start updating files.
    ///
    /// You can also perform this asynchronously with the
    /// [`FilesUpdate`](crate::opcode::FilesUpdate) opcode.
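    ///
    /// # Examples
    ///
    /// A rough sketch: reserve two sparse slots, then point slot 1 at fd 1 (stdout). The slot
    /// and descriptor numbers are illustrative only:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// // Two sparse (-1) slots to start with.
    /// ring.submitter().register_files(&[-1, -1])?;
    ///
    /// // Fill slot 1 with fd 1; the return value is the number of files updated.
    /// let updated = ring.submitter().register_files_update(1, &[1])?;
    /// assert_eq!(updated, 1);
    /// # Ok(())
    /// # }
    /// ```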
    pub fn register_files_update(&self, offset: u32, fds: &[RawFd]) -> io::Result<usize> {
        let fu = sys::io_uring_files_update {
            offset,
            resv: 0,
            fds: fds.as_ptr() as _,
        };
        let ret = execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_FILES_UPDATE,
            cast_ptr::<sys::io_uring_files_update>(&fu).cast(),
            fds.len() as _,
        )?;
        Ok(ret as _)
    }

    /// Register an eventfd created by [`eventfd`](libc::eventfd) with the io_uring instance.
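    ///
    /// # Examples
    ///
    /// A minimal sketch; the error handling around the raw `eventfd` call is deliberately
    /// terse and the flags are illustrative:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// let efd = unsafe { libc::eventfd(0, libc::EFD_CLOEXEC) };
    /// if efd < 0 {
    ///     return Err(std::io::Error::last_os_error().into());
    /// }
    ///
    /// // Completions will now be signalled on `efd`.
    /// ring.submitter().register_eventfd(efd)?;
    /// # Ok(())
    /// # }
    /// ```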
    pub fn register_eventfd(&self, eventfd: RawFd) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_EVENTFD,
            cast_ptr::<RawFd>(&eventfd).cast(),
            1,
        )
        .map(drop)
    }

    /// This works just like [`register_eventfd`](Self::register_eventfd), except notifications are
    /// only posted for events that complete in an async manner, so requests that complete
    /// immediately will not cause a notification.
    pub fn register_eventfd_async(&self, eventfd: RawFd) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_EVENTFD_ASYNC,
            cast_ptr::<RawFd>(&eventfd).cast(),
            1,
        )
        .map(drop)
    }

    /// Fill in the given [`Probe`] with information about the opcodes supported by io_uring on the
    /// running kernel.
    ///
    /// # Examples
    ///
    // This is marked no_run as it is only available from Linux 5.6+, however the latest Ubuntu (on
    // which CI runs) only has Linux 5.4.
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let io_uring = io_uring::IoUring::new(1)?;
    /// let mut probe = io_uring::Probe::new();
    /// io_uring.submitter().register_probe(&mut probe)?;
    ///
    /// if probe.is_supported(io_uring::opcode::Read::CODE) {
    ///     println!("Reading is supported!");
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn register_probe(&self, probe: &mut Probe) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_PROBE,
            probe.as_mut_ptr() as *const _,
            Probe::COUNT as _,
        )
        .map(drop)
    }

    /// Register credentials of the running application with io_uring, and get an id associated with
    /// these credentials. This ID can then be [passed](crate::squeue::Entry::personality) into
    /// submission queue entries to issue the request with this process' credentials.
    ///
    /// By default, if [`Parameters::is_feature_cur_personality`] is set then requests will use the
    /// credentials of the task that called [`Submitter::enter`], otherwise they will use the
    /// credentials of the task that originally registered the io_uring.
    ///
    /// [`Parameters::is_feature_cur_personality`]: crate::Parameters::is_feature_cur_personality
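    ///
    /// # Examples
    ///
    /// A brief sketch of the register/use/unregister lifecycle; the entry that would carry
    /// `.personality(id)` is elided:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// let id = ring.submitter().register_personality()?;
    /// // ... submit entries built with `.personality(id)` ...
    /// ring.submitter().unregister_personality(id)?;
    /// # Ok(())
    /// # }
    /// ```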
    pub fn register_personality(&self) -> io::Result<u16> {
        let id = execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_PERSONALITY,
            ptr::null(),
            0,
        )?;
        Ok(id as u16)
    }

    /// Unregister all previously registered buffers.
    ///
    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
    /// it will be cleaned up by the kernel automatically.
    ///
    /// Available since Linux 5.1.
    pub fn unregister_buffers(&self) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_BUFFERS,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Unregister all previously registered files.
    ///
    /// You do not need to explicitly call this before dropping the [`IoUring`](crate::IoUring), as
    /// it will be cleaned up by the kernel automatically.
    pub fn unregister_files(&self) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_FILES,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Unregister an eventfd file descriptor to stop notifications.
    pub fn unregister_eventfd(&self) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_EVENTFD,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Unregister a previously registered personality.
    pub fn unregister_personality(&self, personality: u16) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_PERSONALITY,
            ptr::null(),
            personality as _,
        )
        .map(drop)
    }

    /// Permanently install a feature allowlist. Once this has been called, attempting to perform
    /// an operation not on the allowlist will fail with `-EACCES`.
    ///
    /// This can only be called once, to prevent untrusted code from removing restrictions.
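    ///
    /// # Examples
    ///
    /// A rough sketch, assuming a ring created with
    /// [`setup_r_disabled`](crate::Builder::setup_r_disabled) and the [`Restriction`] helpers
    /// from the `register` module; the allowed opcode is illustrative:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use io_uring::{opcode, register::Restriction, IoUring};
    ///
    /// // Restrictions can only be installed while the ring is still disabled.
    /// let ring = IoUring::builder().setup_r_disabled().build(8)?;
    ///
    /// let mut res = [Restriction::sqe_op(opcode::Read::CODE)];
    /// ring.submitter().register_restrictions(&mut res)?;
    /// ring.submitter().register_enable_rings()?;
    /// # Ok(())
    /// # }
    /// ```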
    pub fn register_restrictions(&self, res: &mut [Restriction]) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_RESTRICTIONS,
            res.as_mut_ptr().cast(),
            res.len() as _,
        )
        .map(drop)
    }

    /// Enable the rings of the io_uring instance if they have been disabled with
    /// [`setup_r_disabled`](crate::Builder::setup_r_disabled).
    pub fn register_enable_rings(&self) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_ENABLE_RINGS,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Tell io_uring on what CPUs the async workers can run. By default, async workers
    /// created by io_uring will inherit the CPU mask of their parent. This is usually
    /// all the CPUs in the system, unless the parent is being run with a limited set.
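    ///
    /// # Examples
    ///
    /// A rough sketch pinning the async workers to CPU 0; the CPU number is illustrative and
    /// `CPU_SET` comes from the `libc` crate:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// let mut cpu_set: libc::cpu_set_t = unsafe { std::mem::zeroed() };
    /// unsafe { libc::CPU_SET(0, &mut cpu_set) };
    /// ring.submitter().register_iowq_aff(&cpu_set)?;
    /// # Ok(())
    /// # }
    /// ```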
    pub fn register_iowq_aff(&self, cpu_set: &libc::cpu_set_t) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_IOWQ_AFF,
            cpu_set as *const _ as *const libc::c_void,
            mem::size_of::<libc::cpu_set_t>() as u32,
        )
        .map(drop)
    }

    /// Undoes a CPU mask previously set with [`register_iowq_aff`](Self::register_iowq_aff).
    pub fn unregister_iowq_aff(&self) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_IOWQ_AFF,
            ptr::null(),
            0,
        )
        .map(drop)
    }

    /// Get and/or set the limit for the number of io_uring worker threads per NUMA
    /// node. `max[0]` holds the limit for bounded workers, which process I/O
    /// operations expected to be bound in time, that is I/O on regular files or
    /// block devices, while `max[1]` holds the limit for unbounded workers, which
    /// carry out I/O operations that may never complete, for instance I/O on
    /// sockets. Passing `0` does not change the current limit. On success, the
    /// previous limits are written back into `max`.
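    ///
    /// # Examples
    ///
    /// A small sketch that queries the current limits without changing them by passing zeroes:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let ring = io_uring::IoUring::new(8)?;
    ///
    /// let mut max = [0u32; 2];
    /// ring.submitter().register_iowq_max_workers(&mut max)?;
    /// println!("bounded: {}, unbounded: {}", max[0], max[1]);
    /// # Ok(())
    /// # }
    /// ```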
    pub fn register_iowq_max_workers(&self, max: &mut [u32; 2]) -> io::Result<()> {
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_IOWQ_MAX_WORKERS,
            max.as_mut_ptr().cast(),
            max.len() as _,
        )
        .map(drop)
    }

    /// Register buffer ring for provided buffers.
    ///
    /// Details can be found in the io_uring_register_buf_ring.3 man page.
    ///
    /// If the register command is not supported, or the ring_entries value exceeds
    /// 32768, the InvalidInput error is returned.
    ///
    /// Available since 5.19.
    ///
    /// # Safety
    ///
    /// Developers must ensure that the `ring_addr` and its length represented by `ring_entries`
    /// are valid and will be valid until the bgid is unregistered or the ring destroyed,
    /// otherwise undefined behaviour may occur.
    pub unsafe fn register_buf_ring(
        &self,
        ring_addr: u64,
        ring_entries: u16,
        bgid: u16,
    ) -> io::Result<()> {
        // The interface type for ring_entries is u32 but the same interface only allows a u16 for
        // the tail to be specified, so to try and avoid further confusion, we limit the
        // ring_entries to u16 here too. The value is actually limited to 2^15 (32768) but we can
        // let the kernel enforce that.
        let arg = sys::io_uring_buf_reg {
            ring_addr,
            ring_entries: ring_entries as _,
            bgid,
            ..Default::default()
        };
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_PBUF_RING,
            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
            1,
        )
        .map(drop)
    }

    /// Unregister a previously registered buffer ring.
    ///
    /// Available since 5.19.
    pub fn unregister_buf_ring(&self, bgid: u16) -> io::Result<()> {
        let arg = sys::io_uring_buf_reg {
            ring_addr: 0,
            ring_entries: 0,
            bgid,
            ..Default::default()
        };
        execute(
            self.fd.as_raw_fd(),
            sys::IORING_UNREGISTER_PBUF_RING,
            cast_ptr::<sys::io_uring_buf_reg>(&arg).cast(),
            1,
        )
        .map(drop)
    }

    /// Performs a synchronous cancellation request, similar to [AsyncCancel](crate::opcode::AsyncCancel),
    /// except that it completes synchronously.
    ///
    /// Cancellation can target a specific request, or all requests matching some criteria. The
    /// [`CancelBuilder`] builder supports describing the match criteria for cancellation.
    ///
    /// An optional `timeout` can be provided to specify how long to wait for matched requests to be
    /// canceled. If no timeout is provided, the default is to wait indefinitely.
    ///
    /// ### Errors
    ///
    /// If no requests are matched, returns:
    ///
    /// [io::ErrorKind::NotFound]: `No such file or directory (os error 2)`
    ///
    /// If a timeout is supplied, and the timeout elapses prior to all requests being canceled, returns:
    ///
    /// [io::ErrorKind::Uncategorized]: `Timer expired (os error 62)`
    ///
    /// ### Notes
    ///
    /// Only requests which have been submitted to the ring will be considered for cancellation. Requests
    /// which have been written to the SQ, but not submitted, will not be canceled.
    ///
    /// Available since 6.0.
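    ///
    /// ### Examples
    ///
    /// A rough sketch that tries to cancel any in-flight request tagged with an illustrative
    /// `user_data` value, treating "nothing matched" as a non-error:
    ///
    /// ```no_run
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use io_uring::{types::CancelBuilder, IoUring};
    ///
    /// let ring = IoUring::new(8)?;
    ///
    /// match ring.submitter().register_sync_cancel(None, CancelBuilder::user_data(0x42)) {
    ///     Ok(()) => println!("matched requests were canceled"),
    ///     Err(e) if e.kind() == std::io::ErrorKind::NotFound => println!("nothing matched"),
    ///     Err(e) => return Err(e.into()),
    /// }
    /// # Ok(())
    /// # }
    /// ```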
    pub fn register_sync_cancel(
        &self,
        timeout: Option<Timespec>,
        builder: CancelBuilder,
    ) -> io::Result<()> {
        let timespec = timeout.map(|ts| ts.0).unwrap_or(sys::__kernel_timespec {
            tv_sec: -1,
            tv_nsec: -1,
        });
        let user_data = builder.user_data.unwrap_or(0);
        let flags = builder.flags.bits();
        let fd = builder.to_fd();

        let arg = sys::io_uring_sync_cancel_reg {
            addr: user_data,
            fd,
            flags,
            timeout: timespec,
            ..Default::default()
        };

        execute(
            self.fd.as_raw_fd(),
            sys::IORING_REGISTER_SYNC_CANCEL,
            cast_ptr::<sys::io_uring_sync_cancel_reg>(&arg).cast(),
            1,
        )
        .map(drop)
    }
}