use core::sync::atomic::{AtomicUsize, Ordering};

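/// Process-wide signal state shared with the kernel (page-aligned, presumably so it can
/// be mapped as a single page): `pending` holds one bit per signal number, `actions` one
/// slot per signal, and `sender_infos` one packed [`SenderInfo`] per standard signal
/// recording who sent it.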
#[derive(Debug)]
#[repr(C, align(4096))]
pub struct SigProcControl {
    pub pending: AtomicU64,
    pub actions: [RawAction; 64],
    pub sender_infos: [AtomicU64; 32],
}

/// Signal action slot, one per signal number.
#[derive(Debug, Default)]
#[repr(C, align(16))]
pub struct RawAction {
    /// Action word inspected by `signal_will_ign`/`signal_will_stop`: bit 63 set means
    /// the signal is ignored, bit 62 is a signal-specific flag (SIGCHLD "ignore if sent
    /// by the parent", or "stop" behavior for SIGTSTP/SIGTTIN/SIGTTOU).
    pub first: AtomicU64,
    /// Not interpreted by anything in this file; available for userspace bookkeeping.
    pub user_data: AtomicU64,
}

#[derive(Debug, Default)]
#[repr(C)]
pub struct Sigcontrol {
    /// Two words of per-thread signal state: the low 32 bits of each word are pending
    /// bits, the high 32 bits are the corresponding "allowed" (unblocked) bits.
    pub word: [AtomicU64; 2],

    /// Packed [`SenderInfo`] for each standard signal pending on this thread.
    pub sender_infos: [AtomicU64; 32],

    pub control_flags: SigatomicUsize,

    /// Instruction pointer saved when a signal is delivered to this thread.
    pub saved_ip: NonatomicUsize,
    /// Architecture-dependent register saved alongside the instruction pointer
    /// (e.g. the flags register on x86).
    pub saved_archdep_reg: NonatomicUsize,
}
#[derive(Clone, Copy, Debug)]
pub struct SenderInfo {
    pub pid: u32,
    pub ruid: u32,
}
impl SenderInfo {
    #[inline]
    pub fn raw(self) -> u64 {
        u64::from(self.pid) | (u64::from(self.ruid) << 32)
    }
    #[inline]
    pub const fn from_raw(raw: u64) -> Self {
        Self {
            pid: raw as u32,
            ruid: (raw >> 32) as u32,
        }
    }
}

impl Sigcontrol {
    /// Returns the set of signals that are both pending (for this thread or for the
    /// whole process) and currently allowed by this thread's signal mask.
    pub fn currently_pending_unblocked(&self, proc: &SigProcControl) -> u64 {
        let proc_pending = proc.pending.load(Ordering::Relaxed);
        let [w0, w1] = core::array::from_fn(|i| {
            let w = self.word[i].load(Ordering::Relaxed);
            ((w | (proc_pending >> (i * 32))) & 0xffff_ffff) & (w >> 32)
        });
        w0 | (w1 << 32)
    }
    /// Updates the allow (unblock) halves of both words, returning which signals were
    /// both pending and allowed before the update.
    pub fn set_allowset(&self, new_allowset: u64) -> u64 {
        let [w0, w1] = self.word.each_ref().map(|w| w.load(Ordering::Relaxed));
        let old_a0 = w0 & 0xffff_ffff_0000_0000;
        let old_a1 = w1 & 0xffff_ffff_0000_0000;
        let new_a0 = (new_allowset & 0xffff_ffff) << 32;
        let new_a1 = new_allowset & 0xffff_ffff_0000_0000;

        // Adding the delta to the high half replaces the allow bits without disturbing
        // pending bits that may be set concurrently in the low half.
        let prev_w0 = self.word[0].fetch_add(new_a0.wrapping_sub(old_a0), Ordering::Relaxed);
        let prev_w1 = self.word[1].fetch_add(new_a1.wrapping_sub(old_a1), Ordering::Relaxed);
        let up0 = prev_w0 & (prev_w0 >> 32);
        let up1 = prev_w1 & (prev_w1 >> 32);

        up0 | (up1 << 32)
    }
}

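/// A usize that only has to be "atomic" with respect to signal handlers interrupting the
/// same thread (compare C's `sig_atomic_t`): loads and stores are Relaxed at the machine
/// level, and the requested ordering is enforced only as a compiler fence.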
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct SigatomicUsize(AtomicUsize);

impl SigatomicUsize {
    #[inline]
    pub fn load(&self, ordering: Ordering) -> usize {
        let value = self.0.load(Ordering::Relaxed);
        if ordering != Ordering::Relaxed {
            core::sync::atomic::compiler_fence(ordering);
        }
        value
    }
    #[inline]
    pub fn store(&self, value: usize, ordering: Ordering) {
        if ordering != Ordering::Relaxed {
            core::sync::atomic::compiler_fence(ordering);
        }
        self.0.store(value, Ordering::Relaxed);
    }
}
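
/// A usize with no synchronization requirements of its own; the AtomicUsize wrapper only
/// exists so the containing structs remain `Sync`, and all accesses use Relaxed ordering.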
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct NonatomicUsize(AtomicUsize);

impl NonatomicUsize {
    #[inline]
    pub const fn new(a: usize) -> Self {
        Self(AtomicUsize::new(a))
    }

    #[inline]
    pub fn get(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }
    #[inline]
    pub fn set(&self, value: usize) {
        self.0.store(value, Ordering::Relaxed);
    }
}

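/// Bit mask for signal number `sig`. Signal numbers are 1-based, so for example
/// SIGKILL (9) maps to bit 8.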
pub fn sig_bit(sig: usize) -> u64 {
    1 << (sig - 1)
}
impl SigProcControl {
    pub fn signal_will_ign(&self, sig: usize, is_parent_sigchld: bool) -> bool {
        let flags = self.actions[sig - 1].first.load(Ordering::Relaxed);
        let will_ign = flags & (1 << 63) != 0;
        let sig_specific = flags & (1 << 62) != 0;

        will_ign || (sig == SIGCHLD && is_parent_sigchld && sig_specific)
    }
    pub fn signal_will_stop(&self, sig: usize) -> bool {
        use crate::flag::*;
        matches!(sig, SIGTSTP | SIGTTIN | SIGTTOU)
            && self.actions[sig - 1].first.load(Ordering::Relaxed) & (1 << 62) != 0
    }
}

#[cfg(not(target_arch = "x86"))]
pub use core::sync::atomic::AtomicU64;

use crate::SIGCHLD;

#[cfg(target_arch = "x86")]
pub use self::atomic::AtomicU64;

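// Fallback AtomicU64 for 32-bit x86, built on the `lock cmpxchg8b` instruction. It only
// provides the handful of operations this file needs, mirroring the corresponding
// methods of core::sync::atomic::AtomicU64.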
#[cfg(target_arch = "x86")]
mod atomic {
    use core::{cell::UnsafeCell, sync::atomic::Ordering};

    #[derive(Debug, Default)]
    pub struct AtomicU64(UnsafeCell<u64>);

    unsafe impl Send for AtomicU64 {}
    unsafe impl Sync for AtomicU64 {}

    impl AtomicU64 {
        pub const fn new(inner: u64) -> Self {
            Self(UnsafeCell::new(inner))
        }
        pub fn compare_exchange(
            &self,
            old: u64,
            new: u64,
            _success: Ordering,
            _failure: Ordering,
        ) -> Result<u64, u64> {
            // CMPXCHG8B compares EDX:EAX against the memory operand; on a match it
            // stores ECX:EBX there, otherwise it loads the current value into EDX:EAX.
            let old_hi = (old >> 32) as u32;
            let old_lo = old as u32;
            let new_hi = (new >> 32) as u32;
            let new_lo = new as u32;
            let mut out_hi;
            let mut out_lo;

            unsafe {
                core::arch::asm!("lock cmpxchg8b [{}]", in(reg) self.0.get(), inout("edx") old_hi => out_hi, inout("eax") old_lo => out_lo, in("ecx") new_hi, in("ebx") new_lo);
            }

            if old_hi == out_hi && old_lo == out_lo {
                Ok(old)
            } else {
                Err(u64::from(out_lo) | (u64::from(out_hi) << 32))
            }
        }
        pub fn load(&self, ordering: Ordering) -> u64 {
            // A compare-exchange of 0 with 0 leaves the value unchanged and reports the
            // current contents in both the Ok and Err cases, so it doubles as a 64-bit
            // atomic load.
            match self.compare_exchange(0, 0, ordering, ordering) {
                Ok(new) => new,
                Err(new) => new,
            }
        }
        pub fn store(&self, new: u64, ordering: Ordering) {
            let mut old = 0;

            loop {
                match self.compare_exchange(old, new, ordering, Ordering::Relaxed) {
                    Ok(_) => break,
                    Err(changed) => {
                        old = changed;
                        core::hint::spin_loop();
                    }
                }
            }
        }
        pub fn fetch_update(
            &self,
            set_order: Ordering,
            fetch_order: Ordering,
            mut f: impl FnMut(u64) -> Option<u64>,
        ) -> Result<u64, u64> {
            let mut old = self.load(fetch_order);

            loop {
                let new = f(old).ok_or(old)?;
                match self.compare_exchange(old, new, set_order, Ordering::Relaxed) {
                    // Return the previous value on success, matching the semantics of
                    // core::sync::atomic::AtomicU64::fetch_update (and therefore of the
                    // fetch_or/fetch_and/fetch_add wrappers below).
                    Ok(_) => return Ok(old),
                    Err(changed) => {
                        old = changed;
                        core::hint::spin_loop();
                    }
                }
            }
        }
        pub fn fetch_or(&self, bits: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b | bits))
                .unwrap()
        }
        pub fn fetch_and(&self, bits: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b & bits))
                .unwrap()
        }
        pub fn fetch_add(&self, term: u64, order: Ordering) -> u64 {
            self.fetch_update(order, Ordering::Relaxed, |b| Some(b.wrapping_add(term)))
                .unwrap()
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

    #[cfg(not(loom))]
    use std::{sync::Mutex, thread};
    #[cfg(not(loom))]
    fn model(f: impl FnOnce()) {
        f()
    }

    #[cfg(loom)]
    use loom::{model, sync::Mutex, thread};

    use crate::{RawAction, SigProcControl, Sigcontrol};

    struct FakeThread {
        ctl: Sigcontrol,
        pctl: SigProcControl,
        ctxt: Mutex<()>,
    }
    impl Default for FakeThread {
        fn default() -> Self {
            Self {
                ctl: Sigcontrol::default(),
                pctl: SigProcControl {
                    pending: AtomicU64::new(0),
                    actions: core::array::from_fn(|_| RawAction::default()),
                    sender_infos: Default::default(),
                },
                ctxt: Default::default(),
            }
        }
    }

    #[test]
    fn singlethread_mask() {
        model(|| {
            let fake_thread = Arc::new(FakeThread::default());

            let thread = {
                let fake_thread = Arc::clone(&fake_thread);

                thread::spawn(move || {
                    fake_thread.ctl.set_allowset(!0);
                    {
                        let _g = fake_thread.ctxt.lock();
                        if fake_thread
                            .ctl
                            .currently_pending_unblocked(&fake_thread.pctl)
                            == 0
                        {
                            drop(_g);
                            thread::park();
                        }
                    }
                })
            };

            for sig in 1..=64 {
                let _g = fake_thread.ctxt.lock();

                let idx = sig - 1;
                let bit = 1 << (idx % 32);

                fake_thread.ctl.word[idx / 32].fetch_or(bit, Ordering::Relaxed);
                let w = fake_thread.ctl.word[idx / 32].load(Ordering::Relaxed);

                if w & (w >> 32) != 0 {
                    thread.thread().unpark();
                }
            }

            thread.join().unwrap();
        });
    }
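
    // Minimal additional sanity checks, sketched against the same test setup as above;
    // they rely only on items from the parent module (via `super`).

    // SenderInfo should round-trip through its packed u64 representation.
    #[test]
    fn sender_info_roundtrip() {
        let info = super::SenderInfo {
            pid: 0x1234_5678,
            ruid: 0x9abc_def0,
        };
        let unpacked = super::SenderInfo::from_raw(info.raw());
        assert_eq!(unpacked.pid, info.pid);
        assert_eq!(unpacked.ruid, info.ruid);
    }

    // A per-thread pending bit should only be reported by currently_pending_unblocked()
    // once the corresponding signal is also present in the allowset.
    #[test]
    fn pending_requires_allow() {
        model(|| {
            let fake_thread = FakeThread::default();

            // Signal 1 pending (bit 0 of word[0]), but nothing allowed yet.
            fake_thread.ctl.word[0].fetch_or(super::sig_bit(1), Ordering::Relaxed);
            assert_eq!(
                fake_thread
                    .ctl
                    .currently_pending_unblocked(&fake_thread.pctl),
                0
            );

            // Allow every signal; the pending bit must now be visible.
            fake_thread.ctl.set_allowset(!0);
            assert_eq!(
                fake_thread
                    .ctl
                    .currently_pending_unblocked(&fake_thread.pctl),
                super::sig_bit(1)
            );
        });
    }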
}