pub unsafe auto trait Send { }
Types that can be transferred across thread boundaries.
This trait is automatically implemented when the compiler determines it’s appropriate.
An example of a non-Send type is the reference-counting pointer rc::Rc. If two threads attempt to clone Rcs that point to the same reference-counted value, they might try to update the reference count at the same time, which is undefined behavior because Rc doesn’t use atomic operations. Its cousin sync::Arc does use atomic operations (incurring some overhead) and thus is Send.

See the Nomicon and the Sync trait for more details.
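For illustration, a minimal sketch of ours (not from the upstream docs) showing the compiler enforcing this at a thread boundary:

```rust
use std::sync::Arc;
use std::thread;

fn main() {
    // Arc<i32> is Send, so it can move into the spawned thread.
    let shared = Arc::new(5);
    let handle = thread::spawn(move || *shared + 1);
    assert_eq!(handle.join().unwrap(), 6);

    // Rc<i32> is !Send; the same code with std::rc::Rc is rejected:
    // error[E0277]: `Rc<i32>` cannot be sent between threads safely
}
```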
Implementors
impl !Send for Arguments<'_>
impl !Send for LocalWaker
impl !Send for Args
impl !Send for ArgsOs
impl Send for cairo_vm::with_std::string::Drain<'_>
impl Send for core::ffi::c_str::Bytes<'_>
impl Send for Waker
impl<'a> Send for IoSlice<'a>
impl<'a> Send for IoSliceMut<'a>
impl<'a, K, V> Send for lru::Iter<'a, K, V>
impl<'a, K, V> Send for lru::IterMut<'a, K, V>
impl<'a, T, O> Send for bitvec::slice::iter::Iter<'a, T, O>
impl<'a, T, O> Send for bitvec::slice::iter::IterMut<'a, T, O>
impl<'a, T, const CAP: usize> Send for arrayvec::arrayvec::Drain<'a, T, CAP> where T: Send
impl<Dyn> Send for DynMetadata<Dyn> where Dyn: ?Sized
impl<K, V> Send for hashbrown::map::IterMut<'_, K, V>
impl<K, V, S> Send for LruCache<K, V, S> where K: Send, V: Send, S: Send
impl<K, V, S, A> Send for hashbrown::map::OccupiedEntry<'_, K, V, S, A>
impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A>
impl<M, T, O> Send for BitRef<'_, M, T, O>
impl<T> !Send for *const T where T: ?Sized
impl<T> !Send for *mut T where T: ?Sized
impl<T> !Send for NonNull<T> where T: ?Sized
NonNull pointers are not Send because the data they reference may be aliased.
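A wrapper that guarantees unique ownership of its pointee can assert Send manually; a minimal sketch using a hypothetical OwnedPtr type:

```rust
use std::ptr::NonNull;

// Hypothetical owning wrapper around a heap allocation.
struct OwnedPtr<T>(NonNull<T>);

impl<T> OwnedPtr<T> {
    fn new(value: T) -> Self {
        // Box::into_raw never returns null, so this cannot panic.
        Self(NonNull::new(Box::into_raw(Box::new(value))).unwrap())
    }
}

impl<T> Drop for OwnedPtr<T> {
    fn drop(&mut self) {
        // SAFETY: the pointer came from Box::into_raw and is freed only here.
        unsafe { drop(Box::from_raw(self.0.as_ptr())) }
    }
}

// SAFETY: OwnedPtr uniquely owns its allocation, so no aliasing handle
// remains behind on the original thread; T itself must still be Send.
unsafe impl<T: Send> Send for OwnedPtr<T> {}
```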
impl<T> !Send for MappedMutexGuard<'_, T> where T: ?Sized
impl<T> !Send for MappedRwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for MappedRwLockWriteGuard<'_, T> where T: ?Sized
impl<T> !Send for MutexGuard<'_, T> where T: ?Sized
impl<T> !Send for ReentrantLockGuard<'_, T> where T: ?Sized
impl<T> !Send for RwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for RwLockWriteGuard<'_, T> where T: ?Sized
impl<T> Send for BitSpanError<T> where T: BitStore
impl<T> Send for &T where T: Sync + ?Sized
impl<T> Send for ThinBox<T> where T: Send + ?Sized
ThinBox<T> is Send if T is Send because the data is owned.
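ThinBox is nightly-only at the time of writing; a minimal sketch of ours under #![feature(thin_box)]:

```rust
#![feature(thin_box)]
use std::boxed::ThinBox;
use std::thread;

fn main() {
    // ThinBox owns its heap data, so it may move to another thread
    // whenever the payload type is itself Send.
    let b: ThinBox<[u8]> = ThinBox::new_unsize([1u8, 2, 3]);
    thread::spawn(move || assert_eq!(b.len(), 3)).join().unwrap();
}
```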
impl<T> Send for Cell<T> where T: Send + ?Sized
impl<T> Send for RefCell<T> where T: Send + ?Sized
impl<T> Send for NonZero<T> where T: ZeroablePrimitive + Send
impl<T> Send for ChunksExactMut<'_, T> where T: Send
impl<T> Send for ChunksMut<'_, T> where T: Send
impl<T> Send for cairo_vm::with_std::slice::Iter<'_, T> where T: Sync
impl<T> Send for cairo_vm::with_std::slice::IterMut<'_, T> where T: Send
impl<T> Send for RChunksExactMut<'_, T> where T: Send
impl<T> Send for RChunksMut<'_, T> where T: Send
impl<T> Send for AtomicPtr<T>
impl<T> Send for cairo_vm::with_std::sync::mpmc::Receiver<T> where T: Send
impl<T> Send for cairo_vm::with_std::sync::mpmc::Sender<T> where T: Send
impl<T> Send for cairo_vm::with_std::sync::mpsc::Receiver<T> where T: Send
impl<T> Send for cairo_vm::with_std::sync::mpsc::Sender<T> where T: Send
impl<T> Send for SyncSender<T> where T: Send
impl<T> Send for Mutex<T> where T: Send + ?Sized
impl<T> Send for OnceLock<T> where T: Send
impl<T> Send for ReentrantLock<T> where T: Send + ?Sized
impl<T> Send for RwLock<T> where T: Send + ?Sized
impl<T> Send for alloc::collections::linked_list::Iter<'_, T> where T: Sync
impl<T> Send for alloc::collections::linked_list::IterMut<'_, T> where T: Send
impl<T> Send for JoinHandle<T>
impl<T> Send for MisalignError<T>
impl<T, A> !Send for Rc<T, A>
impl<T, A> !Send for cairo_vm::with_std::rc::Weak<T, A>
impl<T, A> Send for Arc<T, A> where T: Sync + Send + ?Sized, A: Allocator + Send
impl<T, A> Send for cairo_vm::with_std::sync::Weak<T, A> where T: Sync + Send + ?Sized, A: Allocator + Send
impl<T, A> Send for cairo_vm::with_std::vec::Drain<'_, T, A>
impl<T, A> Send for cairo_vm::with_std::vec::IntoIter<T, A>
impl<T, A> Send for Cursor<'_, T, A>
impl<T, A> Send for CursorMut<'_, T, A>
impl<T, A> Send for LinkedList<T, A> where T: Send, A: Allocator + Send
impl<T, A> Send for alloc::collections::vec_deque::drain::Drain<'_, T, A>
impl<T, A> Send for allocator_api2::stable::boxed::Box<T, A>
impl<T, A> Send for allocator_api2::stable::vec::drain::Drain<'_, T, A>
impl<T, A> Send for allocator_api2::stable::vec::into_iter::IntoIter<T, A>
impl<T, A> Send for hashbrown::table::OccupiedEntry<'_, T, A>
impl<T, N> Send for GenericArray<T, N> where T: Send, N: ArrayLength<T>
impl<T, O> Send for bitvec::boxed::iter::IntoIter<T, O>
impl<T, O> Send for BitBox<T, O>
impl<T, O> Send for BitSlice<T, O>
Bit-Slice Thread Safety
This allows bit-slice references to be moved across thread boundaries only when the underlying T element can tolerate concurrency.

All BitSlice references, shared or exclusive, are only threadsafe if the T element type is Send, because any given bit-slice reference may only have partial control of a memory element that is also being shared by a bit-slice reference on another thread. As such, this is never implemented for Cell<U>, but always implemented for AtomicU and U for a given unsigned integer type U.
Atomic integers safely handle concurrent writes, cells do not allow concurrency at all, so the only missing piece is &mut BitSlice<_, U: Unsigned>. This is handled by the aliasing system that the mutable splitters employ: a mutable reference to an unsynchronized bit-slice can only cross threads when no other handle is able to exist to the elements it governs. Splitting a mutable bit-slice causes the split halves to change over to either atomics or cells, so concurrency is either safe or impossible.
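A minimal sketch of that splitting behavior, assuming bitvec 1.x with its default atomic storage (an example of ours, not from the bitvec docs):

```rust
use bitvec::prelude::*;
use std::thread;

fn main() {
    let mut bv = bitvec![u8, Lsb0; 0; 16];
    // Splitting a mutable bit-slice re-types both halves over the
    // aliased (atomic) store, so each half may move to its own thread.
    let (left, right) = bv.split_at_mut(8);
    thread::scope(|s| {
        s.spawn(move || left.fill(true));
        s.spawn(move || right.fill(false));
    });
    assert_eq!(bv.count_ones(), 8);
}
```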