use crate::{
    lifetime::{
        Lifetime, LifetimeLazy, LifetimeRef, LifetimeRefMut, ValueReadAccess, ValueWriteAccess,
    },
    managed::{
        DynamicManagedLazy, DynamicManagedRef, DynamicManagedRefMut, ManagedLazy, ManagedRef,
        ManagedRefMut,
    },
    pointer_alignment_padding,
    type_hash::TypeHash,
    Finalize,
};
use std::{
    alloc::{alloc, dealloc, Layout},
    cell::RefCell,
    collections::HashMap,
    ops::Range,
};

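// Allocation granularity: pages are carved into 128-byte chunks, and one page
// spans `u128::BITS` (128) chunks, i.e. 16 KiB, so a page's chunk occupancy
// fits into a single `u128` bitmask.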
const MEMORY_CHUNK_SIZE: usize = 128;
const MEMORY_PAGE_SIZE: usize = MEMORY_CHUNK_SIZE * u128::BITS as usize;

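// All managed allocations live in a per-thread storage, so every box or
// reference handed out below is bound to the thread that allocated it.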
thread_local! {
    static STORAGE: RefCell<ManagedStorage> = Default::default();
}

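/// Returns allocation statistics of the current thread's managed storage.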
pub fn managed_storage_stats() -> ManagedStorageStats {
    STORAGE.with_borrow(|storage| storage.stats())
}

enum ManagedObjectHeader {
    Occupied {
        id: usize,
        type_hash: TypeHash,
        lifetime: Lifetime,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
        instances_count: usize,
        padding: u8,
    },
    Free,
}

impl std::fmt::Debug for ManagedObjectHeader {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Occupied {
                id,
                type_hash,
                layout,
                finalizer,
                instances_count,
                ..
            } => f
                .debug_struct("Occupied")
                .field("id", id)
                .field("type_hash", type_hash)
                .field("layout", layout)
                .field("finalizer", finalizer)
                .field("instances_count", instances_count)
                .finish_non_exhaustive(),
            Self::Free => write!(f, "Free"),
        }
    }
}

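/// Per-page chunk occupancy: bit N set means chunk N is in use. Free space is
/// searched by recursively halving the bit range (buddy-style) until a fully
/// free sub-range large enough for the request is found.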
#[derive(Debug, Default, Clone, Copy)]
struct OccupancyMap {
    mask: u128,
}

impl OccupancyMap {
    fn occupy(&mut self, range: OccupancyRange) {
        self.mask |= range.mask;
    }

    fn free(&mut self, range: OccupancyRange) {
        self.mask &= !range.mask;
    }

    fn is_free(&self, range: OccupancyRange) -> bool {
        self.mask & range.mask == 0
    }

    fn find_free_space(
        &self,
        object_with_header_size: usize,
        range: OccupancyRange,
    ) -> Option<OccupancyRange> {
        if object_with_header_size > range.byte_size() {
            return None;
        }
        if self.is_free(range) {
            return range.cut(object_with_header_size);
        }
        let (left, right) = range.split()?;
        let left = self.find_free_space(object_with_header_size, left);
        let right = self.find_free_space(object_with_header_size, right);
        match (left, right) {
            (None, None) => None,
            (None, Some(right)) => Some(right),
            (Some(left), None) => Some(left),
            (Some(left), Some(right)) => {
                if right.byte_size() < left.byte_size() {
                    Some(right)
                } else {
                    Some(left)
                }
            }
        }
    }
}

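/// A contiguous run of chunk bits inside a page, with the precomputed `u128`
/// mask that covers exactly those bits.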
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct OccupancyRange {
    bits_start_inclusive: usize,
    bits_end_exclusive: usize,
    mask: u128,
}

impl Default for OccupancyRange {
    fn default() -> Self {
        Self {
            bits_start_inclusive: 0,
            bits_end_exclusive: u128::BITS as _,
            mask: u128::MAX,
        }
    }
}

impl OccupancyRange {
    fn range(&self) -> Range<usize> {
        self.bits_start_inclusive..self.bits_end_exclusive
    }

    fn byte_offset(&self) -> usize {
        self.bits_start_inclusive * MEMORY_CHUNK_SIZE
    }

    fn byte_size(&self) -> usize {
        (self.bits_end_exclusive - self.bits_start_inclusive) * MEMORY_CHUNK_SIZE
    }

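    // `update_mask` recomputes the mask from the bit bounds. Worked example
    // for bits 2..4: size = 2, !(u128::MAX << 2) = 0b11, shifted left by the
    // start bit gives 0b1100, i.e. exactly bits 2 and 3 set. The full 0..128
    // range is special-cased because `wrapping_shl(128)` would wrap the shift
    // amount back to 0 and produce the wrong mask.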
    fn update_mask(mut self) -> Self {
        let size = self.bits_end_exclusive - self.bits_start_inclusive;
        self.mask = if size == u128::BITS as _ {
            u128::MAX
        } else {
            (!u128::MAX.wrapping_shl(size as _)).wrapping_shl(self.bits_start_inclusive as _)
        };
        self
    }

    fn cut(&self, object_with_header_size: usize) -> Option<Self> {
        // Round the request up to a power-of-two number of chunks.
        let size = object_with_header_size.next_power_of_two() / MEMORY_CHUNK_SIZE;
        if size <= self.range().len() {
            Some(
                Self {
                    bits_start_inclusive: self.bits_start_inclusive,
                    bits_end_exclusive: self.bits_start_inclusive + size,
                    mask: 0,
                }
                .update_mask(),
            )
        } else {
            None
        }
    }

    fn split(&self) -> Option<(Self, Self)> {
        let half_size = (self.bits_end_exclusive - self.bits_start_inclusive) / 2;
        if half_size == 0 {
            return None;
        }
        let start = self.bits_start_inclusive;
        let mid = self.bits_start_inclusive + half_size;
        let end = self.bits_end_exclusive;
        Some((
            Self {
                bits_start_inclusive: start,
                bits_end_exclusive: mid,
                mask: 0,
            }
            .update_mask(),
            Self {
                bits_start_inclusive: mid,
                bits_end_exclusive: end,
                mask: 0,
            }
            .update_mask(),
        ))
    }

    fn from_pointer_size(memory: *const u8, pointer: *const u8, size: usize) -> Self {
        let offset = pointer as usize - memory as usize;
        let from = offset / MEMORY_CHUNK_SIZE;
        let to = from + size.next_power_of_two() / MEMORY_CHUNK_SIZE;
        Self {
            bits_start_inclusive: from,
            bits_end_exclusive: to,
            mask: 0,
        }
        .update_mask()
    }
}

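/// A single allocation page. Small objects share a `Chunked` page and are
/// placed through its occupancy map; objects larger than a page get their own
/// `Exclusive` page, which is freed as a whole when its object dies.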
enum ManagedMemoryPage {
    Chunked {
        memory: *mut u8,
        layout: Layout,
        occupancy: OccupancyMap,
        padding: u8,
    },
    Exclusive {
        memory: *mut u8,
        layout: Layout,
        padding: u8,
    },
}

impl Drop for ManagedMemoryPage {
    fn drop(&mut self) {
        unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. } => {
                    dealloc(*memory, *layout);
                }
                ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    dealloc(*memory, *layout);
                }
            }
        }
    }
}

impl ManagedMemoryPage {
    fn new_chunked() -> Option<Self> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        let layout = Layout::from_size_align(MEMORY_PAGE_SIZE + header_layout.align(), 1).unwrap();
        unsafe {
            let memory = alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                for offset in (0..MEMORY_PAGE_SIZE).step_by(MEMORY_CHUNK_SIZE) {
                    memory
                        .add(padding + offset)
                        .cast::<ManagedObjectHeader>()
                        .write(ManagedObjectHeader::Free);
                }
                Some(Self::Chunked {
                    memory,
                    layout,
                    occupancy: Default::default(),
                    padding: padding as u8,
                })
            }
        }
    }

    fn new_exclusive(size: usize, alignment: usize) -> Option<Self> {
        unsafe {
            let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
            let layout =
                Layout::from_size_align_unchecked(header_layout.size() + size + alignment, 1);
            let memory = alloc(layout);
            if memory.is_null() {
                None
            } else {
                let padding = pointer_alignment_padding(memory, header_layout.align());
                memory
                    .add(padding)
                    .cast::<ManagedObjectHeader>()
                    .write(ManagedObjectHeader::Free);
                Some(Self::Exclusive {
                    layout,
                    memory,
                    padding: padding as u8,
                })
            }
        }
    }

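    // Carves out a region and writes a `ManagedObjectHeader` at its start;
    // the payload follows the header at the alignment padding recorded in
    // the header, so accessors find it at `header size + padding` bytes.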
    fn alloc_uninitialized(
        &mut self,
        id: usize,
        page: usize,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Option<DynamicManagedBox> {
        let header_layout = Layout::new::<ManagedObjectHeader>().pad_to_align();
        match self {
            ManagedMemoryPage::Chunked {
                memory,
                occupancy,
                padding,
                ..
            } => unsafe {
                let range = occupancy.find_free_space(
                    header_layout.size() + layout.size(),
                    OccupancyRange::default(),
                )?;
                let memory = memory.add(*padding as usize + range.byte_offset());
                let padding = pointer_alignment_padding(memory, layout.align());
                // Header, alignment padding and payload must all fit the range.
                if header_layout.size() + layout.size() + padding > range.byte_size() {
                    return None;
                }
                occupancy.occupy(range);
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
            ManagedMemoryPage::Exclusive {
                memory, padding, ..
            } => unsafe {
                let memory = memory.add(*padding as usize);
                let padding = pointer_alignment_padding(memory, layout.align());
                *memory.cast::<ManagedObjectHeader>().as_mut().unwrap() =
                    ManagedObjectHeader::Occupied {
                        id,
                        type_hash,
                        lifetime: Default::default(),
                        layout,
                        finalizer,
                        instances_count: 1,
                        padding: padding as u8,
                    };
                Some(DynamicManagedBox {
                    memory,
                    id,
                    page,
                    drop: true,
                })
            },
        }
    }

    fn owns_pointer(&self, pointer: *const u8) -> bool {
        let (from, to) = unsafe {
            match self {
                ManagedMemoryPage::Chunked { memory, layout, .. }
                | ManagedMemoryPage::Exclusive { memory, layout, .. } => {
                    (memory.cast_const(), memory.add(layout.size()).cast_const())
                }
            }
        };
        pointer >= from && pointer < to
    }

    fn total_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { layout, .. }
            | ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn occupied_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_ones() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { layout, .. } => layout.size(),
        }
    }

    fn free_size(&self) -> usize {
        match self {
            ManagedMemoryPage::Chunked { occupancy, .. } => {
                occupancy.mask.count_zeros() as usize * MEMORY_CHUNK_SIZE
            }
            ManagedMemoryPage::Exclusive { .. } => 0,
        }
    }
}

#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct ManagedStorageStats {
    pub pages_count: usize,
    pub chunked_pages_count: usize,
    pub exclusive_pages_count: usize,
    pub total_size: usize,
    pub occupied_size: usize,
    pub free_size: usize,
}

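/// Per-thread registry of allocation pages keyed by page id. Object and page
/// ids come from separate wrapping counters.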
#[derive(Default)]
struct ManagedStorage {
    object_id_generator: usize,
    page_id_generator: usize,
    pages: HashMap<usize, ManagedMemoryPage>,
}

impl ManagedStorage {
    fn stats(&self) -> ManagedStorageStats {
        ManagedStorageStats {
            pages_count: self.pages.len(),
            chunked_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Chunked { .. }))
                .count(),
            exclusive_pages_count: self
                .pages
                .values()
                .filter(|page| matches!(page, ManagedMemoryPage::Exclusive { .. }))
                .count(),
            total_size: self.pages.values().map(|page| page.total_size()).sum(),
            occupied_size: self.pages.values().map(|page| page.occupied_size()).sum(),
            free_size: self.pages.values().map(|page| page.free_size()).sum(),
        }
    }

    fn generate_object_id(&mut self) -> usize {
        let result = self.object_id_generator;
        self.object_id_generator = self.object_id_generator.wrapping_add(1);
        result
    }

    fn generate_page_id(&mut self) -> usize {
        let result = self.page_id_generator;
        self.page_id_generator = self.page_id_generator.wrapping_add(1);
        result
    }

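    // Allocation policy: objects whose payload plus header exceeds a page get
    // a fresh exclusive page; otherwise the first chunked page that can fit
    // the object is used, with a new chunked page allocated as fallback.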
    fn alloc_uninitialized(
        &mut self,
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> DynamicManagedBox {
        let id = self.generate_object_id();
        let size = layout.size() + Layout::new::<ManagedObjectHeader>().size();
        if size > MEMORY_PAGE_SIZE {
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_exclusive(size, layout.align()).unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        } else {
            for (page_id, page) in &mut self.pages {
                if matches!(page, ManagedMemoryPage::Chunked { .. }) {
                    if let Some(object) =
                        page.alloc_uninitialized(id, *page_id, type_hash, layout, finalizer)
                    {
                        return object;
                    }
                }
            }
            let page_id = self.generate_page_id();
            let mut page = ManagedMemoryPage::new_chunked().unwrap();
            let object = page
                .alloc_uninitialized(id, page_id, type_hash, layout, finalizer)
                .unwrap();
            self.pages.insert(page_id, page);
            object
        }
    }

    fn increment(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        instances_count,
                        ..
                    } = header
                    {
                        if object_id == *id {
                            *instances_count += 1;
                        }
                    }
                }
            }
        }
    }

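    // Drops one instance; when the count reaches zero the payload is
    // finalized, all chunks of its range are reset to `Free` headers, and a
    // page left completely empty is removed from the storage.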
    fn decrement(&mut self, object_id: usize, page_id: usize, pointer: *mut u8) {
        if let Some(page) = self.pages.get_mut(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                unsafe {
                    let header = pointer.cast::<ManagedObjectHeader>().as_mut().unwrap();
                    if let ManagedObjectHeader::Occupied {
                        id,
                        layout,
                        finalizer,
                        instances_count,
                        padding,
                        ..
                    } = header
                    {
                        if object_id == *id && *instances_count > 0 {
                            *instances_count -= 1;
                            if *instances_count == 0 {
                                (finalizer)(
                                    pointer.add(header_size + *padding as usize).cast::<()>(),
                                );
                                match page {
                                    ManagedMemoryPage::Chunked {
                                        memory,
                                        occupancy,
                                        padding,
                                        ..
                                    } => {
                                        let range = OccupancyRange::from_pointer_size(
                                            memory.add(*padding as usize),
                                            pointer,
                                            header_size + layout.size(),
                                        );
                                        occupancy.free(range);
                                        *header = ManagedObjectHeader::Free;
                                        for index in range.range().skip(1) {
                                            memory
                                                .add(*padding as usize + index * MEMORY_CHUNK_SIZE)
                                                .cast::<ManagedObjectHeader>()
                                                .write(ManagedObjectHeader::Free);
                                        }
                                        if occupancy.is_free(OccupancyRange::default()) {
                                            self.pages.remove(&page_id);
                                        }
                                    }
                                    ManagedMemoryPage::Exclusive { .. } => {
                                        *header = ManagedObjectHeader::Free;
                                        self.pages.remove(&page_id);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    fn access_object_lifetime_type<T>(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
        type_check: bool,
    ) -> Option<(*mut T, *mut Lifetime, TypeHash)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    lifetime,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id
                        && *instances_count > 0
                        && (!type_check || *type_hash == TypeHash::of::<T>())
                    {
                        return Some((
                            unsafe { pointer.add(header_size + *padding as usize).cast::<T>() },
                            lifetime,
                            *type_hash,
                        ));
                    }
                }
            }
        }
        None
    }

    fn object_type_hash(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<TypeHash> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    type_hash,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some(*type_hash);
                    }
                }
            }
        }
        None
    }

    fn object_layout_with_offset(
        &self,
        pointer: *mut u8,
        object_id: usize,
        page_id: usize,
    ) -> Option<(Layout, usize)> {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    layout,
                    instances_count,
                    padding,
                    ..
                } = header
                {
                    if object_id == *id && *instances_count > 0 {
                        return Some((*layout, header_size + *padding as usize));
                    }
                }
            }
        }
        None
    }

    fn object_instances_count(&self, pointer: *mut u8, object_id: usize, page_id: usize) -> usize {
        if let Some(page) = self.pages.get(&page_id) {
            if page.owns_pointer(pointer) {
                let header = unsafe { pointer.cast::<ManagedObjectHeader>().as_mut().unwrap() };
                if let ManagedObjectHeader::Occupied {
                    id,
                    instances_count,
                    ..
                } = header
                {
                    if object_id == *id {
                        return *instances_count;
                    }
                }
            }
        }
        0
    }
}

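/// Typed, reference-counted handle to an object living in the thread-local
/// managed storage; clones share the same allocation. A minimal usage sketch,
/// mirroring the tests below:
///
/// ```ignore
/// let a = ManagedBox::new(42usize);
/// let b = a.clone();
/// assert_eq!(*b.read().unwrap(), 42);
/// assert_eq!(a.instances_count(), 2);
/// ```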
pub struct ManagedBox<T> {
    memory: *mut T,
    id: usize,
    page: usize,
    drop: bool,
}

impl<T: Default> Default for ManagedBox<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T> Drop for ManagedBox<T> {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory.cast());
            })
        }
    }
}

impl<T> ManagedBox<T> {
    pub fn new(value: T) -> Self
    where
        T: Finalize,
    {
        let mut result = DynamicManagedBox::new(value);
        // Disarm the dynamic box so ownership transfers without a decrement.
        result.drop = false;
        Self {
            memory: result.memory.cast(),
            id: result.id,
            page: result.page,
            drop: true,
        }
    }

    pub fn into_dynamic(mut self) -> DynamicManagedBox {
        // Disarm `self` so the allocation is handed over, not decremented.
        self.drop = false;
        DynamicManagedBox {
            memory: self.memory.cast(),
            id: self.id,
            page: self.page,
            drop: true,
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE.with_borrow(|storage| {
            storage.object_instances_count(self.memory.cast(), self.id, self.page)
        })
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE
            .with_borrow(|storage| storage.object_type_hash(self.memory.cast(), self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn read(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

    pub fn write(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    pub fn borrow(&self) -> Option<ManagedRef<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRef::new_raw(pointer, lifetime.as_ref()?.borrow()?) }
        })
    }

    pub fn borrow_mut(&mut self) -> Option<ManagedRefMut<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedRefMut::new_raw(pointer, lifetime.as_mut()?.borrow_mut()?) }
        })
    }

    pub fn lazy(&self) -> Option<ManagedLazy<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            unsafe { ManagedLazy::new_raw(pointer, lifetime.as_mut().unwrap().lazy()) }
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer.cast_const())
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr_mut(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                true,
            )?;
            Some(pointer)
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const().cast())
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<T>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast())
        })
    }
}

impl<T> Clone for ManagedBox<T> {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory.cast());
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

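/// Type-erased counterpart of [`ManagedBox`]: the stored type is tracked as a
/// `TypeHash` and checked on every typed access. A minimal usage sketch,
/// mirroring the tests below:
///
/// ```ignore
/// let mut v = DynamicManagedBox::new(42usize);
/// assert!(v.is::<usize>());
/// *v.write::<usize>().unwrap() = 10;
/// assert_eq!(*v.read::<usize>().unwrap(), 10);
/// ```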
pub struct DynamicManagedBox {
    memory: *mut u8,
    id: usize,
    page: usize,
    drop: bool,
}

impl Drop for DynamicManagedBox {
    fn drop(&mut self) {
        if self.drop {
            STORAGE.with_borrow_mut(|storage| {
                storage.decrement(self.id, self.page, self.memory);
            })
        }
    }
}

impl DynamicManagedBox {
    pub fn new<T: Finalize>(value: T) -> Self {
        unsafe {
            let mut result =
                Self::new_uninitialized(TypeHash::of::<T>(), Layout::new::<T>(), T::finalize_raw);
            result.as_ptr_mut::<T>().unwrap().write(value);
            result
        }
    }

    pub fn new_uninitialized(
        type_hash: TypeHash,
        layout: Layout,
        finalizer: unsafe fn(*mut ()),
    ) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.alloc_uninitialized(type_hash, layout.pad_to_align(), finalizer)
        })
    }

    pub fn into_typed<T>(mut self) -> Result<ManagedBox<T>, Self> {
        if self.is::<T>() {
            // Disarm `self` so the allocation is handed over, not decremented.
            self.drop = false;
            Ok(ManagedBox {
                memory: self.memory.cast(),
                id: self.id,
                page: self.page,
                drop: true,
            })
        } else {
            Err(self)
        }
    }

    pub fn instances_count(&self) -> usize {
        STORAGE
            .with_borrow(|storage| storage.object_instances_count(self.memory, self.id, self.page))
    }

    pub fn does_share_reference(&self, other: &Self) -> bool {
        self.id == other.id && self.page == other.page && self.memory == other.memory
    }

    pub fn type_hash(&self) -> Option<TypeHash> {
        STORAGE.with_borrow(|storage| storage.object_type_hash(self.memory, self.id, self.page))
    }

    pub fn lifetime_borrow(&self) -> Option<LifetimeRef> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow() }
        })
    }

    pub fn lifetime_borrow_mut(&self) -> Option<LifetimeRefMut> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { lifetime.as_ref()?.borrow_mut() }
        })
    }

    pub fn lifetime_lazy(&self) -> Option<LifetimeLazy> {
        STORAGE.with_borrow(|storage| {
            let (_, lifetime, _) = storage.access_object_lifetime_type::<u8>(
                self.memory.cast(),
                self.id,
                self.page,
                false,
            )?;
            unsafe { Some(lifetime.as_ref()?.lazy()) }
        })
    }

    pub fn is<T>(&self) -> bool {
        STORAGE.with_borrow(|storage| {
            storage
                .access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)
                .is_some()
        })
    }

    pub fn borrow(&self) -> Option<DynamicManagedRef> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe { DynamicManagedRef::new_raw(type_hash, lifetime.as_ref()?.borrow()?, pointer) }
        })
    }

    pub fn borrow_mut(&mut self) -> Option<DynamicManagedRefMut> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedRefMut::new_raw(type_hash, lifetime.as_mut()?.borrow_mut()?, pointer)
            }
        })
    }

    pub fn lazy(&self) -> Option<DynamicManagedLazy> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, type_hash) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            unsafe {
                DynamicManagedLazy::new_raw(type_hash, lifetime.as_mut().unwrap().lazy(), pointer)
            }
        })
    }

    pub fn read<T>(&self) -> Option<ValueReadAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_ref()?.read_ptr(pointer) }
        })
    }

    pub fn write<T>(&mut self) -> Option<ValueWriteAccess<T>> {
        STORAGE.with_borrow(|storage| {
            let (pointer, lifetime, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            unsafe { lifetime.as_mut()?.write_ptr(pointer) }
        })
    }

    /// # Safety
    /// The returned slice is only valid while the object is alive and may
    /// observe uninitialized bytes for `new_uninitialized` allocations.
    pub unsafe fn memory(&self) -> Option<&[u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| {
                    std::slice::from_raw_parts(self.memory.add(offset), layout.size())
                })
        })
    }

    /// # Safety
    /// The returned slice is only valid while the object is alive and may
    /// observe uninitialized bytes for `new_uninitialized` allocations.
    pub unsafe fn memory_mut(&mut self) -> Option<&mut [u8]> {
        STORAGE.with_borrow(|storage| {
            storage
                .object_layout_with_offset(self.memory, self.id, self.page)
                .map(|(layout, offset)| {
                    std::slice::from_raw_parts_mut(self.memory.add(offset), layout.size())
                })
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr<T>(&self) -> Option<*const T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast_const().cast())
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr_mut<T>(&mut self) -> Option<*mut T> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) =
                storage.access_object_lifetime_type::<T>(self.memory, self.id, self.page, true)?;
            Some(pointer.cast())
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_ptr_raw(&self) -> Option<*const u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer.cast_const())
        })
    }

    /// # Safety
    /// The returned pointer is only valid while this box (or any clone of it)
    /// keeps the object alive.
    pub unsafe fn as_mut_ptr_raw(&mut self) -> Option<*mut u8> {
        STORAGE.with_borrow(|storage| {
            let (pointer, _, _) = storage.access_object_lifetime_type::<u8>(
                self.memory,
                self.id,
                self.page,
                false,
            )?;
            Some(pointer)
        })
    }
}

impl Clone for DynamicManagedBox {
    fn clone(&self) -> Self {
        STORAGE.with_borrow_mut(|storage| {
            storage.increment(self.id, self.page, self.memory);
            Self {
                memory: self.memory,
                id: self.id,
                page: self.page,
                drop: true,
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_occupancy_range() {
        let v = OccupancyRange {
            bits_start_inclusive: 0,
            bits_end_exclusive: 128,
            ..Default::default()
        }
        .update_mask();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = OccupancyRange::default();
        assert_eq!(v.mask, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..128);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFFFFFFFFFF);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 0..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 2);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x0000000000000000FFFFFFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..64);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 4);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x00000000000000000000FFFF00000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 32..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 8);

        let v = v.split().unwrap().1;
        assert_eq!(v.mask, 0x00000000000000000000FF0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..48);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 16);

        let v = v.split().unwrap().0;
        assert_eq!(v.mask, 0x000000000000000000000F0000000000);
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 40..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 32);

        let v = v.split().unwrap().1;
        assert_eq!(
            v.mask,
            0b0000000000000000000011000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..44);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 64);

        let v = v.split().unwrap().0;
        assert_eq!(
            v.mask,
            0b0000000000000000000001000000000000000000000000000000000000000000
        );
        assert_eq!(v.bits_start_inclusive..v.bits_end_exclusive, 42..43);
        assert_eq!(v.byte_size(), MEMORY_PAGE_SIZE / 128);

        assert!(v.split().is_none());
    }

    #[test]
    fn test_occupancy_map() {
        let header_size = Layout::new::<ManagedObjectHeader>().pad_to_align().size();
        let mut map = OccupancyMap::default();

        let range = map
            .find_free_space(
                std::mem::size_of::<f32>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 0..1);

        let range = map
            .find_free_space(
                std::mem::size_of::<u8>() + header_size,
                OccupancyRange::default(),
            )
            .unwrap();
        map.occupy(range);
        assert_eq!(range.bits_start_inclusive..range.bits_end_exclusive, 1..2);
    }

    #[test]
    fn test_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(*a.read().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read().unwrap(), 42);
        *b.write().unwrap() = 10;
        assert_eq!(*a.read().unwrap(), 10);
        assert_eq!(*b.read().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_dynamic_managed_box() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = DynamicManagedBox::new(42usize);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(a.is::<usize>());
        assert_eq!(*a.read::<usize>().unwrap(), 42);
        assert_eq!(a.instances_count(), 1);
        let mut b = a.clone();
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert!(b.is::<usize>());
        assert_eq!(a.instances_count(), 2);
        assert_eq!(b.instances_count(), 2);
        assert!(a.does_share_reference(&b));
        assert_eq!(*b.read::<usize>().unwrap(), 42);
        *b.write::<usize>().unwrap() = 10;
        assert_eq!(*a.read::<usize>().unwrap(), 10);
        assert_eq!(*b.read::<usize>().unwrap(), 10);
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 128,
                free_size: 16256,
                ..Default::default()
            }
        );
        assert_eq!(b.instances_count(), 1);
        drop(b);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_growing_allocations() {
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
        let a = ManagedBox::<[u64; 10]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 256,
                free_size: 16128,
                ..Default::default()
            }
        );
        let b = ManagedBox::<[u64; 100]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 1280,
                free_size: 15104,
                ..Default::default()
            }
        );
        let c = ManagedBox::<[u64; 1000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 1,
                total_size: 16392,
                occupied_size: 9472,
                free_size: 6912,
                ..Default::default()
            }
        );
        let d = ManagedBox::<[u64; 10000]>::new(std::array::from_fn(|index| index as _));
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89608,
                free_size: 6912
            }
        );
        drop(a);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 89352,
                free_size: 7168
            }
        );
        drop(b);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 2,
                chunked_pages_count: 1,
                exclusive_pages_count: 1,
                total_size: 96528,
                occupied_size: 88328,
                free_size: 8192
            }
        );
        drop(c);
        assert_eq!(
            managed_storage_stats(),
            ManagedStorageStats {
                pages_count: 1,
                chunked_pages_count: 0,
                exclusive_pages_count: 1,
                total_size: 80136,
                occupied_size: 80136,
                free_size: 0
            }
        );
        drop(d);
        assert_eq!(managed_storage_stats(), ManagedStorageStats::default());
    }

    #[test]
    fn test_managed_box_borrows() {
        let v = ManagedBox::new(42usize);
        let r = v.borrow().unwrap();
        drop(v);
        assert!(r.read().is_none());
    }

    #[test]
    fn test_fuzz_managed_box() {
        let builders = [
            || DynamicManagedBox::new(1u8),
            || DynamicManagedBox::new(2u16),
            || DynamicManagedBox::new(3u32),
            || DynamicManagedBox::new(4u64),
            || DynamicManagedBox::new(5u128),
            || DynamicManagedBox::new([42u8; 1000]),
            || DynamicManagedBox::new([42u8; 10000]),
            || DynamicManagedBox::new([42u8; 100000]),
        ];
        let mut boxes = std::array::from_fn::<_, 50, _>(|_| None);
        for index in 0..100 {
            let source = index % builders.len();
            let target = index % boxes.len();
            boxes[target] = Some((builders[source])());
        }
    }
}