1use crate::utils::BitField;
2
/// Packed "thread free" word (C `mi_thread_free_t`): a `usize` that the C side
/// treats as a tagged pointer. NOTE(review): the exact bit packing (pointer
/// bits vs. delayed-free tag) is defined on the C side — confirm before
/// manipulating this value from Rust.
pub type mi_thread_free_t = usize;

/// Page size class (C enum `mi_page_kind_t`), carried as a raw C `unsigned int`.
pub type mi_page_kind_t = cty::c_uint;
/// Small page kind.
pub const mi_page_kind_MI_PAGE_SMALL: mi_page_kind_t = 0;
/// Medium page kind.
pub const mi_page_kind_MI_PAGE_MEDIUM: mi_page_kind_t = 1;
/// Large page kind.
pub const mi_page_kind_MI_PAGE_LARGE: mi_page_kind_t = 2;
/// Huge page kind.
pub const mi_page_kind_MI_PAGE_HUGE: mi_page_kind_t = 3;
10
/// Page descriptor (C `mi_page_t`). `#[repr(C)]` with explicit bitfield units:
/// the field order, sizes, and the `_bitfield_*` padding fields must match the
/// C struct exactly — do not reorder or resize anything here.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct mi_page_t {
    /// Index of this page within its owning segment (presumed from the name —
    /// TODO confirm against the C definition).
    pub segment_idx: u8,
    // One-byte bitfield unit holding: segment_in_use, is_reset, is_committed,
    // is_zero_init (see the accessor impl below).
    pub _bitfield_align_1: [u8; 0],
    pub _bitfield_1: BitField<[u8; 1usize]>,
    /// Number of blocks currently available in this page.
    pub capacity: u16,
    /// Number of blocks reserved in memory for this page.
    pub reserved: u16,
    /// Union of the `full`/`aligned` flag bits (see `mi_page_flags_t`).
    pub flags: mi_page_flags_t,
    // One-byte bitfield unit holding: is_zero (1 bit), retire_expire (7 bits).
    pub _bitfield_align_2: [u8; 0],
    pub _bitfield_2: BitField<[u8; 1usize]>,
    /// Head of the free-block list.
    pub free: *mut mi_block_t,
    /// Two key words — presumably for free-list pointer encoding; confirm
    /// against the C side.
    pub keys: [usize; 2usize],
    /// Count of blocks in use.
    pub used: u32,
    /// Block size field (the `x` prefix mirrors the C name `xblock_size`).
    pub xblock_size: u32,
    /// Head of the thread-local deferred free list.
    pub local_free: *mut mi_block_t,
    /// Packed cross-thread free word (see `mi_thread_free_t`).
    pub xthread_free: mi_thread_free_t,
    /// Owning heap, stored as a raw word (the `x` prefix mirrors C's `xheap`).
    pub xheap: usize,
    /// Next page in its queue (intrusive doubly-linked list).
    pub next: *mut mi_page_t,
    /// Previous page in its queue.
    pub prev: *mut mi_page_t,
}
32
33impl mi_page_t {
34 #[inline]
35 pub fn segment_in_use(&self) -> u8 {
36 unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) }
37 }
38 #[inline]
39 pub fn set_segment_in_use(&mut self, val: u8) {
40 unsafe {
41 let val: u8 = ::core::mem::transmute(val);
42 self._bitfield_1.set(0usize, 1u8, val as u64)
43 }
44 }
45 #[inline]
46 pub fn is_reset(&self) -> u8 {
47 unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) }
48 }
49 #[inline]
50 pub fn set_is_reset(&mut self, val: u8) {
51 unsafe {
52 let val: u8 = ::core::mem::transmute(val);
53 self._bitfield_1.set(1usize, 1u8, val as u64)
54 }
55 }
56 #[inline]
57 pub fn is_committed(&self) -> u8 {
58 unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) }
59 }
60 #[inline]
61 pub fn set_is_committed(&mut self, val: u8) {
62 unsafe {
63 let val: u8 = ::core::mem::transmute(val);
64 self._bitfield_1.set(2usize, 1u8, val as u64)
65 }
66 }
67 #[inline]
68 pub fn is_zero_init(&self) -> u8 {
69 unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u8) }
70 }
71 #[inline]
72 pub fn set_is_zero_init(&mut self, val: u8) {
73 unsafe {
74 let val: u8 = ::core::mem::transmute(val);
75 self._bitfield_1.set(3usize, 1u8, val as u64)
76 }
77 }
78 #[inline]
79 pub fn new_bitfield_1(
80 segment_in_use: u8,
81 is_reset: u8,
82 is_committed: u8,
83 is_zero_init: u8,
84 ) -> BitField<[u8; 1usize]> {
85 let mut _bitfield_unit: BitField<[u8; 1usize]> = Default::default();
86 _bitfield_unit.set(0usize, 1u8, {
87 let segment_in_use: u8 = unsafe { ::core::mem::transmute(segment_in_use) };
88 segment_in_use as u64
89 });
90 _bitfield_unit.set(1usize, 1u8, {
91 let is_reset: u8 = unsafe { ::core::mem::transmute(is_reset) };
92 is_reset as u64
93 });
94 _bitfield_unit.set(2usize, 1u8, {
95 let is_committed: u8 = unsafe { ::core::mem::transmute(is_committed) };
96 is_committed as u64
97 });
98 _bitfield_unit.set(3usize, 1u8, {
99 let is_zero_init: u8 = unsafe { ::core::mem::transmute(is_zero_init) };
100 is_zero_init as u64
101 });
102 _bitfield_unit
103 }
104 #[inline]
105 pub fn is_zero(&self) -> u8 {
106 unsafe { ::core::mem::transmute(self._bitfield_2.get(0usize, 1u8) as u8) }
107 }
108 #[inline]
109 pub fn set_is_zero(&mut self, val: u8) {
110 unsafe {
111 let val: u8 = ::core::mem::transmute(val);
112 self._bitfield_2.set(0usize, 1u8, val as u64)
113 }
114 }
115 #[inline]
116 pub fn retire_expire(&self) -> u8 {
117 unsafe { ::core::mem::transmute(self._bitfield_2.get(1usize, 7u8) as u8) }
118 }
119 #[inline]
120 pub fn set_retire_expire(&mut self, val: u8) {
121 unsafe {
122 let val: u8 = ::core::mem::transmute(val);
123 self._bitfield_2.set(1usize, 7u8, val as u64)
124 }
125 }
126 #[inline]
127 pub fn new_bitfield_2(is_zero: u8, retire_expire: u8) -> BitField<[u8; 1usize]> {
128 let mut _bitfield_unit: BitField<[u8; 1usize]> = Default::default();
129 _bitfield_unit.set(0usize, 1u8, {
130 let is_zero: u8 = unsafe { ::core::mem::transmute(is_zero) };
131 is_zero as u64
132 });
133 _bitfield_unit.set(1usize, 7u8, {
134 let retire_expire: u8 = unsafe { ::core::mem::transmute(retire_expire) };
135 retire_expire as u64
136 });
137 _bitfield_unit
138 }
139}
140
/// A free block header (C `mi_block_t`): a single word linking to the next
/// free block. NOTE(review): the word is likely an encoded pointer rather than
/// a plain address (see `mi_page_t::keys`) — confirm against the C side before
/// dereferencing.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_block_t {
    /// Next-block word of the intrusive free list.
    pub next: usize,
}
/// Delayed-free state (C enum `mi_delayed_t`), carried as a raw C `unsigned int`.
pub type mi_delayed_t = cty::c_uint;
/// Use the delayed-free mechanism.
pub const mi_delayed_t_MI_USE_DELAYED_FREE: mi_delayed_t = 0;
/// A delayed free is in progress.
pub const mi_delayed_t_MI_DELAYED_FREEING: mi_delayed_t = 1;
/// Do not use delayed free.
pub const mi_delayed_t_MI_NO_DELAYED_FREE: mi_delayed_t = 2;
/// Never use delayed free (sticky state).
pub const mi_delayed_t_MI_NEVER_DELAYED_FREE: mi_delayed_t = 3;
151
/// Page flags (C `mi_page_flags_t`): a one-byte union viewed either as the
/// combined `full_aligned` byte or as individual `in_full` / `has_aligned`
/// bits via `x`. Reading either field requires `unsafe` (union access); both
/// views alias the same byte.
#[repr(C)]
#[derive(Copy, Clone)]
pub union mi_page_flags_t {
    /// Whole-byte view of both flag bits at once.
    pub full_aligned: u8,
    /// Bitfield view (see `mi_page_flags_t_ty_1`).
    pub x: mi_page_flags_t_ty_1,
}
158
/// Bitfield view of `mi_page_flags_t`: bit 0 = `in_full`, bit 1 = `has_aligned`.
/// `#[repr(C, packed)]` so it overlays the union's single byte exactly.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone)]
pub struct mi_page_flags_t_ty_1 {
    pub _bitfield_align_1: [u8; 0],
    pub _bitfield_1: BitField<[u8; 1usize]>,
}
165impl mi_page_flags_t_ty_1 {
166 #[inline]
167 pub fn in_full(&self) -> u8 {
168 unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) }
169 }
170 #[inline]
171 pub fn set_in_full(&mut self, val: u8) {
172 unsafe {
173 let val: u8 = ::core::mem::transmute(val);
174 self._bitfield_1.set(0usize, 1u8, val as u64)
175 }
176 }
177 #[inline]
178 pub fn has_aligned(&self) -> u8 {
179 unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) }
180 }
181 #[inline]
182 pub fn set_has_aligned(&mut self, val: u8) {
183 unsafe {
184 let val: u8 = ::core::mem::transmute(val);
185 self._bitfield_1.set(1usize, 1u8, val as u64)
186 }
187 }
188 #[inline]
189 pub fn new_bitfield_1(in_full: u8, has_aligned: u8) -> BitField<[u8; 1usize]> {
190 let mut _bitfield_unit: BitField<[u8; 1usize]> = Default::default();
191 _bitfield_unit.set(0usize, 1u8, {
192 let in_full: u8 = unsafe { ::core::mem::transmute(in_full) };
193 in_full as u64
194 });
195 _bitfield_unit.set(1usize, 1u8, {
196 let has_aligned: u8 = unsafe { ::core::mem::transmute(has_aligned) };
197 has_aligned as u64
198 });
199 _bitfield_unit
200 }
201}
202
/// Segment descriptor (C `mi_segment_t`): a large OS allocation subdivided
/// into pages. `#[repr(C)]` — field order/size must match the C struct.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct mi_segment_t {
    /// Memory id of the OS-level allocation backing this segment.
    pub memid: usize,
    /// Whether the backing memory is pinned (cannot be decommitted/reset) —
    /// presumed from the name; confirm against the C side.
    pub mem_is_pinned: bool,
    /// Whether the backing memory is fully committed.
    pub mem_is_committed: bool,
    /// Next segment in the abandoned-segment list.
    pub abandoned_next: *mut mi_segment_t,
    /// Next segment in its queue (intrusive doubly-linked list).
    pub next: *mut mi_segment_t,
    /// Previous segment in its queue.
    pub prev: *mut mi_segment_t,
    /// Count of abandoned pages in this segment.
    pub abandoned: usize,
    /// Number of times this abandoned segment has been visited.
    pub abandoned_visits: usize,
    /// Count of pages in use.
    pub used: usize,
    /// Count of available pages.
    pub capacity: usize,
    /// Total segment size in bytes.
    pub segment_size: usize,
    /// Size of the segment's metadata header in bytes.
    pub segment_info_size: usize,
    /// Random cookie — presumably for pointer validation; confirm on the C side.
    pub cookie: usize,
    /// log2 of the page size within this segment.
    pub page_shift: usize,
    /// Id of the thread that owns this segment.
    pub thread_id: usize,
    /// Size class of the pages in this segment (all pages share one kind).
    pub page_kind: mi_page_kind_t,
    /// Trailing flexible array of page descriptors: declared with length 1 to
    /// mirror the C flexible-array-member idiom; the real length is
    /// `capacity`. Never copy this struct by value from live C memory.
    pub pages: [mi_page_t; 1usize],
}
225
/// Queue of pages sharing a block size (C `mi_page_queue_t`); the pages
/// themselves are linked through `mi_page_t::next`/`prev`.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_page_queue_t {
    /// First page in the queue (null when empty).
    pub first: *mut mi_page_t,
    /// Last page in the queue (null when empty).
    pub last: *mut mi_page_t,
    /// Block size served by the pages in this queue.
    pub block_size: usize,
}
233
/// Random-number generator context (C `mi_random_ctx_t`). The 16-word
/// input/output blocks suggest a ChaCha-style stream cipher state — confirm
/// against the C implementation before relying on that.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_random_ctx_t {
    /// Current cipher input state.
    pub input: [u32; 16usize],
    /// Generated output block.
    pub output: [u32; 16usize],
    /// How many words of `output` are still unconsumed.
    pub output_available: cty::c_int,
}
241
/// Per-thread heap (C `mi_heap_t`). `#[repr(C)]` — field order/size must match
/// the C struct; the array lengths (130, 75) come from C-side constants.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_heap_t {
    /// Owning thread-local data.
    pub tld: *mut mi_tld_t,
    /// Direct size-class → page lookup table for the fast allocation path.
    pub pages_free_direct: [*mut mi_page_t; 130usize],
    /// Page queues indexed by size-class bin.
    pub pages: [mi_page_queue_t; 75usize],
    /// Head word of the cross-thread delayed-free list.
    pub thread_delayed_free: mi_block_t,
    /// Id of the thread that owns this heap.
    pub thread_id: usize,
    /// Random cookie — presumably for pointer validation; confirm on the C side.
    pub cookie: usize,
    /// Two key words — presumably for free-list pointer encoding; confirm on
    /// the C side.
    pub keys: [usize; 2usize],
    /// Random state for this heap.
    pub random: mi_random_ctx_t,
    /// Total number of pages in this heap.
    pub page_count: usize,
    /// Smallest retired-page bin index.
    pub page_retired_min: usize,
    /// Largest retired-page bin index.
    pub page_retired_max: usize,
    /// Next heap in the thread's heap list.
    pub next: *mut mi_heap_t,
    /// When true, this heap does not reclaim abandoned pages — presumed from
    /// the name; confirm on the C side.
    pub no_reclaim: bool,
}
259
/// One statistics counter with peak tracking (C `mi_stat_count_t`).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_stat_count_t {
    /// Cumulative amount allocated.
    pub allocated: i64,
    /// Cumulative amount freed.
    pub freed: i64,
    /// Highest value `current` has reached.
    pub peak: i64,
    /// Current outstanding amount.
    pub current: i64,
}
268
/// A simple event counter (C `mi_stat_counter_t`): running total plus event count.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_stat_counter_t {
    /// Sum of all recorded amounts.
    pub total: i64,
    /// Number of recorded events.
    pub count: i64,
}
275
/// Full allocator statistics block (C `mi_stats_t`). `#[repr(C)]` — field
/// order must match the C struct; the 74-entry bin array mirrors the C-side
/// bin-count constant.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_stats_t {
    pub segments: mi_stat_count_t,
    pub pages: mi_stat_count_t,
    pub reserved: mi_stat_count_t,
    pub committed: mi_stat_count_t,
    pub reset: mi_stat_count_t,
    pub page_committed: mi_stat_count_t,
    pub segments_abandoned: mi_stat_count_t,
    pub pages_abandoned: mi_stat_count_t,
    pub threads: mi_stat_count_t,
    pub normal: mi_stat_count_t,
    pub huge: mi_stat_count_t,
    pub giant: mi_stat_count_t,
    pub malloc: mi_stat_count_t,
    pub segments_cache: mi_stat_count_t,
    pub pages_extended: mi_stat_counter_t,
    pub mmap_calls: mi_stat_counter_t,
    pub commit_calls: mi_stat_counter_t,
    pub page_no_retire: mi_stat_counter_t,
    pub searches: mi_stat_counter_t,
    pub normal_count: mi_stat_counter_t,
    pub huge_count: mi_stat_counter_t,
    pub giant_count: mi_stat_counter_t,
    /// Per-size-class counters for "normal" allocations.
    pub normal_bins: [mi_stat_count_t; 74usize],
}
303
/// Queue of segments (C `mi_segment_queue_t`); segments are linked through
/// `mi_segment_t::next`/`prev`.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_segment_queue_t {
    /// First segment (null when empty).
    pub first: *mut mi_segment_t,
    /// Last segment (null when empty).
    pub last: *mut mi_segment_t,
}
310
/// OS-layer thread-local data (C `mi_os_tld_t`).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_os_tld_t {
    /// Start index for the next region search — presumed from the name;
    /// confirm against the C allocator's region layer.
    pub region_idx: usize,
    /// Statistics block to account OS operations against.
    pub stats: *mut mi_stats_t,
}
317
/// Segment-layer thread-local data (C `mi_segments_tld_s`).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_segments_tld_s {
    /// Queue of segments with free small pages.
    pub small_free: mi_segment_queue_t,
    /// Queue of segments with free medium pages.
    pub medium_free: mi_segment_queue_t,
    /// Queue of pages whose memory has been reset.
    pub pages_reset: mi_page_queue_t,
    /// Current number of segments.
    pub count: usize,
    /// Highest value `count` has reached.
    pub peak_count: usize,
    /// Current total size of all segments.
    pub current_size: usize,
    /// Highest value `current_size` has reached.
    pub peak_size: usize,
    /// Number of segments held in the cache.
    pub cache_count: usize,
    /// Total size of cached segments.
    pub cache_size: usize,
    /// Head of the (intrusive) segment cache list.
    pub cache: *mut mi_segment_t,
    /// Statistics block for this thread.
    pub stats: *mut mi_stats_t,
    /// OS-layer thread-local data.
    pub os: *mut mi_os_tld_t,
}
/// C-style typedef alias for `mi_segments_tld_s`.
pub type mi_segments_tld_t = mi_segments_tld_s;
/// Top-level thread-local data (C `mi_tld_t`), aggregating the heap list and
/// the segment/OS sub-layers. `#[repr(C)]` — must match the C layout.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct mi_tld_t {
    /// Monotonic event counter for this thread.
    pub heartbeat: cty::c_ulonglong,
    /// Re-entrancy guard flag — presumed from the name; confirm on the C side.
    pub recurse: bool,
    /// The backing heap this thread started with.
    pub heap_backing: *mut mi_heap_t,
    /// Head of this thread's list of heaps.
    pub heaps: *mut mi_heap_t,
    /// Segment-layer thread-local state (inlined by value).
    pub segments: mi_segments_tld_t,
    /// OS-layer thread-local state (inlined by value).
    pub os: mi_os_tld_t,
    /// Statistics for this thread (inlined by value).
    pub stats: mi_stats_t,
}