cranelift_codegen/machinst/vcode.rs

//! This implements the VCode container: a CFG of Insts that have been lowered.
//!
//! VCode is virtual-register code. An instruction in VCode is almost a machine
//! instruction; however, its register slots can refer to virtual registers in
//! addition to real machine registers.
//!
//! VCode is structured with traditional basic blocks, and
//! each block must be terminated by an unconditional branch (one target), a
//! conditional branch (two targets), or a return (no targets). Note that this
//! slightly differs from the machine code of most ISAs: in most ISAs, a
//! conditional branch has one target (and the not-taken case falls through).
//! However, we expect that machine backends will elide branches to the following
//! block (i.e., zero-offset jumps), and will be able to codegen a branch-cond /
//! branch-uncond pair if *both* targets are not fallthrough. This allows us to
//! play with layout prior to final binary emission, as well, if we want.
//!
//! See the main module comment in `mod.rs` for more details on the VCode-based
//! backend pipeline.

use crate::ir::pcc::*;
use crate::ir::{self, types, Constant, ConstantData, ValueLabel};
use crate::machinst::*;
use crate::ranges::Ranges;
use crate::timing;
use crate::trace;
use crate::CodegenError;
use crate::{LabelValueLoc, ValueLocRange};
use regalloc2::{
    Edit, Function as RegallocFunction, InstOrEdit, InstRange, MachineEnv, Operand,
    OperandConstraint, OperandKind, PRegSet, RegClass,
};
use rustc_hash::FxHashMap;

use core::mem::take;
use cranelift_entity::{entity_impl, Keys};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;

/// Index referring to an instruction in VCode.
pub type InsnIndex = regalloc2::Inst;

/// Extension trait for `InsnIndex` to allow conversion to a
/// `BackwardsInsnIndex`.
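/// For example, with `num_insts == 10`, forward index 3 corresponds to
/// backwards index `10 - 3 - 1 == 6` (and vice versa).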
trait ToBackwardsInsnIndex {
    fn to_backwards_insn_index(&self, num_insts: usize) -> BackwardsInsnIndex;
}

impl ToBackwardsInsnIndex for InsnIndex {
    fn to_backwards_insn_index(&self, num_insts: usize) -> BackwardsInsnIndex {
        BackwardsInsnIndex::new(num_insts - self.index() - 1)
    }
}

/// An index referring to an instruction in the VCode when it is backwards,
/// during VCode construction.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "enable-serde",
    derive(::serde::Serialize, ::serde::Deserialize)
)]
pub struct BackwardsInsnIndex(InsnIndex);

impl BackwardsInsnIndex {
    pub fn new(i: usize) -> Self {
        BackwardsInsnIndex(InsnIndex::new(i))
    }
}

/// Index referring to a basic block in VCode.
pub type BlockIndex = regalloc2::Block;

/// VCodeInst wraps all requirements for a MachInst to be in VCode: it must be
/// a `MachInst` and it must implement `MachInstEmit` so it can emit itself.
pub trait VCodeInst: MachInst + MachInstEmit {}
impl<I: MachInst + MachInstEmit> VCodeInst for I {}

/// A function in "VCode" (virtualized-register code) form, after
/// lowering.  This is essentially a standard CFG of basic blocks,
/// where each basic block consists of lowered instructions produced
/// by the machine-specific backend.
///
/// Note that the VCode is immutable once produced, and is not
/// modified by register allocation in particular. Rather, register
/// allocation on the `VCode` produces a separate `regalloc2::Output`
/// struct, and this can be passed to `emit`. `emit` in turn does not
/// modify the vcode, but produces an `EmitResult`, which contains the
/// machine code itself, and the associated disassembly and/or
/// metadata as requested.
pub struct VCode<I: VCodeInst> {
    /// VReg IR-level types.
    vreg_types: Vec<Type>,

    /// Lowered machine instructions in order corresponding to the original IR.
    insts: Vec<I>,

    /// A map from backwards instruction index to the user stack map for that
    /// instruction.
    ///
    /// This is a sparse side table that only has entries for instructions that
    /// are safepoints, and only for a subset of those that have an associated
    /// user stack map.
    user_stack_maps: FxHashMap<BackwardsInsnIndex, ir::UserStackMap>,

    /// Operands: pre-regalloc references to virtual registers with
    /// constraints, in one flattened array. This allows the regalloc
    /// to efficiently access all operands without requiring expensive
    /// matches or method invocations on insts.
    operands: Vec<Operand>,

    /// Operand index ranges: for each instruction in `insts`, there
    /// is a tuple here providing the range in `operands` for that
    /// instruction's operands.
    operand_ranges: Ranges,

    /// Clobbers: a sparse map from instruction indices to clobber masks.
    clobbers: FxHashMap<InsnIndex, PRegSet>,

    /// Source locations for each instruction. (`SourceLoc` is a `u32`, so it is
    /// reasonable to keep one of these per instruction.)
    srclocs: Vec<RelSourceLoc>,

    /// Entry block.
    entry: BlockIndex,

    /// Block instruction indices.
    block_ranges: Ranges,

    /// Block successors: index range in the `block_succs` list.
    block_succ_range: Ranges,

    /// Block successor lists, concatenated into one vec. The
    /// `block_succ_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// successors.
    block_succs: Vec<regalloc2::Block>,

    /// Block predecessors: index range in the `block_preds` list.
    block_pred_range: Ranges,

    /// Block predecessor lists, concatenated into one vec. The
    /// `block_pred_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// predecessors.
    block_preds: Vec<regalloc2::Block>,

    /// Block parameters: index range in `block_params` below.
    block_params_range: Ranges,

    /// Block parameter lists, concatenated into one vec. The
    /// `block_params_range` list of tuples above gives (start, end)
    /// ranges within this list that correspond to each basic block's
    /// blockparam vregs.
    block_params: Vec<regalloc2::VReg>,

    /// Outgoing block arguments on branch instructions, concatenated
    /// into one list.
    ///
    /// Note that this is conceptually a 3D array: we have a VReg list
    /// per block, per successor. We flatten those three dimensions
    /// into this 1D vec, then store index ranges in two levels of
    /// indirection.
    ///
    /// Indexed by the indices in `branch_block_arg_succ_range`.
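    ///
    /// For example (hypothetical vregs): a block whose two successors
    /// take branch args `[v1, v2]` and `[v3]` contributes `v1, v2, v3`
    /// here, two ranges (`[0..2)` and `[2..3)`) to
    /// `branch_block_arg_range`, and a single range in
    /// `branch_block_arg_succ_range` covering both of those entries.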
    branch_block_args: Vec<regalloc2::VReg>,

    /// Array of sequences of (start, end) tuples in
    /// `branch_block_args`, one for each successor; these sequences
    /// for each block are concatenated.
    ///
    /// Indexed by the indices in `branch_block_arg_succ_range`.
    branch_block_arg_range: Ranges,

    /// For a given block, indices in `branch_block_arg_range`
    /// corresponding to all of its successors.
    branch_block_arg_succ_range: Ranges,

    /// Block-order information.
    block_order: BlockLoweringOrder,

    /// ABI object.
    pub(crate) abi: Callee<I::ABIMachineSpec>,

    /// Constant information used during code emission. This should be
    /// immutable across function compilations within the same module.
    emit_info: I::Info,

    /// Constants.
    pub(crate) constants: VCodeConstants,

    /// Value labels for debuginfo attached to vregs.
    debug_value_labels: Vec<(VReg, InsnIndex, InsnIndex, u32)>,

    pub(crate) sigs: SigSet,

    /// Facts on VRegs, for proof-carrying code verification.
    facts: Vec<Option<Fact>>,
}

/// The result of `VCode::emit`. Contains all information computed
/// during emission: actual machine code, optionally a disassembly,
/// and optionally metadata about the code layout.
pub struct EmitResult {
    /// The MachBuffer containing the machine code.
    pub buffer: MachBufferFinalized<Stencil>,

    /// Offset of each basic block, recorded during emission. Computed
    /// only if `debug_value_labels` is non-empty.
    pub bb_offsets: Vec<CodeOffset>,

    /// Final basic-block edges, in terms of code offsets of
    /// bb-starts. Computed only if `debug_value_labels` is non-empty.
    pub bb_edges: Vec<(CodeOffset, CodeOffset)>,

    /// Final length of function body.
    pub func_body_len: CodeOffset,

    /// The pretty-printed disassembly, if any. This uses the same
    /// pretty-printing for MachInsts as the pre-regalloc VCode Debug
    /// implementation, but additionally includes the prologue and
    /// epilogue(s), and makes use of the regalloc results.
    pub disasm: Option<String>,

    /// Offsets of sized stackslots.
    pub sized_stackslot_offsets: PrimaryMap<StackSlot, u32>,

    /// Offsets of dynamic stackslots.
    pub dynamic_stackslot_offsets: PrimaryMap<DynamicStackSlot, u32>,

    /// Value-labels information (debug metadata).
    pub value_labels_ranges: ValueLabelsRanges,

    /// Stack frame size.
    pub frame_size: u32,
}

/// A builder for a VCode function body.
///
/// This builder has the ability to accept instructions in either
/// forward or reverse order, depending on the pass direction that
/// produces the VCode. The lowering from CLIF to VCode<MachInst>
/// ordinarily occurs in reverse order (in order to allow instructions
/// to be lowered only if used, and not merged) so a reversal will
/// occur at the end of lowering to ensure the VCode is in machine
/// order.
///
/// If built in reverse, block and instruction indices used once the
/// VCode is built are relative to the final (reversed) order, not the
/// order of construction. Note that this means we do not know the
/// final block or instruction indices when building, so we do not
/// hand them out. (The user is assumed to know them when appending
/// terminator instructions with successor blocks.)
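///
/// A rough usage sketch (an illustration only, not a compiled doctest;
/// the concrete instruction, register, and source-location values below
/// are placeholders for whatever the machine backend and lowering driver
/// supply):
///
/// ```ignore
/// let mut builder = VCodeBuilder::new(
///     sigs, abi, emit_info, block_order, constants,
///     VCodeBuildDirection::Backward,
/// );
/// builder.set_entry(BlockIndex::new(0));
/// // Per lowered block: push instructions (in reverse program order),
/// // record successors with their branch args, then close the block.
/// builder.push(inst, srcloc);
/// builder.add_succ(succ_block, &branch_args);
/// builder.end_bb();
/// // ... repeat for every block, then build the final VCode:
/// let vcode = builder.build(vreg_allocator);
/// ```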
pub struct VCodeBuilder<I: VCodeInst> {
    /// In-progress VCode.
    pub(crate) vcode: VCode<I>,

    /// In what direction is the build occurring?
    direction: VCodeBuildDirection,

    /// Debug-value label in-progress map, keyed by label. For each
    /// label, we keep disjoint ranges mapping to vregs. We'll flatten
    /// this into (vreg, range, label) tuples when done.
    debug_info: FxHashMap<ValueLabel, Vec<(InsnIndex, InsnIndex, VReg)>>,
}

/// Direction in which a VCodeBuilder builds VCode.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VCodeBuildDirection {
    // TODO: add `Forward` once we need it and can test it adequately.
    /// Backward-build pass: we expect the producer to call `emit()`
    /// with instructions in reverse program order within each block.
    Backward,
}

impl<I: VCodeInst> VCodeBuilder<I> {
    /// Create a new VCodeBuilder.
    pub fn new(
        sigs: SigSet,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        constants: VCodeConstants,
        direction: VCodeBuildDirection,
    ) -> Self {
        let vcode = VCode::new(sigs, abi, emit_info, block_order, constants);

        VCodeBuilder {
            vcode,
            direction,
            debug_info: FxHashMap::default(),
        }
    }

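    /// Initialize the ABI's return-value area, if one is needed for this
    /// function's signature.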
    pub fn init_retval_area(&mut self, vregs: &mut VRegAllocator<I>) -> CodegenResult<()> {
        self.vcode.abi.init_retval_area(&self.vcode.sigs, vregs)
    }

    /// Access the ABI object.
    pub fn abi(&self) -> &Callee<I::ABIMachineSpec> {
        &self.vcode.abi
    }

    /// Access the ABI object.
    pub fn abi_mut(&mut self) -> &mut Callee<I::ABIMachineSpec> {
        &mut self.vcode.abi
    }

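    /// Access the signature set.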
    pub fn sigs(&self) -> &SigSet {
        &self.vcode.sigs
    }

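    /// Access the signature set mutably.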
    pub fn sigs_mut(&mut self) -> &mut SigSet {
        &mut self.vcode.sigs
    }

    /// Access to the BlockLoweringOrder object.
    pub fn block_order(&self) -> &BlockLoweringOrder {
        &self.vcode.block_order
    }

    /// Set the current block as the entry block.
    pub fn set_entry(&mut self, block: BlockIndex) {
        self.vcode.entry = block;
    }

    /// End the current basic block. Must be called after emitting vcode insts
    /// for IR insts and prior to ending the function (building the VCode).
    pub fn end_bb(&mut self) {
        let end_idx = self.vcode.insts.len();
        // Add the instruction index range to the list of blocks.
        self.vcode.block_ranges.push_end(end_idx);
        // End the successors list.
        let succ_end = self.vcode.block_succs.len();
        self.vcode.block_succ_range.push_end(succ_end);
        // End the blockparams list.
        let block_params_end = self.vcode.block_params.len();
        self.vcode.block_params_range.push_end(block_params_end);
        // End the branch blockparam args list.
        let branch_block_arg_succ_end = self.vcode.branch_block_arg_range.len();
        self.vcode
            .branch_block_arg_succ_range
            .push_end(branch_block_arg_succ_end);
    }

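    /// Add a block parameter (incoming vreg) for the current block.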
    pub fn add_block_param(&mut self, param: VirtualReg) {
        self.vcode.block_params.push(param.into());
    }

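    /// Record the outgoing branch args for one successor of the current
    /// block.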
    fn add_branch_args_for_succ(&mut self, args: &[Reg]) {
        self.vcode
            .branch_block_args
            .extend(args.iter().map(|&arg| VReg::from(arg)));
        let end = self.vcode.branch_block_args.len();
        self.vcode.branch_block_arg_range.push_end(end);
    }

    /// Push an instruction for the current BB and current IR inst
    /// within the BB.
    pub fn push(&mut self, insn: I, loc: RelSourceLoc) {
        self.vcode.insts.push(insn);
        self.vcode.srclocs.push(loc);
    }

    /// Add a successor block with branch args.
    pub fn add_succ(&mut self, block: BlockIndex, args: &[Reg]) {
        self.vcode.block_succs.push(block);
        self.add_branch_args_for_succ(args);
    }

    /// Add a debug value label to a register.
    pub fn add_value_label(&mut self, reg: Reg, label: ValueLabel) {
        // We'll fix up labels in `reverse_and_finalize()`. Because we're
        // generating code bottom-to-top, the liverange of the label goes
        // *from* the last index at which it was defined (or 0, which is
        // the end of the eventual function) *to* just this instruction,
        // and no further.
        let inst = InsnIndex::new(self.vcode.insts.len());
        let labels = self.debug_info.entry(label).or_insert_with(|| vec![]);
        let last = labels
            .last()
            .map(|(_start, end, _vreg)| *end)
            .unwrap_or(InsnIndex::new(0));
        labels.push((last, inst, reg.into()));
    }

    /// Access the constants.
    pub fn constants(&mut self) -> &mut VCodeConstants {
        &mut self.vcode.constants
    }

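    /// Compute each block's predecessor list from the successor lists
    /// collected during building.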
    fn compute_preds_from_succs(&mut self) {
        // Do a linear-time counting sort: first determine how many
        // times each block appears as a successor.
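        //
        // For example (hypothetical): with three blocks and
        // `block_succs = [b1, b2, b1, b0]`, the counts are
        // `starts = [1, 2, 1]`; the prefix-sum pass below rewrites this
        // into per-block starting positions `[0, 1, 3]` and pushes the
        // pred ranges [0..1), [1..3), and [3..4).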
        let mut starts = vec![0u32; self.vcode.num_blocks()];
        for succ in &self.vcode.block_succs {
            starts[succ.index()] += 1;
        }

        // Determine for each block the starting index where that
        // block's predecessors should go. This is equivalent to the
        // ranges we need to store in block_pred_range.
        self.vcode.block_pred_range.reserve(starts.len());
        let mut end = 0;
        for count in starts.iter_mut() {
            let start = end;
            end += *count;
            *count = start;
            self.vcode.block_pred_range.push_end(end as usize);
        }
        let end = end as usize;
        debug_assert_eq!(end, self.vcode.block_succs.len());

        // Walk over the successors again, this time grouped by
        // predecessor, and push the predecessor at the current
        // starting position of each of its successors. We build
        // each group of predecessors in whatever order Ranges::iter
        // returns them; regalloc2 doesn't care.
        self.vcode.block_preds.resize(end, BlockIndex::invalid());
        for (pred, range) in self.vcode.block_succ_range.iter() {
            let pred = BlockIndex::new(pred);
            for succ in &self.vcode.block_succs[range] {
                let pos = &mut starts[succ.index()];
                self.vcode.block_preds[*pos as usize] = pred;
                *pos += 1;
            }
        }
        debug_assert!(self.vcode.block_preds.iter().all(|pred| pred.is_valid()));
    }

    /// Called once, when a build in Backward order is complete, to
    /// perform the overall reversal (into final forward order) and
    /// finalize metadata accordingly.
    fn reverse_and_finalize(&mut self, vregs: &VRegAllocator<I>) {
        let n_insts = self.vcode.insts.len();
        if n_insts == 0 {
            return;
        }

        // Reverse the per-block and per-inst sequences.
        self.vcode.block_ranges.reverse_index();
        self.vcode.block_ranges.reverse_target(n_insts);
        // block_params_range is indexed by block (and blocks were
        // traversed in reverse) so we reverse it; but block-param
        // sequences in the concatenated vec can remain in reverse
        // order (it is effectively an arena of arbitrarily-placed
        // referenced sequences).
        self.vcode.block_params_range.reverse_index();
        // Likewise, we reverse block_succ_range, but the block_succ
        // concatenated array can remain as-is.
        self.vcode.block_succ_range.reverse_index();
        self.vcode.insts.reverse();
        self.vcode.srclocs.reverse();
        // Likewise, branch_block_arg_succ_range is indexed by block
        // so must be reversed.
        self.vcode.branch_block_arg_succ_range.reverse_index();

        // To translate an instruction index *endpoint* in reversed
        // order to forward order, compute `n_insts - i`.
        //
        // Why not `n_insts - 1 - i`? That would be correct to
        // translate an individual instruction index (for ten insts 0
        // to 9 inclusive, inst 0 becomes 9, and inst 9 becomes
        // 0). But for the usual inclusive-start, exclusive-end range
        // idiom, inclusive starts become exclusive ends and
        // vice-versa, so e.g. an (inclusive) start of 0 becomes an
        // (exclusive) end of 10.
        let translate = |inst: InsnIndex| InsnIndex::new(n_insts - inst.index());

        // Generate debug-value labels based on per-label maps.
        for (label, tuples) in &self.debug_info {
            for &(start, end, vreg) in tuples {
                let vreg = vregs.resolve_vreg_alias(vreg);
                let fwd_start = translate(end);
                let fwd_end = translate(start);
                self.vcode
                    .debug_value_labels
                    .push((vreg, fwd_start, fwd_end, label.as_u32()));
            }
        }

        // Now sort debug value labels by VReg, as required
        // by regalloc2.
        self.vcode
            .debug_value_labels
            .sort_unstable_by_key(|(vreg, _, _, _)| *vreg);
    }

    fn collect_operands(&mut self, vregs: &VRegAllocator<I>) {
        let allocatable = PRegSet::from(self.vcode.machine_env());
        for (i, insn) in self.vcode.insts.iter_mut().enumerate() {
            // Push operands from the instruction onto the operand list.
            //
            // We rename through the vreg alias table as we collect
            // the operands. This is better than a separate post-pass
            // over operands, because it has more cache locality:
            // operands only need to pass through L1 once. This is
            // also better than renaming instructions'
            // operands/registers while lowering, because here we only
            // need to do the `match` over the instruction to visit
            // its register fields (which is slow, branchy code) once.

            let mut op_collector =
                OperandCollector::new(&mut self.vcode.operands, allocatable, |vreg| {
                    vregs.resolve_vreg_alias(vreg)
                });
            insn.get_operands(&mut op_collector);
            let (ops, clobbers) = op_collector.finish();
            self.vcode.operand_ranges.push_end(ops);

            if clobbers != PRegSet::default() {
                self.vcode.clobbers.insert(InsnIndex::new(i), clobbers);
            }

            if let Some((dst, src)) = insn.is_move() {
                // We should never see non-virtual registers present in move
                // instructions.
                assert!(
                    src.is_virtual(),
                    "the real register {src:?} was used as the source of a move instruction"
                );
                assert!(
                    dst.to_reg().is_virtual(),
                    "the real register {:?} was used as the destination of a move instruction",
                    dst.to_reg()
                );
            }
        }

        // Translate blockparam args via the vreg aliases table as well.
        for arg in &mut self.vcode.branch_block_args {
            let new_arg = vregs.resolve_vreg_alias(*arg);
            trace!("operandcollector: block arg {:?} -> {:?}", arg, new_arg);
            *arg = new_arg;
        }
    }

    /// Build the final VCode.
    pub fn build(mut self, mut vregs: VRegAllocator<I>) -> VCode<I> {
        self.vcode.vreg_types = take(&mut vregs.vreg_types);
        self.vcode.facts = take(&mut vregs.facts);

        if self.direction == VCodeBuildDirection::Backward {
            self.reverse_and_finalize(&vregs);
        }
        self.collect_operands(&vregs);

        self.compute_preds_from_succs();
        self.vcode.debug_value_labels.sort_unstable();

        // At this point, nothing in the vcode should mention any
        // VReg which has been aliased. All the appropriate rewriting
        // should have happened above. Just to be sure, let's
        // double-check each field which has vregs.
        // Note: we can't easily check the registers mentioned directly by
        // `vcode.insts`; those, and the operands below, were already
        // resolved through the alias table in `collect_operands`.
        vregs.debug_assert_no_vreg_aliases(self.vcode.operands.iter().map(|op| op.vreg()));
        // Currently block params are never aliased to another vreg.
        vregs.debug_assert_no_vreg_aliases(self.vcode.block_params.iter().copied());
        // Branch block args are resolved in collect_operands.
        vregs.debug_assert_no_vreg_aliases(self.vcode.branch_block_args.iter().copied());
        // Debug value labels are resolved in reverse_and_finalize.
        vregs.debug_assert_no_vreg_aliases(
            self.vcode.debug_value_labels.iter().map(|&(vreg, ..)| vreg),
        );
        // Facts are resolved eagerly during set_vreg_alias.
        vregs.debug_assert_no_vreg_aliases(
            self.vcode
                .facts
                .iter()
                .zip(&vregs.vreg_types)
                .enumerate()
                .filter(|(_, (fact, _))| fact.is_some())
                .map(|(vreg, (_, &ty))| {
                    let (regclasses, _) = I::rc_for_type(ty).unwrap();
                    VReg::new(vreg, regclasses[0])
                }),
        );

        self.vcode
    }

    /// Add a user stack map for the associated instruction.
    pub fn add_user_stack_map(
        &mut self,
        inst: BackwardsInsnIndex,
        entries: &[ir::UserStackMapEntry],
    ) {
        let stack_map = ir::UserStackMap::new(entries, self.vcode.abi.sized_stackslot_offsets());
        let old_entry = self.vcode.user_stack_maps.insert(inst, stack_map);
        debug_assert!(old_entry.is_none());
    }
}

const NO_INST_OFFSET: CodeOffset = u32::MAX;

impl<I: VCodeInst> VCode<I> {
    /// New empty VCode.
    fn new(
        sigs: SigSet,
        abi: Callee<I::ABIMachineSpec>,
        emit_info: I::Info,
        block_order: BlockLoweringOrder,
        constants: VCodeConstants,
    ) -> Self {
        let n_blocks = block_order.lowered_order().len();
        VCode {
            sigs,
            vreg_types: vec![],
            insts: Vec::with_capacity(10 * n_blocks),
            user_stack_maps: FxHashMap::default(),
            operands: Vec::with_capacity(30 * n_blocks),
            operand_ranges: Ranges::with_capacity(10 * n_blocks),
            clobbers: FxHashMap::default(),
            srclocs: Vec::with_capacity(10 * n_blocks),
            entry: BlockIndex::new(0),
            block_ranges: Ranges::with_capacity(n_blocks),
            block_succ_range: Ranges::with_capacity(n_blocks),
            block_succs: Vec::with_capacity(n_blocks),
            block_pred_range: Ranges::default(),
            block_preds: Vec::new(),
            block_params_range: Ranges::with_capacity(n_blocks),
            block_params: Vec::with_capacity(5 * n_blocks),
            branch_block_args: Vec::with_capacity(10 * n_blocks),
            branch_block_arg_range: Ranges::with_capacity(2 * n_blocks),
            branch_block_arg_succ_range: Ranges::with_capacity(n_blocks),
            block_order,
            abi,
            emit_info,
            constants,
            debug_value_labels: vec![],
            facts: vec![],
        }
    }

    /// Get the ABI-dependent MachineEnv for managing register allocation.
    pub fn machine_env(&self) -> &MachineEnv {
        self.abi.machine_env(&self.sigs)
    }

    /// Get the number of blocks. Block indices will be in the range `0 ..
    /// self.num_blocks()`.
    pub fn num_blocks(&self) -> usize {
        self.block_ranges.len()
    }

    /// The number of lowered instructions.
    pub fn num_insts(&self) -> usize {
        self.insts.len()
    }

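    /// Compute the set of physical registers clobbered by this function's
    /// body: the destinations of all regalloc-inserted moves, plus, for
    /// each instruction included in clobbers (per
    /// `is_included_in_clobbers()`), the registers allocated to its `Def`
    /// operands and its explicit clobber mask.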
    fn compute_clobbers(&self, regalloc: &regalloc2::Output) -> Vec<Writable<RealReg>> {
        let mut clobbered = PRegSet::default();

        // All moves are included in clobbers.
        for (_, Edit::Move { to, .. }) in &regalloc.edits {
            if let Some(preg) = to.as_reg() {
                clobbered.add(preg);
            }
        }

        for (i, range) in self.operand_ranges.iter() {
            // Skip this instruction if not "included in clobbers" as
            // per the MachInst. (Some backends use this to implement
            // ABI specifics; e.g., excluding calls of the same ABI as
            // the current function from clobbers, because by
            // definition everything clobbered by the call can be
            // clobbered by this function without saving as well.)
            if !self.insts[i].is_included_in_clobbers() {
                continue;
            }

            let operands = &self.operands[range.clone()];
            let allocs = &regalloc.allocs[range];
            for (operand, alloc) in operands.iter().zip(allocs.iter()) {
                if operand.kind() == OperandKind::Def {
                    if let Some(preg) = alloc.as_reg() {
                        clobbered.add(preg);
                    }
                }
            }

            // Also add explicitly-clobbered registers.
            if let Some(&inst_clobbered) = self.clobbers.get(&InsnIndex::new(i)) {
                clobbered.union_from(inst_clobbered);
            }
        }

        clobbered
            .into_iter()
            .map(|preg| Writable::from_reg(RealReg::from(preg)))
            .collect()
    }

    /// Emit the instructions to a `MachBuffer`, containing fixed-up
    /// code and external reloc/trap/etc. records ready for use. Takes
    /// the regalloc results as well.
    ///
    /// Returns the machine code itself, and optionally metadata
    /// and/or a disassembly, as an `EmitResult`. The `VCode` itself
    /// is consumed by the emission process.
    pub fn emit(
        mut self,
        regalloc: &regalloc2::Output,
        want_disasm: bool,
        flags: &settings::Flags,
        ctrl_plane: &mut ControlPlane,
    ) -> EmitResult
    where
        I: VCodeInst,
    {
        // To write into disasm string.
        use core::fmt::Write;

        let _tt = timing::vcode_emit();
        let mut buffer = MachBuffer::new();
        let mut bb_starts: Vec<Option<CodeOffset>> = vec![];

        // The first M MachLabels are reserved for block indices.
        buffer.reserve_labels_for_blocks(self.num_blocks());

        // Register all allocated constants with the `MachBuffer` to ensure that
        // any references to the constants during instructions can be handled
        // correctly.
        buffer.register_constants(&self.constants);

        // Construct the final order we emit code in: cold blocks at the end.
        let mut final_order: SmallVec<[BlockIndex; 16]> = smallvec![];
        let mut cold_blocks: SmallVec<[BlockIndex; 16]> = smallvec![];
        for block in 0..self.num_blocks() {
            let block = BlockIndex::new(block);
            if self.block_order.is_cold(block) {
                cold_blocks.push(block);
            } else {
                final_order.push(block);
            }
        }
        final_order.extend(cold_blocks.clone());

        // Compute/save info we need for the prologue: clobbers and
        // number of spillslots.
        //
        // The clobber set computed here, together with the spillslot
        // count from regalloc, lets the ABI object compute the final
        // frame layout before we generate the prologue and epilogue(s)
        // below.
        let clobbers = self.compute_clobbers(regalloc);
        self.abi
            .compute_frame_layout(&self.sigs, regalloc.num_spillslots, clobbers);

        // Emit blocks.
        let mut cur_srcloc = None;
        let mut last_offset = None;
        let mut inst_offsets = vec![];
        let mut state = I::State::new(&self.abi, std::mem::take(ctrl_plane));

        let mut disasm = String::new();

        if !self.debug_value_labels.is_empty() {
            inst_offsets.resize(self.insts.len(), NO_INST_OFFSET);
        }

        // Count edits per block ahead of time; this is needed for
        // lookahead island emission. (We could derive it per-block
        // with binary search in the edit list, but it's more
        // efficient to do it in one pass here.)
        let mut ra_edits_per_block: SmallVec<[u32; 64]> = smallvec![];
        let mut edit_idx = 0;
        for block in 0..self.num_blocks() {
            let end_inst = InsnIndex::new(self.block_ranges.get(block).end);
            let start_edit_idx = edit_idx;
            while edit_idx < regalloc.edits.len() && regalloc.edits[edit_idx].0.inst() < end_inst {
                edit_idx += 1;
            }
            let end_edit_idx = edit_idx;
            ra_edits_per_block.push((end_edit_idx - start_edit_idx) as u32);
        }

        let is_forward_edge_cfi_enabled = self.abi.is_forward_edge_cfi_enabled();
        let mut bb_padding = match flags.bb_padding_log2_minus_one() {
            0 => Vec::new(),
            n => vec![0; 1 << (n - 1)],
        };
        let mut total_bb_padding = 0;

        for (block_order_idx, &block) in final_order.iter().enumerate() {
            trace!("emitting block {:?}", block);

            // Call the new block hook for state
            state.on_new_block();

            // Emit NOPs to align the block.
            let new_offset = I::align_basic_block(buffer.cur_offset());
            while new_offset > buffer.cur_offset() {
                // Pad with NOPs up to the aligned block offset.
                let nop = I::gen_nop((new_offset - buffer.cur_offset()) as usize);
                nop.emit(&mut buffer, &self.emit_info, &mut Default::default());
            }
            assert_eq!(buffer.cur_offset(), new_offset);

            let do_emit = |inst: &I,
                           disasm: &mut String,
                           buffer: &mut MachBuffer<I>,
                           state: &mut I::State| {
                if want_disasm && !inst.is_args() {
                    let mut s = state.clone();
                    writeln!(disasm, "  {}", inst.pretty_print_inst(&mut s)).unwrap();
                }
                inst.emit(buffer, &self.emit_info, state);
            };

            // Is this the first block? Emit the prologue directly if so.
            if block == self.entry {
                trace!(" -> entry block");
                buffer.start_srcloc(Default::default());
                for inst in &self.abi.gen_prologue() {
                    do_emit(&inst, &mut disasm, &mut buffer, &mut state);
                }
                buffer.end_srcloc();
            }

            // Now emit the regular block body.

            buffer.bind_label(MachLabel::from_block(block), state.ctrl_plane_mut());

            if want_disasm {
                writeln!(&mut disasm, "block{}:", block.index()).unwrap();
            }

            if flags.machine_code_cfg_info() {
                // Track BB starts. If we have backed up due to MachBuffer
                // branch opts, clear the start offsets of the blocks whose
                // code was removed.
                let cur_offset = buffer.cur_offset();
                if last_offset.is_some() && cur_offset <= last_offset.unwrap() {
                    for i in (0..bb_starts.len()).rev() {
                        if bb_starts[i].is_some() && cur_offset > bb_starts[i].unwrap() {
                            break;
                        }
                        bb_starts[i] = None;
                    }
                }
                bb_starts.push(Some(cur_offset));
                last_offset = Some(cur_offset);
            }

            if let Some(block_start) = I::gen_block_start(
                self.block_order.is_indirect_branch_target(block),
                is_forward_edge_cfi_enabled,
            ) {
                do_emit(&block_start, &mut disasm, &mut buffer, &mut state);
            }

            for inst_or_edit in regalloc.block_insts_and_edits(&self, block) {
                match inst_or_edit {
                    InstOrEdit::Inst(iix) => {
                        if !self.debug_value_labels.is_empty() {
                            // If we need to produce debug info,
                            // record the offset of each instruction
                            // so that we can translate value-label
                            // ranges to machine-code offsets.

                            // Cold blocks violate monotonicity
                            // assumptions elsewhere (that
                            // instructions in inst-index order are in
                            // order in machine code), so we omit
                            // their offsets here. Value-label range
                            // generation below will skip empty ranges
                            // and ranges with to-offsets of zero.
                            if !self.block_order.is_cold(block) {
                                inst_offsets[iix.index()] = buffer.cur_offset();
                            }
                        }

                        // Update the srcloc at this point in the buffer.
                        let srcloc = self.srclocs[iix.index()];
                        if cur_srcloc != Some(srcloc) {
                            if cur_srcloc.is_some() {
                                buffer.end_srcloc();
                            }
                            buffer.start_srcloc(srcloc);
                            cur_srcloc = Some(srcloc);
                        }

                        // If this is a safepoint, compute a stack map
                        // and pass it to the emit state.
                        let stack_map_disasm = if self.insts[iix.index()].is_safepoint() {
                            let (user_stack_map, user_stack_map_disasm) = {
                                // The `user_stack_maps` is keyed by reverse
                                // instruction index, so we must flip the
                                // index. We can't put this into a helper method
                                // due to borrowck issues because parts of
                                // `self` are borrowed mutably elsewhere in this
                                // function.
                                let index = iix.to_backwards_insn_index(self.num_insts());
                                let user_stack_map = self.user_stack_maps.remove(&index);
                                let user_stack_map_disasm =
                                    user_stack_map.as_ref().map(|m| format!("  ; {m:?}"));
                                (user_stack_map, user_stack_map_disasm)
                            };

                            state.pre_safepoint(user_stack_map);

                            user_stack_map_disasm
                        } else {
                            None
                        };

                        // If the instruction we are about to emit is
                        // a return, place an epilogue at this point
                        // (and don't emit the return; the actual
                        // epilogue will contain it).
                        if self.insts[iix.index()].is_term() == MachTerminator::Ret {
                            for inst in self.abi.gen_epilogue() {
                                do_emit(&inst, &mut disasm, &mut buffer, &mut state);
                            }
                        } else {
                            // Update the operands for this inst using the
                            // allocations from the regalloc result.
                            let mut allocs = regalloc.inst_allocs(iix).iter();
                            self.insts[iix.index()].get_operands(
                                &mut |reg: &mut Reg, constraint, _kind, _pos| {
                                    let alloc = allocs
                                        .next()
                                        .expect("enough allocations for all operands")
                                        .as_reg()
                                        .expect("only register allocations, not stack allocations")
                                        .into();

                                    if let OperandConstraint::FixedReg(rreg) = constraint {
                                        debug_assert_eq!(Reg::from(rreg), alloc);
                                    }
                                    *reg = alloc;
                                },
                            );
                            debug_assert!(allocs.next().is_none());

                            // Emit the instruction!
                            do_emit(
                                &self.insts[iix.index()],
                                &mut disasm,
                                &mut buffer,
                                &mut state,
                            );
                            if let Some(stack_map_disasm) = stack_map_disasm {
                                disasm.push_str(&stack_map_disasm);
                                disasm.push('\n');
                            }
                        }
                    }

                    InstOrEdit::Edit(Edit::Move { from, to }) => {
                        // Create a move/spill/reload instruction and
                        // immediately emit it.
                        match (from.as_reg(), to.as_reg()) {
                            (Some(from), Some(to)) => {
                                // Reg-to-reg move.
                                let from_rreg = Reg::from(from);
                                let to_rreg = Writable::from_reg(Reg::from(to));
                                debug_assert_eq!(from.class(), to.class());
                                let ty = I::canonical_type_for_rc(from.class());
                                let mv = I::gen_move(to_rreg, from_rreg, ty);
                                do_emit(&mv, &mut disasm, &mut buffer, &mut state);
                            }
                            (Some(from), None) => {
                                // Spill from register to spillslot.
                                let to = to.as_stack().unwrap();
                                let from_rreg = RealReg::from(from);
                                let spill = self.abi.gen_spill(to, from_rreg);
                                do_emit(&spill, &mut disasm, &mut buffer, &mut state);
                            }
                            (None, Some(to)) => {
                                // Load from spillslot to register.
                                let from = from.as_stack().unwrap();
                                let to_rreg = Writable::from_reg(RealReg::from(to));
                                let reload = self.abi.gen_reload(to_rreg, from);
                                do_emit(&reload, &mut disasm, &mut buffer, &mut state);
                            }
                            (None, None) => {
                                panic!("regalloc2 should have eliminated stack-to-stack moves!");
                            }
                        }
                    }
                }
            }

            if cur_srcloc.is_some() {
                buffer.end_srcloc();
                cur_srcloc = None;
            }

            // Do we need an island? Get the worst-case size of the next BB, add
            // it to the optional padding behind the block, and pass this to the
            // `MachBuffer` to determine if an island is necessary.
            let worst_case_next_bb = if block_order_idx < final_order.len() - 1 {
                let next_block = final_order[block_order_idx + 1];
                let next_block_range = self.block_ranges.get(next_block.index());
                let next_block_size = next_block_range.len() as u32;
                let next_block_ra_insertions = ra_edits_per_block[next_block.index()];
                I::worst_case_size() * (next_block_size + next_block_ra_insertions)
            } else {
                0
            };
            let padding = if bb_padding.is_empty() {
                0
            } else {
                bb_padding.len() as u32 + I::LabelUse::ALIGN - 1
            };
            if buffer.island_needed(padding + worst_case_next_bb) {
                buffer.emit_island(padding + worst_case_next_bb, ctrl_plane);
            }

            // Insert padding, if configured, to stress the `MachBuffer`'s
            // relocation and island calculations.
            //
            // Padding can get quite large during fuzzing though so place a
            // total cap on it where when a per-function threshold is exceeded
            // the padding is turned back down to zero. This avoids a small-ish
            // test case generating a GB+ memory footprint in Cranelift for
            // example.
            if !bb_padding.is_empty() {
                buffer.put_data(&bb_padding);
                buffer.align_to(I::LabelUse::ALIGN);
                total_bb_padding += bb_padding.len();
                if total_bb_padding > (150 << 20) {
                    bb_padding = Vec::new();
                }
            }
        }

        debug_assert!(
            self.user_stack_maps.is_empty(),
            "any stack maps should have been consumed by instruction emission, still have: {:#?}",
            self.user_stack_maps,
        );

        // Do any optimizations on branches at tail of buffer, as if we had
        // bound one last label.
        buffer.optimize_branches(ctrl_plane);

        // emission state is not needed anymore, move control plane back out
        *ctrl_plane = state.take_ctrl_plane();

        let func_body_len = buffer.cur_offset();

        // Create `bb_edges` and final (filtered) `bb_starts`.
        let mut bb_edges = vec![];
        let mut bb_offsets = vec![];
        if flags.machine_code_cfg_info() {
            for block in 0..self.num_blocks() {
                if bb_starts[block].is_none() {
                    // Block was deleted by MachBuffer; skip.
                    continue;
                }
                let from = bb_starts[block].unwrap();

                bb_offsets.push(from);
                // Resolve each `succ` label and add edges.
                let succs = self.block_succs(BlockIndex::new(block));
                for &succ in succs.iter() {
                    let to = buffer.resolve_label_offset(MachLabel::from_block(succ));
                    bb_edges.push((from, to));
                }
            }
        }

        self.monotonize_inst_offsets(&mut inst_offsets[..], func_body_len);
        let value_labels_ranges =
            self.compute_value_labels_ranges(regalloc, &inst_offsets[..], func_body_len);
        let frame_size = self.abi.frame_size();

        EmitResult {
            buffer: buffer.finish(&self.constants, ctrl_plane),
            bb_offsets,
            bb_edges,
            func_body_len,
            disasm: if want_disasm { Some(disasm) } else { None },
            sized_stackslot_offsets: self.abi.sized_stackslot_offsets().clone(),
            dynamic_stackslot_offsets: self.abi.dynamic_stackslot_offsets().clone(),
            value_labels_ranges,
            frame_size,
        }
    }

    fn monotonize_inst_offsets(&self, inst_offsets: &mut [CodeOffset], func_body_len: u32) {
        if self.debug_value_labels.is_empty() {
            return;
        }

        // During emission, branch removal can make offsets of instructions incorrect.
        // Consider the following sequence: [insi][jmp0][jmp1][jmp2][insj]
        // It will be recorded as (say):    [30]  [34]  [38]  [42]  [<would be 46>]
        // When the jumps get removed we are left with (in "inst_offsets"):
        // [insi][jmp0][jmp1][jmp2][insj][...]
        // [30]  [34]  [38]  [42]  [34]
        // Which violates the monotonicity invariant. This method sets offsets of these
        // removed instructions such as to make them appear zero-sized:
        // [insi][jmp0][jmp1][jmp2][insj][...]
        // [30]  [34]  [34]  [34]  [34]
        //
        let mut next_offset = func_body_len;
        for inst_index in (0..(inst_offsets.len() - 1)).rev() {
            let inst_offset = inst_offsets[inst_index];

            // Not all instructions get their offsets recorded.
            if inst_offset == NO_INST_OFFSET {
                continue;
            }

            if inst_offset > next_offset {
                trace!(
                    "Fixing code offset of the removed Inst {}: {} -> {}",
                    inst_index,
                    inst_offset,
                    next_offset
                );
                inst_offsets[inst_index] = next_offset;
                continue;
            }

            next_offset = inst_offset;
        }
    }

    fn compute_value_labels_ranges(
        &self,
        regalloc: &regalloc2::Output,
        inst_offsets: &[CodeOffset],
        func_body_len: u32,
    ) -> ValueLabelsRanges {
        if self.debug_value_labels.is_empty() {
            return ValueLabelsRanges::default();
        }

        let mut value_labels_ranges: ValueLabelsRanges = HashMap::new();
        for &(label, from, to, alloc) in &regalloc.debug_locations {
            let ranges = value_labels_ranges
                .entry(ValueLabel::from_u32(label))
                .or_insert_with(|| vec![]);
            let from_offset = inst_offsets[from.inst().index()];
            let to_offset = if to.inst().index() == inst_offsets.len() {
                func_body_len
            } else {
                inst_offsets[to.inst().index()]
            };

            // Empty ranges or unavailable offsets can happen
            // due to cold blocks and branch removal (see above).
            if from_offset == NO_INST_OFFSET
                || to_offset == NO_INST_OFFSET
                || from_offset == to_offset
            {
                continue;
            }

            let loc = if let Some(preg) = alloc.as_reg() {
                LabelValueLoc::Reg(Reg::from(preg))
            } else {
                let slot = alloc.as_stack().unwrap();
                let slot_offset = self.abi.get_spillslot_offset(slot);
                let slot_base_to_caller_sp_offset = self.abi.slot_base_to_caller_sp_offset();
                let caller_sp_to_cfa_offset =
                    crate::isa::unwind::systemv::caller_sp_to_cfa_offset();
                // NOTE: this is a negative offset because it's relative to the caller's SP
                let cfa_to_sp_offset =
                    -((slot_base_to_caller_sp_offset + caller_sp_to_cfa_offset) as i64);
                LabelValueLoc::CFAOffset(cfa_to_sp_offset + slot_offset)
            };

            // ValueLocRanges are recorded by *instruction-end
            // offset*. `from_offset` is the *start* of the
            // instruction; that is the same as the end of another
            // instruction, so we only want to begin coverage once
            // we are past the previous instruction's end.
            let start = from_offset + 1;

            // Likewise, `end` is exclusive, but we want to
            // *include* the end of the last
            // instruction. `to_offset` is the start of the
            // `to`-instruction, which is the exclusive end, i.e.,
            // the first instruction not covered. That
            // instruction's start is the same as the end of the
            // last instruction that is included, so we go one
            // byte further to be sure to include it.
            let end = to_offset + 1;

            // Coalesce adjacent ranges for the same location to minimize
            // output size, both here and for downstream consumers.
            if let Some(last_loc_range) = ranges.last_mut() {
                if last_loc_range.loc == loc && last_loc_range.end == start {
                    trace!(
                        "Extending debug range for VL{} in {:?} to {}",
                        label,
                        loc,
                        end
                    );
                    last_loc_range.end = end;
                    continue;
                }
            }

            trace!(
                "Recording debug range for VL{} in {:?}: [Inst {}..Inst {}) [{}..{})",
                label,
                loc,
                from.inst().index(),
                to.inst().index(),
                start,
                end
            );

            ranges.push(ValueLocRange { loc, start, end });
        }

        value_labels_ranges
    }

    /// Get the IR block for a BlockIndex, if one exists.
    pub fn bindex_to_bb(&self, block: BlockIndex) -> Option<ir::Block> {
        self.block_order.lowered_order()[block.index()].orig_block()
    }

    /// Get the type of a VReg.
    pub fn vreg_type(&self, vreg: VReg) -> Type {
        self.vreg_types[vreg.vreg()]
    }

    /// Get the fact, if any, for a given VReg.
    pub fn vreg_fact(&self, vreg: VReg) -> Option<&Fact> {
        self.facts[vreg.vreg()].as_ref()
    }

    /// Set the fact for a given VReg.
    pub fn set_vreg_fact(&mut self, vreg: VReg, fact: Fact) {
        trace!("set fact on {}: {:?}", vreg, fact);
        self.facts[vreg.vreg()] = Some(fact);
    }

    /// Does a given instruction define any facts?
    pub fn inst_defines_facts(&self, inst: InsnIndex) -> bool {
        self.inst_operands(inst)
            .iter()
            .filter(|o| o.kind() == OperandKind::Def)
            .map(|o| o.vreg())
            .any(|vreg| self.facts[vreg.vreg()].is_some())
    }

    /// Get the user stack map associated with the given forward instruction index.
    pub fn get_user_stack_map(&self, inst: InsnIndex) -> Option<&ir::UserStackMap> {
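        // The stack-map table is keyed by the backwards instruction index, so
        // convert the forward index first.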
        let index = inst.to_backwards_insn_index(self.num_insts());
        self.user_stack_maps.get(&index)
    }
}

impl<I: VCodeInst> std::ops::Index<InsnIndex> for VCode<I> {
    type Output = I;
    fn index(&self, idx: InsnIndex) -> &Self::Output {
        &self.insts[idx.index()]
    }
}

impl<I: VCodeInst> RegallocFunction for VCode<I> {
    fn num_insts(&self) -> usize {
        self.insts.len()
    }

    fn num_blocks(&self) -> usize {
        self.block_ranges.len()
    }

    fn entry_block(&self) -> BlockIndex {
        self.entry
    }

    fn block_insns(&self, block: BlockIndex) -> InstRange {
        let range = self.block_ranges.get(block.index());
        InstRange::new(InsnIndex::new(range.start), InsnIndex::new(range.end))
    }

    fn block_succs(&self, block: BlockIndex) -> &[BlockIndex] {
        let range = self.block_succ_range.get(block.index());
        &self.block_succs[range]
    }

    fn block_preds(&self, block: BlockIndex) -> &[BlockIndex] {
        let range = self.block_pred_range.get(block.index());
        &self.block_preds[range]
    }

    fn block_params(&self, block: BlockIndex) -> &[VReg] {
        // As a special case, we don't return block params for the entry block, since all
        // the arguments will be defined by the `Inst::Args` instruction.
        if block == self.entry {
            return &[];
        }

        let range = self.block_params_range.get(block.index());
        &self.block_params[range]
    }

    fn branch_blockparams(&self, block: BlockIndex, _insn: InsnIndex, succ_idx: usize) -> &[VReg] {
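        // Two-level lookup: the block's range selects its successors, and the
        // per-successor range selects that edge's argument vregs.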
        let succ_range = self.branch_block_arg_succ_range.get(block.index());
        debug_assert!(succ_idx < succ_range.len());
        let branch_block_args = self.branch_block_arg_range.get(succ_range.start + succ_idx);
        &self.branch_block_args[branch_block_args]
    }

    fn is_ret(&self, insn: InsnIndex) -> bool {
        match self.insts[insn.index()].is_term() {
            // We treat blocks terminated by an unconditional trap like a return for regalloc.
            MachTerminator::None => self.insts[insn.index()].is_trap(),
            MachTerminator::Ret | MachTerminator::RetCall => true,
            MachTerminator::Uncond | MachTerminator::Cond | MachTerminator::Indirect => false,
        }
    }

    fn is_branch(&self, insn: InsnIndex) -> bool {
        match self.insts[insn.index()].is_term() {
            MachTerminator::Cond | MachTerminator::Uncond | MachTerminator::Indirect => true,
            _ => false,
        }
    }

    fn inst_operands(&self, insn: InsnIndex) -> &[Operand] {
        let range = self.operand_ranges.get(insn.index());
        &self.operands[range]
    }

    fn inst_clobbers(&self, insn: InsnIndex) -> PRegSet {
        self.clobbers.get(&insn).cloned().unwrap_or_default()
    }

    fn num_vregs(&self) -> usize {
        self.vreg_types.len()
    }

    fn debug_value_labels(&self) -> &[(VReg, InsnIndex, InsnIndex, u32)] {
        &self.debug_value_labels
    }

    fn spillslot_size(&self, regclass: RegClass) -> usize {
        self.abi.get_spillslot_size(regclass) as usize
    }

    fn allow_multiple_vreg_defs(&self) -> bool {
        // At least the s390x backend requires this, because the
        // `Loop` pseudo-instruction aggregates all Operands, so pinned
        // vregs (RealRegs) may occur more than once.
        true
    }
}

impl<I: VCodeInst> Debug for VRegAllocator<I> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "VRegAllocator {{")?;

        let mut alias_keys = self.vreg_aliases.keys().cloned().collect::<Vec<_>>();
        alias_keys.sort_unstable();
        for key in alias_keys {
            let dest = self.vreg_aliases.get(&key).unwrap();
            writeln!(f, "  {:?} := {:?}", Reg::from(key), Reg::from(*dest))?;
        }

        for (vreg, fact) in self.facts.iter().enumerate() {
            if let Some(fact) = fact {
                writeln!(f, "  v{vreg} ! {fact}")?;
            }
        }

        writeln!(f, "}}")
    }
}

impl<I: VCodeInst> fmt::Debug for VCode<I> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "VCode {{")?;
        writeln!(f, "  Entry block: {}", self.entry.index())?;

        let mut state = Default::default();

        for block in 0..self.num_blocks() {
            let block = BlockIndex::new(block);
            writeln!(
                f,
                "Block {}({:?}):",
                block.index(),
                self.block_params(block)
            )?;
            if let Some(bb) = self.bindex_to_bb(block) {
                writeln!(f, "    (original IR block: {bb})")?;
            }
            for (succ_idx, succ) in self.block_succs(block).iter().enumerate() {
                writeln!(
                    f,
                    "    (successor: Block {}({:?}))",
                    succ.index(),
                    self.branch_blockparams(block, InsnIndex::new(0) /* dummy */, succ_idx)
                )?;
            }
            for inst in self.block_ranges.get(block.index()) {
                writeln!(
                    f,
                    "  Inst {}: {}",
                    inst,
                    self.insts[inst].pretty_print_inst(&mut state)
                )?;
                if !self.operands.is_empty() {
                    for operand in self.inst_operands(InsnIndex::new(inst)) {
                        if operand.kind() == OperandKind::Def {
                            if let Some(fact) = &self.facts[operand.vreg().vreg()] {
                                writeln!(f, "    v{} ! {}", operand.vreg().vreg(), fact)?;
                            }
                        }
                    }
                }
                if let Some(user_stack_map) = self.get_user_stack_map(InsnIndex::new(inst)) {
                    writeln!(f, "    {user_stack_map:?}")?;
                }
            }
        }

        writeln!(f, "}}")?;
        Ok(())
    }
}

/// This structure manages VReg allocation during the lifetime of the VCodeBuilder.
pub struct VRegAllocator<I> {
    /// VReg IR-level types.
    vreg_types: Vec<Type>,

    /// VReg aliases. When the final VCode is built we rewrite all
    /// uses of the keys in this table to their replacement values.
    ///
    /// We use these aliases to rename an instruction's expected
    /// result vregs to the returned vregs from lowering, which are
    /// usually freshly-allocated temps.
    vreg_aliases: FxHashMap<regalloc2::VReg, regalloc2::VReg>,

    /// A deferred error, to be bubbled up to the top level of the
    /// lowering algorithm. We take this approach because we cannot
    /// currently propagate a `Result` upward through ISLE code (the
    /// lowering rules) or some ABI code.
    deferred_error: Option<CodegenError>,

    /// Facts on VRegs, for proof-carrying code.
    facts: Vec<Option<Fact>>,

    /// The type of instruction that this allocator makes registers for.
    _inst: core::marker::PhantomData<I>,
}

impl<I: VCodeInst> VRegAllocator<I> {
    /// Make a new VRegAllocator.
    pub fn with_capacity(capacity: usize) -> Self {
        let capacity = first_user_vreg_index() + capacity;
        let mut vreg_types = Vec::with_capacity(capacity);
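        // Indices below `first_user_vreg_index()` are reserved for pinned
        // (real-register) vregs; give them a placeholder type.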
        vreg_types.resize(first_user_vreg_index(), types::INVALID);
        Self {
            vreg_types,
            vreg_aliases: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
            deferred_error: None,
            facts: Vec::with_capacity(capacity),
            _inst: core::marker::PhantomData::default(),
        }
    }

    /// Allocate a fresh ValueRegs.
    pub fn alloc(&mut self, ty: Type) -> CodegenResult<ValueRegs<Reg>> {
        if self.deferred_error.is_some() {
            return Err(CodegenError::CodeTooLarge);
        }
        let v = self.vreg_types.len();
        let (regclasses, tys) = I::rc_for_type(ty)?;
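        // Refuse to allocate if the new vreg indices would exceed regalloc2's
        // representable range.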
        if v + regclasses.len() >= VReg::MAX {
            return Err(CodegenError::CodeTooLarge);
        }

        let regs: ValueRegs<Reg> = match regclasses {
            &[rc0] => ValueRegs::one(VReg::new(v, rc0).into()),
            &[rc0, rc1] => ValueRegs::two(VReg::new(v, rc0).into(), VReg::new(v + 1, rc1).into()),
            // We can extend this if/when we support 32-bit targets; e.g.,
            // an i128 on a 32-bit machine will need up to four machine regs
            // for a `Value`.
            _ => panic!("Value must reside in 1 or 2 registers"),
        };
        for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
            let vreg = reg.to_virtual_reg().unwrap();
            debug_assert_eq!(self.vreg_types.len(), vreg.index());
            self.vreg_types.push(reg_ty);
        }

        // Create empty facts for each allocated vreg.
        self.facts.resize(self.vreg_types.len(), None);

        Ok(regs)
    }

    /// Allocate a fresh ValueRegs, deferring any out-of-vregs
    /// errors. This is useful in places where we cannot bubble a
    /// `CodegenResult` upward easily, and which are known to be
    /// invoked from within the lowering loop that checks the deferred
    /// error status below.
    pub fn alloc_with_deferred_error(&mut self, ty: Type) -> ValueRegs<Reg> {
        match self.alloc(ty) {
            Ok(x) => x,
            Err(e) => {
                self.deferred_error = Some(e);
                self.bogus_for_deferred_error(ty)
            }
        }
    }

    /// Take any deferred error that was accumulated by `alloc_with_deferred_error`.
    pub fn take_deferred_error(&mut self) -> Option<CodegenError> {
        self.deferred_error.take()
    }

    /// Produce a bogus VReg placeholder with the proper number of
    /// registers for the given type. This is meant to be used with
    /// deferred allocation errors (see `Lower::alloc_tmp()`).
    fn bogus_for_deferred_error(&self, ty: Type) -> ValueRegs<Reg> {
        let (regclasses, _tys) = I::rc_for_type(ty).expect("must have valid type");
        match regclasses {
            &[rc0] => ValueRegs::one(VReg::new(0, rc0).into()),
            &[rc0, rc1] => ValueRegs::two(VReg::new(0, rc0).into(), VReg::new(1, rc1).into()),
            _ => panic!("Value must reside in 1 or 2 registers"),
        }
    }

    /// Rewrite any mention of `from` into `to`.
    pub fn set_vreg_alias(&mut self, from: Reg, to: Reg) {
        let from = from.into();
        let resolved_to = self.resolve_vreg_alias(to.into());
        // Disallow cycles (see below).
        assert_ne!(resolved_to, from);

        // Maintain the invariant that PCC facts only exist on vregs
        // which aren't aliases. We want to preserve whatever was
        // stated about the vreg before its producer was lowered.
        if let Some(fact) = self.facts[from.vreg()].take() {
            self.set_fact(resolved_to, fact);
        }

        let old_alias = self.vreg_aliases.insert(from, resolved_to);
        debug_assert_eq!(old_alias, None);
    }

    fn resolve_vreg_alias(&self, mut vreg: regalloc2::VReg) -> regalloc2::VReg {
        // We prevent cycles from existing by resolving targets of
        // aliases eagerly before setting them. If the target resolves
        // to the origin of the alias, then a cycle would be created
        // and the alias is disallowed. Because of the structure of
        // SSA code (one instruction can refer to another's defs but
        // not vice-versa, except indirectly through
        // phis/blockparams), cycles should not occur as we use
        // aliases to redirect vregs to the temps that actually define
        // them.
        while let Some(to) = self.vreg_aliases.get(&vreg) {
            vreg = *to;
        }
        vreg
    }

    #[inline]
    fn debug_assert_no_vreg_aliases(&self, mut list: impl Iterator<Item = VReg>) {
        debug_assert!(list.all(|vreg| !self.vreg_aliases.contains_key(&vreg)));
    }

    /// Set the proof-carrying code fact on a given virtual register.
    ///
    /// Returns the old fact, if any (only one fact can be stored).
    fn set_fact(&mut self, vreg: regalloc2::VReg, fact: Fact) -> Option<Fact> {
        trace!("vreg {:?} has fact: {:?}", vreg, fact);
        debug_assert!(!self.vreg_aliases.contains_key(&vreg));
        self.facts[vreg.vreg()].replace(fact)
    }

    /// Set a fact only if one doesn't already exist.
    pub fn set_fact_if_missing(&mut self, vreg: VirtualReg, fact: Fact) {
        let vreg = self.resolve_vreg_alias(vreg.into());
        if self.facts[vreg.vreg()].is_none() {
            self.set_fact(vreg, fact);
        }
    }

    /// Allocate a fresh ValueRegs, with a given fact to apply if
    /// the value fits in one VReg.
    pub fn alloc_with_maybe_fact(
        &mut self,
        ty: Type,
        fact: Option<Fact>,
    ) -> CodegenResult<ValueRegs<Reg>> {
        let result = self.alloc(ty)?;

        // Ensure that we don't lose a fact on a value that splits
        // into multiple VRegs.
        assert!(result.len() == 1 || fact.is_none());
        if let Some(fact) = fact {
            self.set_fact(result.regs()[0].into(), fact);
        }

        Ok(result)
    }
}

/// This structure tracks the large constants used in VCode that will be emitted separately by the
/// [MachBuffer].
///
/// First, during the lowering phase, constants are inserted using
/// [VCodeConstants::insert]; an intermediate handle, `VCodeConstant`, tracks what constants are
/// used in this phase. Some deduplication is performed, when possible, as constant
/// values are inserted.
///
/// Second, during the emission phase, the [MachBuffer] assigns [MachLabel]s for each of the
/// constants so that instructions can refer to the value's memory location. The [MachBuffer]
/// then writes the constant values to the buffer.
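///
/// A minimal sketch of the deduplication behavior during the first phase (marked
/// `ignore` since these internal types may not be reachable from doctests):
///
/// ```ignore
/// let mut constants = VCodeConstants::default();
/// // Identical `U64` payloads map to the same `VCodeConstant` handle.
/// let a = constants.insert(VCodeConstantData::U64(42u64.to_le_bytes()));
/// let b = constants.insert(VCodeConstantData::U64(42u64.to_le_bytes()));
/// assert_eq!(a, b);
/// assert_eq!(constants.len(), 1);
/// ```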
#[derive(Default)]
pub struct VCodeConstants {
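    /// All inserted constants, keyed by their `VCodeConstant` handle.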
    constants: PrimaryMap<VCodeConstant, VCodeConstantData>,
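    /// Deduplication map for constants originating in the IR constant pool
    /// ([VCodeConstantData::Pool]).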
    pool_uses: HashMap<Constant, VCodeConstant>,
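    /// Deduplication map for well-known constants, keyed by the address of
    /// their static data ([VCodeConstantData::WellKnown]).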
    well_known_uses: HashMap<*const [u8], VCodeConstant>,
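    /// Deduplication map for 64-bit constants ([VCodeConstantData::U64]).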
    u64s: HashMap<[u8; 8], VCodeConstant>,
}
impl VCodeConstants {
    /// Initialize the structure with the expected number of constants.
    pub fn with_capacity(expected_num_constants: usize) -> Self {
        Self {
            constants: PrimaryMap::with_capacity(expected_num_constants),
            pool_uses: HashMap::with_capacity(expected_num_constants),
            well_known_uses: HashMap::new(),
            u64s: HashMap::new(),
        }
    }

    /// Insert a constant; using this method indicates that a constant value will be used and thus
    /// will be emitted to the `MachBuffer`. The current implementation can deduplicate constants
    /// that are [VCodeConstantData::Pool] or [VCodeConstantData::WellKnown] but not
    /// [VCodeConstantData::Generated].
    pub fn insert(&mut self, data: VCodeConstantData) -> VCodeConstant {
        match data {
            VCodeConstantData::Generated(_) => self.constants.push(data),
            VCodeConstantData::Pool(constant, _) => match self.pool_uses.get(&constant) {
                None => {
                    let vcode_constant = self.constants.push(data);
                    self.pool_uses.insert(constant, vcode_constant);
                    vcode_constant
                }
                Some(&vcode_constant) => vcode_constant,
            },
            VCodeConstantData::WellKnown(data_ref) => {
                match self.well_known_uses.entry(data_ref as *const [u8]) {
                    Entry::Vacant(v) => {
                        let vcode_constant = self.constants.push(data);
                        v.insert(vcode_constant);
                        vcode_constant
                    }
                    Entry::Occupied(o) => *o.get(),
                }
            }
            VCodeConstantData::U64(value) => match self.u64s.entry(value) {
                Entry::Vacant(v) => {
                    let vcode_constant = self.constants.push(data);
                    v.insert(vcode_constant);
                    vcode_constant
                }
                Entry::Occupied(o) => *o.get(),
            },
        }
    }

    /// Return the number of constants inserted.
    pub fn len(&self) -> usize {
        self.constants.len()
    }

    /// Iterate over the `VCodeConstant` keys inserted in this structure.
    pub fn keys(&self) -> Keys<VCodeConstant> {
        self.constants.keys()
    }

    /// Iterate over the `VCodeConstant` keys and the data (as a byte slice) inserted in this
    /// structure.
    pub fn iter(&self) -> impl Iterator<Item = (VCodeConstant, &VCodeConstantData)> {
        self.constants.iter()
    }

    /// Returns the data associated with the specified constant.
    pub fn get(&self, c: VCodeConstant) -> &VCodeConstantData {
        &self.constants[c]
    }

    /// Checks whether the given [VCodeConstantData] is a pool constant
    /// that has already been registered with this structure.
    pub fn pool_uses(&self, constant: &VCodeConstantData) -> bool {
        match constant {
            VCodeConstantData::Pool(c, _) => self.pool_uses.contains_key(c),
            _ => false,
        }
    }
}

/// A use of a constant by one or more VCode instructions; see [VCodeConstants].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct VCodeConstant(u32);
entity_impl!(VCodeConstant);

/// Identify the different types of constant that can be inserted into [VCodeConstants]. Tracking
/// these separately instead of as raw byte buffers allows us to avoid some duplication.
pub enum VCodeConstantData {
    /// A constant already present in the Cranelift IR
    /// [ConstantPool](crate::ir::constant::ConstantPool).
    Pool(Constant, ConstantData),
    /// A reference to a well-known constant value that is statically encoded within the compiler.
    WellKnown(&'static [u8]),
    /// A constant value generated during lowering; the value may depend on the instruction
    /// context, which makes it difficult to de-duplicate. If possible, use the other variants.
    Generated(ConstantData),
    /// A constant of at most 64 bits. These are deduplicated as
    /// well. Stored as a fixed-size array of `u8` so that we do not
    /// encounter endianness problems when cross-compiling.
    U64([u8; 8]),
}
impl VCodeConstantData {
    /// Retrieve the constant data as a byte slice.
    pub fn as_slice(&self) -> &[u8] {
        match self {
            VCodeConstantData::Pool(_, d) | VCodeConstantData::Generated(d) => d.as_slice(),
            VCodeConstantData::WellKnown(d) => d,
            VCodeConstantData::U64(value) => &value[..],
        }
    }

    /// Calculate the alignment of the constant data.
    pub fn alignment(&self) -> u32 {
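        // Small (<= 8-byte) constants get 8-byte alignment; larger constants
        // (e.g. 16-byte vectors) get 16-byte alignment.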
        if self.as_slice().len() <= 8 {
            8
        } else {
            16
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn size_of_constant_structs() {
        assert_eq!(size_of::<Constant>(), 4);
        assert_eq!(size_of::<VCodeConstant>(), 4);
        assert_eq!(size_of::<ConstantData>(), 24);
        assert_eq!(size_of::<VCodeConstantData>(), 32);
        assert_eq!(
            size_of::<PrimaryMap<VCodeConstant, VCodeConstantData>>(),
            24
        );
        // TODO The VCodeConstants structure's memory size could be further optimized.
        // With certain versions of Rust, each `HashMap` in `VCodeConstants` occupied at
        // least 48 bytes, making an empty `VCodeConstants` cost 120 bytes.
    }
}