// iced_x86/decoder.rs

// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors

// Reads a little-endian `$mem_ty` from the input and advances `data_ptr` past it.
// Evaluates to the value converted to `$ret_ty` on success, or to `$err_expr`
// when there aren't enough bytes left before `max_data_ptr`.
macro_rules! mk_read_xx {
	($slf:ident, $mem_ty:ty, $from_le:path, $ret_ty:ty, $err_expr:expr) => {
		const SIZE: usize = mem::size_of::<$mem_ty>();
		const _: () = assert!(SIZE >= 1);
		const _: () = assert!(SIZE <= Decoder::MAX_READ_SIZE);
		let data_ptr = $slf.data_ptr;
		#[allow(trivial_numeric_casts)]
		{
			// This doesn't overflow data_ptr (verified in ctor since SIZE <= MAX_READ_SIZE)
			if data_ptr + SIZE - 1 < $slf.max_data_ptr {
				// SAFETY:
				// - cast: It's OK to cast to an unaligned `*const uXX` since we call read_unaligned()
				// - ptr::read_unaligned: ptr is readable and data (u8 slice) is initialized
				let result = $from_le(unsafe { ptr::read_unaligned(data_ptr as *const $mem_ty) }) as $ret_ty;
				// - data_ptr + SIZE doesn't overflow (verified in ctor since SIZE <= MAX_READ_SIZE)
				// - data_ptr + SIZE <= self.max_data_ptr (see `if` check above)
				$slf.data_ptr = data_ptr + SIZE;
				result
			} else {
				$err_expr
			}
		}
	};
}
// Same as mk_read_xx!() but on EOF it marks the current instruction invalid
// (IS_INVALID | NO_MORE_BYTES) and evaluates to 0 instead of returning an error expr.
macro_rules! mk_read_xx_fn_body {
	($slf:ident, $mem_ty:ty, $from_le:path, $ret_ty:ty) => {
		mk_read_xx!($slf, $mem_ty, $from_le, $ret_ty, {
			$slf.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
			0
		})
	};
}
// Reads a `u8` as `usize`, or `break`s out of the enclosing loop on EOF.
macro_rules! read_u8_break {
	($slf:ident) => {{
		mk_read_xx! {$slf, u8, u8::from_le, usize, break}
	}};
}
// Reads a `u16` (little-endian) as `usize`, or `break`s out of the enclosing loop on EOF.
#[cfg(not(feature = "__internal_flip"))]
macro_rules! read_u16_break {
	($slf:ident) => {{
		mk_read_xx! {$slf, u16, u16::from_le, usize, break}
	}};
}
// Reads a `u32` (little-endian) as `usize`, or `break`s out of the enclosing loop on EOF.
macro_rules! read_u32_break {
	($slf:ident) => {{
		mk_read_xx! {$slf, u32, u32::from_le, usize, break}
	}};
}
// Runs `$stmts`, then decodes the instruction's memory operand (non-EVEX/MVEX
// encodings only). Evaluates to the bool returned by the 32/64-bit mem handler,
// or `false` when 16-bit addressing is used.
#[cfg(not(feature = "__internal_flip"))]
macro_rules! read_op_mem_stmt_ret {
	($decoder:ident, $instruction:ident, $stmts:block) => {{
		debug_assert!($decoder.state.encoding() != EncodingKind::EVEX as u32 && $decoder.state.encoding() != EncodingKind::MVEX as u32);
		let index = $decoder.state.mem_index as usize;
		debug_assert!(index < $decoder.read_op_mem_fns.len());
		// SAFETY: index is valid because modrm.mod = 0-2 (never 3 if we're here) so index will always be 0-10_111 (17h)
		let handler = unsafe { *$decoder.read_op_mem_fns.get_unchecked(index) };

		$stmts

		if $decoder.state.address_size != OpSize::Size16 {
			(handler)($instruction, $decoder)
		} else {
			$decoder.read_op_mem_16($instruction, TupleType::N1);
			false
		}
	}};
}
// Like read_op_mem_stmt_ret!() but discards the handler's return value.
#[cfg(not(feature = "__internal_flip"))]
macro_rules! read_op_mem_stmt {
	($decoder:ident, $instruction:ident, $stmts:block) => {
		let _ = read_op_mem_stmt_ret!($decoder, $instruction, $stmts);
	};
}
// `__internal_flip` variant: no fn-pointer table, calls read_op_mem_32_or_64() directly.
#[cfg(feature = "__internal_flip")]
macro_rules! read_op_mem_stmt {
	($decoder:ident, $instruction:ident, $stmts:block) => {
		debug_assert!($decoder.state.encoding() != EncodingKind::EVEX as u32 && $decoder.state.encoding() != EncodingKind::MVEX as u32);
		$stmts
		if $decoder.state.address_size != OpSize::Size16 {
			let _ = $decoder.read_op_mem_32_or_64($instruction);
		} else {
			$decoder.read_op_mem_16($instruction, TupleType::N1);
		}
	};
}
89
90mod enums;
91mod handlers;
92mod table_de;
93#[cfg(test)]
94pub(crate) mod tests;
95
96use crate::decoder::handlers::tables::TABLES;
97use crate::decoder::handlers::{OpCodeHandler, OpCodeHandlerDecodeFn};
98use crate::iced_constants::IcedConstants;
99use crate::iced_error::IcedError;
100use crate::instruction_internal;
101use crate::tuple_type_tbl::get_disp8n;
102use crate::*;
103use core::iter::FusedIterator;
104use core::{cmp, fmt, mem, ptr};
105
106#[rustfmt::skip]
107#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
108static READ_OP_MEM_VSIB_FNS: [fn(&mut Decoder<'_>, &mut Instruction, Register, TupleType, bool) -> bool; 0x18] = [
109	decoder_read_op_mem_vsib_0,
110	decoder_read_op_mem_vsib_0,
111	decoder_read_op_mem_vsib_0,
112	decoder_read_op_mem_vsib_0,
113	decoder_read_op_mem_vsib_0_4,
114	decoder_read_op_mem_vsib_0_5,
115	decoder_read_op_mem_vsib_0,
116	decoder_read_op_mem_vsib_0,
117
118	decoder_read_op_mem_vsib_1,
119	decoder_read_op_mem_vsib_1,
120	decoder_read_op_mem_vsib_1,
121	decoder_read_op_mem_vsib_1,
122	decoder_read_op_mem_vsib_1_4,
123	decoder_read_op_mem_vsib_1,
124	decoder_read_op_mem_vsib_1,
125	decoder_read_op_mem_vsib_1,
126
127	decoder_read_op_mem_vsib_2,
128	decoder_read_op_mem_vsib_2,
129	decoder_read_op_mem_vsib_2,
130	decoder_read_op_mem_vsib_2,
131	decoder_read_op_mem_vsib_2_4,
132	decoder_read_op_mem_vsib_2,
133	decoder_read_op_mem_vsib_2,
134	decoder_read_op_mem_vsib_2,
135];
136
137static MEM_REGS_16: [(Register, Register); 8] = [
138	(Register::BX, Register::SI),
139	(Register::BX, Register::DI),
140	(Register::BP, Register::SI),
141	(Register::BP, Register::DI),
142	(Register::SI, Register::None),
143	(Register::DI, Register::None),
144	(Register::BP, Register::None),
145	(Register::BX, Register::None),
146];
147
// GENERATOR-BEGIN: OpSize
// ⚠️This was generated by GENERATOR!🦹‍♂️
#[derive(Copy, Clone, Eq, PartialEq)]
#[allow(dead_code)]
pub(crate) enum OpSize {
	Size16,
	Size32,
	Size64,
}
#[rustfmt::skip]
static GEN_DEBUG_OP_SIZE: [&str; 3] = [
	"Size16",
	"Size32",
	"Size64",
];
impl fmt::Debug for OpSize {
	#[inline]
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}", GEN_DEBUG_OP_SIZE[*self as usize])
	}
}
impl Default for OpSize {
	#[must_use]
	#[inline]
	fn default() -> Self {
		OpSize::Size16
	}
}
// GENERATOR-END: OpSize
177
// GENERATOR-BEGIN: DecoderError
// ⚠️This was generated by GENERATOR!🦹‍♂️
/// Decoder error
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(not(feature = "exhaustive_enums"), non_exhaustive)]
pub enum DecoderError {
	/// No error. The last decoded instruction is a valid instruction
	None = 0,
	/// It's an invalid instruction or an invalid encoding of an existing instruction (eg. some reserved bit is set/cleared)
	InvalidInstruction = 1,
	/// There's not enough bytes left to decode the instruction
	NoMoreBytes = 2,
}
#[rustfmt::skip]
static GEN_DEBUG_DECODER_ERROR: [&str; 3] = [
	"None",
	"InvalidInstruction",
	"NoMoreBytes",
];
impl fmt::Debug for DecoderError {
	#[inline]
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}", GEN_DEBUG_DECODER_ERROR[*self as usize])
	}
}
impl Default for DecoderError {
	#[must_use]
	#[inline]
	fn default() -> Self {
		DecoderError::None
	}
}
#[allow(non_camel_case_types)]
#[allow(dead_code)]
pub(crate) type DecoderErrorUnderlyingType = u8;
213#[rustfmt::skip]
214impl DecoderError {
215	/// Iterates over all `DecoderError` enum values
216	#[inline]
217	pub fn values() -> impl Iterator<Item = DecoderError> + DoubleEndedIterator + ExactSizeIterator + FusedIterator {
218		// SAFETY: all values 0-max are valid enum values
219		(0..IcedConstants::DECODER_ERROR_ENUM_COUNT).map(|x| unsafe { mem::transmute::<u8, DecoderError>(x as u8) })
220	}
221}
// Verifies values(): correct length/size_hint, ascending discriminant order, and
// that forward and reverse iteration yield the same set.
#[test]
#[rustfmt::skip]
fn test_decodererror_values() {
	let mut iter = DecoderError::values();
	assert_eq!(iter.size_hint(), (IcedConstants::DECODER_ERROR_ENUM_COUNT, Some(IcedConstants::DECODER_ERROR_ENUM_COUNT)));
	assert_eq!(iter.len(), IcedConstants::DECODER_ERROR_ENUM_COUNT);
	assert!(iter.next().is_some());
	assert_eq!(iter.size_hint(), (IcedConstants::DECODER_ERROR_ENUM_COUNT - 1, Some(IcedConstants::DECODER_ERROR_ENUM_COUNT - 1)));
	assert_eq!(iter.len(), IcedConstants::DECODER_ERROR_ENUM_COUNT - 1);

	let values: Vec<DecoderError> = DecoderError::values().collect();
	assert_eq!(values.len(), IcedConstants::DECODER_ERROR_ENUM_COUNT);
	for (i, value) in values.into_iter().enumerate() {
		assert_eq!(i, value as usize);
	}

	let values1: Vec<DecoderError> = DecoderError::values().collect();
	let mut values2: Vec<DecoderError> = DecoderError::values().rev().collect();
	values2.reverse();
	assert_eq!(values1, values2);
}
243#[rustfmt::skip]
244impl TryFrom<usize> for DecoderError {
245	type Error = IcedError;
246	#[inline]
247	fn try_from(value: usize) -> Result<Self, Self::Error> {
248		if value < IcedConstants::DECODER_ERROR_ENUM_COUNT {
249			// SAFETY: all values 0-max are valid enum values
250			Ok(unsafe { mem::transmute(value as u8) })
251		} else {
252			Err(IcedError::new("Invalid DecoderError value"))
253		}
254	}
255}
// Verifies TryFrom<usize>: round-trips every valid value and rejects out-of-range ones.
#[test]
#[rustfmt::skip]
fn test_decodererror_try_from_usize() {
	for value in DecoderError::values() {
		let converted = <DecoderError as TryFrom<usize>>::try_from(value as usize).unwrap();
		assert_eq!(converted, value);
	}
	assert!(<DecoderError as TryFrom<usize>>::try_from(IcedConstants::DECODER_ERROR_ENUM_COUNT).is_err());
	assert!(<DecoderError as TryFrom<usize>>::try_from(core::usize::MAX).is_err());
}
// serde support: serialized as the u8 discriminant; deserialization rejects out-of-range values.
#[cfg(feature = "serde")]
#[rustfmt::skip]
#[allow(clippy::zero_sized_map_values)]
const _: () = {
	use core::marker::PhantomData;
	use serde::de;
	use serde::{Deserialize, Deserializer, Serialize, Serializer};
	type EnumType = DecoderError;
	impl Serialize for EnumType {
		#[inline]
		fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
		where
			S: Serializer,
		{
			serializer.serialize_u8(*self as u8)
		}
	}
	impl<'de> Deserialize<'de> for EnumType {
		#[inline]
		fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
		where
			D: Deserializer<'de>,
		{
			struct Visitor<'de> {
				marker: PhantomData<EnumType>,
				lifetime: PhantomData<&'de ()>,
			}
			impl<'de> de::Visitor<'de> for Visitor<'de> {
				type Value = EnumType;
				#[inline]
				fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
					formatter.write_str("enum DecoderError")
				}
				#[inline]
				fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
				where
					E: de::Error,
				{
					if let Ok(v) = <usize as TryFrom<_>>::try_from(v) {
						if let Ok(value) = <EnumType as TryFrom<_>>::try_from(v) {
							return Ok(value);
						}
					}
					Err(de::Error::invalid_value(de::Unexpected::Unsigned(v), &"a valid DecoderError variant value"))
				}
			}
			deserializer.deserialize_u8(Visitor { marker: PhantomData::<EnumType>, lifetime: PhantomData })
		}
	}
};
// GENERATOR-END: DecoderError
317
// GENERATOR-BEGIN: DecoderOptions
// ⚠️This was generated by GENERATOR!🦹‍♂️
/// Decoder options
#[allow(missing_copy_implementations)]
#[allow(missing_debug_implementations)]
pub struct DecoderOptions;
impl DecoderOptions {
	/// No option is enabled
	pub const NONE: u32 = 0x0000_0000;
	/// Disable some checks for invalid encodings of instructions, eg. most instructions can't use a `LOCK` prefix so if one is found, they're decoded as [`Code::INVALID`] unless this option is enabled.
	///
	/// [`Code::INVALID`]: enum.Code.html#variant.INVALID
	pub const NO_INVALID_CHECK: u32 = 0x0000_0001;
	/// AMD decoder: allow 16-bit branch/ret instructions in 64-bit mode, no `o64 CALL/JMP FAR [mem], o64 LSS/LFS/LGS`, `UD0` has no modr/m byte, decode `LOCK MOV CR`. The AMD decoder can still decode Intel instructions.
	pub const AMD: u32 = 0x0000_0002;
	/// Decode opcodes `0F0D` and `0F18-0F1F` as reserved-nop instructions (eg. [`Code::Reservednop_rm32_r32_0F1D`])
	///
	/// [`Code::Reservednop_rm32_r32_0F1D`]: enum.Code.html#variant.Reservednop_rm32_r32_0F1D
	pub const FORCE_RESERVED_NOP: u32 = 0x0000_0004;
	/// Decode `UMOV` instructions
	pub const UMOV: u32 = 0x0000_0008;
	/// Decode `XBTS`/`IBTS`
	pub const XBTS: u32 = 0x0000_0010;
	/// Decode `0FA6`/`0FA7` as `CMPXCHG`
	pub const CMPXCHG486A: u32 = 0x0000_0020;
	/// Decode some old removed FPU instructions (eg. `FRSTPM`)
	pub const OLD_FPU: u32 = 0x0000_0040;
	/// Decode `PCOMMIT`
	pub const PCOMMIT: u32 = 0x0000_0080;
	/// Decode 286 `STOREALL`/`LOADALL` (`0F04` and `0F05`)
	pub const LOADALL286: u32 = 0x0000_0100;
	/// Decode 386 `LOADALL`
	pub const LOADALL386: u32 = 0x0000_0200;
	/// Decode `CL1INVMB`
	pub const CL1INVMB: u32 = 0x0000_0400;
	/// Decode `MOV r32,tr` and `MOV tr,r32`
	pub const MOV_TR: u32 = 0x0000_0800;
	/// Decode `JMPE` instructions
	pub const JMPE: u32 = 0x0000_1000;
	/// Don't decode `PAUSE`, decode `NOP` instead
	pub const NO_PAUSE: u32 = 0x0000_2000;
	/// Don't decode `WBNOINVD`, decode `WBINVD` instead
	pub const NO_WBNOINVD: u32 = 0x0000_4000;
	/// Decode undocumented Intel `RDUDBG` and `WRUDBG` instructions
	pub const UDBG: u32 = 0x0000_8000;
	/// Don't decode `TZCNT`, decode `BSF` instead
	pub const NO_MPFX_0FBC: u32 = 0x0001_0000;
	/// Don't decode `LZCNT`, decode `BSR` instead
	pub const NO_MPFX_0FBD: u32 = 0x0002_0000;
	/// Don't decode `LAHF` and `SAHF` in 64-bit mode
	pub const NO_LAHF_SAHF_64: u32 = 0x0004_0000;
	/// Decode `MPX` instructions
	pub const MPX: u32 = 0x0008_0000;
	/// Decode most Cyrix instructions: `FPU`, `EMMI`, `SMM`, `DDI`
	pub const CYRIX: u32 = 0x0010_0000;
	/// Decode Cyrix `SMINT 0F7E` (Cyrix 6x86 or earlier)
	pub const CYRIX_SMINT_0F7E: u32 = 0x0020_0000;
	/// Decode Cyrix `DMI` instructions (AMD Geode GX/LX)
	pub const CYRIX_DMI: u32 = 0x0040_0000;
	/// Decode Centaur `ALTINST`
	pub const ALTINST: u32 = 0x0080_0000;
	/// Decode Intel Knights Corner instructions (requires the `mvex` feature)
	pub const KNC: u32 = 0x0100_0000;
}
// GENERATOR-END: DecoderOptions
383
// GENERATOR-BEGIN: HandlerFlags
// ⚠️This was generated by GENERATOR!🦹‍♂️
// Per-opcode-handler flag bits (prefix legality for the handler's instruction).
pub(crate) struct HandlerFlags;
#[allow(dead_code)]
impl HandlerFlags {
	pub(crate) const NONE: u32 = 0x0000_0000;
	pub(crate) const XACQUIRE: u32 = 0x0000_0001;
	pub(crate) const XRELEASE: u32 = 0x0000_0002;
	pub(crate) const XACQUIRE_XRELEASE_NO_LOCK: u32 = 0x0000_0004;
	pub(crate) const LOCK: u32 = 0x0000_0008;
}
// GENERATOR-END: HandlerFlags
396
// GENERATOR-BEGIN: StateFlags
// ⚠️This was generated by GENERATOR!🦹‍♂️
// Bit flags stored in `State::flags`. Note: MVEX_SSS and ENCODING are multi-bit
// fields stored via the *_MASK/*_SHIFT pairs, overlapping the low flag bits.
pub(crate) struct StateFlags;
#[allow(dead_code)]
impl StateFlags {
	pub(crate) const IP_REL64: u32 = 0x0000_0001;
	pub(crate) const IP_REL32: u32 = 0x0000_0002;
	pub(crate) const HAS_REX: u32 = 0x0000_0008;
	pub(crate) const B: u32 = 0x0000_0010;
	pub(crate) const Z: u32 = 0x0000_0020;
	pub(crate) const IS_INVALID: u32 = 0x0000_0040;
	pub(crate) const W: u32 = 0x0000_0080;
	pub(crate) const NO_IMM: u32 = 0x0000_0100;
	pub(crate) const ADDR64: u32 = 0x0000_0200;
	pub(crate) const BRANCH_IMM8: u32 = 0x0000_0400;
	pub(crate) const XBEGIN: u32 = 0x0000_0800;
	pub(crate) const LOCK: u32 = 0x0000_1000;
	pub(crate) const ALLOW_LOCK: u32 = 0x0000_2000;
	pub(crate) const NO_MORE_BYTES: u32 = 0x0000_4000;
	pub(crate) const HAS66: u32 = 0x0000_8000;
	pub(crate) const MVEX_SSS_MASK: u32 = 0x0000_0007;
	pub(crate) const MVEX_SSS_SHIFT: u32 = 0x0000_0010;
	pub(crate) const MVEX_EH: u32 = 0x0008_0000;
	pub(crate) const ENCODING_MASK: u32 = 0x0000_0007;
	pub(crate) const ENCODING_SHIFT: u32 = 0x0000_001D;
}
// GENERATOR-END: StateFlags
424
// This is `repr(u32)` since we need the decoder field near other fields that also get cleared in `decode()`.
// It could fit in a `u8` but then it wouldn't be cleared at the same time as the other fields since the
// compiler would move other `u32` fields above it to align the fields.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum DecoderMandatoryPrefix {
	PNP = 0,
	P66 = 1,
	PF3 = 2,
	PF2 = 3,
}
impl Default for DecoderMandatoryPrefix {
	fn default() -> Self {
		DecoderMandatoryPrefix::PNP
	}
}
441
442#[derive(Default)]
443#[allow(dead_code)]
444struct State {
445	modrm: u32, // 0-0xFF
446	mod_: u32,  // 0-3
447	reg: u32,   // 0-7
448	rm: u32,    // 0-7
449
450	// ***************************
451	// These fields are cleared in decode_out() and should be close so the compiler can optimize clearing them.
452	extra_register_base: u32,       // R << 3
453	extra_index_register_base: u32, // X << 3
454	extra_base_register_base: u32,  // B << 3
455	extra_index_register_base_vsib: u32,
456	flags: u32, // StateFlags
457	mandatory_prefix: DecoderMandatoryPrefix,
458
459	vvvv: u32,               // V`vvvv. Not stored in inverted form. If 16/32-bit mode, bits [4:3] are cleared
460	vvvv_invalid_check: u32, // vvvv bits, even in 16/32-bit mode.
461	// ***************************
462	mem_index: u32, // (mod << 3 | rm) and an index into the mem handler tables if mod <= 2
463	vector_length: VectorLength,
464	aaa: u32,
465	extra_register_base_evex: u32,      // EVEX/MVEX.R' << 4
466	extra_base_register_base_evex: u32, // EVEX/MVEX.XB << 3
467	// The order of these 4 fields is important. They're accessed as a u32 (decode_out_ptr()) by the compiler so should be 4 byte aligned.
468	address_size: OpSize,
469	operand_size: OpSize,
470	segment_prio: u8, // 0=ES/CS/SS/DS, 1=FS/GS
471	dummy: u8,
472	// =================
473}
474
475impl State {
476	#[must_use]
477	#[inline(always)]
478	#[cfg(debug_assertions)]
479	const fn encoding(&self) -> u32 {
480		(self.flags >> StateFlags::ENCODING_SHIFT) & StateFlags::ENCODING_MASK
481	}
482
483	#[must_use]
484	#[inline(always)]
485	#[cfg(not(debug_assertions))]
486	#[allow(clippy::unused_self)]
487	fn encoding(&self) -> u32 {
488		EncodingKind::Legacy as u32
489	}
490
491	#[must_use]
492	#[inline]
493	#[cfg(feature = "mvex")]
494	fn sss(&self) -> u32 {
495		(self.flags >> StateFlags::MVEX_SSS_SHIFT) & StateFlags::MVEX_SSS_MASK
496	}
497}
498
499/// Decodes 16/32/64-bit x86 instructions
500#[allow(missing_debug_implementations)]
501#[allow(dead_code)]
502pub struct Decoder<'a>
503where
504	Self: Send + Sync,
505{
506	// Current RIP value
507	ip: u64,
508
509	// Next bytes to read if there's enough bytes left to read.
510	// This can be 1 byte past the last byte of `data`.
511	// Invariant: data.as_ptr() <= data_ptr <= max_data_ptr <= data.as_ptr() + data.len() == data_ptr_end
512	// Invariant: {data_ptr,max_data_ptr,data_ptr_end}.add(max(MAX_READ_SIZE, MAX_INSTRUCTION_LENGTH)) doesn't overflow
513	data_ptr: usize,
514	// This is `data.as_ptr() + data.len()` (1 byte past the last valid byte).
515	// This is guaranteed to be >= data_ptr (see the ctor), in other words, it can't overflow to 0
516	// Invariant: data.as_ptr() <= data_ptr <= max_data_ptr <= data.as_ptr() + data.len() == data_ptr_end
517	// Invariant: {data_ptr,max_data_ptr,data_ptr_end}.add(max(MAX_READ_SIZE, MAX_INSTRUCTION_LENGTH)) doesn't overflow
518	data_ptr_end: usize,
519	// Set to cmp::min(self.data_ptr + IcedConstants::MAX_INSTRUCTION_LENGTH, self.data_ptr_end)
520	// and is guaranteed to not overflow
521	// Initialized in decode() to at most 15 bytes after data_ptr so read_uXX() fails quickly after at most 15 read bytes
522	// (1MB prefixes won't cause it to read 1MB prefixes, it will stop after at most 15).
523	// Invariant: data.as_ptr() <= data_ptr <= max_data_ptr <= data.as_ptr() + data.len() == data_ptr_end
524	// Invariant: {data_ptr,max_data_ptr,data_ptr_end}.add(max(MAX_READ_SIZE, MAX_INSTRUCTION_LENGTH)) doesn't overflow
525	max_data_ptr: usize,
526	// Initialized to start of data (data_ptr) when decode() is called. Used to calculate current IP/offset (when decoding) if needed.
527	instr_start_data_ptr: usize,
528
529	handlers_map0: &'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100],
530	// MAP0 is only used by MVEX. Don't allocate an extra array element if mvex feature is disabled (common case)
531	#[cfg(all(not(feature = "no_vex"), feature = "mvex"))]
532	handlers_vex_map0: &'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100],
533	#[cfg(not(feature = "no_vex"))]
534	handlers_vex: [&'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100]; 3],
535	#[cfg(not(feature = "no_evex"))]
536	handlers_evex: [&'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100]; 6],
537	#[cfg(not(feature = "no_xop"))]
538	handlers_xop: [&'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100]; 3],
539	#[cfg(feature = "mvex")]
540	handlers_mvex: [&'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100]; 3],
541
542	#[cfg(not(all(not(feature = "no_vex"), feature = "mvex")))]
543	handlers_vex_map0: (),
544	#[cfg(feature = "no_vex")]
545	handlers_vex: [(); 3],
546	#[cfg(feature = "no_evex")]
547	handlers_evex: [(); 6],
548	#[cfg(feature = "no_xop")]
549	handlers_xop: [(); 3],
550	#[cfg(not(feature = "mvex"))]
551	handlers_mvex: [(); 3],
552
553	#[cfg(not(feature = "__internal_flip"))]
554	read_op_mem_fns: [fn(&mut Instruction, &mut Decoder<'a>) -> bool; 0x18],
555	#[cfg(feature = "__internal_flip")]
556	read_op_mem_fns: (),
557
558	state: State,
559	// DecoderOptions
560	options: u32,
561	// All 1s if we should check for invalid instructions, else 0
562	invalid_check_mask: u32,
563	// StateFlags::W if 64-bit mode, 0 if 16/32-bit mode
564	is64b_mode_and_w: u32,
565	// 7 in 16/32-bit mode, 15 in 64-bit mode
566	reg15_mask: u32,
567	// 0 in 16/32-bit mode, 0E0h in 64-bit mode
568	mask_e0: u32,
569	rex_mask: u32,
570	bitness: u32,
571	// The order of these 4 fields is important. They're accessed as a u32 (decode_out_ptr()) by the compiler so should be 4 byte aligned.
572	default_address_size: OpSize,
573	default_operand_size: OpSize,
574	segment_prio: u8, // Always 0
575	dummy: u8,        // Padding so the compiler can read 4 bytes, see decode_out_ptr()
576	// =================
577	default_inverted_address_size: OpSize,
578	default_inverted_operand_size: OpSize,
579	// true if 64-bit mode, false if 16/32-bit mode
580	is64b_mode: bool,
581	default_code_size: CodeSize,
582	// Offset of displacement in the instruction. Only used by get_constant_offsets() to return the offset of the displ
583	displ_index: u8,
584
585	// Input data provided by the user. When there's no more bytes left to read we'll return a NoMoreBytes error
586	data: &'a [u8],
587}
588
// Sets the instruction's memory base register; `$expr` must be a valid `Register` value.
macro_rules! write_base_reg {
	($instruction:ident, $expr:expr) => {
		debug_assert!($expr < IcedConstants::REGISTER_ENUM_COUNT as u32);
		$instruction.set_memory_base(unsafe { mem::transmute($expr as RegisterUnderlyingType) });
	};
}
595
// Sets the instruction's memory index register; `$expr` must be a valid `Register` value.
macro_rules! write_index_reg {
	($instruction:ident, $expr:expr) => {
		debug_assert!($expr < IcedConstants::REGISTER_ENUM_COUNT as u32);
		$instruction.set_memory_index(unsafe { mem::transmute($expr as RegisterUnderlyingType) });
	};
}
602
603impl<'a> Decoder<'a> {
604	const MAX_READ_SIZE: usize = 8;
605
606	/// Creates a decoder
607	///
608	/// # Panics
609	///
610	/// Panics if `bitness` is not one of 16, 32, 64.
611	///
612	/// # Arguments
613	///
614	/// * `bitness`: 16, 32 or 64
615	/// * `data`: Data to decode
616	/// * `options`: Decoder options, `0` or eg. `DecoderOptions::NO_INVALID_CHECK | DecoderOptions::AMD`
617	///
618	/// # Examples
619	///
620	/// ```
621	/// use iced_x86::*;
622	///
623	/// // xchg ah,[rdx+rsi+16h]
624	/// // xacquire lock add dword ptr [rax],5Ah
625	/// // vmovdqu64 zmm18{k3}{z},zmm11
626	/// let bytes = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3";
627	/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
628	/// decoder.set_ip(0x1234_5678);
629	///
630	/// let instr1 = decoder.decode();
631	/// assert_eq!(instr1.code(), Code::Xchg_rm8_r8);
632	/// assert_eq!(instr1.mnemonic(), Mnemonic::Xchg);
633	/// assert_eq!(instr1.len(), 4);
634	///
635	/// let instr2 = decoder.decode();
636	/// assert_eq!(instr2.code(), Code::Add_rm32_imm8);
637	/// assert_eq!(instr2.mnemonic(), Mnemonic::Add);
638	/// assert_eq!(instr2.len(), 5);
639	///
640	/// let instr3 = decoder.decode();
641	/// assert_eq!(instr3.code(), Code::EVEX_Vmovdqu64_zmm_k1z_zmmm512);
642	/// assert_eq!(instr3.mnemonic(), Mnemonic::Vmovdqu64);
643	/// assert_eq!(instr3.len(), 6);
644	/// ```
645	///
646	/// It's sometimes useful to decode some invalid instructions, eg. `lock add esi,ecx`.
647	/// Pass in [`DecoderOptions::NO_INVALID_CHECK`] to the constructor and the decoder
648	/// will decode some invalid encodings.
649	///
650	/// [`DecoderOptions::NO_INVALID_CHECK`]: struct.DecoderOptions.html#associatedconstant.NO_INVALID_CHECK
651	///
652	/// ```
653	/// use iced_x86::*;
654	///
655	/// // lock add esi,ecx   ; lock not allowed
656	/// let bytes = b"\xF0\x01\xCE";
657	/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NONE);
658	/// decoder.set_ip(0x1234_5678);
659	/// let instr = decoder.decode();
660	/// assert_eq!(instr.code(), Code::INVALID);
661	///
662	/// // We want to decode some instructions with invalid encodings
663	/// let mut decoder = Decoder::new(64, bytes, DecoderOptions::NO_INVALID_CHECK);
664	/// decoder.set_ip(0x1234_5678);
665	/// let instr = decoder.decode();
666	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
667	/// assert!(instr.has_lock_prefix());
668	/// ```
669	#[must_use]
670	#[inline]
671	#[allow(clippy::unwrap_used)]
672	pub fn new(bitness: u32, data: &'a [u8], options: u32) -> Decoder<'a> {
673		Decoder::try_new(bitness, data, options).unwrap()
674	}
675
676	/// Creates a decoder
677	///
678	/// # Panics
679	///
680	/// Panics if `bitness` is not one of 16, 32, 64.
681	///
682	/// # Arguments
683	///
684	/// * `bitness`: 16, 32 or 64
685	/// * `data`: Data to decode
686	/// * `ip`: `RIP` value
687	/// * `options`: Decoder options, `0` or eg. `DecoderOptions::NO_INVALID_CHECK | DecoderOptions::AMD`
688	///
689	/// # Examples
690	///
691	/// ```
692	/// use iced_x86::*;
693	///
694	/// // xchg ah,[rdx+rsi+16h]
695	/// // xacquire lock add dword ptr [rax],5Ah
696	/// // vmovdqu64 zmm18{k3}{z},zmm11
697	/// let bytes = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3";
698	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
699	///
700	/// let instr1 = decoder.decode();
701	/// assert_eq!(instr1.code(), Code::Xchg_rm8_r8);
702	/// assert_eq!(instr1.mnemonic(), Mnemonic::Xchg);
703	/// assert_eq!(instr1.len(), 4);
704	///
705	/// let instr2 = decoder.decode();
706	/// assert_eq!(instr2.code(), Code::Add_rm32_imm8);
707	/// assert_eq!(instr2.mnemonic(), Mnemonic::Add);
708	/// assert_eq!(instr2.len(), 5);
709	///
710	/// let instr3 = decoder.decode();
711	/// assert_eq!(instr3.code(), Code::EVEX_Vmovdqu64_zmm_k1z_zmmm512);
712	/// assert_eq!(instr3.mnemonic(), Mnemonic::Vmovdqu64);
713	/// assert_eq!(instr3.len(), 6);
714	/// ```
715	///
716	/// It's sometimes useful to decode some invalid instructions, eg. `lock add esi,ecx`.
717	/// Pass in [`DecoderOptions::NO_INVALID_CHECK`] to the constructor and the decoder
718	/// will decode some invalid encodings.
719	///
720	/// [`DecoderOptions::NO_INVALID_CHECK`]: struct.DecoderOptions.html#associatedconstant.NO_INVALID_CHECK
721	///
722	/// ```
723	/// use iced_x86::*;
724	///
725	/// // lock add esi,ecx   ; lock not allowed
726	/// let bytes = b"\xF0\x01\xCE";
727	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
728	/// let instr = decoder.decode();
729	/// assert_eq!(instr.code(), Code::INVALID);
730	///
731	/// // We want to decode some instructions with invalid encodings
732	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NO_INVALID_CHECK);
733	/// let instr = decoder.decode();
734	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
735	/// assert!(instr.has_lock_prefix());
736	/// ```
737	#[must_use]
738	#[inline]
739	#[allow(clippy::unwrap_used)]
740	pub fn with_ip(bitness: u32, data: &'a [u8], ip: u64, options: u32) -> Decoder<'a> {
741		Decoder::try_with_ip(bitness, data, ip, options).unwrap()
742	}
743
744	/// Creates a decoder
745	///
746	/// # Errors
747	///
748	/// Fails if `bitness` is not one of 16, 32, 64.
749	///
750	/// # Arguments
751	///
752	/// * `bitness`: 16, 32 or 64
753	/// * `data`: Data to decode
754	/// * `options`: Decoder options, `0` or eg. `DecoderOptions::NO_INVALID_CHECK | DecoderOptions::AMD`
755	///
756	/// # Examples
757	///
758	/// ```
759	/// use iced_x86::*;
760	///
761	/// // xchg ah,[rdx+rsi+16h]
762	/// // xacquire lock add dword ptr [rax],5Ah
763	/// // vmovdqu64 zmm18{k3}{z},zmm11
764	/// let bytes = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3";
765	/// let mut decoder = Decoder::try_new(64, bytes, DecoderOptions::NONE).unwrap();
766	/// decoder.set_ip(0x1234_5678);
767	///
768	/// let instr1 = decoder.decode();
769	/// assert_eq!(instr1.code(), Code::Xchg_rm8_r8);
770	/// assert_eq!(instr1.mnemonic(), Mnemonic::Xchg);
771	/// assert_eq!(instr1.len(), 4);
772	///
773	/// let instr2 = decoder.decode();
774	/// assert_eq!(instr2.code(), Code::Add_rm32_imm8);
775	/// assert_eq!(instr2.mnemonic(), Mnemonic::Add);
776	/// assert_eq!(instr2.len(), 5);
777	///
778	/// let instr3 = decoder.decode();
779	/// assert_eq!(instr3.code(), Code::EVEX_Vmovdqu64_zmm_k1z_zmmm512);
780	/// assert_eq!(instr3.mnemonic(), Mnemonic::Vmovdqu64);
781	/// assert_eq!(instr3.len(), 6);
782	/// ```
783	///
784	/// It's sometimes useful to decode some invalid instructions, eg. `lock add esi,ecx`.
785	/// Pass in [`DecoderOptions::NO_INVALID_CHECK`] to the constructor and the decoder
786	/// will decode some invalid encodings.
787	///
788	/// [`DecoderOptions::NO_INVALID_CHECK`]: struct.DecoderOptions.html#associatedconstant.NO_INVALID_CHECK
789	///
790	/// ```
791	/// use iced_x86::*;
792	///
793	/// // lock add esi,ecx   ; lock not allowed
794	/// let bytes = b"\xF0\x01\xCE";
795	/// let mut decoder = Decoder::try_new(64, bytes, DecoderOptions::NONE).unwrap();
796	/// decoder.set_ip(0x1234_5678);
797	/// let instr = decoder.decode();
798	/// assert_eq!(instr.code(), Code::INVALID);
799	///
800	/// // We want to decode some instructions with invalid encodings
801	/// let mut decoder = Decoder::try_new(64, bytes, DecoderOptions::NO_INVALID_CHECK).unwrap();
802	/// decoder.set_ip(0x1234_5678);
803	/// let instr = decoder.decode();
804	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
805	/// assert!(instr.has_lock_prefix());
806	/// ```
807	#[inline]
808	pub fn try_new(bitness: u32, data: &'a [u8], options: u32) -> Result<Decoder<'a>, IcedError> {
809		Decoder::try_with_ip(bitness, data, 0, options)
810	}
811
812	/// Creates a decoder
813	///
814	/// # Errors
815	///
816	/// Fails if `bitness` is not one of 16, 32, 64.
817	///
818	/// # Arguments
819	///
820	/// * `bitness`: 16, 32 or 64
821	/// * `data`: Data to decode
822	/// * `ip`: `RIP` value
823	/// * `options`: Decoder options, `0` or eg. `DecoderOptions::NO_INVALID_CHECK | DecoderOptions::AMD`
824	///
825	/// # Examples
826	///
827	/// ```
828	/// use iced_x86::*;
829	///
830	/// // xchg ah,[rdx+rsi+16h]
831	/// // xacquire lock add dword ptr [rax],5Ah
832	/// // vmovdqu64 zmm18{k3}{z},zmm11
833	/// let bytes = b"\x86\x64\x32\x16\xF0\xF2\x83\x00\x5A\x62\xC1\xFE\xCB\x6F\xD3";
834	/// let mut decoder = Decoder::try_with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE).unwrap();
835	///
836	/// let instr1 = decoder.decode();
837	/// assert_eq!(instr1.code(), Code::Xchg_rm8_r8);
838	/// assert_eq!(instr1.mnemonic(), Mnemonic::Xchg);
839	/// assert_eq!(instr1.len(), 4);
840	///
841	/// let instr2 = decoder.decode();
842	/// assert_eq!(instr2.code(), Code::Add_rm32_imm8);
843	/// assert_eq!(instr2.mnemonic(), Mnemonic::Add);
844	/// assert_eq!(instr2.len(), 5);
845	///
846	/// let instr3 = decoder.decode();
847	/// assert_eq!(instr3.code(), Code::EVEX_Vmovdqu64_zmm_k1z_zmmm512);
848	/// assert_eq!(instr3.mnemonic(), Mnemonic::Vmovdqu64);
849	/// assert_eq!(instr3.len(), 6);
850	/// ```
851	///
852	/// It's sometimes useful to decode some invalid instructions, eg. `lock add esi,ecx`.
853	/// Pass in [`DecoderOptions::NO_INVALID_CHECK`] to the constructor and the decoder
854	/// will decode some invalid encodings.
855	///
856	/// [`DecoderOptions::NO_INVALID_CHECK`]: struct.DecoderOptions.html#associatedconstant.NO_INVALID_CHECK
857	///
858	/// ```
859	/// use iced_x86::*;
860	///
861	/// // lock add esi,ecx   ; lock not allowed
862	/// let bytes = b"\xF0\x01\xCE";
863	/// let mut decoder = Decoder::try_with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE).unwrap();
864	/// let instr = decoder.decode();
865	/// assert_eq!(instr.code(), Code::INVALID);
866	///
867	/// // We want to decode some instructions with invalid encodings
868	/// let mut decoder = Decoder::try_with_ip(64, bytes, 0x1234_5678, DecoderOptions::NO_INVALID_CHECK).unwrap();
869	/// let instr = decoder.decode();
870	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
871	/// assert!(instr.has_lock_prefix());
872	/// ```
	#[allow(clippy::missing_inline_in_public_items)]
	#[allow(clippy::let_unit_value)]
	#[allow(trivial_casts)]
	pub fn try_with_ip(bitness: u32, data: &'a [u8], ip: u64, options: u32) -> Result<Decoder<'a>, IcedError> {
		// Per-bitness defaults. The "inverted" sizes are the operand/address sizes
		// selected when a 66h/67h size-override prefix is present.
		let is64b_mode;
		let default_code_size;
		let default_operand_size;
		let default_inverted_operand_size;
		let default_address_size;
		let default_inverted_address_size;
		match bitness {
			64 => {
				is64b_mode = true;
				default_code_size = CodeSize::Code64;
				default_operand_size = OpSize::Size32;
				default_inverted_operand_size = OpSize::Size16;
				default_address_size = OpSize::Size64;
				default_inverted_address_size = OpSize::Size32;
			}
			32 => {
				is64b_mode = false;
				default_code_size = CodeSize::Code32;
				default_operand_size = OpSize::Size32;
				default_inverted_operand_size = OpSize::Size16;
				default_address_size = OpSize::Size32;
				default_inverted_address_size = OpSize::Size16;
			}
			16 => {
				is64b_mode = false;
				default_code_size = CodeSize::Code16;
				default_operand_size = OpSize::Size16;
				default_inverted_operand_size = OpSize::Size32;
				default_address_size = OpSize::Size16;
				default_inverted_address_size = OpSize::Size32;
			}
			_ => return Err(IcedError::new("Invalid bitness")),
		}
		// Pointers are tracked as usize so the hot loop can do plain integer arithmetic
		let data_ptr_end = data.as_ptr() as usize + data.len();
		if data_ptr_end < data.as_ptr() as usize || {
			// Verify that max_data_ptr can never overflow and that data_ptr.add(N) can't overflow.
			// Both of them can equal data_ptr_end (1 byte past the last valid byte).
			// When reading a u8/u16/u32..., we calculate data_ptr.add({1,2,4,...MAX_READ_SIZE}) so it must not overflow.
			// In decode(), we calculate data_ptr.add(MAX_INSTRUCTION_LENGTH) so it must not overflow.
			data_ptr_end.wrapping_add(cmp::max(IcedConstants::MAX_INSTRUCTION_LENGTH, Decoder::MAX_READ_SIZE)) < data.as_ptr() as usize
		} {
			return Err(IcedError::new("Invalid slice"));
		}

		let tables = &*TABLES;

		// Converts a 0x100-entry handler slice into a fixed-size array reference so
		// the opcode-indexed lookups need no bounds checks.
		#[allow(clippy::unwrap_used)]
		fn get_handlers(
			handlers: &'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler)],
		) -> &'static [(OpCodeHandlerDecodeFn, &'static OpCodeHandler); 0x100] {
			debug_assert_eq!(handlers.len(), 0x100);
			TryFrom::try_from(handlers).unwrap()
		}
		// Binds `$name` to a handler table, or to `()` when the table was compiled
		// out by `$feature`. The `;` arms invert the feature test (table exists only
		// when the feature is enabled, e.g. "mvex").
		macro_rules! mk_handlers_local {
			($name:ident, $feature:literal) => {
				mk_handlers_local!($name, $name, $feature);
			};
			($name:ident, $field_name:ident, $feature:literal) => {
				#[cfg(not(feature = $feature))]
				let $name = get_handlers(&tables.$field_name);
				#[cfg(feature = $feature)]
				let $name = ();
			};
			($name:ident ; $feature:literal) => {
				mk_handlers_local!($name, $name ; $feature);
			};
			($name:ident, $field_name:ident ; $feature:literal) => {
				#[cfg(feature = $feature)]
				let $name = get_handlers(&tables.$field_name);
				#[cfg(not(feature = $feature))]
				let $name = ();
			};
		}
		#[cfg(all(not(feature = "no_vex"), feature = "mvex"))]
		let handlers_vex_map0 = get_handlers(&tables.handlers_vex_map0);
		#[cfg(not(all(not(feature = "no_vex"), feature = "mvex")))]
		let handlers_vex_map0 = ();
		mk_handlers_local!(handlers_vex_0f, "no_vex");
		mk_handlers_local!(handlers_vex_0f38, "no_vex");
		mk_handlers_local!(handlers_vex_0f3a, "no_vex");
		mk_handlers_local!(handlers_evex_0f, "no_evex");
		mk_handlers_local!(handlers_evex_0f38, "no_evex");
		mk_handlers_local!(handlers_evex_0f3a, "no_evex");
		mk_handlers_local!(handlers_evex_map4, invalid_map, "no_evex");
		mk_handlers_local!(handlers_evex_map5, "no_evex");
		mk_handlers_local!(handlers_evex_map6, "no_evex");
		mk_handlers_local!(handlers_xop_map8, "no_xop");
		mk_handlers_local!(handlers_xop_map9, "no_xop");
		mk_handlers_local!(handlers_xop_map10, "no_xop");
		mk_handlers_local!(handlers_mvex_0f ; "mvex");
		mk_handlers_local!(handlers_mvex_0f38 ; "mvex");
		mk_handlers_local!(handlers_mvex_0f3a ; "mvex");

		// Memory-operand readers indexed by `state.mem_index` = (mod << 3) | rm for
		// mod 0..=2 (see decode_table2()/read_modrm()). The `_4`/`_5` entries handle
		// the rm=4 and mod=0/rm=5 special encodings.
		#[rustfmt::skip]
		#[cfg(not(feature = "__internal_flip"))]
		let read_op_mem_fns = [
			Decoder::read_op_mem_0,
			Decoder::read_op_mem_0,
			Decoder::read_op_mem_0,
			Decoder::read_op_mem_0,
			Decoder::read_op_mem_0_4,
			Decoder::read_op_mem_0_5,
			Decoder::read_op_mem_0,
			Decoder::read_op_mem_0,

			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1_4,
			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1,
			Decoder::read_op_mem_1,

			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2_4,
			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2,
			Decoder::read_op_mem_2,
		];
		#[cfg(feature = "__internal_flip")]
		let read_op_mem_fns = ();

		Ok(Decoder {
			ip,
			data_ptr: data.as_ptr() as usize,
			data_ptr_end,
			max_data_ptr: data.as_ptr() as usize,
			instr_start_data_ptr: data.as_ptr() as usize,
			handlers_map0: get_handlers(&tables.handlers_map0),
			handlers_vex_map0,
			handlers_vex: [handlers_vex_0f, handlers_vex_0f38, handlers_vex_0f3a],
			handlers_evex: [handlers_evex_0f, handlers_evex_0f38, handlers_evex_0f3a, handlers_evex_map4, handlers_evex_map5, handlers_evex_map6],
			handlers_xop: [handlers_xop_map8, handlers_xop_map9, handlers_xop_map10],
			handlers_mvex: [handlers_mvex_0f, handlers_mvex_0f38, handlers_mvex_0f3a],
			read_op_mem_fns,
			state: State::default(),
			options,
			// All-ones mask enables the extra validity checks; NO_INVALID_CHECK turns
			// the mask (and thus those checks) off.
			invalid_check_mask: if (options & DecoderOptions::NO_INVALID_CHECK) == 0 { u32::MAX } else { 0 },
			is64b_mode_and_w: if is64b_mode { StateFlags::W } else { 0 },
			// Mode-dependent masks: 16 regs (and REX 40h-4Fh detection, E0h bits) only in 64-bit mode
			reg15_mask: if is64b_mode { 0xF } else { 0x7 },
			mask_e0: if is64b_mode { 0xE0 } else { 0 },
			rex_mask: if is64b_mode { 0xF0 } else { 0 },
			bitness,
			default_address_size,
			default_operand_size,
			segment_prio: 0,
			dummy: 0,
			default_inverted_address_size,
			default_inverted_operand_size,
			is64b_mode,
			default_code_size,
			displ_index: 0,
			data,
		})
	}
1036
	/// Gets the current `IP`/`EIP`/`RIP` value, see also [`position()`]
	///
	/// This value is advanced past each instruction as it's decoded.
	///
	/// [`position()`]: #method.position
	#[must_use]
	#[inline]
	pub const fn ip(&self) -> u64 {
		self.ip
	}
1045
	/// Sets the current `IP`/`EIP`/`RIP` value, see also [`try_set_position()`]
	///
	/// This method only updates the IP value, it does not change the data position, use [`try_set_position()`] to change the position.
	///
	/// [`try_set_position()`]: #method.try_set_position
	///
	/// # Arguments
	///
	/// * `new_value`: New IP
	#[inline]
	pub fn set_ip(&mut self, new_value: u64) {
		// The read position (data_ptr) is deliberately left unchanged
		self.ip = new_value;
	}
1059
	/// Gets the bitness (16, 32 or 64)
	///
	/// This is the value that was passed to the constructor.
	#[must_use]
	#[inline]
	pub const fn bitness(&self) -> u32 {
		self.bitness
	}
1066
	/// Gets the max value that can be passed to [`try_set_position()`]. This is the size of the data that gets
	/// decoded to instructions and it's the length of the slice that was passed to the constructor.
	///
	/// [`try_set_position()`]: #method.try_set_position
	#[must_use]
	#[inline]
	pub const fn max_position(&self) -> usize {
		// Positions are byte offsets into `data`; `data.len()` is the one-past-the-end position
		self.data.len()
	}
1076
	/// Gets the current data position. This value is always <= [`max_position()`].
	/// When [`position()`] == [`max_position()`], it's not possible to decode more
	/// instructions and [`can_decode()`] returns `false`.
	///
	/// [`max_position()`]: #method.max_position
	/// [`position()`]: #method.position
	/// [`can_decode()`]: #method.can_decode
	#[must_use]
	#[inline]
	pub fn position(&self) -> usize {
		// data_ptr always points into `data` (or 1 byte past the end), so this can't underflow
		self.data_ptr - self.data.as_ptr() as usize
	}
1089
1090	/// Sets the current data position, which is the index into the data passed to the constructor.
1091	/// This value is always <= [`max_position()`]
1092	///
1093	/// [`max_position()`]: #method.max_position
1094	///
1095	/// # Errors
1096	///
1097	/// Fails if the new position is invalid.
1098	///
1099	/// # Arguments
1100	///
1101	/// * `new_pos`: New position and must be <= [`max_position()`]
1102	///
1103	/// # Examples
1104	///
1105	/// ```
1106	/// use iced_x86::*;
1107	///
1108	/// // nop and pause
1109	/// let bytes = b"\x90\xF3\x90";
1110	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1111	///
1112	/// assert_eq!(decoder.position(), 0);
1113	/// assert_eq!(decoder.max_position(), 3);
1114	/// let instr = decoder.decode();
1115	/// assert_eq!(decoder.position(), 1);
1116	/// assert_eq!(instr.code(), Code::Nopd);
1117	///
1118	/// let instr = decoder.decode();
1119	/// assert_eq!(decoder.position(), 3);
1120	/// assert_eq!(instr.code(), Code::Pause);
1121	///
1122	/// // Start all over again
1123	/// decoder.set_position(0).unwrap();
1124	/// decoder.set_ip(0x1234_5678);
1125	/// assert_eq!(decoder.position(), 0);
1126	/// assert_eq!(decoder.decode().code(), Code::Nopd);
1127	/// assert_eq!(decoder.decode().code(), Code::Pause);
1128	/// assert_eq!(decoder.position(), 3);
1129	/// ```
1130	#[inline]
1131	#[allow(clippy::missing_inline_in_public_items)]
1132	pub fn set_position(&mut self, new_pos: usize) -> Result<(), IcedError> {
1133		if new_pos > self.data.len() {
1134			Err(IcedError::new("Invalid position"))
1135		} else {
1136			// - We verified the new offset above.
1137			// - Referencing 1 byte past the last valid byte is safe as long as we don't dereference it.
1138			self.data_ptr = self.data.as_ptr() as usize + new_pos;
1139			Ok(())
1140		}
1141	}
1142
	#[doc(hidden)]
	#[inline]
	pub fn try_set_position(&mut self, new_pos: usize) -> Result<(), IcedError> {
		// Hidden alias that just forwards to set_position()
		self.set_position(new_pos)
	}
1148
1149	/// Returns `true` if there's at least one more byte to decode. It doesn't verify that the
1150	/// next instruction is valid, it only checks if there's at least one more byte to read.
1151	/// See also [`position()`] and [`max_position()`]
1152	///
1153	/// It's not required to call this method. If this method returns `false`, then [`decode_out()`]
1154	/// and [`decode()`] will return an instruction whose [`code()`] == [`Code::INVALID`].
1155	///
1156	/// [`position()`]: #method.position
1157	/// [`max_position()`]: #method.max_position
1158	/// [`decode_out()`]: #method.decode_out
1159	/// [`decode()`]: #method.decode
1160	/// [`code()`]: struct.Instruction.html#method.code
1161	/// [`Code::INVALID`]: enum.Code.html#variant.INVALID
1162	///
1163	/// # Examples
1164	///
1165	/// ```
1166	/// use iced_x86::*;
1167	///
1168	/// // nop and an incomplete instruction
1169	/// let bytes = b"\x90\xF3\x0F";
1170	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1171	///
1172	/// // 3 bytes left to read
1173	/// assert!(decoder.can_decode());
1174	/// let instr = decoder.decode();
1175	/// assert_eq!(instr.code(), Code::Nopd);
1176	///
1177	/// // 2 bytes left to read
1178	/// assert!(decoder.can_decode());
1179	/// let instr = decoder.decode();
1180	/// // Not enough bytes left to decode a full instruction
1181	/// assert_eq!(instr.code(), Code::INVALID);
1182	///
1183	/// // 0 bytes left to read
1184	/// assert!(!decoder.can_decode());
1185	/// ```
	#[must_use]
	#[inline]
	#[allow(clippy::missing_const_for_fn)]
	pub fn can_decode(&self) -> bool {
		// data_ptr never moves past data_ptr_end, so `!=` means "at least 1 byte left"
		self.data_ptr != self.data_ptr_end
	}
1192
1193	/// Returns an iterator that borrows this instance to decode instructions until there's
1194	/// no more data to decode, i.e., until [`can_decode()`] returns `false`.
1195	///
1196	/// [`can_decode()`]: #method.can_decode
1197	///
1198	/// # Examples
1199	///
1200	/// ```
1201	/// use iced_x86::*;
1202	///
1203	/// // nop and pause
1204	/// let bytes = b"\x90\xF3\x90";
1205	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1206	///
1207	/// let mut iter = decoder.iter();
1208	/// assert_eq!(iter.next().unwrap().code(), Code::Nopd);
1209	/// assert_eq!(iter.next().unwrap().code(), Code::Pause);
1210	/// assert!(iter.next().is_none());
1211	/// ```
1212	///
1213	/// `for` loop:
1214	///
1215	/// ```
1216	/// use iced_x86::*;
1217	///
1218	/// let bytes = b"\x90\xF3\x90";
1219	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1220	///
1221	/// for instr in &mut decoder { // or decoder.iter()
1222	///     println!("code: {:?}", instr.code());
1223	/// }
1224	/// ```
	#[inline]
	pub fn iter<'b>(&'b mut self) -> DecoderIter<'a, 'b> {
		// Borrowing iterator: decoding continues from the decoder's current position
		DecoderIter { decoder: self }
	}
1229
	#[must_use]
	#[inline(always)]
	fn read_u8(&mut self) -> usize {
		// On success: advances data_ptr by 1 and returns the byte.
		// On failure (would read past max_data_ptr): sets IS_INVALID|NO_MORE_BYTES and returns 0.
		mk_read_xx_fn_body! {self, u8, u8::from_le, usize}
	}
1235
	#[must_use]
	#[inline(always)]
	fn read_u16(&mut self) -> usize {
		// On success: advances data_ptr by 2 and returns the little-endian u16.
		// On failure (would read past max_data_ptr): sets IS_INVALID|NO_MORE_BYTES and returns 0.
		mk_read_xx_fn_body! {self, u16, u16::from_le, usize}
	}
1241
	#[must_use]
	#[inline(always)]
	fn read_u32(&mut self) -> usize {
		// On success: advances data_ptr by 4 and returns the little-endian u32.
		// On failure (would read past max_data_ptr): sets IS_INVALID|NO_MORE_BYTES and returns 0.
		mk_read_xx_fn_body! {self, u32, u32::from_le, usize}
	}
1247
	#[must_use]
	#[inline(always)]
	fn read_u64(&mut self) -> u64 {
		// On success: advances data_ptr by 8 and returns the little-endian u64.
		// On failure (would read past max_data_ptr): sets IS_INVALID|NO_MORE_BYTES and returns 0.
		mk_read_xx_fn_body! {self, u64, u64::from_le, u64}
	}
1253
1254	/// Gets the last decoder error. Unless you need to know the reason it failed,
1255	/// it's better to check [`instruction.is_invalid()`].
1256	///
1257	/// [`instruction.is_invalid()`]: struct.Instruction.html#method.is_invalid
1258	#[must_use]
1259	#[inline]
1260	pub const fn last_error(&self) -> DecoderError {
1261		// NoMoreBytes error has highest priority
1262		if (self.state.flags & StateFlags::NO_MORE_BYTES) != 0 {
1263			DecoderError::NoMoreBytes
1264		} else if (self.state.flags & StateFlags::IS_INVALID) != 0 {
1265			DecoderError::InvalidInstruction
1266		} else {
1267			DecoderError::None
1268		}
1269	}
1270
1271	/// Decodes and returns the next instruction, see also [`decode_out(&mut Instruction)`]
1272	/// which avoids copying the decoded instruction to the caller's return variable.
1273	/// See also [`last_error()`].
1274	///
1275	/// [`decode_out(&mut Instruction)`]: #method.decode_out
1276	/// [`last_error()`]: #method.last_error
1277	///
1278	/// # Examples
1279	///
1280	/// ```
1281	/// use iced_x86::*;
1282	///
1283	/// // xrelease lock add [rax],ebx
1284	/// let bytes = b"\xF0\xF3\x01\x18";
1285	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1286	/// let instr = decoder.decode();
1287	///
1288	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
1289	/// assert_eq!(instr.mnemonic(), Mnemonic::Add);
1290	/// assert_eq!(instr.len(), 4);
1291	/// assert_eq!(instr.op_count(), 2);
1292	///
1293	/// assert_eq!(instr.op0_kind(), OpKind::Memory);
1294	/// assert_eq!(instr.memory_base(), Register::RAX);
1295	/// assert_eq!(instr.memory_index(), Register::None);
1296	/// assert_eq!(instr.memory_index_scale(), 1);
1297	/// assert_eq!(instr.memory_displacement64(), 0);
1298	/// assert_eq!(instr.memory_segment(), Register::DS);
1299	/// assert_eq!(instr.segment_prefix(), Register::None);
1300	/// assert_eq!(instr.memory_size(), MemorySize::UInt32);
1301	///
1302	/// assert_eq!(instr.op1_kind(), OpKind::Register);
1303	/// assert_eq!(instr.op1_register(), Register::EBX);
1304	///
1305	/// assert!(instr.has_lock_prefix());
1306	/// assert!(instr.has_xrelease_prefix());
1307	/// ```
	#[must_use]
	#[inline]
	pub fn decode(&mut self) -> Instruction {
		// Decode straight into uninitialized stack memory to avoid an extra copy
		let mut instruction = mem::MaybeUninit::uninit();
		// SAFETY: decode_out_ptr() initializes the whole instruction (all fields) with valid values
		unsafe {
			self.decode_out_ptr(instruction.as_mut_ptr());
			instruction.assume_init()
		}
	}
1318
1319	/// Decodes the next instruction. The difference between this method and [`decode()`] is that this
1320	/// method doesn't need to copy the result to the caller's return variable (saves 40 bytes of copying).
1321	/// See also [`last_error()`].
1322	///
1323	/// [`decode()`]: #method.decode
1324	/// [`last_error()`]: #method.last_error
1325	///
1326	/// # Arguments
1327	///
1328	/// * `instruction`: Updated with the decoded instruction. All fields are initialized (it's an `out` argument)
1329	///
1330	/// # Examples
1331	///
1332	/// ```
1333	/// use iced_x86::*;
1334	///
1335	/// // xrelease lock add [rax],ebx
1336	/// let bytes = b"\xF0\xF3\x01\x18";
1337	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
1338	/// let mut instr = Instruction::default();
1339	/// decoder.decode_out(&mut instr);
1340	///
1341	/// assert_eq!(instr.code(), Code::Add_rm32_r32);
1342	/// assert_eq!(instr.mnemonic(), Mnemonic::Add);
1343	/// assert_eq!(instr.len(), 4);
1344	/// assert_eq!(instr.op_count(), 2);
1345	///
1346	/// assert_eq!(instr.op0_kind(), OpKind::Memory);
1347	/// assert_eq!(instr.memory_base(), Register::RAX);
1348	/// assert_eq!(instr.memory_index(), Register::None);
1349	/// assert_eq!(instr.memory_index_scale(), 1);
1350	/// assert_eq!(instr.memory_displacement64(), 0);
1351	/// assert_eq!(instr.memory_segment(), Register::DS);
1352	/// assert_eq!(instr.segment_prefix(), Register::None);
1353	/// assert_eq!(instr.memory_size(), MemorySize::UInt32);
1354	///
1355	/// assert_eq!(instr.op1_kind(), OpKind::Register);
1356	/// assert_eq!(instr.op1_register(), Register::EBX);
1357	///
1358	/// assert!(instr.has_lock_prefix());
1359	/// assert!(instr.has_xrelease_prefix());
1360	/// ```
	#[inline]
	pub fn decode_out(&mut self, instruction: &mut Instruction) {
		// SAFETY: a `&mut Instruction` is always non-null, writable, aligned and not aliased
		unsafe {
			self.decode_out_ptr(instruction);
		}
	}
1367
1368	// SAFETY: `instruction` must be non-null, writable and aligned (`ptr::write()`) and not aliased
1369	#[allow(clippy::useless_let_if_seq)]
1370	unsafe fn decode_out_ptr(&mut self, instruction: *mut Instruction) {
1371		// SAFETY:
1372		// - the instruction has only primitive integer types, nothing needs to be dropped
1373		// - private method: no caller passes in a null ptr, a non-writable ptr or an unaligned ptr
1374		unsafe { ptr::write(instruction, Instruction::default()) };
1375		// SAFETY: private method: the only callers pass in their `&mut arg` or their own stack-allocated `MaybeUninit` instruction
1376		let instruction = unsafe { &mut *instruction };
1377
1378		self.state.extra_register_base = 0;
1379		self.state.extra_index_register_base = 0;
1380		self.state.extra_base_register_base = 0;
1381		self.state.extra_index_register_base_vsib = 0;
1382		self.state.flags = 0;
1383		self.state.mandatory_prefix = DecoderMandatoryPrefix::default();
1384		// These don't need to be cleared, but they're here so the compiler can re-use the
1385		// same XMM reg to clear the previous 2 u32s (including these 2 u32s).
1386		self.state.vvvv = 0;
1387		self.state.vvvv_invalid_check = 0;
1388
1389		// We only need to write addr/op size fields and init segment_prio to 0.
1390		// The fields are consecutive so the compiler can read all 4 fields (including dummy)
1391		// and write all 4 fields at the same time.
1392		self.state.address_size = self.default_address_size;
1393		self.state.operand_size = self.default_operand_size;
1394		self.state.segment_prio = self.segment_prio;
1395		self.state.dummy = self.dummy;
1396
1397		let data_ptr = self.data_ptr;
1398		self.instr_start_data_ptr = data_ptr;
1399		// The ctor has verified that the two expressions used in min() don't overflow and are >= data_ptr.
1400		// The calculated usize is a valid pointer in `self.data` slice or at most 1 byte past the last valid byte.
1401		self.max_data_ptr = cmp::min(data_ptr + IcedConstants::MAX_INSTRUCTION_LENGTH, self.data_ptr_end);
1402
1403		let b = self.read_u8();
1404		let mut handler = self.handlers_map0[b];
1405		if ((b as u32) & self.rex_mask) == 0x40 {
1406			debug_assert!(self.is64b_mode);
1407			handler = self.handlers_map0[self.read_u8()];
1408			let mut flags = self.state.flags | StateFlags::HAS_REX;
1409			if (b & 8) != 0 {
1410				flags |= StateFlags::W;
1411				self.state.operand_size = OpSize::Size64;
1412			}
1413			self.state.flags = flags;
1414			self.state.extra_register_base = (b as u32 & 4) << 1;
1415			self.state.extra_index_register_base = (b as u32 & 2) << 2;
1416			self.state.extra_base_register_base = (b as u32 & 1) << 3;
1417		}
1418		self.decode_table2(handler, instruction);
1419
1420		debug_assert_eq!(data_ptr, self.instr_start_data_ptr);
1421		let instr_len = self.data_ptr as u32 - data_ptr as u32;
1422		debug_assert!(instr_len <= IcedConstants::MAX_INSTRUCTION_LENGTH as u32); // Could be 0 if there were no bytes available
1423		instruction_internal::internal_set_len(instruction, instr_len);
1424		let orig_ip = self.ip;
1425		let ip = orig_ip.wrapping_add(instr_len as u64);
1426		self.ip = ip;
1427		instruction.set_next_ip(ip);
1428		instruction_internal::internal_set_code_size(instruction, self.default_code_size);
1429
1430		let mut flags = self.state.flags;
1431		if (flags & (StateFlags::IS_INVALID | StateFlags::LOCK | StateFlags::IP_REL64 | StateFlags::IP_REL32)) != 0 {
1432			let addr = ip.wrapping_add(instruction.memory_displacement64());
1433			// Assume it's IP_REL64 (very common if we're here). We'll undo this if it's not.
1434			instruction.set_memory_displacement64(addr);
1435			// RIP rel ops are common, but invalid/lock bits are usually never set, so exit early if possible
1436			if (flags & (StateFlags::IP_REL64 | StateFlags::IS_INVALID | StateFlags::LOCK)) == StateFlags::IP_REL64 {
1437				return;
1438			}
1439			if (flags & StateFlags::IP_REL64) == 0 {
1440				// Undo what we did above
1441				instruction.set_memory_displacement64(addr.wrapping_sub(ip));
1442			}
1443			if (flags & StateFlags::IP_REL32) != 0 {
1444				let addr = ip.wrapping_add(instruction.memory_displacement64());
1445				instruction.set_memory_displacement64(addr as u32 as u64);
1446			}
1447
1448			if (flags & StateFlags::IS_INVALID) != 0
1449				|| (((flags & (StateFlags::LOCK | StateFlags::ALLOW_LOCK)) & self.invalid_check_mask) == StateFlags::LOCK)
1450			{
1451				*instruction = Instruction::default();
1452				const _: () = assert!(Code::INVALID as u32 == 0);
1453				//instruction.set_code(Code::INVALID);
1454
1455				if (flags & StateFlags::NO_MORE_BYTES) != 0 {
1456					debug_assert_eq!(data_ptr, self.instr_start_data_ptr);
1457					let max_len = self.data_ptr_end - data_ptr;
1458					// If max-instr-len bytes is available, it's never no-more-bytes, and always invalid-instr
1459					if max_len >= IcedConstants::MAX_INSTRUCTION_LENGTH {
1460						flags &= !StateFlags::NO_MORE_BYTES;
1461					}
1462					// max_data_ptr is in self.data slice or at most 1 byte past the last valid byte
1463					self.data_ptr = self.max_data_ptr;
1464				}
1465
1466				self.state.flags = flags | StateFlags::IS_INVALID;
1467
1468				let instr_len = self.data_ptr as u32 - data_ptr as u32;
1469				instruction_internal::internal_set_len(instruction, instr_len);
1470				let ip = orig_ip.wrapping_add(instr_len as u64);
1471				self.ip = ip;
1472				instruction.set_next_ip(ip);
1473				instruction_internal::internal_set_code_size(instruction, self.default_code_size);
1474			}
1475		}
1476	}
1477
1478	#[inline(always)]
1479	fn reset_rex_prefix_state(&mut self) {
1480		self.state.flags &= !(StateFlags::HAS_REX | StateFlags::W);
1481		if (self.state.flags & StateFlags::HAS66) == 0 {
1482			self.state.operand_size = self.default_operand_size;
1483		} else {
1484			self.state.operand_size = self.default_inverted_operand_size;
1485		}
1486		self.state.extra_register_base = 0;
1487		self.state.extra_index_register_base = 0;
1488		self.state.extra_base_register_base = 0;
1489	}
1490
	#[inline(always)]
	fn call_opcode_handlers_map0_table(&mut self, instruction: &mut Instruction) {
		// Read the next opcode byte and dispatch through the map0 handler table
		let b = self.read_u8();
		self.decode_table2(self.handlers_map0[b], instruction);
	}
1496
1497	#[must_use]
1498	#[inline]
1499	fn current_ip32(&self) -> u32 {
1500		debug_assert!(self.instr_start_data_ptr <= self.data_ptr);
1501		debug_assert!(self.data_ptr - self.instr_start_data_ptr <= IcedConstants::MAX_INSTRUCTION_LENGTH);
1502		((self.data_ptr - self.instr_start_data_ptr) as u32).wrapping_add(self.ip as u32)
1503	}
1504
1505	#[must_use]
1506	#[inline]
1507	fn current_ip64(&self) -> u64 {
1508		debug_assert!(self.instr_start_data_ptr <= self.data_ptr);
1509		debug_assert!(self.data_ptr - self.instr_start_data_ptr <= IcedConstants::MAX_INSTRUCTION_LENGTH);
1510		((self.data_ptr - self.instr_start_data_ptr) as u64).wrapping_add(self.ip)
1511	}
1512
	#[inline]
	fn clear_mandatory_prefix(&mut self, instruction: &mut Instruction) {
		// Legacy encoding only (see debug_assert): remove the REPE/REPNE prefix bits
		debug_assert_eq!(self.state.encoding(), EncodingKind::Legacy as u32);
		instruction_internal::internal_clear_has_repe_repne_prefix(instruction);
	}
1518
	#[inline(always)]
	fn set_xacquire_xrelease(&mut self, instruction: &mut Instruction, flags: u32) {
		// Fast path: only bother with XACQUIRE/XRELEASE when a LOCK prefix was seen
		if instruction.has_lock_prefix() {
			self.set_xacquire_xrelease_core(instruction, flags);
		}
	}
1525
	// Re-interprets an F2/F3 mandatory prefix as XACQUIRE/XRELEASE.
	#[allow(clippy::nonminimal_bool)]
	fn set_xacquire_xrelease_core(&mut self, instruction: &mut Instruction, flags: u32) {
		// Callers must pass a LOCK-prefixed instruction unless the handler allows lock-less XACQUIRE/XRELEASE
		debug_assert!(!((flags & HandlerFlags::XACQUIRE_XRELEASE_NO_LOCK) == 0 && !instruction.has_lock_prefix()));
		match self.state.mandatory_prefix {
			// F2 => XACQUIRE; clear the REPNE bit it was stored as
			DecoderMandatoryPrefix::PF2 => {
				self.clear_mandatory_prefix_f2(instruction);
				instruction.set_has_xacquire_prefix(true);
			}
			// F3 => XRELEASE; clear the REPE bit it was stored as
			DecoderMandatoryPrefix::PF3 => {
				self.clear_mandatory_prefix_f3(instruction);
				instruction.set_has_xrelease_prefix(true);
			}
			_ => {}
		}
	}
1541
	#[inline]
	fn clear_mandatory_prefix_f3(&self, instruction: &mut Instruction) {
		// Caller guarantees: legacy encoding with an F3 mandatory prefix
		debug_assert_eq!(self.state.encoding(), EncodingKind::Legacy as u32);
		debug_assert_eq!(self.state.mandatory_prefix, DecoderMandatoryPrefix::PF3);
		instruction.set_has_repe_prefix(false);
	}
1548
	#[inline]
	fn clear_mandatory_prefix_f2(&self, instruction: &mut Instruction) {
		// Caller guarantees: legacy encoding with an F2 mandatory prefix
		debug_assert_eq!(self.state.encoding(), EncodingKind::Legacy as u32);
		debug_assert_eq!(self.state.mandatory_prefix, DecoderMandatoryPrefix::PF2);
		instruction.set_has_repne_prefix(false);
	}
1555
	#[inline]
	fn set_invalid_instruction(&mut self) {
		// decode_out_ptr() converts this flag into a Code::INVALID instruction at the end
		self.state.flags |= StateFlags::IS_INVALID;
	}
1560
	#[inline(always)]
	fn decode_table2(&mut self, (decode, handler): (OpCodeHandlerDecodeFn, &OpCodeHandler), instruction: &mut Instruction) {
		// If the handler expects a ModRM byte, read it and pre-split it into
		// mod/reg/rm before dispatching. mem_index = (mod << 3) | rm matches the
		// layout of the read_op_mem_fns table.
		if handler.has_modrm {
			let m = self.read_u8() as u32;
			self.state.modrm = m;
			self.state.reg = (m >> 3) & 7;
			self.state.mod_ = m >> 6;
			self.state.rm = m & 7;
			self.state.mem_index = (self.state.mod_ << 3) | self.state.rm;
		}
		(decode)(handler, self, instruction);
	}
1573
1574	#[inline(always)]
1575	fn read_modrm(&mut self) {
1576		let m = self.read_u8() as u32;
1577		self.state.modrm = m;
1578		self.state.reg = (m >> 3) & 7;
1579		self.state.mod_ = m >> 6;
1580		self.state.rm = m & 7;
1581		self.state.mem_index = (self.state.mod_ << 3) | self.state.rm;
1582	}
1583
	#[cfg(feature = "no_vex")]
	fn vex2(&mut self, _instruction: &mut Instruction) {
		// VEX support compiled out (`no_vex`): any 2-byte VEX prefix decodes as invalid
		self.set_invalid_instruction();
	}
1588
	// Decodes an instruction with a 2-byte VEX (C5h) prefix.
	#[cfg(not(feature = "no_vex"))]
	fn vex2(&mut self, instruction: &mut Instruction) {
		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		// A REX prefix or a 66/F3/F2 mandatory prefix before a VEX prefix is invalid
		// (check is disabled by NO_INVALID_CHECK via invalid_check_mask)
		if (((self.state.flags & StateFlags::HAS_REX) | (self.state.mandatory_prefix as u32)) & self.invalid_check_mask) != 0 {
			self.set_invalid_instruction();
		}
		// Undo what decode_out() did if it got a REX prefix
		self.state.flags &= !StateFlags::W;
		self.state.extra_index_register_base = 0;
		self.state.extra_base_register_base = 0;

		if cfg!(debug_assertions) {
			self.state.flags |= (EncodingKind::VEX as u32) << StateFlags::ENCODING_SHIFT;
		}

		// Opcode byte; handlers_vex[0] is the 0F map (2-byte VEX implies map 0F)
		let b = self.read_u8();
		let (decode, handler) = self.handlers_vex[0][b];

		// The byte after C5h was consumed as this handler's "modrm" byte by
		// decode_table2(); it's really the VEX2 payload (R, vvvv, L, pp bits)
		let mut b = self.state.modrm;

		const _: () = assert!(VectorLength::L128 as u32 == 0);
		const _: () = assert!(VectorLength::L256 as u32 == 1);
		// SAFETY: 0<=(n&1)<=1 and those are valid enum variants, see const assert!() above
		self.state.vector_length = unsafe { mem::transmute((b >> 2) & 1) };

		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		const _: () = assert!(DecoderMandatoryPrefix::P66 as u32 == 1);
		const _: () = assert!(DecoderMandatoryPrefix::PF3 as u32 == 2);
		const _: () = assert!(DecoderMandatoryPrefix::PF2 as u32 == 3);
		// SAFETY: 0<=(b&3)<=3 and those are valid enum variants, see const assert!() above
		self.state.mandatory_prefix = unsafe { mem::transmute(b & 3) };

		// The R and vvvv fields are stored inverted in the encoding
		b = !b;
		self.state.extra_register_base = (b >> 4) & 8;

		// Bit 6 can only be 0 if it's 16/32-bit mode, so we don't need to change the mask
		b = (b >> 3) & 0x0F;
		self.state.vvvv = b;
		self.state.vvvv_invalid_check = b;

		self.decode_table2((decode, handler), instruction);
	}
1631
	#[cfg(feature = "no_vex")]
	fn vex3(&mut self, _instruction: &mut Instruction) {
		// VEX support compiled out (`no_vex`): any 3-byte VEX prefix decodes as invalid
		self.set_invalid_instruction();
	}
1636
	// Decodes an instruction with a 3-byte VEX (C4h) prefix.
	#[cfg(not(feature = "no_vex"))]
	fn vex3(&mut self, instruction: &mut Instruction) {
		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		// A REX prefix or a 66/F3/F2 mandatory prefix before a VEX prefix is invalid
		// (check is disabled by NO_INVALID_CHECK via invalid_check_mask)
		if (((self.state.flags & StateFlags::HAS_REX) | (self.state.mandatory_prefix as u32)) & self.invalid_check_mask) != 0 {
			self.set_invalid_instruction();
		}
		// Undo what decode_out() did if it got a REX prefix
		self.state.flags &= !StateFlags::W;

		if cfg!(debug_assertions) {
			self.state.flags |= (EncodingKind::VEX as u32) << StateFlags::ENCODING_SHIFT;
		}

		// Little-endian read: low byte = 2nd VEX payload byte (W, vvvv, L, pp),
		// high byte = the opcode
		let b2 = self.read_u16() as u32;

		const _: () = assert!(StateFlags::W == 0x80);
		self.state.flags |= b2 & 0x80;

		const _: () = assert!(VectorLength::L128 as u32 == 0);
		const _: () = assert!(VectorLength::L256 as u32 == 1);
		// SAFETY: 0<=(n&1)<=1 and those are valid enum variants, see const assert!() above
		self.state.vector_length = unsafe { mem::transmute((b2 >> 2) & 1) };

		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		const _: () = assert!(DecoderMandatoryPrefix::P66 as u32 == 1);
		const _: () = assert!(DecoderMandatoryPrefix::PF3 as u32 == 2);
		const _: () = assert!(DecoderMandatoryPrefix::PF2 as u32 == 3);
		// SAFETY: 0<=(b2&3)<=3 and those are valid enum variants, see const assert!() above
		self.state.mandatory_prefix = unsafe { mem::transmute(b2 & 3) };

		// vvvv is stored inverted; reg15_mask limits it to 3 bits outside 64-bit mode
		let b = (!b2 >> 3) & 0x0F;
		self.state.vvvv_invalid_check = b;
		self.state.vvvv = b & self.reg15_mask;
		// 1st VEX payload byte (consumed as this handler's "modrm" byte):
		// inverted R/X/B bits (mask_e0 is 0 outside 64-bit mode) + 5-bit map select
		let b1 = self.state.modrm;
		let b1x = !b1 & self.mask_e0;
		self.state.extra_register_base = (b1x >> 4) & 8;
		self.state.extra_index_register_base = (b1x >> 3) & 8;
		self.state.extra_base_register_base = (b1x >> 2) & 8;

		// Map select 1..=3 picks the 0F/0F38/0F3A tables; map 0 is only handled
		// when the `mvex` feature is enabled, anything else is invalid
		if let Some(&table) = self.handlers_vex.get(((b1 & 0x1F) as usize).wrapping_sub(1)) {
			self.decode_table2(table[(b2 >> 8) as usize], instruction);
		} else {
			#[cfg(feature = "mvex")]
			if (b1 & 0x1F) == 0 {
				self.decode_table2(self.handlers_vex_map0[(b2 >> 8) as usize], instruction);
				return;
			}
			self.set_invalid_instruction();
		}
	}
1687
	#[cfg(feature = "no_xop")]
	fn xop(&mut self, _instruction: &mut Instruction) {
		// XOP support was compiled out (`no_xop` feature): any XOP prefix
		// makes the instruction invalid.
		self.set_invalid_instruction();
	}
1692
	#[cfg(not(feature = "no_xop"))]
	fn xop(&mut self, instruction: &mut Instruction) {
		// Decodes an AMD XOP (8Fh) prefixed instruction. Layout mirrors 3-byte VEX:
		// `self.state.modrm` holds the byte following 8Fh (P0), already read by the caller.
		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		// Invalid if preceded by a REX prefix or a 66/F3/F2 mandatory prefix
		// (only enforced when invalid-instruction checks are enabled)
		if (((self.state.flags & StateFlags::HAS_REX) | (self.state.mandatory_prefix as u32)) & self.invalid_check_mask) != 0 {
			self.set_invalid_instruction();
		}
		// Undo what decode_out() did if it got a REX prefix
		self.state.flags &= !StateFlags::W;

		if cfg!(debug_assertions) {
			self.state.flags |= (EncodingKind::XOP as u32) << StateFlags::ENCODING_SHIFT;
		}

		// One 16-bit read: low byte = P1, high byte = the opcode byte
		let b2 = self.read_u16() as u32;

		const _: () = assert!(StateFlags::W == 0x80);
		// XOP.W (P1 bit 7)
		self.state.flags |= b2 & 0x80;

		const _: () = assert!(VectorLength::L128 as u32 == 0);
		const _: () = assert!(VectorLength::L256 as u32 == 1);
		// SAFETY: 0<=(n&1)<=1 and those are valid enum variants, see const assert!() above
		self.state.vector_length = unsafe { mem::transmute((b2 >> 2) & 1) };

		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		const _: () = assert!(DecoderMandatoryPrefix::P66 as u32 == 1);
		const _: () = assert!(DecoderMandatoryPrefix::PF3 as u32 == 2);
		const _: () = assert!(DecoderMandatoryPrefix::PF2 as u32 == 3);
		// SAFETY: 0<=(b2&3)<=3 and those are valid enum variants, see const assert!() above
		self.state.mandatory_prefix = unsafe { mem::transmute(b2 & 3) };

		// vvvv is stored inverted in the encoding; `reg15_mask` is mode-dependent (ctor)
		let b = (!b2 >> 3) & 0x0F;
		self.state.vvvv_invalid_check = b;
		self.state.vvvv = b & self.reg15_mask;
		// P0 carries the (inverted) R/X/B register-extension bits, masked by the
		// mode-dependent `mask_e0` (ctor)
		let b1 = self.state.modrm;
		let b1x = !b1 & self.mask_e0;
		self.state.extra_register_base = (b1x >> 4) & 8;
		self.state.extra_index_register_base = (b1x >> 3) & 8;
		self.state.extra_base_register_base = (b1x >> 2) & 8;

		// XOP map select = P0 bits 0-4; valid maps start at 8 (wrapping_sub(8)
		// sends 0-7 to huge indexes => None => invalid)
		if let Some(&table) = self.handlers_xop.get(((b1 & 0x1F) as usize).wrapping_sub(8)) {
			self.decode_table2(table[(b2 >> 8) as usize], instruction);
		} else {
			self.set_invalid_instruction();
		}
	}
1738
	#[cfg(not(any(not(feature = "no_evex"), feature = "mvex")))]
	fn evex_mvex(&mut self, _instruction: &mut Instruction) {
		// Both EVEX and MVEX support were compiled out: a 62h prefix always
		// makes the instruction invalid.
		self.set_invalid_instruction();
	}
1743
	#[cfg(any(not(feature = "no_evex"), feature = "mvex"))]
	fn evex_mvex(&mut self, instruction: &mut Instruction) {
		// Decodes an EVEX or MVEX (62h) prefixed instruction. `self.state.modrm`
		// holds the byte following 62h (P0), already read by the caller.
		const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
		// Invalid if preceded by a REX prefix or a 66/F3/F2 mandatory prefix
		// (only enforced when invalid-instruction checks are enabled)
		if (((self.state.flags & StateFlags::HAS_REX) | (self.state.mandatory_prefix as u32)) & self.invalid_check_mask) != 0 {
			self.set_invalid_instruction();
		}
		// Undo what decode_out() did if it got a REX prefix
		self.state.flags &= !StateFlags::W;

		// One 32-bit read: P1 (bits 0-7), P2 (8-15), opcode (16-23), modrm (24-31)
		let d = self.read_u32() as u32;
		// P1 bit 2 distinguishes EVEX (set) from MVEX/KNC (clear)
		if (d & 4) != 0 {
			#[cfg(feature = "no_evex")]
			self.set_invalid_instruction();
			#[cfg(not(feature = "no_evex"))]
			{
				let p0 = self.state.modrm;
				// P0 bit 3 must be clear in a valid EVEX prefix
				if (p0 & 8) == 0 {
					if cfg!(debug_assertions) {
						self.state.flags |= (EncodingKind::EVEX as u32) << StateFlags::ENCODING_SHIFT;
					}

					const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
					const _: () = assert!(DecoderMandatoryPrefix::P66 as u32 == 1);
					const _: () = assert!(DecoderMandatoryPrefix::PF3 as u32 == 2);
					const _: () = assert!(DecoderMandatoryPrefix::PF2 as u32 == 3);
					// SAFETY: 0<=(d&3)<=3 and those are valid enum variants, see const assert!() above
					self.state.mandatory_prefix = unsafe { mem::transmute(d & 3) };

					const _: () = assert!(StateFlags::W == 0x80);
					// EVEX.W (P1 bit 7)
					self.state.flags |= d & 0x80;

					let p2 = d >> 8;
					// aaa = opmask register (P2 bits 0-2)
					let aaa = p2 & 7;
					self.state.aaa = aaa;
					instruction_internal::internal_set_op_mask(instruction, aaa);
					// P2 bit 7 = EVEX.z (zeroing-masking)
					if (p2 & 0x80) != 0 {
						// invalid if aaa == 0 and if we check for invalid instructions (it's all 1s)
						if (aaa ^ self.invalid_check_mask) == u32::MAX {
							self.set_invalid_instruction();
						}
						self.state.flags |= StateFlags::Z;
						instruction.set_zeroing_masking(true);
					}

					const _: () = assert!(StateFlags::B == 0x10);
					// EVEX.b (broadcast / rounding control), P2 bit 4
					self.state.flags |= p2 & 0x10;

					const _: () = assert!(VectorLength::L128 as u32 == 0);
					const _: () = assert!(VectorLength::L256 as u32 == 1);
					const _: () = assert!(VectorLength::L512 as u32 == 2);
					const _: () = assert!(VectorLength::Unknown as u32 == 3);
					// SAFETY: 0<=(n&3)<=3 and those are valid enum variants, see const assert!() above
					self.state.vector_length = unsafe { mem::transmute((p2 >> 5) & 3) };

					// vvvv (P1 bits 3-6) is stored inverted in the encoding
					let p1 = (!d >> 3) & 0x0F;
					if self.is64b_mode {
						// V' (P2 bit 3, stored inverted) extends vvvv and the VSIB index
						let mut tmp = (!p2 & 8) << 1;
						self.state.extra_index_register_base_vsib = tmp;
						tmp += p1;
						self.state.vvvv = tmp;
						self.state.vvvv_invalid_check = tmp;
						// P0 carries the inverted R/X/B/R'/B' register-extension bits
						let mut p0x = !p0;
						self.state.extra_register_base = (p0x >> 4) & 8;
						self.state.extra_index_register_base = (p0x >> 3) & 8;
						self.state.extra_register_base_evex = p0x & 0x10;
						p0x >>= 2;
						self.state.extra_base_register_base_evex = p0x & 0x18;
						self.state.extra_base_register_base = p0x & 8;
					} else {
						// 16/32-bit mode: only vvvv bits 0-2 select a register
						self.state.vvvv_invalid_check = p1;
						self.state.vvvv = p1 & 0x07;
						const _: () = assert!(StateFlags::IS_INVALID == 0x40);
						// V' (stored inverted) must be 1 here; (!p2 & 8) << 3 == IS_INVALID if not
						self.state.flags |= (!p2 & 8) << 3;
					}

					// mm = P0 bits 0-2 select the opcode table; wrapping_sub(1) sends 0 to None
					if let Some(&table) = self.handlers_evex.get(((p0 & 7) as usize).wrapping_sub(1)) {
						let (decode, handler) = table[(d >> 16) as u8 as usize];
						debug_assert!(handler.has_modrm);
						// modrm came in as the top byte of `d`; split it into fields
						let m = d >> 24;
						self.state.modrm = m;
						self.state.reg = (m >> 3) & 7;
						self.state.mod_ = m >> 6;
						self.state.rm = m & 7;
						self.state.mem_index = (self.state.mod_ << 3) | self.state.rm;
						// Invalid if LL=3 and no rc
						const _: () = assert!(StateFlags::B > 3);
						debug_assert!(self.state.vector_length as u32 <= 3);
						if (((self.state.flags & StateFlags::B) | (self.state.vector_length as u32)) & self.invalid_check_mask) == 3 {
							self.set_invalid_instruction();
						}
						(decode)(handler, self, instruction);
					} else {
						self.set_invalid_instruction();
					}
				} else {
					self.set_invalid_instruction();
				}
			}
		} else {
			#[cfg(not(feature = "mvex"))]
			self.set_invalid_instruction();
			#[cfg(feature = "mvex")]
			{
				// MVEX (KNC) requires the KNC decoder option and 64-bit mode
				if (self.options & DecoderOptions::KNC) == 0 || !self.is64b_mode {
					self.set_invalid_instruction();
				} else {
					let p0 = self.state.modrm;
					if cfg!(debug_assertions) {
						self.state.flags |= (EncodingKind::MVEX as u32) << StateFlags::ENCODING_SHIFT;
					}

					const _: () = assert!(DecoderMandatoryPrefix::PNP as u32 == 0);
					const _: () = assert!(DecoderMandatoryPrefix::P66 as u32 == 1);
					const _: () = assert!(DecoderMandatoryPrefix::PF3 as u32 == 2);
					const _: () = assert!(DecoderMandatoryPrefix::PF2 as u32 == 3);
					// SAFETY: 0<=(d&3)<=3 and those are valid enum variants, see const assert!() above
					self.state.mandatory_prefix = unsafe { mem::transmute(d & 3) };

					const _: () = assert!(StateFlags::W == 0x80);
					// MVEX.W (P1 bit 7)
					self.state.flags |= d & 0x80;

					let p2 = d >> 8;
					// aaa = opmask register (P2 bits 0-2)
					let aaa = p2 & 7;
					self.state.aaa = aaa;
					instruction_internal::internal_set_op_mask(instruction, aaa);

					const _: () = assert!(StateFlags::MVEX_SSS_SHIFT == 16);
					const _: () = assert!(StateFlags::MVEX_SSS_MASK == 7);
					const _: () = assert!(StateFlags::MVEX_EH == 1 << (StateFlags::MVEX_SSS_SHIFT + 3));
					// Stash MVEX EH + SSS (P2 bits 4-7) into the state flags
					self.state.flags |= (p2 & 0xF0) << (StateFlags::MVEX_SSS_SHIFT - 4);

					// vvvv (stored inverted), extended by V' (P2 bit 3, stored inverted)
					let p1 = (!d >> 3) & 0x0F;
					let mut tmp = (!p2 & 8) << 1;
					self.state.extra_index_register_base_vsib = tmp;
					tmp += p1;
					self.state.vvvv = tmp;
					self.state.vvvv_invalid_check = tmp;
					// P0 carries the inverted R/X/B/R'/B' register-extension bits
					let mut p0x = !p0;
					self.state.extra_register_base = (p0x >> 4) & 8;
					self.state.extra_index_register_base = (p0x >> 3) & 8;
					self.state.extra_register_base_evex = p0x & 0x10;
					p0x >>= 2;
					self.state.extra_base_register_base_evex = p0x & 0x18;
					self.state.extra_base_register_base = p0x & 8;

					// Map select = P0 bits 0-3; wrapping_sub(1) sends 0 to None => invalid
					if let Some(&table) = self.handlers_mvex.get(((p0 & 0xF) as usize).wrapping_sub(1)) {
						let (decode, handler) = table[(d >> 16) as u8 as usize];
						debug_assert!(handler.has_modrm);
						// modrm came in as the top byte of `d`; split it into fields
						let m = d >> 24;
						self.state.modrm = m;
						self.state.reg = (m >> 3) & 7;
						self.state.mod_ = m >> 6;
						self.state.rm = m & 7;
						self.state.mem_index = (self.state.mod_ << 3) | self.state.rm;
						(decode)(handler, self, instruction);
					} else {
						self.set_invalid_instruction();
					}
				}
			}
		}
	}
1906
1907	#[must_use]
1908	#[inline(always)]
1909	fn read_op_seg_reg(&mut self) -> u32 {
1910		let reg = self.state.reg;
1911		const _: () = assert!(Register::ES as u32 + 1 == Register::CS as u32);
1912		const _: () = assert!(Register::ES as u32 + 2 == Register::SS as u32);
1913		const _: () = assert!(Register::ES as u32 + 3 == Register::DS as u32);
1914		const _: () = assert!(Register::ES as u32 + 4 == Register::FS as u32);
1915		const _: () = assert!(Register::ES as u32 + 5 == Register::GS as u32);
1916		if reg < 6 {
1917			Register::ES as u32 + reg
1918		} else {
1919			self.set_invalid_instruction();
1920			Register::None as u32
1921		}
1922	}
1923
1924	#[inline(always)]
1925	#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop")))]
1926	fn read_op_mem_sib(&mut self, instruction: &mut Instruction) {
1927		debug_assert!(self.state.encoding() != EncodingKind::EVEX as u32 && self.state.encoding() != EncodingKind::MVEX as u32);
1928		let is_valid = if self.state.address_size != OpSize::Size16 {
1929			self.read_op_mem_32_or_64(instruction)
1930		} else {
1931			self.read_op_mem_16(instruction, TupleType::N1);
1932			false
1933		};
1934		if self.invalid_check_mask != 0 && !is_valid {
1935			self.set_invalid_instruction();
1936		}
1937	}
1938
1939	// All MPX instructions in 64-bit mode force 64-bit addressing, and
1940	// all MPX instructions in 16/32-bit mode require 32-bit addressing
1941	// (see SDM Vol 1, 17.5.1 Intel MPX and Operating Modes)
1942	#[inline(always)]
1943	fn read_op_mem_mpx(&mut self, instruction: &mut Instruction) {
1944		debug_assert!(self.state.encoding() != EncodingKind::EVEX as u32 && self.state.encoding() != EncodingKind::MVEX as u32);
1945		if self.is64b_mode {
1946			self.state.address_size = OpSize::Size64;
1947			let _ = self.read_op_mem_32_or_64(instruction);
1948		} else if self.state.address_size != OpSize::Size16 {
1949			let _ = self.read_op_mem_32_or_64(instruction);
1950		} else {
1951			self.read_op_mem_16(instruction, TupleType::N1);
1952			if self.invalid_check_mask != 0 {
1953				self.set_invalid_instruction();
1954			}
1955		}
1956	}
1957
1958	#[inline(always)]
1959	#[cfg(any(not(feature = "no_evex"), feature = "mvex"))]
1960	fn read_op_mem_tuple_type(&mut self, instruction: &mut Instruction, tuple_type: TupleType) {
1961		debug_assert!(self.state.encoding() == EncodingKind::EVEX as u32 || self.state.encoding() == EncodingKind::MVEX as u32);
1962		if self.state.address_size != OpSize::Size16 {
1963			let index_reg = if self.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
1964			let _ = decoder_read_op_mem_32_or_64_vsib(self, instruction, index_reg, tuple_type, false);
1965		} else {
1966			self.read_op_mem_16(instruction, tuple_type);
1967		}
1968	}
1969
1970	#[inline(always)]
1971	#[cfg(any(not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
1972	fn read_op_mem_vsib(&mut self, instruction: &mut Instruction, vsib_index: Register, tuple_type: TupleType) {
1973		let is_valid = if self.state.address_size != OpSize::Size16 {
1974			decoder_read_op_mem_32_or_64_vsib(self, instruction, vsib_index, tuple_type, true)
1975		} else {
1976			self.read_op_mem_16(instruction, tuple_type);
1977			false
1978		};
1979		if self.invalid_check_mask != 0 && !is_valid {
1980			self.set_invalid_instruction();
1981		}
1982	}
1983
	// It's small enough that the compiler wants to inline it but almost no-one will
	// disassemble code with 16-bit addressing.
	#[inline(never)]
	#[cold]
	fn read_op_mem_16(&mut self, instruction: &mut Instruction, tuple_type: TupleType) {
		// Reads a 16-bit-addressing memory operand: base/index pair from a table
		// keyed by modrm.rm, plus a displacement whose size depends on modrm.mod.
		debug_assert!(self.state.address_size == OpSize::Size16);
		debug_assert!(self.state.rm <= 7);
		// SAFETY: `MEM_REGS_16.len() == 8` and `0<=self.state.rm<=7`
		let (mut base_reg, index_reg) = unsafe { *MEM_REGS_16.get_unchecked(self.state.rm as usize) };
		match self.state.mod_ {
			0 => {
				// mod=0, rm=6: no base register, just a 16-bit displacement
				if self.state.rm == 6 {
					instruction_internal::internal_set_memory_displ_size(instruction, 2);
					self.displ_index = self.data_ptr as u8;
					instruction.set_memory_displacement64(self.read_u16() as u64);
					base_reg = Register::None;
					debug_assert_eq!(index_reg, Register::None);
				}
			}
			1 => {
				// mod=1: 1-byte displacement, sign-extended and scaled by disp8n
				// (compressed displacement; N=1 unless EVEX), truncated to 16 bits
				instruction_internal::internal_set_memory_displ_size(instruction, 1);
				self.displ_index = self.data_ptr as u8;
				let b = self.read_u8();
				instruction.set_memory_displacement64(self.disp8n(tuple_type).wrapping_mul(b as i8 as u32) as u16 as u64);
			}
			_ => {
				// mod=2: plain 16-bit displacement (mod=3 never reaches a mem reader)
				debug_assert_eq!(self.state.mod_, 2);
				instruction_internal::internal_set_memory_displ_size(instruction, 2);
				self.displ_index = self.data_ptr as u8;
				instruction.set_memory_displacement64(self.read_u16() as u64);
			}
		}
		instruction.set_memory_base(base_reg);
		instruction.set_memory_index(index_reg);
	}
2019
2020	#[must_use]
2021	#[cfg(feature = "__internal_flip")]
2022	fn read_op_mem_32_or_64(&mut self, instruction: &mut Instruction) -> bool {
2023		let base_reg = if self.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
2024		decoder_read_op_mem_32_or_64_vsib(self, instruction, base_reg, TupleType::N1, false)
2025	}
2026
	// Returns `true` if the SIB byte was read
	// This is a specialized version of read_op_mem_32_or_64_vsib() which takes less arguments. Keep them in sync.
	#[must_use]
	#[cfg(not(feature = "__internal_flip"))]
	#[inline(always)]
	fn read_op_mem_32_or_64(&mut self, instruction: &mut Instruction) -> bool {
		// Expands to a dispatch on the modrm mod/rm state (see read_op_mem_stmt_ret!
		// earlier in this file); the empty block is the "extra statements" slot.
		read_op_mem_stmt_ret!(self, instruction, {})
	}
2035
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_1(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=1 operand without a SIB byte: base register from rm + sign-extended disp8.
		// The `loop` never iterates: it only exists so read_u8_break! can `break` to
		// the out-of-bytes error path below. Always returns false (no SIB byte).
		loop {
			instruction_internal::internal_set_memory_displ_size(instruction, 1);
			this.displ_index = this.data_ptr as u8;
			let displ = read_u8_break!(this) as i8 as u64;
			if this.state.address_size == OpSize::Size64 {
				instruction.set_memory_displacement64(displ);
				write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::RAX as u32);
			} else {
				// 32-bit addressing: displacement is truncated to 32 bits
				instruction.set_memory_displacement64(displ as u32 as u64);
				write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::EAX as u32);
			}

			return false;
		}
		// Only reached when read_u8_break! ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		false
	}
2056
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_1_4(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=1, rm=4: SIB byte + disp8, read together as one little-endian u16
		// (low byte = SIB, high byte = disp8). The never-iterating `loop` exists so
		// read_u16_break! can `break` to the error path. Always returns true (SIB read).
		loop {
			instruction_internal::internal_set_memory_displ_size(instruction, 1);

			// The displacement byte follows the SIB byte
			this.displ_index = this.data_ptr.wrapping_add(1) as u8;
			let w = read_u16_break!(this) as u32;

			const _: () = assert!(InstrScale::Scale1 as u32 == 0);
			const _: () = assert!(InstrScale::Scale2 as u32 == 1);
			const _: () = assert!(InstrScale::Scale4 as u32 == 2);
			const _: () = assert!(InstrScale::Scale8 as u32 == 3);
			// SAFETY: 0-3 are valid variants
			instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute(((w >> 6) & 3) as InstrScaleUnderlyingType) });
			// SIB index bits 3-5, extended by the prefix's X bit
			let index = ((w >> 3) & 7) + this.state.extra_index_register_base;
			if this.state.address_size == OpSize::Size64 {
				const BASE_REG: Register = Register::RAX;
				// Index encoding 100b with no extension bit means "no index register"
				if index != 4 {
					write_index_reg!(instruction, index + BASE_REG as u32);
				}

				write_base_reg!(instruction, (w & 7) + this.state.extra_base_register_base + BASE_REG as u32);
				// disp8 (high byte of w), sign-extended
				let displ = (w >> 8) as i8 as u64;
				instruction.set_memory_displacement64(displ);
			} else {
				const BASE_REG: Register = Register::EAX;
				if index != 4 {
					write_index_reg!(instruction, index + BASE_REG as u32);
				}

				write_base_reg!(instruction, (w & 7) + this.state.extra_base_register_base + BASE_REG as u32);
				// disp8 sign-extended to 32 bits, then zero-extended
				let displ = (w >> 8) as i8 as u32 as u64;
				instruction.set_memory_displacement64(displ);
			}

			return true;
		}
		// Only reached when read_u16_break! ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		true
	}
2098
2099	#[cfg(not(feature = "__internal_flip"))]
2100	fn read_op_mem_0(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
2101		if this.state.address_size == OpSize::Size64 {
2102			write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::RAX as u32);
2103		} else {
2104			write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::EAX as u32);
2105		};
2106
2107		false
2108	}
2109
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_0_5(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=0, rm=5: 32-bit displacement; RIP/EIP-relative in 64-bit mode, a plain
		// absolute disp32 otherwise. The never-iterating `loop` exists so
		// read_u32_break! can `break` to the error path. Returns false (no SIB byte).
		loop {
			this.displ_index = this.data_ptr as u8;
			let displ = read_u32_break!(this) as i32 as u64;
			if this.state.address_size == OpSize::Size64 {
				// 64-bit addressing: RIP-relative, disp32 sign-extended to 64 bits
				debug_assert!(this.is64b_mode);
				this.state.flags |= StateFlags::IP_REL64;
				instruction.set_memory_displacement64(displ);
				instruction_internal::internal_set_memory_displ_size(instruction, 4);
				instruction.set_memory_base(Register::RIP);
			} else if this.is64b_mode {
				// 64-bit mode with a 67h prefix: EIP-relative, 32-bit truncated
				this.state.flags |= StateFlags::IP_REL32;
				instruction.set_memory_displacement64(displ as u32 as u64);
				instruction_internal::internal_set_memory_displ_size(instruction, 3);
				instruction.set_memory_base(Register::EIP);
			} else {
				// 16/32-bit mode: absolute disp32, no base register
				instruction.set_memory_displacement64(displ as u32 as u64);
				instruction_internal::internal_set_memory_displ_size(instruction, 3);
			}

			return false;
		}
		// Only reached when read_u32_break! ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		false
	}
2137
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_2_4(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=2, rm=4: SIB byte followed by a 32-bit displacement. The never-iterating
		// `loop` exists so the read_*_break! macros can `break` to the error path.
		// Always returns true (SIB byte read).
		loop {
			let sib = read_u8_break!(this) as u32;
			this.displ_index = this.data_ptr as u8;
			let displ = read_u32_break!(this) as i32 as u64;

			const _: () = assert!(InstrScale::Scale1 as u32 == 0);
			const _: () = assert!(InstrScale::Scale2 as u32 == 1);
			const _: () = assert!(InstrScale::Scale4 as u32 == 2);
			const _: () = assert!(InstrScale::Scale8 as u32 == 3);
			// SAFETY: 0-3 are valid variants
			instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute((sib >> 6) as InstrScaleUnderlyingType) });
			// SIB index bits 3-5, extended by the prefix's X bit
			let index = ((sib >> 3) & 7) + this.state.extra_index_register_base;
			if this.state.address_size == OpSize::Size64 {
				const BASE_REG: Register = Register::RAX;
				// Index encoding 100b with no extension bit means "no index register"
				if index != 4 {
					write_index_reg!(instruction, index + BASE_REG as u32);
				}

				write_base_reg!(instruction, (sib & 7) + this.state.extra_base_register_base + BASE_REG as u32);
				instruction_internal::internal_set_memory_displ_size(instruction, 4);
				instruction.set_memory_displacement64(displ);
			} else {
				const BASE_REG: Register = Register::EAX;
				if index != 4 {
					write_index_reg!(instruction, index + BASE_REG as u32);
				}

				write_base_reg!(instruction, (sib & 7) + this.state.extra_base_register_base + BASE_REG as u32);
				instruction_internal::internal_set_memory_displ_size(instruction, 3);
				instruction.set_memory_displacement64(displ as u32 as u64);
			}

			return true;
		}
		// Only reached when a read_*_break! macro ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		true
	}
2178
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_2(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=2 operand without a SIB byte: base register from rm + sign-extended
		// disp32. The never-iterating `loop` exists so read_u32_break! can `break`
		// to the error path. Always returns false (no SIB byte).
		loop {
			this.displ_index = this.data_ptr as u8;
			let displ = read_u32_break!(this) as i32 as u64;
			if this.state.address_size == OpSize::Size64 {
				instruction.set_memory_displacement64(displ);
				instruction_internal::internal_set_memory_displ_size(instruction, 4);
				write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::RAX as u32);
			} else {
				// 32-bit addressing: displacement truncated to 32 bits
				instruction.set_memory_displacement64(displ as u32 as u64);
				instruction_internal::internal_set_memory_displ_size(instruction, 3);
				write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::EAX as u32);
			}

			return false;
		}
		// Only reached when read_u32_break! ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		false
	}
2200
	#[cfg(not(feature = "__internal_flip"))]
	#[allow(clippy::never_loop)]
	fn read_op_mem_0_4(instruction: &mut Instruction, this: &mut Decoder<'_>) -> bool {
		// mod=0, rm=4: SIB byte; base encoding 101b means "no base, disp32 follows".
		// The never-iterating `loop` exists so the read_*_break! macros can `break`
		// to the error path. Always returns true (SIB byte read).
		loop {
			let sib = read_u8_break!(this) as u32;
			const _: () = assert!(InstrScale::Scale1 as u32 == 0);
			const _: () = assert!(InstrScale::Scale2 as u32 == 1);
			const _: () = assert!(InstrScale::Scale4 as u32 == 2);
			const _: () = assert!(InstrScale::Scale8 as u32 == 3);
			// SAFETY: 0-3 are valid variants
			instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute((sib >> 6) as InstrScaleUnderlyingType) });
			// SIB index bits 3-5, extended by the prefix's X bit
			let index = ((sib >> 3) & 7) + this.state.extra_index_register_base;
			let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
			// Index encoding 100b with no extension bit means "no index register"
			if index != 4 {
				write_index_reg!(instruction, index + base_reg as u32);
			}

			let base = sib & 7;
			if base == 5 {
				// No base register: a 32-bit displacement follows the SIB byte
				this.displ_index = this.data_ptr as u8;
				let displ = read_u32_break!(this) as i32 as u64;
				if this.state.address_size == OpSize::Size64 {
					instruction.set_memory_displacement64(displ);
					instruction_internal::internal_set_memory_displ_size(instruction, 4);
				} else {
					instruction.set_memory_displacement64(displ as u32 as u64);
					instruction_internal::internal_set_memory_displ_size(instruction, 3);
				}
			} else {
				write_base_reg!(instruction, base + this.state.extra_base_register_base + base_reg as u32);
				instruction_internal::internal_set_memory_displ_size(instruction, 0);
				instruction.set_memory_displacement64(0);
			}

			return true;
		}
		// Only reached when a read_*_break! macro ran out of input bytes
		this.state.flags |= StateFlags::IS_INVALID | StateFlags::NO_MORE_BYTES;
		true
	}
2240
2241	#[must_use]
2242	#[inline(always)]
2243	fn disp8n(&self, tuple_type: TupleType) -> u32 {
2244		get_disp8n(tuple_type, (self.state.flags & StateFlags::B) != 0)
2245	}
2246
	/// Gets the offsets of the constants (memory displacement and immediate) in the decoded instruction.
	/// The caller can check if there are any relocations at those addresses.
	///
	/// # Arguments
	///
	/// * `instruction`: The latest instruction that was decoded by this decoder
	///
	/// # Examples
	///
	/// ```
	/// use iced_x86::*;
	///
	/// // nop
	/// // xor dword ptr [rax-5AA5EDCCh],5Ah
	/// //                  00  01  02  03  04  05  06
	/// //                \opc\mrm\displacement___\imm
	/// let bytes = b"\x90\x83\xB3\x34\x12\x5A\xA5\x5A";
	/// let mut decoder = Decoder::with_ip(64, bytes, 0x1234_5678, DecoderOptions::NONE);
	/// assert_eq!(decoder.decode().code(), Code::Nopd);
	/// let instr = decoder.decode();
	/// let co = decoder.get_constant_offsets(&instr);
	///
	/// assert!(co.has_displacement());
	/// assert_eq!(co.displacement_offset(), 2);
	/// assert_eq!(co.displacement_size(), 4);
	/// assert!(co.has_immediate());
	/// assert_eq!(co.immediate_offset(), 6);
	/// assert_eq!(co.immediate_size(), 1);
	/// // It's not an instruction with two immediates (e.g. enter)
	/// assert!(!co.has_immediate2());
	/// assert_eq!(co.immediate_offset2(), 0);
	/// assert_eq!(co.immediate_size2(), 0);
	/// ```
	#[must_use]
	#[allow(clippy::missing_inline_in_public_items)]
	pub fn get_constant_offsets(&self, instruction: &Instruction) -> ConstantOffsets {
		let mut constant_offsets = ConstantOffsets::default();

		// Displacement: `displ_index` was recorded while reading the operand; turn it
		// into an offset relative to the start of the instruction.
		let displ_size = instruction.memory_displ_size();
		if displ_size != 0 {
			constant_offsets.displacement_offset = self.displ_index.wrapping_sub(self.instr_start_data_ptr as u8);
			// An 8-byte displacement value decoded without 64-bit addressing was read
			// from only 4 bytes (sign-extended disp32)
			if displ_size == 8 && (self.state.flags & StateFlags::ADDR64) == 0 {
				constant_offsets.displacement_size = 4;
			} else {
				constant_offsets.displacement_size = displ_size as u8;
			}
		}

		if (self.state.flags & StateFlags::NO_IMM) == 0 {
			// Immediates are the trailing bytes of the instruction: walk the operands
			// in reverse and compute offsets back from the instruction length.
			// `extra_imm_sub` skips a trailing Immediate8_2nd (e.g. enter) if one was seen.
			let mut extra_imm_sub = 0;
			for i in (0..instruction.op_count()).rev() {
				match instruction.op_kind(i) {
					OpKind::Immediate8 | OpKind::Immediate8to16 | OpKind::Immediate8to32 | OpKind::Immediate8to64 => {
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(extra_imm_sub).wrapping_sub(1) as u8;
						constant_offsets.immediate_size = 1;
						break;
					}

					OpKind::Immediate16 => {
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(extra_imm_sub).wrapping_sub(2) as u8;
						constant_offsets.immediate_size = 2;
						break;
					}

					OpKind::Immediate32 | OpKind::Immediate32to64 => {
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(extra_imm_sub).wrapping_sub(4) as u8;
						constant_offsets.immediate_size = 4;
						break;
					}

					OpKind::Immediate64 => {
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(extra_imm_sub).wrapping_sub(8) as u8;
						constant_offsets.immediate_size = 8;
						break;
					}

					OpKind::Immediate8_2nd => {
						// Second immediate is the very last byte; remember to skip it
						// when locating the first immediate in a later iteration
						constant_offsets.immediate_offset2 = instruction.len().wrapping_sub(1) as u8;
						constant_offsets.immediate_size2 = 1;
						extra_imm_sub = 1;
					}

					OpKind::NearBranch16 => {
						if (self.state.flags & StateFlags::BRANCH_IMM8) != 0 {
							// Short branch: 1-byte relative immediate
							constant_offsets.immediate_offset = instruction.len().wrapping_sub(1) as u8;
							constant_offsets.immediate_size = 1;
						} else if (self.state.flags & StateFlags::XBEGIN) == 0 {
							constant_offsets.immediate_offset = instruction.len().wrapping_sub(2) as u8;
							constant_offsets.immediate_size = 2;
						} else {
							// xbegin: immediate size follows the operand size
							debug_assert!((self.state.flags & StateFlags::XBEGIN) != 0);
							if self.state.operand_size != OpSize::Size16 {
								constant_offsets.immediate_offset = instruction.len().wrapping_sub(4) as u8;
								constant_offsets.immediate_size = 4;
							} else {
								constant_offsets.immediate_offset = instruction.len().wrapping_sub(2) as u8;
								constant_offsets.immediate_size = 2;
							}
						}
					}

					OpKind::NearBranch32 | OpKind::NearBranch64 => {
						if (self.state.flags & StateFlags::BRANCH_IMM8) != 0 {
							// Short branch: 1-byte relative immediate
							constant_offsets.immediate_offset = instruction.len().wrapping_sub(1) as u8;
							constant_offsets.immediate_size = 1;
						} else if (self.state.flags & StateFlags::XBEGIN) == 0 {
							constant_offsets.immediate_offset = instruction.len().wrapping_sub(4) as u8;
							constant_offsets.immediate_size = 4;
						} else {
							// xbegin: immediate size follows the operand size
							debug_assert!((self.state.flags & StateFlags::XBEGIN) != 0);
							if self.state.operand_size != OpSize::Size16 {
								constant_offsets.immediate_offset = instruction.len().wrapping_sub(4) as u8;
								constant_offsets.immediate_size = 4;
							} else {
								constant_offsets.immediate_offset = instruction.len().wrapping_sub(2) as u8;
								constant_offsets.immediate_size = 2;
							}
						}
					}

					OpKind::FarBranch16 => {
						// ptr16:16 — 2-byte offset followed by 2-byte selector
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(2 + 2) as u8;
						constant_offsets.immediate_size = 2;
						constant_offsets.immediate_offset2 = instruction.len().wrapping_sub(2) as u8;
						constant_offsets.immediate_size2 = 2;
					}

					OpKind::FarBranch32 => {
						// ptr16:32 — 4-byte offset followed by 2-byte selector
						constant_offsets.immediate_offset = instruction.len().wrapping_sub(4 + 2) as u8;
						constant_offsets.immediate_size = 4;
						constant_offsets.immediate_offset2 = instruction.len().wrapping_sub(2) as u8;
						constant_offsets.immediate_size2 = 2;
					}

					_ => {}
				}
			}
		}

		constant_offsets
	}
2388}
2389
2390// These are referenced from a static and I couldn't get it to work when they were inside an `impl` block
2391// so they're here instead of where they really belong.
2392
// Returns `true` if the SIB byte was read
// Same as read_op_mem_32_or_64() except it works with vsib memory operands. Keep them in sync.
#[must_use]
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
#[inline(always)]
fn decoder_read_op_mem_32_or_64_vsib(
	this: &mut Decoder<'_>, instruction: &mut Instruction, index_reg: Register, tuple_type: TupleType, is_vsib: bool,
) -> bool {
	// Dispatches on `state.mem_index` (== (modrm.mod << 3) | modrm.rm) to the
	// per-mod/rm reader fns in READ_OP_MEM_VSIB_FNS.
	debug_assert!(this.state.address_size == OpSize::Size32 || this.state.address_size == OpSize::Size64);

	let index = this.state.mem_index as usize;
	debug_assert!(index < READ_OP_MEM_VSIB_FNS.len());
	// SAFETY: index is valid because modrm.mod = 0-2 (never 3 if we're here) so index will always be 0-10_111 (17h)
	unsafe { (READ_OP_MEM_VSIB_FNS.get_unchecked(index))(this, instruction, index_reg, tuple_type, is_vsib) }
}
2408
// mod = 01, rm != 100: `[base + disp8]`, no SIB byte.
// The 8-bit displacement is sign-extended and scaled by `disp8n(tuple_type)`
// (tuple-type dependent factor, i.e. compressed disp8*N). Returns `false`: no SIB byte read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_1(
	this: &mut Decoder<'_>, instruction: &mut Instruction, _index_reg: Register, tuple_type: TupleType, _is_vsib: bool,
) -> bool {
	instruction_internal::internal_set_memory_displ_size(instruction, 1);
	// Record the displacement byte's position before read_u8() advances data_ptr
	this.displ_index = this.data_ptr as u8;
	let b = this.read_u8();
	if this.state.address_size == OpSize::Size64 {
		write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::RAX as u32);
		// 64-bit addressing: sign-extend the disp byte to 64 bits, then scale by N (wrapping)
		instruction.set_memory_displacement64((this.disp8n(tuple_type) as u64).wrapping_mul(b as i8 as u64));
	} else {
		write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + Register::EAX as u32);
		// 32-bit addressing: scale in 32 bits, then zero-extend the result to the 64-bit field
		instruction.set_memory_displacement64(this.disp8n(tuple_type).wrapping_mul(b as i8 as u32) as u64);
	}

	false
}
2426
// mod = 01, rm = 100: SIB byte followed by a disp8.
// Both bytes are fetched with a single read_u16(): low byte = SIB, high byte = displacement.
// Returns `true`: a SIB byte was read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_1_4(
	this: &mut Decoder<'_>, instruction: &mut Instruction, index_reg: Register, tuple_type: TupleType, is_vsib: bool,
) -> bool {
	instruction_internal::internal_set_memory_displ_size(instruction, 1);

	// The displacement is the second of the two bytes read below, hence data_ptr + 1
	this.displ_index = this.data_ptr.wrapping_add(1) as u8;
	let sib = this.read_u16() as u32;
	// SIB.index (bits 3-5) plus the prefix-supplied index-register extension bit(s)
	let index = ((sib >> 3) & 7) + this.state.extra_index_register_base;
	if !is_vsib {
		// index == 4 (100b) encodes "no index register" for non-VSIB operands
		if index != 4 {
			write_index_reg!(instruction, index + index_reg as u32);
		}
	} else {
		// VSIB: index is always a vector register; also add the VSIB-specific extension bit
		write_index_reg!(instruction, index + this.state.extra_index_register_base_vsib + index_reg as u32);
	}

	const _: () = assert!(InstrScale::Scale1 as u32 == 0);
	const _: () = assert!(InstrScale::Scale2 as u32 == 1);
	const _: () = assert!(InstrScale::Scale4 as u32 == 2);
	const _: () = assert!(InstrScale::Scale8 as u32 == 3);
	// SAFETY: 0-3 are valid variants
	instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute(((sib >> 6) & 3) as InstrScaleUnderlyingType) });
	let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
	write_base_reg!(instruction, (sib & 7) + this.state.extra_base_register_base + base_reg as u32);

	// High byte of the u16 read is the disp8: sign-extend, then scale by N (compressed disp)
	let b = (sib >> 8) as i8 as u32;
	let displ = this.disp8n(tuple_type).wrapping_mul(b);
	if this.state.address_size == OpSize::Size64 {
		// Sign-extend the scaled displacement to 64 bits
		instruction.set_memory_displacement64(displ as i32 as u64);
	} else {
		instruction.set_memory_displacement64(displ as u64);
	}

	true
}
2463
// mod = 00, rm != 100/101: `[base]` only — no SIB byte, no displacement.
// Returns `false`: no SIB byte read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_0(
	this: &mut Decoder<'_>, instruction: &mut Instruction, _index_reg: Register, _tuple_type: TupleType, _is_vsib: bool,
) -> bool {
	// Base register family depends on the effective address size (RAX.. vs EAX..)
	let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
	write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + base_reg as u32);

	false
}
2473
// mod = 00, rm = 101: disp32 with no base/index — RIP/EIP-relative when in 64-bit mode,
// plain absolute disp32 otherwise. Returns `false`: no SIB byte read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_0_5(
	this: &mut Decoder<'_>, instruction: &mut Instruction, _index_reg: Register, _tuple_type: TupleType, _is_vsib: bool,
) -> bool {
	// Record the displacement's position before read_u32() advances data_ptr
	this.displ_index = this.data_ptr as u8;
	let d = this.read_u32();
	if this.state.address_size == OpSize::Size64 {
		// 64-bit addressing is only possible in 64-bit mode
		debug_assert!(this.is64b_mode);
		this.state.flags |= StateFlags::IP_REL64;
		// Sign-extended disp32; IP_REL64 marks it as relative to the next instruction's RIP
		instruction.set_memory_displacement64(d as i32 as u64);
		instruction_internal::internal_set_memory_displ_size(instruction, 4);
		instruction.set_memory_base(Register::RIP);
	} else if this.is64b_mode {
		// 32-bit addressing inside 64-bit mode: EIP-relative
		this.state.flags |= StateFlags::IP_REL32;
		instruction.set_memory_displacement64(d as u64);
		instruction_internal::internal_set_memory_displ_size(instruction, 3);
		instruction.set_memory_base(Register::EIP);
	} else {
		// 16/32-bit mode: absolute disp32, no base register
		instruction.set_memory_displacement64(d as u64);
		instruction_internal::internal_set_memory_displ_size(instruction, 3);
	}

	false
}
2498
// mod = 10, rm = 100: SIB byte followed by a disp32.
// Returns `true`: a SIB byte was read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_2_4(
	this: &mut Decoder<'_>, instruction: &mut Instruction, index_reg: Register, _tuple_type: TupleType, is_vsib: bool,
) -> bool {
	let sib = this.read_u8() as u32;
	// The disp32 starts right after the SIB byte that was just consumed
	this.displ_index = this.data_ptr as u8;

	// SIB.index (bits 3-5) plus the prefix-supplied index-register extension bit(s)
	let index = ((sib >> 3) & 7) + this.state.extra_index_register_base;
	if !is_vsib {
		// index == 4 (100b) encodes "no index register" for non-VSIB operands
		if index != 4 {
			write_index_reg!(instruction, index + index_reg as u32);
		}
	} else {
		// VSIB: index is always a vector register; also add the VSIB-specific extension bit
		write_index_reg!(instruction, index + this.state.extra_index_register_base_vsib + index_reg as u32);
	}

	const _: () = assert!(InstrScale::Scale1 as u32 == 0);
	const _: () = assert!(InstrScale::Scale2 as u32 == 1);
	const _: () = assert!(InstrScale::Scale4 as u32 == 2);
	const _: () = assert!(InstrScale::Scale8 as u32 == 3);
	// SAFETY: 0-3 are valid variants (sib came from a u8, so sib >> 6 is 0-3)
	instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute((sib >> 6) as InstrScaleUnderlyingType) });

	let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
	write_base_reg!(instruction, (sib & 7) + this.state.extra_base_register_base + base_reg as u32);
	let displ = this.read_u32() as u32;
	if this.state.address_size == OpSize::Size64 {
		// 64-bit addressing: store the sign-extended disp32
		instruction_internal::internal_set_memory_displ_size(instruction, 4);
		instruction.set_memory_displacement64(displ as i32 as u64);
	} else {
		// 32-bit addressing: store the zero-extended disp32
		instruction_internal::internal_set_memory_displ_size(instruction, 3);
		instruction.set_memory_displacement64(displ as u64);
	}

	true
}
2535
// mod = 10, rm != 100: `[base + disp32]`, no SIB byte.
// Returns `false`: no SIB byte read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_2(
	this: &mut Decoder<'_>, instruction: &mut Instruction, _index_reg: Register, _tuple_type: TupleType, _is_vsib: bool,
) -> bool {
	let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
	write_base_reg!(instruction, this.state.extra_base_register_base + this.state.rm + base_reg as u32);
	// Record the displacement's position before read_u32() advances data_ptr
	this.displ_index = this.data_ptr as u8;
	let d = this.read_u32();
	if this.state.address_size == OpSize::Size64 {
		// 64-bit addressing: store the sign-extended disp32
		instruction.set_memory_displacement64(d as i32 as u64);
		instruction_internal::internal_set_memory_displ_size(instruction, 4);
	} else {
		// 32-bit addressing: store the zero-extended disp32
		instruction.set_memory_displacement64(d as u64);
		instruction_internal::internal_set_memory_displ_size(instruction, 3);
	}

	false
}
2554
// mod = 00, rm = 100: SIB byte; SIB.base == 101 means a disp32 follows with no base register,
// otherwise `[base + scaled index]` with no displacement. Returns `true`: a SIB byte was read.
#[cfg(any(feature = "__internal_flip", not(feature = "no_evex"), not(feature = "no_vex"), not(feature = "no_xop"), feature = "mvex"))]
fn decoder_read_op_mem_vsib_0_4(
	this: &mut Decoder<'_>, instruction: &mut Instruction, index_reg: Register, _tuple_type: TupleType, is_vsib: bool,
) -> bool {
	let sib = this.read_u8() as u32;
	const _: () = assert!(InstrScale::Scale1 as u32 == 0);
	const _: () = assert!(InstrScale::Scale2 as u32 == 1);
	const _: () = assert!(InstrScale::Scale4 as u32 == 2);
	const _: () = assert!(InstrScale::Scale8 as u32 == 3);
	// SAFETY: 0-3 are valid variants (sib came from a u8, so sib >> 6 is 0-3)
	instruction_internal::internal_set_memory_index_scale(instruction, unsafe { mem::transmute((sib >> 6) as InstrScaleUnderlyingType) });
	// SIB.index (bits 3-5) plus the prefix-supplied index-register extension bit(s)
	let index = ((sib >> 3) & 7) + this.state.extra_index_register_base;
	if !is_vsib {
		// index == 4 (100b) encodes "no index register" for non-VSIB operands
		if index != 4 {
			write_index_reg!(instruction, index + index_reg as u32);
		}
	} else {
		// VSIB: index is always a vector register; also add the VSIB-specific extension bit
		write_index_reg!(instruction, index + this.state.extra_index_register_base_vsib + index_reg as u32);
	}

	let base = sib & 7;
	if base == 5 {
		// No base register; a disp32 follows the SIB byte
		this.displ_index = this.data_ptr as u8;
		let d = this.read_u32();
		if this.state.address_size == OpSize::Size64 {
			// 64-bit addressing: store the sign-extended disp32
			instruction.set_memory_displacement64(d as i32 as u64);
			instruction_internal::internal_set_memory_displ_size(instruction, 4);
		} else {
			// 32-bit addressing: store the zero-extended disp32
			instruction.set_memory_displacement64(d as u64);
			instruction_internal::internal_set_memory_displ_size(instruction, 3);
		}
	} else {
		// Base register present, no displacement
		let base_reg = if this.state.address_size == OpSize::Size64 { Register::RAX } else { Register::EAX };
		write_base_reg!(instruction, base + this.state.extra_base_register_base + base_reg as u32);
		instruction_internal::internal_set_memory_displ_size(instruction, 0);
		instruction.set_memory_displacement64(0);
	}

	true
}
2595
// Iterator returned by `(&mut Decoder<'a>).into_iter()`; borrows the decoder mutably
// so the decoder can be reused after iteration ends.
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct DecoderIter<'a, 'b> {
	decoder: &'b mut Decoder<'a>,
}
2601
2602impl Iterator for DecoderIter<'_, '_> {
2603	type Item = Instruction;
2604
2605	#[inline]
2606	fn next(&mut self) -> Option<Self::Item> {
2607		if self.decoder.can_decode() {
2608			Some(self.decoder.decode())
2609		} else {
2610			None
2611		}
2612	}
2613}
2614
// Fused: `next()` returns `None` whenever `can_decode()` is false, so it keeps returning `None` after the end
impl FusedIterator for DecoderIter<'_, '_> {}
2616
// Iterator returned by `Decoder::into_iter()`; takes ownership of the decoder.
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct DecoderIntoIter<'a> {
	decoder: Decoder<'a>,
}
2622
2623impl Iterator for DecoderIntoIter<'_> {
2624	type Item = Instruction;
2625
2626	#[inline]
2627	fn next(&mut self) -> Option<Self::Item> {
2628		if self.decoder.can_decode() {
2629			Some(self.decoder.decode())
2630		} else {
2631			None
2632		}
2633	}
2634}
2635
// Fused: `next()` returns `None` whenever `can_decode()` is false, so it keeps returning `None` after the end
impl FusedIterator for DecoderIntoIter<'_> {}
2637
2638impl<'a> IntoIterator for Decoder<'a> {
2639	type Item = Instruction;
2640	type IntoIter = DecoderIntoIter<'a>;
2641
2642	#[must_use]
2643	#[inline]
2644	fn into_iter(self) -> Self::IntoIter {
2645		DecoderIntoIter { decoder: self }
2646	}
2647}
2648
2649impl<'a, 'b> IntoIterator for &'b mut Decoder<'a> {
2650	type Item = Instruction;
2651	type IntoIter = DecoderIter<'a, 'b>;
2652
2653	#[must_use]
2654	#[inline]
2655	fn into_iter(self) -> Self::IntoIter {
2656		DecoderIter { decoder: self }
2657	}
2658}