#![allow(clippy::arithmetic_side_effects)]
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! This module contains all the definitions related to eBPF, and some helper functions for
//! manipulating eBPF instructions.
//!
//! The number of bytes in an instruction, the maximum number of instructions in a program, and
//! also all operation codes are defined here as constants.
//!
//! The structure for an instruction used by this crate, as well as the function to extract it from
//! a program, is also defined in the module.
//!
//! To learn more about these instructions, see the Linux kernel documentation:
//! <https://www.kernel.org/doc/Documentation/networking/filter.txt>, or for a shorter version of
//! the list of the operation codes: <https://github.com/iovisor/bpf-docs/blob/master/eBPF.md>
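//!
//! As a quick orientation (a sketch using only constants defined below), the opcode constants in
//! this module are built by OR-ing an operation class with modifier and operation bits:
//!
//! ```
//! use solana_sbpf::ebpf;
//!
//! // `add64 dst, imm` combines the 64 bit ALU class, the immediate source modifier,
//! // and the addition operation code.
//! assert_eq!(
//!     ebpf::ADD64_IMM,
//!     ebpf::BPF_ALU64_STORE | ebpf::BPF_K | ebpf::BPF_ADD
//! );
//! ```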

use byteorder::{ByteOrder, LittleEndian};
use hash32::{Hash, Hasher, Murmur3Hasher};
use std::fmt;

/// Solana BPF version flag
pub const EF_SBPF_V2: u32 = 0x20;
/// Maximum number of instructions in an eBPF program.
pub const PROG_MAX_INSNS: usize = 65_536;
/// Size of an eBPF instruction, in bytes.
pub const INSN_SIZE: usize = 8;
/// Frame pointer register
pub const FRAME_PTR_REG: usize = 10;
/// First scratch register
pub const FIRST_SCRATCH_REG: usize = 6;
/// Number of scratch registers
pub const SCRATCH_REGS: usize = 4;
/// Alignment of the memory regions in host address space in bytes
pub const HOST_ALIGN: usize = 16;
/// Upper half of a pointer is the region index, lower half the virtual address inside that region.
pub const VIRTUAL_ADDRESS_BITS: usize = 32;

/// Size (and alignment) of a memory region
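///
/// A small illustrative example (following directly from the constants in this module): the upper
/// 32 bits of a virtual address select the region, the lower 32 bits are the offset inside it.
///
/// ```
/// use solana_sbpf::ebpf;
///
/// let vm_addr = ebpf::MM_STACK_START + 0x42;
/// assert_eq!(vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS, 2); // the stack is region index 2
/// assert_eq!(vm_addr & (ebpf::MM_REGION_SIZE - 1), 0x42); // offset within the region
/// ```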
pub const MM_REGION_SIZE: u64 = 1 << VIRTUAL_ADDRESS_BITS;
/// Virtual address of the bytecode region (in SBPFv3)
pub const MM_BYTECODE_START: u64 = 0;
/// Virtual address of the readonly data region (also contains the bytecode until SBPFv3)
pub const MM_RODATA_START: u64 = MM_REGION_SIZE;
/// Virtual address of the stack region
pub const MM_STACK_START: u64 = MM_REGION_SIZE * 2;
/// Virtual address of the heap region
pub const MM_HEAP_START: u64 = MM_REGION_SIZE * 3;
/// Virtual address of the input region
pub const MM_INPUT_START: u64 = MM_REGION_SIZE * 4;

// eBPF op codes.
// See also https://www.kernel.org/doc/Documentation/networking/filter.txt

// Three least significant bits are operation class:
/// BPF operation class: load from immediate. [DEPRECATED]
pub const BPF_LD: u8 = 0x00;
/// BPF operation class: load from register. [DEPRECATED]
pub const BPF_LDX: u8 = 0x01;
/// BPF operation class: store immediate. [DEPRECATED]
pub const BPF_ST: u8 = 0x02;
/// BPF operation class: store value from register. [DEPRECATED]
pub const BPF_STX: u8 = 0x03;
/// BPF operation class: 32 bit arithmetic or load.
pub const BPF_ALU32_LOAD: u8 = 0x04;
/// BPF operation class: control flow.
pub const BPF_JMP: u8 = 0x05;
/// BPF operation class: product / quotient / remainder.
pub const BPF_PQR: u8 = 0x06;
/// BPF operation class: 64 bit arithmetic or store.
pub const BPF_ALU64_STORE: u8 = 0x07;

// For load and store instructions:
// +------------+--------+------------+
// |   3 bits   | 2 bits |   3 bits   |
// |    mode    |  size  | insn class |
// +------------+--------+------------+
// (MSB)                          (LSB)

// Size modifiers:
/// BPF size modifier: word (4 bytes).
pub const BPF_W: u8 = 0x00;
/// BPF size modifier: half-word (2 bytes).
pub const BPF_H: u8 = 0x08;
/// BPF size modifier: byte (1 byte).
pub const BPF_B: u8 = 0x10;
/// BPF size modifier: double word (8 bytes).
pub const BPF_DW: u8 = 0x18;
/// BPF size modifier: 1 byte.
pub const BPF_1B: u8 = 0x20;
/// BPF size modifier: 2 bytes.
pub const BPF_2B: u8 = 0x30;
/// BPF size modifier: 4 bytes.
pub const BPF_4B: u8 = 0x80;
/// BPF size modifier: 8 bytes.
pub const BPF_8B: u8 = 0x90;

// Mode modifiers:
/// BPF mode modifier: immediate value.
pub const BPF_IMM: u8 = 0x00;
/// BPF mode modifier: absolute load.
pub const BPF_ABS: u8 = 0x20;
/// BPF mode modifier: indirect load. [DEPRECATED]
pub const BPF_IND: u8 = 0x40;
/// BPF mode modifier: load from / store to memory. [DEPRECATED]
pub const BPF_MEM: u8 = 0x60;
// [ 0x80 reserved ]
// [ 0xa0 reserved ]
// [ 0xc0 reserved ]

// For arithmetic (BPF_ALU32_LOAD/BPF_ALU64_STORE) and jump (BPF_JMP) instructions:
// +----------------+----+------------+
// |     4 bits     |1 b.|   3 bits   |
// | operation code | src| insn class |
// +----------------+----+------------+
// (MSB)                          (LSB)
120
121// Source modifiers:
122/// BPF source operand modifier: 32-bit immediate value.
123pub const BPF_K: u8 = 0x00;
124/// BPF source operand modifier: `src` register.
125pub const BPF_X: u8 = 0x08;
126
127// Operation codes -- BPF_ALU32_LOAD or BPF_ALU64_STORE classes:
128/// BPF ALU/ALU64 operation code: addition.
129pub const BPF_ADD: u8 = 0x00;
130/// BPF ALU/ALU64 operation code: subtraction.
131pub const BPF_SUB: u8 = 0x10;
132/// BPF ALU/ALU64 operation code: multiplication. [DEPRECATED]
133pub const BPF_MUL: u8 = 0x20;
134/// BPF ALU/ALU64 operation code: division. [DEPRECATED]
135pub const BPF_DIV: u8 = 0x30;
136/// BPF ALU/ALU64 operation code: or.
137pub const BPF_OR: u8 = 0x40;
138/// BPF ALU/ALU64 operation code: and.
139pub const BPF_AND: u8 = 0x50;
140/// BPF ALU/ALU64 operation code: left shift.
141pub const BPF_LSH: u8 = 0x60;
142/// BPF ALU/ALU64 operation code: right shift.
143pub const BPF_RSH: u8 = 0x70;
144/// BPF ALU/ALU64 operation code: negation. [DEPRECATED]
145pub const BPF_NEG: u8 = 0x80;
146/// BPF ALU/ALU64 operation code: modulus. [DEPRECATED]
147pub const BPF_MOD: u8 = 0x90;
148/// BPF ALU/ALU64 operation code: exclusive or.
149pub const BPF_XOR: u8 = 0xa0;
150/// BPF ALU/ALU64 operation code: move.
151pub const BPF_MOV: u8 = 0xb0;
152/// BPF ALU/ALU64 operation code: sign extending right shift.
153pub const BPF_ARSH: u8 = 0xc0;
154/// BPF ALU/ALU64 operation code: endianness conversion.
155pub const BPF_END: u8 = 0xd0;
156/// BPF ALU/ALU64 operation code: high or.
157pub const BPF_HOR: u8 = 0xf0;
158
159// Operation codes -- BPF_PQR class:
160//    7         6               5                               4       3          2-0
161// 0  Unsigned  Multiplication  Product Lower Half / Quotient   32 Bit  Immediate  PQR
162// 1  Signed    Division        Product Upper Half / Remainder  64 Bit  Register   PQR
163/// BPF PQR operation code: unsigned high multiplication.
164pub const BPF_UHMUL: u8 = 0x20;
165/// BPF PQR operation code: unsigned division quotient.
166pub const BPF_UDIV: u8 = 0x40;
167/// BPF PQR operation code: unsigned division remainder.
168pub const BPF_UREM: u8 = 0x60;
169/// BPF PQR operation code: low multiplication.
170pub const BPF_LMUL: u8 = 0x80;
171/// BPF PQR operation code: signed high multiplication.
172pub const BPF_SHMUL: u8 = 0xA0;
173/// BPF PQR operation code: signed division quotient.
174pub const BPF_SDIV: u8 = 0xC0;
175/// BPF PQR operation code: signed division remainder.
176pub const BPF_SREM: u8 = 0xE0;
177
178// Operation codes -- BPF_JMP class:
179/// BPF JMP operation code: jump.
180pub const BPF_JA: u8 = 0x00;
181/// BPF JMP operation code: jump if equal.
182pub const BPF_JEQ: u8 = 0x10;
183/// BPF JMP operation code: jump if greater than.
184pub const BPF_JGT: u8 = 0x20;
185/// BPF JMP operation code: jump if greater or equal.
186pub const BPF_JGE: u8 = 0x30;
187/// BPF JMP operation code: jump if `src` & `reg`.
188pub const BPF_JSET: u8 = 0x40;
189/// BPF JMP operation code: jump if not equal.
190pub const BPF_JNE: u8 = 0x50;
191/// BPF JMP operation code: jump if greater than (signed).
192pub const BPF_JSGT: u8 = 0x60;
193/// BPF JMP operation code: jump if greater or equal (signed).
194pub const BPF_JSGE: u8 = 0x70;
195/// BPF JMP operation code: syscall function call.
196pub const BPF_CALL: u8 = 0x80;
197/// BPF JMP operation code: return from program.
198pub const BPF_EXIT: u8 = 0x90;
199/// BPF JMP operation code: static syscall.
200pub const BPF_SYSCALL: u8 = 0x90;
201/// BPF JMP operation code: jump if lower than.
202pub const BPF_JLT: u8 = 0xa0;
203/// BPF JMP operation code: jump if lower or equal.
204pub const BPF_JLE: u8 = 0xb0;
205/// BPF JMP operation code: jump if lower than (signed).
206pub const BPF_JSLT: u8 = 0xc0;
207/// BPF JMP operation code: jump if lower or equal (signed).
208pub const BPF_JSLE: u8 = 0xd0;

// Op codes
// (The following operation names are not “official”; they may be specific to sBPF. The Linux
// kernel only combines the above flags and does not assign a name to each operation.)

/// BPF opcode: `lddw dst, imm` /// `dst = imm`. [DEPRECATED]
pub const LD_DW_IMM: u8 = BPF_LD | BPF_IMM | BPF_DW;
/// BPF opcode: `ldxb dst, [src + off]` /// `dst = (src + off) as u8`.
pub const LD_B_REG: u8 = BPF_LDX | BPF_MEM | BPF_B;
/// BPF opcode: `ldxh dst, [src + off]` /// `dst = (src + off) as u16`.
pub const LD_H_REG: u8 = BPF_LDX | BPF_MEM | BPF_H;
/// BPF opcode: `ldxw dst, [src + off]` /// `dst = (src + off) as u32`.
pub const LD_W_REG: u8 = BPF_LDX | BPF_MEM | BPF_W;
/// BPF opcode: `ldxdw dst, [src + off]` /// `dst = (src + off) as u64`.
pub const LD_DW_REG: u8 = BPF_LDX | BPF_MEM | BPF_DW;
/// BPF opcode: `stb [dst + off], imm` /// `(dst + offset) as u8 = imm`.
pub const ST_B_IMM: u8 = BPF_ST | BPF_MEM | BPF_B;
/// BPF opcode: `sth [dst + off], imm` /// `(dst + offset) as u16 = imm`.
pub const ST_H_IMM: u8 = BPF_ST | BPF_MEM | BPF_H;
/// BPF opcode: `stw [dst + off], imm` /// `(dst + offset) as u32 = imm`.
pub const ST_W_IMM: u8 = BPF_ST | BPF_MEM | BPF_W;
/// BPF opcode: `stdw [dst + off], imm` /// `(dst + offset) as u64 = imm`.
pub const ST_DW_IMM: u8 = BPF_ST | BPF_MEM | BPF_DW;
/// BPF opcode: `stxb [dst + off], src` /// `(dst + offset) as u8 = src`.
pub const ST_B_REG: u8 = BPF_STX | BPF_MEM | BPF_B;
/// BPF opcode: `stxh [dst + off], src` /// `(dst + offset) as u16 = src`.
pub const ST_H_REG: u8 = BPF_STX | BPF_MEM | BPF_H;
/// BPF opcode: `stxw [dst + off], src` /// `(dst + offset) as u32 = src`.
pub const ST_W_REG: u8 = BPF_STX | BPF_MEM | BPF_W;
/// BPF opcode: `stxdw [dst + off], src` /// `(dst + offset) as u64 = src`.
pub const ST_DW_REG: u8 = BPF_STX | BPF_MEM | BPF_DW;

/// BPF opcode: `ldxb dst, [src + off]` /// `dst = (src + off) as u8`.
pub const LD_1B_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_1B;
/// BPF opcode: `ldxh dst, [src + off]` /// `dst = (src + off) as u16`.
pub const LD_2B_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_2B;
/// BPF opcode: `ldxw dst, [src + off]` /// `dst = (src + off) as u32`.
pub const LD_4B_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_4B;
/// BPF opcode: `ldxdw dst, [src + off]` /// `dst = (src + off) as u64`.
pub const LD_8B_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_8B;
/// BPF opcode: `stb [dst + off], imm` /// `(dst + offset) as u8 = imm`.
pub const ST_1B_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_1B;
/// BPF opcode: `sth [dst + off], imm` /// `(dst + offset) as u16 = imm`.
pub const ST_2B_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_2B;
/// BPF opcode: `stw [dst + off], imm` /// `(dst + offset) as u32 = imm`.
pub const ST_4B_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_4B;
/// BPF opcode: `stdw [dst + off], imm` /// `(dst + offset) as u64 = imm`.
pub const ST_8B_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_8B;
/// BPF opcode: `stxb [dst + off], src` /// `(dst + offset) as u8 = src`.
pub const ST_1B_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_1B;
/// BPF opcode: `stxh [dst + off], src` /// `(dst + offset) as u16 = src`.
pub const ST_2B_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_2B;
/// BPF opcode: `stxw [dst + off], src` /// `(dst + offset) as u32 = src`.
pub const ST_4B_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_4B;
/// BPF opcode: `stxdw [dst + off], src` /// `(dst + offset) as u64 = src`.
pub const ST_8B_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_8B;

/// BPF opcode: `add32 dst, imm` /// `dst += imm`.
pub const ADD32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_ADD;
/// BPF opcode: `add32 dst, src` /// `dst += src`.
pub const ADD32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_ADD;
/// BPF opcode: `sub32 dst, imm` /// `dst = imm - dst`.
pub const SUB32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_SUB;
/// BPF opcode: `sub32 dst, src` /// `dst -= src`.
pub const SUB32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_SUB;
/// BPF opcode: `mul32 dst, imm` /// `dst *= imm`.
pub const MUL32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_MUL;
/// BPF opcode: `mul32 dst, src` /// `dst *= src`.
pub const MUL32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_MUL;
/// BPF opcode: `div32 dst, imm` /// `dst /= imm`.
pub const DIV32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_DIV;
/// BPF opcode: `div32 dst, src` /// `dst /= src`.
pub const DIV32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_DIV;
/// BPF opcode: `or32 dst, imm` /// `dst |= imm`.
pub const OR32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_OR;
/// BPF opcode: `or32 dst, src` /// `dst |= src`.
pub const OR32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_OR;
/// BPF opcode: `and32 dst, imm` /// `dst &= imm`.
pub const AND32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_AND;
/// BPF opcode: `and32 dst, src` /// `dst &= src`.
pub const AND32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_AND;
/// BPF opcode: `lsh32 dst, imm` /// `dst <<= imm`.
pub const LSH32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_LSH;
/// BPF opcode: `lsh32 dst, src` /// `dst <<= src`.
pub const LSH32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_LSH;
/// BPF opcode: `rsh32 dst, imm` /// `dst >>= imm`.
pub const RSH32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_RSH;
/// BPF opcode: `rsh32 dst, src` /// `dst >>= src`.
pub const RSH32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_RSH;
/// BPF opcode: `neg32 dst` /// `dst = -dst`.
pub const NEG32: u8 = BPF_ALU32_LOAD | BPF_NEG;
/// BPF opcode: `mod32 dst, imm` /// `dst %= imm`.
pub const MOD32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_MOD;
/// BPF opcode: `mod32 dst, src` /// `dst %= src`.
pub const MOD32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_MOD;
/// BPF opcode: `xor32 dst, imm` /// `dst ^= imm`.
pub const XOR32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_XOR;
/// BPF opcode: `xor32 dst, src` /// `dst ^= src`.
pub const XOR32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_XOR;
/// BPF opcode: `mov32 dst, imm` /// `dst = imm`.
pub const MOV32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_MOV;
/// BPF opcode: `mov32 dst, src` /// `dst = src`.
pub const MOV32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_MOV;
/// BPF opcode: `arsh32 dst, imm` /// `dst >>= imm (arithmetic)`.
pub const ARSH32_IMM: u8 = BPF_ALU32_LOAD | BPF_K | BPF_ARSH;
/// BPF opcode: `arsh32 dst, src` /// `dst >>= src (arithmetic)`.
pub const ARSH32_REG: u8 = BPF_ALU32_LOAD | BPF_X | BPF_ARSH;

/// BPF opcode: `lmul32 dst, imm` /// `dst = (dst * imm) as u32`.
pub const LMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_LMUL;
/// BPF opcode: `lmul32 dst, src` /// `dst = (dst * src) as u32`.
pub const LMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_LMUL;
/// BPF opcode: `uhmul32 dst, imm` /// `dst = (dst * imm) as u64`.
// pub const UHMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_UHMUL;
/// BPF opcode: `uhmul32 dst, src` /// `dst = (dst * src) as u64`.
// pub const UHMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_UHMUL;
/// BPF opcode: `udiv32 dst, imm` /// `dst /= imm`.
pub const UDIV32_IMM: u8 = BPF_PQR | BPF_K | BPF_UDIV;
/// BPF opcode: `udiv32 dst, src` /// `dst /= src`.
pub const UDIV32_REG: u8 = BPF_PQR | BPF_X | BPF_UDIV;
/// BPF opcode: `urem32 dst, imm` /// `dst %= imm`.
pub const UREM32_IMM: u8 = BPF_PQR | BPF_K | BPF_UREM;
/// BPF opcode: `urem32 dst, src` /// `dst %= src`.
pub const UREM32_REG: u8 = BPF_PQR | BPF_X | BPF_UREM;
/// BPF opcode: `shmul32 dst, imm` /// `dst = (dst * imm) as i64`.
// pub const SHMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_SHMUL;
/// BPF opcode: `shmul32 dst, src` /// `dst = (dst * src) as i64`.
// pub const SHMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_SHMUL;
/// BPF opcode: `sdiv32 dst, imm` /// `dst /= imm`.
pub const SDIV32_IMM: u8 = BPF_PQR | BPF_K | BPF_SDIV;
/// BPF opcode: `sdiv32 dst, src` /// `dst /= src`.
pub const SDIV32_REG: u8 = BPF_PQR | BPF_X | BPF_SDIV;
/// BPF opcode: `srem32 dst, imm` /// `dst %= imm`.
pub const SREM32_IMM: u8 = BPF_PQR | BPF_K | BPF_SREM;
/// BPF opcode: `srem32 dst, src` /// `dst %= src`.
pub const SREM32_REG: u8 = BPF_PQR | BPF_X | BPF_SREM;

/// BPF opcode: `le dst` /// `dst = htole<imm>(dst), with imm in {16, 32, 64}`.
pub const LE: u8 = BPF_ALU32_LOAD | BPF_K | BPF_END;
/// BPF opcode: `be dst` /// `dst = htobe<imm>(dst), with imm in {16, 32, 64}`.
pub const BE: u8 = BPF_ALU32_LOAD | BPF_X | BPF_END;

/// BPF opcode: `add64 dst, imm` /// `dst += imm`.
pub const ADD64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_ADD;
/// BPF opcode: `add64 dst, src` /// `dst += src`.
pub const ADD64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_ADD;
/// BPF opcode: `sub64 dst, imm` /// `dst -= imm`.
pub const SUB64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_SUB;
/// BPF opcode: `sub64 dst, src` /// `dst -= src`.
pub const SUB64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_SUB;
/// BPF opcode: `mul64 dst, imm` /// `dst *= imm`.
pub const MUL64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_MUL;
/// BPF opcode: `mul64 dst, src` /// `dst *= src`.
pub const MUL64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_MUL;
/// BPF opcode: `div64 dst, imm` /// `dst /= imm`.
pub const DIV64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_DIV;
/// BPF opcode: `div64 dst, src` /// `dst /= src`.
pub const DIV64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_DIV;
/// BPF opcode: `or64 dst, imm` /// `dst |= imm`.
pub const OR64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_OR;
/// BPF opcode: `or64 dst, src` /// `dst |= src`.
pub const OR64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_OR;
/// BPF opcode: `and64 dst, imm` /// `dst &= imm`.
pub const AND64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_AND;
/// BPF opcode: `and64 dst, src` /// `dst &= src`.
pub const AND64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_AND;
/// BPF opcode: `lsh64 dst, imm` /// `dst <<= imm`.
pub const LSH64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_LSH;
/// BPF opcode: `lsh64 dst, src` /// `dst <<= src`.
pub const LSH64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_LSH;
/// BPF opcode: `rsh64 dst, imm` /// `dst >>= imm`.
pub const RSH64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_RSH;
/// BPF opcode: `rsh64 dst, src` /// `dst >>= src`.
pub const RSH64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_RSH;
/// BPF opcode: `neg64 dst` /// `dst = -dst`.
pub const NEG64: u8 = BPF_ALU64_STORE | BPF_NEG;
/// BPF opcode: `mod64 dst, imm` /// `dst %= imm`.
pub const MOD64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_MOD;
/// BPF opcode: `mod64 dst, src` /// `dst %= src`.
pub const MOD64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_MOD;
/// BPF opcode: `xor64 dst, imm` /// `dst ^= imm`.
pub const XOR64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_XOR;
/// BPF opcode: `xor64 dst, src` /// `dst ^= src`.
pub const XOR64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_XOR;
/// BPF opcode: `mov64 dst, imm` /// `dst = imm`.
pub const MOV64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_MOV;
/// BPF opcode: `mov64 dst, src` /// `dst = src`.
pub const MOV64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_MOV;
/// BPF opcode: `arsh64 dst, imm` /// `dst >>= imm (arithmetic)`.
pub const ARSH64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_ARSH;
/// BPF opcode: `arsh64 dst, src` /// `dst >>= src (arithmetic)`.
pub const ARSH64_REG: u8 = BPF_ALU64_STORE | BPF_X | BPF_ARSH;
/// BPF opcode: `hor64 dst, imm` /// `dst |= imm << 32`.
pub const HOR64_IMM: u8 = BPF_ALU64_STORE | BPF_K | BPF_HOR;

/// BPF opcode: `lmul64 dst, imm` /// `dst = (dst * imm) as u64`.
pub const LMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_LMUL;
/// BPF opcode: `lmul64 dst, src` /// `dst = (dst * src) as u64`.
pub const LMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_LMUL;
/// BPF opcode: `uhmul64 dst, imm` /// `dst = (dst * imm) >> 64`.
pub const UHMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UHMUL;
/// BPF opcode: `uhmul64 dst, src` /// `dst = (dst * src) >> 64`.
pub const UHMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UHMUL;
/// BPF opcode: `udiv64 dst, imm` /// `dst /= imm`.
pub const UDIV64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UDIV;
/// BPF opcode: `udiv64 dst, src` /// `dst /= src`.
pub const UDIV64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UDIV;
/// BPF opcode: `urem64 dst, imm` /// `dst %= imm`.
pub const UREM64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UREM;
/// BPF opcode: `urem64 dst, src` /// `dst %= src`.
pub const UREM64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UREM;
/// BPF opcode: `shmul64 dst, imm` /// `dst = (dst * imm) >> 64`.
pub const SHMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SHMUL;
/// BPF opcode: `shmul64 dst, src` /// `dst = (dst * src) >> 64`.
pub const SHMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SHMUL;
/// BPF opcode: `sdiv64 dst, imm` /// `dst /= imm`.
pub const SDIV64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SDIV;
/// BPF opcode: `sdiv64 dst, src` /// `dst /= src`.
pub const SDIV64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SDIV;
/// BPF opcode: `srem64 dst, imm` /// `dst %= imm`.
pub const SREM64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SREM;
/// BPF opcode: `srem64 dst, src` /// `dst %= src`.
pub const SREM64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SREM;

/// BPF opcode: `ja +off` /// `PC += off`.
pub const JA: u8 = BPF_JMP | BPF_JA;
/// BPF opcode: `jeq dst, imm, +off` /// `PC += off if dst == imm`.
pub const JEQ_IMM: u8 = BPF_JMP | BPF_K | BPF_JEQ;
/// BPF opcode: `jeq dst, src, +off` /// `PC += off if dst == src`.
pub const JEQ_REG: u8 = BPF_JMP | BPF_X | BPF_JEQ;
/// BPF opcode: `jgt dst, imm, +off` /// `PC += off if dst > imm`.
pub const JGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JGT;
/// BPF opcode: `jgt dst, src, +off` /// `PC += off if dst > src`.
pub const JGT_REG: u8 = BPF_JMP | BPF_X | BPF_JGT;
/// BPF opcode: `jge dst, imm, +off` /// `PC += off if dst >= imm`.
pub const JGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JGE;
/// BPF opcode: `jge dst, src, +off` /// `PC += off if dst >= src`.
pub const JGE_REG: u8 = BPF_JMP | BPF_X | BPF_JGE;
/// BPF opcode: `jlt dst, imm, +off` /// `PC += off if dst < imm`.
pub const JLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JLT;
/// BPF opcode: `jlt dst, src, +off` /// `PC += off if dst < src`.
pub const JLT_REG: u8 = BPF_JMP | BPF_X | BPF_JLT;
/// BPF opcode: `jle dst, imm, +off` /// `PC += off if dst <= imm`.
pub const JLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JLE;
/// BPF opcode: `jle dst, src, +off` /// `PC += off if dst <= src`.
pub const JLE_REG: u8 = BPF_JMP | BPF_X | BPF_JLE;
/// BPF opcode: `jset dst, imm, +off` /// `PC += off if dst & imm`.
pub const JSET_IMM: u8 = BPF_JMP | BPF_K | BPF_JSET;
/// BPF opcode: `jset dst, src, +off` /// `PC += off if dst & src`.
pub const JSET_REG: u8 = BPF_JMP | BPF_X | BPF_JSET;
/// BPF opcode: `jne dst, imm, +off` /// `PC += off if dst != imm`.
pub const JNE_IMM: u8 = BPF_JMP | BPF_K | BPF_JNE;
/// BPF opcode: `jne dst, src, +off` /// `PC += off if dst != src`.
pub const JNE_REG: u8 = BPF_JMP | BPF_X | BPF_JNE;
/// BPF opcode: `jsgt dst, imm, +off` /// `PC += off if dst > imm (signed)`.
pub const JSGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGT;
/// BPF opcode: `jsgt dst, src, +off` /// `PC += off if dst > src (signed)`.
pub const JSGT_REG: u8 = BPF_JMP | BPF_X | BPF_JSGT;
/// BPF opcode: `jsge dst, imm, +off` /// `PC += off if dst >= imm (signed)`.
pub const JSGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGE;
/// BPF opcode: `jsge dst, src, +off` /// `PC += off if dst >= src (signed)`.
pub const JSGE_REG: u8 = BPF_JMP | BPF_X | BPF_JSGE;
/// BPF opcode: `jslt dst, imm, +off` /// `PC += off if dst < imm (signed)`.
pub const JSLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLT;
/// BPF opcode: `jslt dst, src, +off` /// `PC += off if dst < src (signed)`.
pub const JSLT_REG: u8 = BPF_JMP | BPF_X | BPF_JSLT;
/// BPF opcode: `jsle dst, imm, +off` /// `PC += off if dst <= imm (signed)`.
pub const JSLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLE;
/// BPF opcode: `jsle dst, src, +off` /// `PC += off if dst <= src (signed)`.
pub const JSLE_REG: u8 = BPF_JMP | BPF_X | BPF_JSLE;

/// BPF opcode: `call imm` /// syscall function call to syscall with key `imm`.
pub const CALL_IMM: u8 = BPF_JMP | BPF_CALL;
/// BPF opcode: `callx` /// indirect call; the call target is taken from a register.
pub const CALL_REG: u8 = BPF_JMP | BPF_X | BPF_CALL;
/// BPF opcode: `exit` /// `return r0`. /// Valid only until SBPFv3
pub const EXIT: u8 = BPF_JMP | BPF_EXIT;
/// BPF opcode: `return` /// `return r0`. /// Valid only since SBPFv3
pub const RETURN: u8 = BPF_JMP | BPF_X | BPF_EXIT;
/// BPF opcode: `syscall` /// `syscall imm`. /// Valid only since SBPFv3
pub const SYSCALL: u8 = BPF_JMP | BPF_SYSCALL;

// Used in JIT
/// Mask to extract the operation class from an operation code.
pub const BPF_CLS_MASK: u8 = 0x07;
/// Mask to extract the arithmetic operation code from an instruction operation code.
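///
/// For illustration, masking an opcode constant from this module recovers its parts:
///
/// ```
/// use solana_sbpf::ebpf;
///
/// assert_eq!(ebpf::ADD64_IMM & ebpf::BPF_CLS_MASK, ebpf::BPF_ALU64_STORE);
/// assert_eq!(ebpf::ADD64_IMM & ebpf::BPF_ALU_OP_MASK, ebpf::BPF_ADD);
/// ```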
pub const BPF_ALU_OP_MASK: u8 = 0xf0;

/// An eBPF instruction.
///
/// See <https://www.kernel.org/doc/Documentation/networking/filter.txt> for the Linux kernel
/// documentation about eBPF, or <https://github.com/iovisor/bpf-docs/blob/master/eBPF.md> for a
/// more concise version.
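///
/// A short round-trip sketch using only items from this module:
///
/// ```
/// use solana_sbpf::ebpf;
///
/// // mov64 r1, 42 encoded as a single 8-byte slot
/// let prog = &[0xb7, 0x01, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00];
/// let insn = ebpf::get_insn(prog, 0);
/// assert_eq!(insn.opc, ebpf::MOV64_IMM);
/// assert_eq!(insn.to_array(), *prog);
/// ```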
#[derive(PartialEq, Eq, Clone, Default)]
pub struct Insn {
    /// Instruction pointer.
    pub ptr: usize,
    /// Operation code.
    pub opc: u8,
    /// Destination register operand.
    pub dst: u8,
    /// Source register operand.
    pub src: u8,
    /// Offset operand.
    pub off: i16,
    /// Immediate value operand.
    pub imm: i64,
}

impl fmt::Debug for Insn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Insn {{ ptr: 0x{:08x?}, opc: 0x{:02x?}, dst: {}, src: {}, off: 0x{:04x?}, imm: 0x{:08x?} }}",
            self.ptr, self.opc, self.dst, self.src, self.off, self.imm
        )
    }
}

impl Insn {
    /// Turn an `Insn` back into an array of bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use solana_sbpf::ebpf;
    ///
    /// let prog: &[u8] = &[
    ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
    ///     ];
    /// let insn = ebpf::Insn {
    ///     ptr: 0x00,
    ///     opc: 0xb7,
    ///     dst: 2,
    ///     src: 1,
    ///     off: 0x3456,
    ///     imm: 0x789abcde
    /// };
    /// assert_eq!(insn.to_array(), prog);
    /// ```
    pub fn to_array(&self) -> [u8; INSN_SIZE] {
        [
            self.opc,
            self.src.wrapping_shl(4) | self.dst,
            (self.off & 0xff) as u8,
            self.off.wrapping_shr(8) as u8,
            (self.imm & 0xff) as u8,
            (self.imm & 0xff_00).wrapping_shr(8) as u8,
            (self.imm as u32 & 0xff_00_00).wrapping_shr(16) as u8,
            (self.imm as u32 & 0xff_00_00_00).wrapping_shr(24) as u8,
        ]
    }

    /// Turn an `Insn` into a vector of bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use solana_sbpf::ebpf;
    ///
    /// let prog: Vec<u8> = vec![
    ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
    ///     ];
    /// let insn = ebpf::Insn {
    ///     ptr: 0x00,
    ///     opc: 0xb7,
    ///     dst: 2,
    ///     src: 1,
    ///     off: 0x3456,
    ///     imm: 0x789abcde
    /// };
    /// assert_eq!(insn.to_vec(), prog);
    /// ```
    pub fn to_vec(&self) -> Vec<u8> {
        self.to_array().to_vec()
    }
}

/// Get the instruction at `pc` of an eBPF program. `pc` is the index (number) of the
/// instruction (not a byte offset). The first instruction has index 0.
///
/// # Panics
///
/// Panics if it is not possible to get the instruction (if `pc` is too high, or if the last
/// instruction is incomplete).
///
/// # Examples
///
/// ```
/// use solana_sbpf::ebpf;
///
/// let prog = &[
///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
///     ];
/// let insn = ebpf::get_insn(prog, 1);
/// assert_eq!(insn.opc, 0x95);
/// ```
///
/// The example below will panic, since the last instruction is not complete and cannot be loaded.
///
/// ```rust,should_panic
/// use solana_sbpf::ebpf;
///
/// let prog = &[
///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00              // two bytes missing
///     ];
/// let insn = ebpf::get_insn(prog, 1);
/// ```
pub fn get_insn(prog: &[u8], pc: usize) -> Insn {
    // This guard should not be needed in most cases, since the verifier already checks the program
    // size, and indexes should be fine in the interpreter/JIT. But this function is publicly
    // available and users can call it with any `pc`, so we have to check anyway.
    debug_assert!(
        (pc + 1) * INSN_SIZE <= prog.len(),
        "cannot reach instruction at index {:?} in program containing {:?} bytes",
        pc,
        prog.len()
    );
    get_insn_unchecked(prog, pc)
}
/// Same as `get_insn` except not checked
pub fn get_insn_unchecked(prog: &[u8], pc: usize) -> Insn {
    Insn {
        ptr: pc,
        opc: prog[INSN_SIZE * pc],
        dst: prog[INSN_SIZE * pc + 1] & 0x0f,
        src: (prog[INSN_SIZE * pc + 1] & 0xf0) >> 4,
        off: LittleEndian::read_i16(&prog[(INSN_SIZE * pc + 2)..]),
        imm: LittleEndian::read_i32(&prog[(INSN_SIZE * pc + 4)..]) as i64,
    }
}

/// Merge the two halves of an `LD_DW_IMM` instruction
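///
/// A short example with illustrative values; the second slot of the `lddw` pair carries the
/// upper 32 bits of the immediate:
///
/// ```
/// use solana_sbpf::ebpf;
///
/// // lddw r1, 0x2222222211111111 occupies two 8-byte instruction slots
/// let prog = &[
///     0x18, 0x01, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11,
///     0x00, 0x00, 0x00, 0x00, 0x22, 0x22, 0x22, 0x22,
/// ];
/// let mut insn = ebpf::get_insn(prog, 0);
/// ebpf::augment_lddw_unchecked(prog, &mut insn);
/// assert_eq!(insn.imm as u64, 0x2222222211111111);
/// ```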
pub fn augment_lddw_unchecked(prog: &[u8], insn: &mut Insn) {
    let more_significant_half = LittleEndian::read_i32(&prog[((insn.ptr + 1) * INSN_SIZE + 4)..]);
    insn.imm = ((insn.imm as u64 & 0xffffffff) | ((more_significant_half as u64) << 32)) as i64;
}

/// Hash a symbol name
///
/// This function is used by both the relocator and the VM to translate symbol names
/// into a 32-bit id that identifies a syscall function. The 32-bit id is placed in the
/// eBPF `call` instruction's imm field.
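///
/// # Examples
///
/// A minimal sketch; the concrete value depends on the Murmur3 implementation in the `hash32`
/// crate, so only determinism is asserted here:
///
/// ```
/// use solana_sbpf::ebpf;
///
/// let key = ebpf::hash_symbol_name(b"entrypoint");
/// assert_eq!(key, ebpf::hash_symbol_name(b"entrypoint"));
/// ```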
pub fn hash_symbol_name(name: &[u8]) -> u32 {
    let mut hasher = Murmur3Hasher::default();
    Hash::hash_slice(name, &mut hasher);
    hasher.finish()
}