// cairo_lang_sierra/extensions/modules/utils.rs

use std::ops::Shl;
use cairo_lang_utils::casts::IntoOrPanic;
use itertools::{chain, repeat_n};
use num_bigint::BigInt;
use num_traits::One;
use starknet_types_core::felt::CAIRO_PRIME_BIGINT;
use super::bounded_int::BoundedIntType;
use super::bytes31::Bytes31Type;
use super::int::signed::{Sint8Type, Sint16Type, Sint32Type, Sint64Type};
use super::int::signed128::Sint128Type;
use super::int::unsigned::{Uint8Type, Uint16Type, Uint32Type, Uint64Type};
use super::int::unsigned128::Uint128Type;
use super::structure::StructType;
use crate::extensions::felt252::Felt252Type;
use crate::extensions::lib_func::{
LibfuncSignature, OutputVarInfo, ParamSignature, SierraApChange, SignatureSpecializationContext,
};
use crate::extensions::types::TypeInfo;
use crate::extensions::{NamedType, OutputVarReferenceInfo, SpecializationError};
use crate::ids::{ConcreteTypeId, UserTypeId};
use crate::program::GenericArg;

/// Returns the libfunc signature of a cast that reinterprets a value of `from_ty` as `to_ty`
/// without changing its representation; the output refers to the same variable as the input.
pub fn reinterpret_cast_signature(
from_ty: ConcreteTypeId,
to_ty: ConcreteTypeId,
) -> LibfuncSignature {
LibfuncSignature::new_non_branch_ex(
vec![ParamSignature::new(from_ty).with_allow_all()],
vec![OutputVarInfo {
ty: to_ty,
ref_info: OutputVarReferenceInfo::SameAsParam { param_idx: 0 },
}],
SierraApChange::Known { new_vars_only: true },
)
}

/// A range of integers, stored as the half-open interval `[lower, upper)`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Range {
    /// The inclusive lower bound of the range.
    pub lower: BigInt,
    /// The exclusive upper bound of the range.
    pub upper: BigInt,
}

impl Range {
    /// Creates a closed range, i.e. `[lower, upper]` (both bounds inclusive).
    pub fn closed(lower: impl Into<BigInt>, upper: impl Into<BigInt>) -> Self {
        Self::half_open(lower, upper.into() + 1)
    }

    /// Creates a half-open range `[lower, upper)`. Panics if the range would be empty
    /// (`lower >= upper`).
    pub fn half_open(lower: impl Into<BigInt>, upper: impl Into<BigInt>) -> Self {
let result = Self { lower: lower.into(), upper: upper.into() };
assert!(result.lower < result.upper, "Invalid range: {:?}", result);
result
}

    /// Returns the value range of the integer-like type described by `ty_info` (felt252, the
    /// unsigned/signed integer types, bytes31, or `BoundedInt`), or
    /// `SpecializationError::UnsupportedGenericArg` for any other type.
    pub fn from_type_info(ty_info: &TypeInfo) -> Result<Self, SpecializationError> {
Ok(match (&ty_info.long_id.generic_id, &ty_info.long_id.generic_args[..]) {
(id, []) if *id == Felt252Type::id() => {
let prime: BigInt = CAIRO_PRIME_BIGINT.clone();
Self::half_open(1 - &prime, prime)
}
(id, []) if *id == Uint8Type::id() => Self::closed(u8::MIN, u8::MAX),
(id, []) if *id == Uint16Type::id() => Self::closed(u16::MIN, u16::MAX),
(id, []) if *id == Uint32Type::id() => Self::closed(u32::MIN, u32::MAX),
(id, []) if *id == Uint64Type::id() => Self::closed(u64::MIN, u64::MAX),
(id, []) if *id == Uint128Type::id() => Self::closed(u128::MIN, u128::MAX),
(id, []) if *id == Sint8Type::id() => Self::closed(i8::MIN, i8::MAX),
(id, []) if *id == Sint16Type::id() => Self::closed(i16::MIN, i16::MAX),
(id, []) if *id == Sint32Type::id() => Self::closed(i32::MIN, i32::MAX),
(id, []) if *id == Sint64Type::id() => Self::closed(i64::MIN, i64::MAX),
(id, []) if *id == Sint128Type::id() => Self::closed(i128::MIN, i128::MAX),
(id, []) if *id == Bytes31Type::id() => Self::half_open(0, BigInt::one().shl(248)),
(id, [GenericArg::Value(min), GenericArg::Value(max)])
if *id == BoundedIntType::id() =>
{
Self::closed(min.clone(), max.clone())
}
_ => return Err(SpecializationError::UnsupportedGenericArg),
})
}

    /// Returns the value range of the given concrete type, looked up through the
    /// specialization context.
    pub fn from_type(
context: &dyn SignatureSpecializationContext,
ty: ConcreteTypeId,
) -> Result<Self, SpecializationError> {
Self::from_type_info(&context.get_type_info(ty)?)
}

    /// Returns true if the range contains at most 2**128 values.
    pub fn is_small_range(&self) -> bool {
self.size() <= BigInt::one().shl(128)
}

    /// Returns true if the range is at least as large as the full felt252 range, i.e. its size
    /// is at least the Cairo prime.
    pub fn is_full_felt252_range(&self) -> bool {
self.size() >= *CAIRO_PRIME_BIGINT
}

    /// Returns the number of values in the range (`upper - lower`).
    pub fn size(&self) -> BigInt {
&self.upper - &self.lower
}

    /// Returns the intersection of `self` and `other`, or `None` if the intersection is empty.
    pub fn intersection(&self, other: &Self) -> Option<Self> {
let lower = std::cmp::max(&self.lower, &other.lower).clone();
let upper = std::cmp::min(&self.upper, &other.upper).clone();
if lower < upper { Some(Self::half_open(lower, upper)) } else { None }
}
}

/// Returns the concrete type id of a fixed-size array of `ty`: a struct with the user type
/// `Tuple` containing `size` copies of `ty`.
pub fn fixed_size_array_ty(
context: &dyn SignatureSpecializationContext,
ty: ConcreteTypeId,
size: i16,
) -> Result<ConcreteTypeId, SpecializationError> {
let args: Vec<GenericArg> = chain!(
[GenericArg::UserType(UserTypeId::from_string("Tuple"))],
repeat_n(GenericArg::Type(ty), size.into_or_panic())
)
.collect();
context.get_concrete_type(StructType::id(), &args)
}
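
// Illustrative usage sketch (not part of the upstream module): a small test module that
// exercises the `Range` helpers above, documenting the closed vs. half-open semantics and the
// behavior of `intersection`. The module and test names here are ours, added for illustration.
#[cfg(test)]
mod range_usage_sketch {
    use super::*;

    #[test]
    fn closed_and_half_open_bounds() {
        // `closed(0, 255)` covers [0, 255], which is stored as the half-open range [0, 256).
        let r = Range::closed(0, 255);
        assert_eq!(r.lower, BigInt::from(0));
        assert_eq!(r.upper, BigInt::from(256));
        assert_eq!(r.size(), BigInt::from(256));
        // 256 values are well below the 2**128 threshold, and far from the full felt252 range.
        assert!(r.is_small_range());
        assert!(!r.is_full_felt252_range());
    }

    #[test]
    fn intersection_of_overlapping_and_disjoint_ranges() {
        // [0, 10) and [5, 20) overlap on [5, 10).
        let a = Range::half_open(0, 10);
        let b = Range::half_open(5, 20);
        assert_eq!(a.intersection(&b), Some(Range::half_open(5, 10)));
        // [0, 5) and [5, 10) merely touch, so their intersection is empty.
        assert_eq!(Range::half_open(0, 5).intersection(&Range::half_open(5, 10)), None);
    }
}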