#[allow(clippy::all)]
use crate::{C_scalar, C_tensor};
use libc::c_int;
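// Raw FFI declarations for the `atg_*` C entry points wrapping ATen/libtorch
// operators. The conventions below are inferred from the signatures in this
// file (and match the generated-bindings style used by torch-sys/tch):
//
// - `out__` is a caller-allocated array of `*mut C_tensor` slots; the callee
//   fills one slot per returned tensor (e.g. two slots for `atg__aminmax`).
// - `<name>_data` / `<name>_len` pairs pass a slice (of i64 values or of
//   `*mut C_tensor` pointers) as a raw pointer plus element count.
// - `<name>_v` / `<name>_null` pairs encode an optional scalar: when
//   `<name>_null` is non-zero, the value in `<name>_v` is ignored.
// - `<name>_ptr` / `<name>_len` pairs pass a length-delimited (not
//   NUL-terminated) UTF-8 string.
// - Boolean flags are passed as `c_int` (0 = false, non-zero = true), and
//   `options_kind` / `options_device` encode a dtype and a device as c_int.
// - Trailing underscores avoid collisions with Rust keywords such as `self`.
//
// A minimal sketch of the calling pattern, assuming a valid `*mut C_tensor`
// named `t` obtained elsewhere (illustrative only; real callers must also
// check the library's error state and free the returned tensors):
//
//     let mut out: [*mut C_tensor; 2] = [std::ptr::null_mut(); 2];
//     unsafe { atg__aminmax(out.as_mut_ptr(), t) };
//     let (min_t, max_t) = (out[0], out[1]);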
extern "C" {
pub fn atg___and__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___and__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___iand__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___iand__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___ilshift__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___ilshift__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___ior__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___ior__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___irshift__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___irshift__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___ixor__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___ixor__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___lshift__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___lshift__scalar_out_(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg___lshift__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___lshift__tensor_out_(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___or__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___or__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___rshift__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___rshift__scalar_out_(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg___rshift__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___rshift__tensor_out_(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg___xor__(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg___xor__tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__adaptive_avg_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg__adaptive_avg_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__adaptive_avg_pool2d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__adaptive_avg_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg__adaptive_avg_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg__adaptive_avg_pool3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__adaptive_avg_pool3d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__adaptive_avg_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg__add_batch_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
batch_dim_: i64,
level_: i64,
);
pub fn atg__add_relu(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg__add_relu_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg__add_relu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__add_relu_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg__add_relu_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg__add_relu_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg__addmm_activation(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
use_gelu_: c_int,
);
pub fn atg__addmm_activation_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
use_gelu_: c_int,
);
pub fn atg__aminmax(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__aminmax_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg__aminmax_dim_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg__aminmax_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__amp_update_scale(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
growth_tracker_: *mut C_tensor,
found_inf_: *mut C_tensor,
scale_growth_factor_: f64,
scale_backoff_factor_: f64,
growth_interval_: i64,
);
pub fn atg__amp_update_scale_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
growth_tracker_: *mut C_tensor,
found_inf_: *mut C_tensor,
scale_growth_factor_: f64,
scale_backoff_factor_: f64,
growth_interval_: i64,
);
pub fn atg__amp_update_scale_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
growth_tracker_: *mut C_tensor,
found_inf_: *mut C_tensor,
scale_growth_factor_: f64,
scale_backoff_factor_: f64,
growth_interval_: i64,
);
pub fn atg__assert_scalar(
self_scalar_: *mut C_scalar,
assert_msg_ptr: *const u8,
assert_msg_len: c_int,
);
pub fn atg__assert_tensor_metadata(
a_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dtype_: c_int,
);
pub fn atg__autocast_to_full_precision(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
cuda_enabled_: c_int,
cpu_enabled_: c_int,
);
pub fn atg__autocast_to_reduced_precision(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
cuda_enabled_: c_int,
cpu_enabled_: c_int,
cuda_dtype_: c_int,
cpu_dtype_: c_int,
);
pub fn atg__batch_norm_no_update(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__batch_norm_no_update_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__batch_norm_with_update(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__batch_norm_with_update_functional(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__batch_norm_with_update_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_invstd_: *mut C_tensor,
reserve_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
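    // The `atg__cast_*` family below mirrors ATen's deprecated `_cast_Byte`
    // .. `_cast_Short` operators; `non_blocking_` is a c_int boolean.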
pub fn atg__cast_byte(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_char(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_double(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_float(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_half(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_int(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_long(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cast_short(out__: *mut *mut C_tensor, self_: *mut C_tensor, non_blocking_: c_int);
pub fn atg__cdist_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
p_: f64,
cdist_: *mut C_tensor,
);
pub fn atg__cdist_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
p_: f64,
cdist_: *mut C_tensor,
);
pub fn atg__cholesky_solve_helper(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
A_: *mut C_tensor,
upper_: c_int,
);
pub fn atg__cholesky_solve_helper_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
A_: *mut C_tensor,
upper_: c_int,
);
pub fn atg__chunk_cat(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
num_chunks_: i64,
);
pub fn atg__chunk_cat_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
num_chunks_: i64,
);
pub fn atg__coalesce(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__coalesce_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg__coalesced(out__: *mut *mut C_tensor, self_: *mut C_tensor, coalesced_: c_int);
pub fn atg__coalesced_(out__: *mut *mut C_tensor, self_: *mut C_tensor, coalesced_: c_int);
pub fn atg__coalesced_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
coalesced_: c_int,
);
pub fn atg__compute_linear_combination(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
coefficients_: *mut C_tensor,
);
pub fn atg__compute_linear_combination_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
coefficients_: *mut C_tensor,
);
pub fn atg__conj(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__conj_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__conj_copy_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg__conj_physical(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__conj_physical_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__conv_depthwise2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg__conv_depthwise2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg__convert_indices_from_coo_to_csr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_: i64,
out_int32_: c_int,
);
pub fn atg__convert_indices_from_coo_to_csr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_: i64,
out_int32_: c_int,
);
pub fn atg__convert_indices_from_csr_to_coo(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
out_int32_: c_int,
transpose_: c_int,
);
pub fn atg__convert_indices_from_csr_to_coo_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
out_int32_: c_int,
transpose_: c_int,
);
pub fn atg__convert_weight_to_int4pack(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
innerKTiles_: i64,
);
pub fn atg__convolution(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
cudnn_enabled_: c_int,
allow_tf32_: c_int,
);
pub fn atg__convolution_deprecated(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
cudnn_enabled_: c_int,
);
pub fn atg__convolution_mode(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_ptr: *const u8,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg__convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
cudnn_enabled_: c_int,
allow_tf32_: c_int,
);
pub fn atg__copy_from(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dst_: *mut C_tensor,
non_blocking_: c_int,
);
pub fn atg__copy_from_and_resize(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dst_: *mut C_tensor,
);
pub fn atg__copy_from_and_resize_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dst_: *mut C_tensor,
);
pub fn atg__copy_from_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dst_: *mut C_tensor,
non_blocking_: c_int,
);
pub fn atg__cslt_compress(out__: *mut *mut C_tensor, input_: *mut C_tensor);
pub fn atg__cslt_sparse_mm(
out__: *mut *mut C_tensor,
compressed_A_: *mut C_tensor,
dense_B_: *mut C_tensor,
bias_: *mut C_tensor,
alpha_: *mut C_tensor,
out_dtype_: c_int,
transpose_result_: c_int,
alg_id_: i64,
);
pub fn atg__cslt_sparse_mm_search(
compressed_A_: *mut C_tensor,
dense_B_: *mut C_tensor,
bias_: *mut C_tensor,
alpha_: *mut C_tensor,
out_dtype_: c_int,
transpose_result_: c_int,
) -> i64;
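    // CTC loss kernels. The `_tensor` variants take input/target lengths as
    // tensors; the others take them as i64 slices via `_data`/`_len` pairs.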
pub fn atg__ctc_loss(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
neg_log_likelihood_: *mut C_tensor,
log_alpha_: *mut C_tensor,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
neg_log_likelihood_: *mut C_tensor,
log_alpha_: *mut C_tensor,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_backward_tensor(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
neg_log_likelihood_: *mut C_tensor,
log_alpha_: *mut C_tensor,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_tensor(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__ctc_loss_tensor_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
blank_: i64,
zero_infinity_: c_int,
);
pub fn atg__cudnn_ctc_loss(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
deterministic_: c_int,
zero_infinity_: c_int,
);
pub fn atg__cudnn_ctc_loss_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
deterministic_: c_int,
zero_infinity_: c_int,
);
pub fn atg__cudnn_ctc_loss_tensor(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
blank_: i64,
deterministic_: c_int,
zero_infinity_: c_int,
);
pub fn atg__cudnn_init_dropout_state(
out__: *mut *mut C_tensor,
dropout_: f64,
train_: c_int,
dropout_seed_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__cudnn_init_dropout_state_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
dropout_: f64,
train_: c_int,
dropout_seed_: i64,
);
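    // cuDNN RNN bindings. Flat weight lists are passed as `weight_data`/
    // `weight_len`; `mode_` selects the RNN kind and `batch_sizes_` supports
    // packed sequences (inferred from the corresponding ATen signatures).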
pub fn atg__cudnn_rnn(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_data: *const *mut C_tensor,
weight_len: c_int,
weight_stride0_: i64,
weight_buf_: *mut C_tensor,
hx_: *mut C_tensor,
cx_: *mut C_tensor,
mode_: i64,
hidden_size_: i64,
proj_size_: i64,
num_layers_: i64,
batch_first_: c_int,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
dropout_state_: *mut C_tensor,
);
pub fn atg__cudnn_rnn_flatten_weight(
out__: *mut *mut C_tensor,
weight_arr_data: *const *mut C_tensor,
weight_arr_len: c_int,
weight_stride0_: i64,
input_size_: i64,
mode_: i64,
hidden_size_: i64,
proj_size_: i64,
num_layers_: i64,
batch_first_: c_int,
bidirectional_: c_int,
);
pub fn atg__cudnn_rnn_flatten_weight_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
weight_arr_data: *const *mut C_tensor,
weight_arr_len: c_int,
weight_stride0_: i64,
input_size_: i64,
mode_: i64,
hidden_size_: i64,
proj_size_: i64,
num_layers_: i64,
batch_first_: c_int,
bidirectional_: c_int,
);
pub fn atg__cudnn_rnn_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
out4_: *mut C_tensor,
input_: *mut C_tensor,
weight_data: *const *mut C_tensor,
weight_len: c_int,
weight_stride0_: i64,
weight_buf_: *mut C_tensor,
hx_: *mut C_tensor,
cx_: *mut C_tensor,
mode_: i64,
hidden_size_: i64,
proj_size_: i64,
num_layers_: i64,
batch_first_: c_int,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
dropout_state_: *mut C_tensor,
);
pub fn atg__debug_has_internal_overlap(self_: *mut C_tensor) -> i64;
pub fn atg__dim_arange(out__: *mut *mut C_tensor, like_: *mut C_tensor, dim_: i64);
pub fn atg__dimi(self_: *mut C_tensor) -> i64;
pub fn atg__dimv(self_: *mut C_tensor) -> i64;
pub fn atg__dirichlet_grad(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
alpha_: *mut C_tensor,
total_: *mut C_tensor,
);
pub fn atg__dirichlet_grad_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
alpha_: *mut C_tensor,
total_: *mut C_tensor,
);
pub fn atg__efficient_attention_backward(
out__: *mut *mut C_tensor,
grad_out__: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
bias_: *mut C_tensor,
out_: *mut C_tensor,
cu_seqlens_q_: *mut C_tensor,
cu_seqlens_k_: *mut C_tensor,
max_seqlen_q_: i64,
max_seqlen_k_: i64,
logsumexp_: *mut C_tensor,
dropout_p_: f64,
philox_seed_: *mut C_tensor,
philox_offset_: *mut C_tensor,
custom_mask_type_: i64,
bias_requires_grad_: c_int,
scale_v: f64,
scale_null: i8,
num_splits_key_v: i64,
num_splits_key_null: i8,
window_size_v: i64,
window_size_null: i8,
shared_storage_dqdkdv_: c_int,
);
pub fn atg__efficientzerotensor(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__efficientzerotensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
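    // `_embedding_bag` variants. The four `out__` slots of the forward calls
    // hold the output plus the offset2bag / bag_size / max_indices
    // bookkeeping tensors, as suggested by the `out0_`..`out3_` parameters
    // of the `_out` variants.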
pub fn atg__embedding_bag(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
padding_idx_: i64,
);
pub fn atg__embedding_bag_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
offset2bag_: *mut C_tensor,
bag_size_: *mut C_tensor,
maximum_indices_: *mut C_tensor,
num_weights_: i64,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
padding_idx_: i64,
);
pub fn atg__embedding_bag_dense_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
offset2bag_: *mut C_tensor,
bag_size_: *mut C_tensor,
maximum_indices_: *mut C_tensor,
num_weights_: i64,
scale_grad_by_freq_: c_int,
mode_: i64,
per_sample_weights_: *mut C_tensor,
padding_idx_: i64,
);
pub fn atg__embedding_bag_dense_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
offset2bag_: *mut C_tensor,
bag_size_: *mut C_tensor,
maximum_indices_: *mut C_tensor,
num_weights_: i64,
scale_grad_by_freq_: c_int,
mode_: i64,
per_sample_weights_: *mut C_tensor,
padding_idx_: i64,
);
pub fn atg__embedding_bag_forward_only(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
padding_idx_: i64,
);
pub fn atg__embedding_bag_forward_only_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
padding_idx_: i64,
);
pub fn atg__embedding_bag_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
padding_idx_: i64,
);
pub fn atg__embedding_bag_per_sample_weights_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
offset2bag_: *mut C_tensor,
mode_: i64,
padding_idx_: i64,
);
pub fn atg__embedding_bag_per_sample_weights_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
offset2bag_: *mut C_tensor,
mode_: i64,
padding_idx_: i64,
);
pub fn atg__embedding_bag_sparse_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
offset2bag_: *mut C_tensor,
bag_size_: *mut C_tensor,
num_weights_: i64,
scale_grad_by_freq_: c_int,
mode_: i64,
per_sample_weights_: *mut C_tensor,
padding_idx_: i64,
);
pub fn atg__empty_affine_quantized(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
scale_: f64,
zero_point_: i64,
);
pub fn atg__empty_affine_quantized_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
scale_: f64,
zero_point_: i64,
);
pub fn atg__empty_per_channel_affine_quantized(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
axis_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__empty_per_channel_affine_quantized_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
axis_: i64,
);
pub fn atg__euclidean_dist(out__: *mut *mut C_tensor, x1_: *mut C_tensor, x2_: *mut C_tensor);
pub fn atg__euclidean_dist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
);
pub fn atg__fake_quantize_learnable_per_channel_affine(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_learnable_per_channel_affine_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_learnable_per_channel_affine_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_learnable_per_tensor_affine(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_learnable_per_tensor_affine_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_learnable_per_tensor_affine_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
grad_factor_: f64,
);
pub fn atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
fake_quant_enabled_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
fake_quant_enabled_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
);
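    // Low-level FFT kernels: `c2c`/`c2r`/`r2c` name the complex/real input
    // and output domains; `normalization_` is an enum passed as i64, and
    // `forward_`/`onesided_` are c_int booleans.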
pub fn atg__fft_c2c(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
forward_: c_int,
);
pub fn atg__fft_c2c_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
forward_: c_int,
);
pub fn atg__fft_c2r(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
last_dim_size_: i64,
);
pub fn atg__fft_c2r_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
last_dim_size_: i64,
);
pub fn atg__fft_r2c(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
onesided_: c_int,
);
pub fn atg__fft_r2c_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
normalization_: i64,
onesided_: c_int,
);
pub fn atg__fill_mem_eff_dropout_mask_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dropout_p_: f64,
seed_: i64,
offset_: i64,
);
pub fn atg__flash_attention_backward(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
out_: *mut C_tensor,
logsumexp_: *mut C_tensor,
cum_seq_q_: *mut C_tensor,
cum_seq_k_: *mut C_tensor,
max_q_: i64,
max_k_: i64,
dropout_p_: f64,
is_causal_: c_int,
philox_seed_: *mut C_tensor,
philox_offset_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
window_size_left_v: i64,
window_size_left_null: i8,
window_size_right_v: i64,
window_size_right_null: i8,
);
pub fn atg__foobar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
arg1_: c_int,
arg2_: c_int,
arg3_: c_int,
);
pub fn atg__foobar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
arg1_: c_int,
arg2_: c_int,
arg3_: c_int,
);
pub fn atg__functional_assert_async(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
assert_msg_ptr: *const u8,
assert_msg_len: c_int,
dep_token_: *mut C_tensor,
);
pub fn atg__functional_assert_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
assert_msg_ptr: *const u8,
assert_msg_len: c_int,
dep_token_: *mut C_tensor,
);
pub fn atg__functional_sym_constrain_range(
out__: *mut *mut C_tensor,
size_: *mut C_scalar,
min_v: i64,
min_null: i8,
max_v: i64,
max_null: i8,
dep_token_: *mut C_tensor,
);
pub fn atg__functional_sym_constrain_range_for_size(
out__: *mut *mut C_tensor,
size_: *mut C_scalar,
min_v: i64,
min_null: i8,
max_v: i64,
max_null: i8,
dep_token_: *mut C_tensor,
);
pub fn atg__fused_dropout(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg__fused_dropout_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
);
pub fn atg__fused_moving_avg_obs_fq_helper(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
observer_on_: *mut C_tensor,
fake_quant_on_: *mut C_tensor,
running_min_: *mut C_tensor,
running_max_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
averaging_const_: f64,
quant_min_: i64,
quant_max_: i64,
ch_axis_: i64,
per_row_fake_quant_: c_int,
symmetric_quant_: c_int,
);
pub fn atg__fused_moving_avg_obs_fq_helper_functional(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
observer_on_: *mut C_tensor,
fake_quant_on_: *mut C_tensor,
running_min_: *mut C_tensor,
running_max_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
averaging_const_: f64,
quant_min_: i64,
quant_max_: i64,
ch_axis_: i64,
per_row_fake_quant_: c_int,
symmetric_quant_: c_int,
);
pub fn atg__fused_moving_avg_obs_fq_helper_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
observer_on_: *mut C_tensor,
fake_quant_on_: *mut C_tensor,
running_min_: *mut C_tensor,
running_max_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
averaging_const_: f64,
quant_min_: i64,
quant_max_: i64,
ch_axis_: i64,
per_row_fake_quant_: c_int,
symmetric_quant_: c_int,
);
pub fn atg__fused_sdp_choice(
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
attn_mask_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
scale_v: f64,
scale_null: i8,
enable_gqa_: c_int,
) -> i64;
pub fn atg__fw_primal(out__: *mut *mut C_tensor, self_: *mut C_tensor, level_: i64);
pub fn atg__fw_primal_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor, level_: i64);
pub fn atg__fw_primal_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
level_: i64,
);
pub fn atg__gather_sparse_backward(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
grad_: *mut C_tensor,
);
pub fn atg__grid_sampler_2d_cpu_fallback(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg__grid_sampler_2d_cpu_fallback_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg__grid_sampler_2d_cpu_fallback_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg__has_compatible_shallow_copy_type(
self_: *mut C_tensor,
from_: *mut C_tensor,
) -> c_int;
pub fn atg__has_same_storage_numel(self_: *mut C_tensor, other_: *mut C_tensor) -> c_int;
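    // Note the return type: `atg__histogramdd_bin_edges` hands back an array
    // of tensor pointers rather than filling an `out__` argument; in the
    // companion wrappers such arrays are read until a null entry (an
    // assumption here, not visible from the declaration alone).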
pub fn atg__histogramdd_bin_edges(
self_: *mut C_tensor,
bins_data: *const i64,
bins_len: c_int,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
) -> *mut *mut C_tensor;
pub fn atg__histogramdd_bin_edges_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
bins_data: *const i64,
bins_len: c_int,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg__histogramdd_from_bin_cts(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
bins_data: *const i64,
bins_len: c_int,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg__histogramdd_from_bin_cts_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
bins_data: *const i64,
bins_len: c_int,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg__histogramdd_from_bin_tensors(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
bins_data: *const *mut C_tensor,
bins_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg__histogramdd_from_bin_tensors_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
bins_data: *const *mut C_tensor,
bins_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg__index_put_impl(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
unsafe_: c_int,
);
pub fn atg__index_put_impl_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
unsafe_: c_int,
);
pub fn atg__index_put_impl_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
unsafe_: c_int,
);
pub fn atg__indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__int_mm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
pub fn atg__int_mm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg__is_all_true(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__is_any_true(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__is_zerotensor(self_: *mut C_tensor) -> c_int;
pub fn atg__lazy_clone(out__: *mut *mut C_tensor, self_: *mut C_tensor);
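    // Linear-algebra helpers. String arguments such as `UPLO_ptr`/`UPLO_len`
    // and `driver_ptr`/`driver_len` follow the length-delimited string
    // convention described at the top of this file.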
pub fn atg__linalg_check_errors(
info_: *mut C_tensor,
api_name_ptr: *const u8,
api_name_len: c_int,
is_matrix_: c_int,
);
pub fn atg__linalg_det(out__: *mut *mut C_tensor, A_: *mut C_tensor);
pub fn atg__linalg_det_result(
out__: *mut *mut C_tensor,
result_: *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
A_: *mut C_tensor,
);
pub fn atg__linalg_eigh(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
compute_v_: c_int,
);
pub fn atg__linalg_eigh_eigenvalues(
out__: *mut *mut C_tensor,
eigenvalues_: *mut C_tensor,
eigenvectors_: *mut C_tensor,
A_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
compute_v_: c_int,
);
pub fn atg__linalg_eigvals(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__linalg_slogdet(out__: *mut *mut C_tensor, A_: *mut C_tensor);
pub fn atg__linalg_slogdet_sign(
out__: *mut *mut C_tensor,
sign_: *mut C_tensor,
logabsdet_: *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
A_: *mut C_tensor,
);
pub fn atg__linalg_solve_ex(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
check_errors_: c_int,
);
pub fn atg__linalg_solve_ex_result(
out__: *mut *mut C_tensor,
result_: *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
info_: *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
check_errors_: c_int,
);
pub fn atg__linalg_svd(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
full_matrices_: c_int,
compute_uv_: c_int,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg__linalg_svd_u(
out__: *mut *mut C_tensor,
U_: *mut C_tensor,
S_: *mut C_tensor,
Vh_: *mut C_tensor,
A_: *mut C_tensor,
full_matrices_: c_int,
compute_uv_: c_int,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg__log_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__log_softmax_backward_data(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
input_dtype_: c_int,
);
pub fn atg__log_softmax_backward_data_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
input_dtype_: c_int,
);
pub fn atg__log_softmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__logcumsumexp(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg__logcumsumexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg__lstm_mps(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg__lstm_mps_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
out4_: *mut C_tensor,
out5_: *mut C_tensor,
input_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg__lu_with_info(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
pivot_: c_int,
check_errors_: c_int,
);
pub fn atg__make_dep_token(
out__: *mut *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__make_dual(
out__: *mut *mut C_tensor,
primal_: *mut C_tensor,
tangent_: *mut C_tensor,
level_: i64,
);
pub fn atg__make_dual_copy(
out__: *mut *mut C_tensor,
primal_: *mut C_tensor,
tangent_: *mut C_tensor,
level_: i64,
);
pub fn atg__make_dual_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
primal_: *mut C_tensor,
tangent_: *mut C_tensor,
level_: i64,
);
pub fn atg__make_per_channel_quantized_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
);
pub fn atg__make_per_channel_quantized_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
);
pub fn atg__make_per_tensor_quantized_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
);
pub fn atg__make_per_tensor_quantized_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
);
pub fn atg__masked_scale(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
scale_: f64,
);
pub fn atg__masked_scale_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
scale_: f64,
);
pub fn atg__masked_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__masked_softmax_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
mask_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg__masked_softmax_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
mask_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg__masked_softmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__mixed_dtypes_linear(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
scale_: *mut C_tensor,
bias_: *mut C_tensor,
activation_ptr: *const u8,
activation_len: c_int,
);
pub fn atg__mkldnn_reshape(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
shape_data: *const i64,
shape_len: c_int,
);
pub fn atg__mkldnn_reshape_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
shape_data: *const i64,
shape_len: c_int,
);
pub fn atg__mkldnn_transpose(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim0_: i64,
dim1_: i64,
);
pub fn atg__mkldnn_transpose_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim0_: i64,
dim1_: i64,
);
pub fn atg__mkldnn_transpose_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim0_: i64,
dim1_: i64,
);
pub fn atg__mps_convolution(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg__mps_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg__mps_convolution_transpose(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg__mps_convolution_transpose_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg__native_batch_norm_legit(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_functional(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_no_stats(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_no_stats_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_invstd_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_no_training(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_no_training_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_batch_norm_legit_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_invstd_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg__native_multi_head_attention(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
embed_dim_: i64,
num_head_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
mask_: *mut C_tensor,
need_weights_: c_int,
average_attn_weights_: c_int,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__native_multi_head_attention_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
embed_dim_: i64,
num_head_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
mask_: *mut C_tensor,
need_weights_: c_int,
average_attn_weights_: c_int,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__neg_view(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__neg_view_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__neg_view_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__nested_compute_contiguous_strides_offsets(
out__: *mut *mut C_tensor,
nested_size_: *mut C_tensor,
);
pub fn atg__nested_from_padded(
out__: *mut *mut C_tensor,
padded_: *mut C_tensor,
cpu_nested_shape_example_: *mut C_tensor,
fuse_transform_0213_: c_int,
);
pub fn atg__nested_from_padded_and_nested_example(
out__: *mut *mut C_tensor,
padded_: *mut C_tensor,
nt_example_: *mut C_tensor,
);
pub fn atg__nested_from_padded_and_nested_example_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
padded_: *mut C_tensor,
nt_example_: *mut C_tensor,
);
pub fn atg__nested_from_padded_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
padded_: *mut C_tensor,
cpu_nested_shape_example_: *mut C_tensor,
fuse_transform_0213_: c_int,
);
pub fn atg__nested_get_jagged_dummy(out__: *mut *mut C_tensor, any_: *mut C_tensor);
pub fn atg__nested_get_lengths(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_max_seqlen(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_min_seqlen(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_offsets(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_ragged_idx(self_: *mut C_tensor) -> i64;
pub fn atg__nested_get_values(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_values_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__nested_get_values_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__nested_select_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: i64,
);
pub fn atg__nested_sum_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg__nested_view_from_buffer(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
nested_size_: *mut C_tensor,
nested_strides_: *mut C_tensor,
offsets_: *mut C_tensor,
);
pub fn atg__nested_view_from_buffer_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
nested_size_: *mut C_tensor,
nested_strides_: *mut C_tensor,
offsets_: *mut C_tensor,
);
pub fn atg__nested_view_from_buffer_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
nested_size_: *mut C_tensor,
nested_strides_: *mut C_tensor,
offsets_: *mut C_tensor,
);
pub fn atg__nested_view_from_jagged(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
offsets_: *mut C_tensor,
dummy_: *mut C_tensor,
lengths_: *mut C_tensor,
ragged_idx_: i64,
min_seqlen_: *mut C_tensor,
max_seqlen_: *mut C_tensor,
);
pub fn atg__nested_view_from_jagged_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
offsets_: *mut C_tensor,
dummy_: *mut C_tensor,
lengths_: *mut C_tensor,
ragged_idx_: i64,
min_seqlen_: *mut C_tensor,
max_seqlen_: *mut C_tensor,
);
pub fn atg__nested_view_from_jagged_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
offsets_: *mut C_tensor,
dummy_: *mut C_tensor,
lengths_: *mut C_tensor,
ragged_idx_: i64,
min_seqlen_: *mut C_tensor,
max_seqlen_: *mut C_tensor,
);
pub fn atg__new_zeros_with_same_feature_meta(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
self_num_batch_dims_: i64,
);
pub fn atg__new_zeros_with_same_feature_meta_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
self_num_batch_dims_: i64,
);
pub fn atg__nnpack_available() -> c_int;
pub fn atg__nnpack_spatial_convolution(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg__nnpack_spatial_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg__nnz(self_: *mut C_tensor) -> i64;
pub fn atg__pack_padded_sequence(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
lengths_: *mut C_tensor,
batch_first_: c_int,
);
pub fn atg__pack_padded_sequence_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_size_data: *const i64,
input_size_len: c_int,
batch_sizes_: *mut C_tensor,
batch_first_: c_int,
);
pub fn atg__pack_padded_sequence_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
lengths_: *mut C_tensor,
batch_first_: c_int,
);
pub fn atg__pad_circular(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
pad_data: *const i64,
pad_len: c_int,
);
pub fn atg__pad_enum(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
pad_data: *const i64,
pad_len: c_int,
mode_: i64,
value_v: f64,
value_null: i8,
);
pub fn atg__pad_packed_sequence(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
batch_sizes_: *mut C_tensor,
batch_first_: c_int,
padding_value_: *mut C_scalar,
total_length_: i64,
);
pub fn atg__pdist_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
pdist_: *mut C_tensor,
);
pub fn atg__pdist_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
pdist_: *mut C_tensor,
);
pub fn atg__pin_memory(out__: *mut *mut C_tensor, self_: *mut C_tensor, device_: c_int);
pub fn atg__pin_memory_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
device_: c_int,
);
pub fn atg__prelu_kernel(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg__prelu_kernel_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg__print(s_ptr: *const u8, s_len: c_int);
pub fn atg__propagate_xla_data(input_: *mut C_tensor, output_: *mut C_tensor);
pub fn atg__remove_batch_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
level_: i64,
batch_size_: i64,
out_dim_: i64,
);
pub fn atg__reshape_alias(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg__reshape_alias_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg__reshape_alias_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg__reshape_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__reshape_from_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
shape_: *mut C_tensor,
);
pub fn atg__resize_output(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
device_: c_int,
);
pub fn atg__resize_output_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
device_: c_int,
);
pub fn atg__resize_output_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
device_: c_int,
);
pub fn atg__rowwise_prune(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
mask_: *mut C_tensor,
compressed_indices_dtype_: c_int,
);
pub fn atg__safe_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg__sample_dirichlet(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__sample_dirichlet_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__saturate_weight_to_fp16(out__: *mut *mut C_tensor, weight_: *mut C_tensor);
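    // Scaled-dot-product-attention kernels (math fallback, cuDNN, efficient
    // and flash variants); the optional `scale` argument follows the
    // `_v`/`_null` encoding.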
pub fn atg__scaled_dot_product_attention_math(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
attn_mask_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
dropout_mask_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
enable_gqa_: c_int,
);
pub fn atg__scaled_dot_product_attention_math_for_mps(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
attn_mask_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
dropout_mask_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_dot_product_cudnn_attention_backward(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
out_: *mut C_tensor,
logsumexp_: *mut C_tensor,
philox_seed_: *mut C_tensor,
philox_offset_: *mut C_tensor,
attn_bias_: *mut C_tensor,
cum_seq_q_: *mut C_tensor,
cum_seq_k_: *mut C_tensor,
max_q_: i64,
max_k_: i64,
dropout_p_: f64,
is_causal_: c_int,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_dot_product_efficient_attention(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
attn_bias_: *mut C_tensor,
compute_log_sumexp_: c_int,
dropout_p_: f64,
is_causal_: c_int,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_dot_product_flash_attention_backward(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
out_: *mut C_tensor,
logsumexp_: *mut C_tensor,
cum_seq_q_: *mut C_tensor,
cum_seq_k_: *mut C_tensor,
max_q_: i64,
max_k_: i64,
dropout_p_: f64,
is_causal_: c_int,
philox_seed_: *mut C_tensor,
philox_offset_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_dot_product_flash_attention_for_cpu(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
attn_mask_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_dot_product_flash_attention_for_cpu_backward(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
out_: *mut C_tensor,
logsumexp_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
attn_mask_: *mut C_tensor,
scale_v: f64,
scale_null: i8,
);
pub fn atg__scaled_mm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
scale_a_: *mut C_tensor,
scale_b_: *mut C_tensor,
bias_: *mut C_tensor,
scale_result_: *mut C_tensor,
out_dtype_: c_int,
use_fast_accum_: c_int,
);
pub fn atg__scaled_mm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
scale_a_: *mut C_tensor,
scale_b_: *mut C_tensor,
bias_: *mut C_tensor,
scale_result_: *mut C_tensor,
out_dtype_: c_int,
use_fast_accum_: c_int,
);
pub fn atg__scatter_reduce(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg__scatter_reduce_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg__scatter_reduce_two_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg__segment_reduce_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
output_: *mut C_tensor,
data_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
lengths_: *mut C_tensor,
offsets_: *mut C_tensor,
axis_: i64,
initial_: *mut C_scalar,
);
pub fn atg__segment_reduce_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
output_: *mut C_tensor,
data_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
lengths_: *mut C_tensor,
offsets_: *mut C_tensor,
axis_: i64,
initial_: *mut C_scalar,
);
pub fn atg__shape_as_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__slow_conv2d_backward(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_weight_: *mut C_tensor,
grad_bias_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg__sobol_engine_draw(
out__: *mut *mut C_tensor,
quasi_: *mut C_tensor,
n_: i64,
sobolstate_: *mut C_tensor,
dimension_: i64,
num_generated_: i64,
dtype_: c_int,
);
pub fn atg__sobol_engine_ff_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_: i64,
sobolstate_: *mut C_tensor,
dimension_: i64,
num_generated_: i64,
);
pub fn atg__sobol_engine_initialize_state_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dimension_: i64,
);
pub fn atg__sobol_engine_scramble_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
ltm_: *mut C_tensor,
dimension_: i64,
);
pub fn atg__softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__softmax_backward_data(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
input_dtype_: c_int,
);
pub fn atg__softmax_backward_data_out(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
input_dtype_: c_int,
);
pub fn atg__softmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__sparse_addmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg__sparse_addmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg__sparse_broadcast_to(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__sparse_broadcast_to_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__sparse_broadcast_to_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
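// The `_unsafe` sparse constructors below skip the invariant checks that the
// checked variants perform (the checks themselves are exposed separately as
// the `atg__validate_sparse_*` entry points further down). `size_data` and
// `size_len` encode an IntArrayRef, while `options_kind` and `options_device`
// carry the element kind (dtype) and device as integer codes, as elsewhere in
// these bindings.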
pub fn atg__sparse_bsc_tensor_unsafe(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_bsr_tensor_unsafe(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_compressed_tensor_unsafe(
out__: *mut *mut C_tensor,
compressed_indices_: *mut C_tensor,
plain_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_compressed_tensor_with_dims(
out__: *mut *mut C_tensor,
nnz_: i64,
dense_dim_: i64,
size_data: *const i64,
size_len: c_int,
blocksize_data: *const i64,
blocksize_len: c_int,
index_dtype_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_coo_tensor_unsafe(
out__: *mut *mut C_tensor,
indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
is_coalesced_: c_int,
);
pub fn atg__sparse_coo_tensor_with_dims(
out__: *mut *mut C_tensor,
sparse_dim_: i64,
dense_dim_: i64,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_coo_tensor_with_dims_and_tensors(
out__: *mut *mut C_tensor,
sparse_dim_: i64,
dense_dim_: i64,
size_data: *const i64,
size_len: c_int,
indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
is_coalesced_: c_int,
);
pub fn atg__sparse_coo_tensor_with_dims_and_tensors_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
sparse_dim_: i64,
dense_dim_: i64,
size_data: *const i64,
size_len: c_int,
indices_: *mut C_tensor,
values_: *mut C_tensor,
is_coalesced_: c_int,
);
pub fn atg__sparse_coo_tensor_with_dims_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
sparse_dim_: i64,
dense_dim_: i64,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__sparse_csc_tensor_unsafe(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_csr_prod(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg__sparse_csr_prod_dim_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg__sparse_csr_sum(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg__sparse_csr_sum_dim_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg__sparse_csr_tensor_unsafe(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg__sparse_log_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__sparse_log_softmax_backward_data(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
self_: *mut C_tensor,
);
pub fn atg__sparse_log_softmax_backward_data_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
self_: *mut C_tensor,
);
pub fn atg__sparse_log_softmax_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg__sparse_log_softmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__sparse_mask_projection(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
accumulate_matches_: c_int,
);
pub fn atg__sparse_mask_projection_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
accumulate_matches_: c_int,
);
pub fn atg__sparse_mm(out__: *mut *mut C_tensor, sparse_: *mut C_tensor, dense_: *mut C_tensor);
pub fn atg__sparse_mm_reduce(
out__: *mut *mut C_tensor,
sparse_: *mut C_tensor,
dense_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg__sparse_mm_reduce_impl(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg__sparse_semi_structured_apply(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
thread_masks_: *mut C_tensor,
);
pub fn atg__sparse_semi_structured_apply_dense(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
thread_masks_: *mut C_tensor,
);
pub fn atg__sparse_semi_structured_linear(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
meta_: *mut C_tensor,
bias_: *mut C_tensor,
activation_ptr: *const u8,
activation_len: c_int,
out_dtype_: c_int,
);
pub fn atg__sparse_semi_structured_mm(
out__: *mut *mut C_tensor,
mat1_: *mut C_tensor,
mat1_meta_: *mut C_tensor,
mat2_: *mut C_tensor,
out_dtype_: c_int,
);
pub fn atg__sparse_semi_structured_tile(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
algorithm_ptr: *const u8,
algorithm_len: c_int,
use_cutlass_: c_int,
);
pub fn atg__sparse_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__sparse_softmax_backward_data(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
self_: *mut C_tensor,
);
pub fn atg__sparse_softmax_backward_data_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
dim_: i64,
self_: *mut C_tensor,
);
pub fn atg__sparse_softmax_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg__sparse_softmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
half_to_float_: c_int,
);
pub fn atg__sparse_sparse_matmul(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__sparse_sparse_matmul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__sparse_sum(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__sparse_sum_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg__sparse_sum_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg__sparse_sum_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg__sparse_sum_dim_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
dtype_: c_int,
);
pub fn atg__sparse_sum_dim_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg__sparse_sum_dtype(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg__spdiags(
out__: *mut *mut C_tensor,
diagonals_: *mut C_tensor,
offsets_: *mut C_tensor,
shape_data: *const i64,
shape_len: c_int,
layout_: i8,
);
pub fn atg__spdiags_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
diagonals_: *mut C_tensor,
offsets_: *mut C_tensor,
shape_data: *const i64,
shape_len: c_int,
layout_: i8,
);
pub fn atg__spsolve(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
);
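// Tensor lists are flattened into a pointer/length pair of raw tensor
// handles. A minimal sketch of stacking two tensors along dim 0, assuming
// `a` and `b` are valid `*mut C_tensor` values:
//
//     let tensors = [a, b];
//     let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
//     unsafe {
//         atg__stack(out.as_mut_ptr(), tensors.as_ptr(), tensors.len() as c_int, 0);
//     }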
pub fn atg__stack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg__stack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg__standard_gamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__standard_gamma_grad(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg__standard_gamma_grad_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg__standard_gamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__test_ambiguous_defaults(
out__: *mut *mut C_tensor,
dummy_: *mut C_tensor,
a_: i64,
b_: i64,
);
pub fn atg__test_ambiguous_defaults_b(
out__: *mut *mut C_tensor,
dummy_: *mut C_tensor,
a_: i64,
b_ptr: *const u8,
b_len: c_int,
);
pub fn atg__test_autograd_multiple_dispatch(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__test_autograd_multiple_dispatch_fullcoverage_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__test_autograd_multiple_dispatch_ntonly(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
b_: c_int,
);
pub fn atg__test_autograd_multiple_dispatch_view(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__test_autograd_multiple_dispatch_view_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__test_autograd_multiple_dispatch_view_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__test_check_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__test_functorch_fallback(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__test_functorch_fallback_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__test_optional_filled_intlist(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const i64,
addends_len: c_int,
);
pub fn atg__test_optional_filled_intlist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const i64,
addends_len: c_int,
);
pub fn atg__test_optional_floatlist(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const f64,
addends_len: c_int,
);
pub fn atg__test_optional_floatlist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const f64,
addends_len: c_int,
);
pub fn atg__test_optional_intlist(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const i64,
addends_len: c_int,
);
pub fn atg__test_optional_intlist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
values_: *mut C_tensor,
addends_data: *const i64,
addends_len: c_int,
);
pub fn atg__test_parallel_materialize(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
num_parallel_: i64,
skip_first_: c_int,
);
pub fn atg__test_serialization_subcmul(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg__test_string_default(
out__: *mut *mut C_tensor,
dummy_: *mut C_tensor,
a_ptr: *const u8,
a_len: c_int,
b_ptr: *const u8,
b_len: c_int,
);
pub fn atg__test_warn_in_autograd(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__test_warn_in_autograd_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__to_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
non_blocking_: c_int,
);
pub fn atg__to_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
non_blocking_: c_int,
);
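// Unlike most entry points, `atg__to_cpu` returns its results directly: a
// callee-allocated array of tensor handles (null-terminated, as the Rust side
// of these bindings expects) that the caller takes ownership of, instead of
// writing through an `out__` slot.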
pub fn atg__to_cpu(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg__to_dense(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
masked_grad_: c_int,
);
pub fn atg__to_dense_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
masked_grad_: c_int,
);
pub fn atg__to_sparse(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
layout_: i8,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_bsc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_bsc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_bsr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_bsr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_csc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_csc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_csr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_csr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
layout_: i8,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg__to_sparse_semi_structured(out__: *mut *mut C_tensor, dense_: *mut C_tensor);
pub fn atg__to_sparse_sparse_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
sparse_dim_: i64,
);
pub fn atg__to_sparse_sparse_dim_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
sparse_dim_: i64,
);
pub fn atg__transform_bias_rescale_qkv(
out__: *mut *mut C_tensor,
qkv_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
num_heads_: i64,
);
pub fn atg__transform_bias_rescale_qkv_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
qkv_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
num_heads_: i64,
);
pub fn atg__transformer_encoder_layer_fwd(
out__: *mut *mut C_tensor,
src_: *mut C_tensor,
embed_dim_: i64,
num_heads_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
use_gelu_: c_int,
norm_first_: c_int,
eps_: f64,
norm_weight_1_: *mut C_tensor,
norm_bias_1_: *mut C_tensor,
norm_weight_2_: *mut C_tensor,
norm_bias_2_: *mut C_tensor,
ffn_weight_1_: *mut C_tensor,
ffn_bias_1_: *mut C_tensor,
ffn_weight_2_: *mut C_tensor,
ffn_bias_2_: *mut C_tensor,
mask_: *mut C_tensor,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__transformer_encoder_layer_fwd_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
src_: *mut C_tensor,
embed_dim_: i64,
num_heads_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
use_gelu_: c_int,
norm_first_: c_int,
eps_: f64,
norm_weight_1_: *mut C_tensor,
norm_bias_1_: *mut C_tensor,
norm_weight_2_: *mut C_tensor,
norm_bias_2_: *mut C_tensor,
ffn_weight_1_: *mut C_tensor,
ffn_bias_1_: *mut C_tensor,
ffn_weight_2_: *mut C_tensor,
ffn_bias_2_: *mut C_tensor,
mask_: *mut C_tensor,
mask_type_v: i64,
mask_type_null: i8,
);
pub fn atg__trilinear(
out__: *mut *mut C_tensor,
i1_: *mut C_tensor,
i2_: *mut C_tensor,
i3_: *mut C_tensor,
expand1_data: *const i64,
expand1_len: c_int,
expand2_data: *const i64,
expand2_len: c_int,
expand3_data: *const i64,
expand3_len: c_int,
sumdim_data: *const i64,
sumdim_len: c_int,
unroll_dim_: i64,
);
pub fn atg__trilinear_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
i1_: *mut C_tensor,
i2_: *mut C_tensor,
i3_: *mut C_tensor,
expand1_data: *const i64,
expand1_len: c_int,
expand2_data: *const i64,
expand2_len: c_int,
expand3_data: *const i64,
expand3_len: c_int,
sumdim_data: *const i64,
sumdim_len: c_int,
unroll_dim_: i64,
);
pub fn atg__triton_multi_head_attention(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
embed_dim_: i64,
num_head_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg__triton_multi_head_attention_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
embed_dim_: i64,
num_head_: i64,
qkv_weight_: *mut C_tensor,
qkv_bias_: *mut C_tensor,
proj_weight_: *mut C_tensor,
proj_bias_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg__triton_scaled_dot_attention(
out__: *mut *mut C_tensor,
q_: *mut C_tensor,
k_: *mut C_tensor,
v_: *mut C_tensor,
dropout_p_: f64,
);
pub fn atg__triton_scaled_dot_attention_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
q_: *mut C_tensor,
k_: *mut C_tensor,
v_: *mut C_tensor,
dropout_p_: f64,
);
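// Multi-output functions write their results into consecutive slots of
// `out__`: `_unique` below produces two tensors (values and inverse indices)
// and `_unique2` produces three (values, inverse indices, counts), so the
// caller must size the output array accordingly.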
pub fn atg__unique(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
sorted_: c_int,
return_inverse_: c_int,
);
pub fn atg__unique2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
sorted_: c_int,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg__unique2_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
self_: *mut C_tensor,
sorted_: c_int,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg__unique_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
sorted_: c_int,
return_inverse_: c_int,
);
pub fn atg__unpack_dual(out__: *mut *mut C_tensor, dual_: *mut C_tensor, level_: i64);
pub fn atg__unsafe_index(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
);
pub fn atg__unsafe_index_put(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg__unsafe_masked_index(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
fill_: *mut C_scalar,
);
pub fn atg__unsafe_masked_index_put_accumulate(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
);
pub fn atg__unsafe_view(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__unsafe_view_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__upsample_bicubic2d_aa(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bicubic2d_aa_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bicubic2d_aa_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bicubic2d_aa_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bicubic2d_aa_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg__upsample_bilinear2d_aa(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bilinear2d_aa_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bilinear2d_aa_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bilinear2d_aa_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_bilinear2d_aa_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg__upsample_nearest_exact1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg__upsample_nearest_exact1d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg__upsample_nearest_exact1d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg__upsample_nearest_exact1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg__upsample_nearest_exact1d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg__upsample_nearest_exact2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact2d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg__upsample_nearest_exact3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg__upsample_nearest_exact3d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
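// Predicate-style entry points such as the two below return a `c_int`
// (non-zero meaning true) instead of writing a tensor, here reporting whether
// cuDNN can handle the given CTC-loss configuration.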
pub fn atg__use_cudnn_ctc_loss(
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
) -> c_int;
pub fn atg__use_cudnn_ctc_loss_tensor(
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
blank_: i64,
) -> c_int;
pub fn atg__use_cudnn_rnn_flatten_weight() -> c_int;
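// The `atg__validate_*` functions produce no tensor output; they only check
// the invariants of the supplied sparse-tensor components and are expected to
// surface an error from the C++ side when the inputs are inconsistent.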
pub fn atg__validate_compressed_sparse_indices(
is_crow_: c_int,
compressed_idx_: *mut C_tensor,
plain_idx_: *mut C_tensor,
cdim_: i64,
dim_: i64,
nnz_: i64,
);
pub fn atg__validate_sparse_bsc_tensor_args(
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__validate_sparse_bsr_tensor_args(
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__validate_sparse_compressed_tensor_args(
compressed_indices_: *mut C_tensor,
plain_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
layout_: i8,
);
pub fn atg__validate_sparse_csc_tensor_args(
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__validate_sparse_csr_tensor_args(
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg__values(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__values_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg__values_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg__version(self_: *mut C_tensor) -> i64;
pub fn atg__weight_int4pack_mm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
qGroupSize_: i64,
qScaleAndZeros_: *mut C_tensor,
);
pub fn atg__weight_int8pack_mm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
scales_: *mut C_tensor,
);
pub fn atg__weight_norm(
out__: *mut *mut C_tensor,
v_: *mut C_tensor,
g_: *mut C_tensor,
dim_: i64,
);
pub fn atg__weight_norm_differentiable_backward(
out__: *mut *mut C_tensor,
grad_w_: *mut C_tensor,
saved_v_: *mut C_tensor,
saved_g_: *mut C_tensor,
saved_norms_: *mut C_tensor,
dim_: i64,
);
pub fn atg__weight_norm_interface(
out__: *mut *mut C_tensor,
v_: *mut C_tensor,
g_: *mut C_tensor,
dim_: i64,
);
pub fn atg__weight_norm_interface_backward(
out__: *mut *mut C_tensor,
grad_w_: *mut C_tensor,
saved_v_: *mut C_tensor,
saved_g_: *mut C_tensor,
saved_norms_: *mut C_tensor,
dim_: i64,
);
pub fn atg__weight_norm_interface_backward_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
grad_w_: *mut C_tensor,
saved_v_: *mut C_tensor,
saved_g_: *mut C_tensor,
saved_norms_: *mut C_tensor,
dim_: i64,
);
pub fn atg__weight_norm_interface_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
v_: *mut C_tensor,
g_: *mut C_tensor,
dim_: i64,
);
pub fn atg__wrapped_linear_prepack(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
weight_scale_: *mut C_tensor,
weight_zero_point_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg__wrapped_quantized_linear_prepacked(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
input_scale_: *mut C_tensor,
input_zero_point_: *mut C_tensor,
packed_weight_: *mut C_tensor,
output_scale_: *mut C_tensor,
output_zero_point_: *mut C_tensor,
out_channel_: i64,
);
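// From here on the bindings cover the non-underscore-prefixed ATen surface.
// The naming mirrors ATen's: a trailing underscore (`atg_abs_`) marks the
// in-place variant and an `_out` suffix the explicit-output variant. A
// minimal sketch of the single-output convention, assuming `input` is a valid
// tensor handle:
//
//     let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
//     unsafe { atg_abs(out.as_mut_ptr(), input) };
//     let result = out[0]; // freshly allocated; ownership passes to the caller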
pub fn atg_abs(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_abs_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_abs_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_absolute(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_absolute_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_absolute_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acos(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acos_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acos_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acosh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acosh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_acosh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_adaptive_avg_pool1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_avg_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_avg_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_avg_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_avg_pool3d_backward(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_adaptive_avg_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_max_pool1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_max_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_max_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
);
pub fn atg_adaptive_max_pool2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
);
pub fn atg_adaptive_max_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_max_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_adaptive_max_pool3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
);
pub fn atg_adaptive_max_pool3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
);
pub fn atg_adaptive_max_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_add(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_add_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_add_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_add_scalar(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_add_scalar_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_add_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_addbmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
);
pub fn atg_addbmm_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
);
pub fn atg_addbmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
);
pub fn atg_addcdiv(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addcdiv_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addcdiv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addcmul(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addcmul_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addcmul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
tensor1_: *mut C_tensor,
tensor2_: *mut C_tensor,
);
pub fn atg_addmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_addmm_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_addmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_addmv(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat_: *mut C_tensor,
vec_: *mut C_tensor,
);
pub fn atg_addmv_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat_: *mut C_tensor,
vec_: *mut C_tensor,
);
pub fn atg_addmv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat_: *mut C_tensor,
vec_: *mut C_tensor,
);
pub fn atg_addr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
vec1_: *mut C_tensor,
vec2_: *mut C_tensor,
);
pub fn atg_addr_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
vec1_: *mut C_tensor,
vec2_: *mut C_tensor,
);
pub fn atg_addr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
vec1_: *mut C_tensor,
vec2_: *mut C_tensor,
);
pub fn atg_adjoint(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_affine_grid_generator(
out__: *mut *mut C_tensor,
theta_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
align_corners_: c_int,
);
pub fn atg_affine_grid_generator_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
align_corners_: c_int,
);
pub fn atg_affine_grid_generator_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
theta_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
align_corners_: c_int,
);
pub fn atg_alias(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_alias_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_alias_copy_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_align_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_align_tensors(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_all(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_all_all_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_all_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
pub fn atg_all_dims(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_all_dims_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_all_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_allclose(
self_: *mut C_tensor,
other_: *mut C_tensor,
rtol_: f64,
atol_: f64,
equal_nan_: c_int,
) -> c_int;
pub fn atg_alpha_dropout(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_alpha_dropout_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_amax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_amax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_amin(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_amin_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_aminmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_aminmax_out(
out__: *mut *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_angle(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_angle_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_any(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_any_all_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_any_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
pub fn atg_any_dims(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_any_dims_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_any_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_arange(
out__: *mut *mut C_tensor,
end_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_arange_start(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_arange_start_step(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
step_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_arccos(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arccos_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arccos_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arccosh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arccosh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arccosh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsin_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsin_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsinh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsinh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arcsinh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctan2(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_arctan2_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_arctan2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_arctan_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctan_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_arctanh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
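// Optional integer arguments use the same (value, null-flag) pairing as the
// optional floats above: for `argmax`/`argmin`, a non-zero `dim_null` means
// no dimension was given (reduce over the flattened tensor) and `dim_v` is
// ignored.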
pub fn atg_argmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_argmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_argmin(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_argmin_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
);
pub fn atg_argsort(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
descending_: c_int,
);
pub fn atg_argsort_stable(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
stable_: c_int,
dim_: i64,
descending_: c_int,
);
pub fn atg_argsort_stable_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
stable_: c_int,
dim_: i64,
descending_: c_int,
);
pub fn atg_argwhere(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_as_strided(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_as_strided_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_as_strided_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_as_strided_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_as_strided_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_as_strided_scatter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
storage_offset_v: i64,
storage_offset_null: i8,
);
pub fn atg_asin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_asin_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_asin_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_asinh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_asinh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_asinh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atan2(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_atan2_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_atan2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_atan_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atan_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atanh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atleast_1d(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atleast_1d_sequence(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_atleast_2d(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atleast_2d_sequence(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_atleast_3d(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_atleast_3d_sequence(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_avg_pool1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
);
pub fn atg_avg_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
pub fn atg_avg_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
ceil_mode_: c_int,
count_include_pad_: c_int,
divisor_override_v: i64,
divisor_override_null: i8,
);
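// Scalar-typed arguments (`beta_` and `alpha_` below) are opaque `C_scalar`
// handles built by the caller, mirroring ATen's `Scalar`; tensor and scalar
// overloads of the same op are disambiguated by suffix (e.g. `atg_add` vs
// `atg_add_scalar` above).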
pub fn atg_baddbmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
beta_: *mut C_scalar,
alpha_: *mut C_scalar,
);
pub fn atg_baddbmm_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
);
pub fn atg_baddbmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
batch1_: *mut C_tensor,
batch2_: *mut C_tensor,
);
pub fn atg_bartlett_window(
out__: *mut *mut C_tensor,
window_length_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_bartlett_window_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
);
pub fn atg_bartlett_window_periodic(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_bartlett_window_periodic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
);
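// Optional tensor arguments (e.g. `weight_`, `bias_`, and the running
// statistics below) are passed as possibly-null `*mut C_tensor` handles; a
// null pointer stands in for an undefined tensor on the C++ side.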
pub fn atg_batch_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
cudnn_enabled_: c_int,
);
pub fn atg_batch_norm_backward_elemt(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
weight_: *mut C_tensor,
sum_dy_: *mut C_tensor,
sum_dy_xmu_: *mut C_tensor,
count_: *mut C_tensor,
);
pub fn atg_batch_norm_backward_elemt_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_out_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
weight_: *mut C_tensor,
sum_dy_: *mut C_tensor,
sum_dy_xmu_: *mut C_tensor,
count_: *mut C_tensor,
);
pub fn atg_batch_norm_backward_reduce(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
weight_: *mut C_tensor,
input_g_: c_int,
weight_g_: c_int,
bias_g_: c_int,
);
pub fn atg_batch_norm_backward_reduce_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
grad_out_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
weight_: *mut C_tensor,
input_g_: c_int,
weight_g_: c_int,
bias_g_: c_int,
);
pub fn atg_batch_norm_elemt(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
eps_: f64,
);
pub fn atg_batch_norm_elemt_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
eps_: f64,
);
pub fn atg_batch_norm_gather_stats(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
count_: i64,
);
pub fn atg_batch_norm_gather_stats_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
count_: i64,
);
pub fn atg_batch_norm_gather_stats_with_counts(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
counts_: *mut C_tensor,
);
pub fn atg_batch_norm_gather_stats_with_counts_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
mean_: *mut C_tensor,
invstd_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
eps_: f64,
counts_: *mut C_tensor,
);
pub fn atg_batch_norm_stats(out__: *mut *mut C_tensor, input_: *mut C_tensor, eps_: f64);
pub fn atg_batch_norm_stats_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
eps_: f64,
);
pub fn atg_batch_norm_update_stats(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
);
pub fn atg_batch_norm_update_stats_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
momentum_: f64,
);
pub fn atg_bernoulli(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_bernoulli_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_tensor);
pub fn atg_bernoulli_float_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg_bernoulli_p(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg_bernoulli_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_tensor);
pub fn atg_bilinear(
out__: *mut *mut C_tensor,
input1_: *mut C_tensor,
input2_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_binary_cross_entropy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_binary_cross_entropy_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_binary_cross_entropy_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_binary_cross_entropy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_binary_cross_entropy_with_logits(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
pos_weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_binary_cross_entropy_with_logits_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
pos_weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_bincount(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weights_: *mut C_tensor,
minlength_: i64,
);
pub fn atg_bincount_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weights_: *mut C_tensor,
minlength_: i64,
);
pub fn atg_binomial(out__: *mut *mut C_tensor, count_: *mut C_tensor, prob_: *mut C_tensor);
pub fn atg_binomial_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
count_: *mut C_tensor,
prob_: *mut C_tensor,
);
pub fn atg_bitwise_and(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_and_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_and_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_and_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_and_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_and_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_and_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_and_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_left_shift_tensor_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_left_shift_tensor_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_left_shift_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_not(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_bitwise_not_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_bitwise_not_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_bitwise_or(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_or_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_or_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_or_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_or_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_or_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_or_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_or_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_right_shift_tensor_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_right_shift_tensor_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_right_shift_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_xor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_xor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_bitwise_xor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_bitwise_xor_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_xor_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_bitwise_xor_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_xor_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_bitwise_xor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
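// Factory functions such as the window constructors below take the
// result dtype and device as raw codes: `options_kind` is ATen's
// ScalarType index (e.g. 6 = f32) and `options_device` follows this
// crate's device encoding (negative for CPU, otherwise a CUDA ordinal;
// stated as an assumption of the usual tch-rs convention):
//
//     let mut out = [std::ptr::null_mut(); 1];
//     unsafe { atg_blackman_window(out.as_mut_ptr(), 400, 6, -1) };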
pub fn atg_blackman_window(
out__: *mut *mut C_tensor,
window_length_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_blackman_window_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
);
pub fn atg_blackman_window_periodic(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_blackman_window_periodic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
);
pub fn atg_block_diag(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_block_diag_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_bmm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
pub fn atg_bmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_broadcast_tensors(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
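// A handful of bindings return `*mut *mut C_tensor` directly instead of
// writing through `out__`; the result is a C-allocated, null-terminated
// array of tensor handles that the caller walks and then frees
// (assumption: the usual tch-rs treatment of tensor-list returns).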
pub fn atg_broadcast_to(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
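// Boolean parameters are plain `c_int`s (nonzero = true); `bucketize`'s
// `right_` selects right-closed bucket boundaries and `out_int32_`
// requests i32 indices instead of the default i64, matching
// torch.bucketize.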
pub fn atg_bucketize(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
boundaries_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
);
pub fn atg_bucketize_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
boundaries_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
);
pub fn atg_bucketize_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
boundaries_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
);
pub fn atg_bucketize_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
boundaries_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
);
pub fn atg_can_cast(from__: c_int, to_: c_int) -> c_int;
pub fn atg_cartesian_prod(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_cat(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_cat_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_cauchy(out__: *mut *mut C_tensor, self_: *mut C_tensor, median_: f64, sigma_: f64);
pub fn atg_cauchy_(out__: *mut *mut C_tensor, self_: *mut C_tensor, median_: f64, sigma_: f64);
pub fn atg_cauchy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
median_: f64,
sigma_: f64,
);
pub fn atg_ccol_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ccol_indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ccol_indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_cdist(
out__: *mut *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
p_: f64,
compute_mode_v: i64,
compute_mode_null: i8,
);
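// Nullable integers travel as a (value, flag) pair: when `*_null` is
// nonzero the argument is treated as None and the `*_v` value is
// ignored -- here, cdist's optional compute_mode.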
pub fn atg_ceil(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ceil_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ceil_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_celu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_celu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_celu_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_chain_matmul(
out__: *mut *mut C_tensor,
matrices_data: *const *mut C_tensor,
matrices_len: c_int,
);
pub fn atg_chain_matmul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
matrices_data: *const *mut C_tensor,
matrices_len: c_int,
);
pub fn atg_chalf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_channel_shuffle(out__: *mut *mut C_tensor, self_: *mut C_tensor, groups_: i64);
pub fn atg_channel_shuffle_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
groups_: i64,
);
pub fn atg_cholesky(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
pub fn atg_cholesky_inverse(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
pub fn atg_cholesky_inverse_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
upper_: c_int,
);
pub fn atg_cholesky_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
upper_: c_int,
);
pub fn atg_cholesky_solve(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
input2_: *mut C_tensor,
upper_: c_int,
);
pub fn atg_cholesky_solve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
input2_: *mut C_tensor,
upper_: c_int,
);
pub fn atg_choose_qparams_optimized(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
numel_: i64,
n_bins_: i64,
ratio_: f64,
bit_width_: i64,
);
pub fn atg_chunk(self_: *mut C_tensor, chunks_: i64, dim_: i64) -> *mut *mut C_tensor;
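// `clamp`/`clip` are the same ATen op under both names; either bound may
// be omitted by passing a null `*mut C_scalar`. A minimal sketch,
// assuming `t` is a valid handle and that scalars are built with the
// crate's `ats_float` constructor (an assumption -- substitute however
// scalars are created in your build):
//
//     let lo = unsafe { ats_float(0.0) };
//     let mut out = [std::ptr::null_mut(); 1];
//     // clamp below at 0.0, leave the upper bound open:
//     unsafe { atg_clamp(out.as_mut_ptr(), t, lo, std::ptr::null_mut()) };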
pub fn atg_clamp(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clamp_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clamp_max(out__: *mut *mut C_tensor, self_: *mut C_tensor, max_: *mut C_scalar);
pub fn atg_clamp_max_(out__: *mut *mut C_tensor, self_: *mut C_tensor, max_: *mut C_scalar);
pub fn atg_clamp_max_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
max_: *mut C_scalar,
);
pub fn atg_clamp_max_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clamp_max_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clamp_max_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clamp_min(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar);
pub fn atg_clamp_min_(out__: *mut *mut C_tensor, self_: *mut C_tensor, min_: *mut C_scalar);
pub fn atg_clamp_min_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
);
pub fn atg_clamp_min_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
);
pub fn atg_clamp_min_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
);
pub fn atg_clamp_min_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
);
pub fn atg_clamp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clamp_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clamp_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clamp_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clip(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clip_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clip_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_scalar,
max_: *mut C_scalar,
);
pub fn atg_clip_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clip_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clip_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
min_: *mut C_tensor,
max_: *mut C_tensor,
);
pub fn atg_clone(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_coalesce(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_col2im(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
kernel_size_data: *const i64,
kernel_size_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_col2im_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
kernel_size_data: *const i64,
kernel_size_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_col_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_col_indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_col_indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_column_stack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_column_stack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_combinations(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
r_: i64,
with_replacement_: c_int,
);
pub fn atg_complex(out__: *mut *mut C_tensor, real_: *mut C_tensor, imag_: *mut C_tensor);
pub fn atg_complex_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
real_: *mut C_tensor,
imag_: *mut C_tensor,
);
pub fn atg_concat(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_concat_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_concatenate(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_concatenate_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_conj(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_conj_physical(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_conj_physical_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_conj_physical_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_constant_pad_nd(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
pad_data: *const i64,
pad_len: c_int,
);
pub fn atg_constant_pad_nd_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
pad_data: *const i64,
pad_len: c_int,
);
pub fn atg_contiguous(out__: *mut *mut C_tensor, self_: *mut C_tensor);
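// Integer-list arguments arrive as (data, len) pairs; the `_padding`
// conv variants instead take padding as a UTF-8 string ("same"/"valid")
// via (ptr, len). A conv2d sketch, assuming `input` and `weight` are
// valid handles:
//
//     let stride = [1i64, 1];
//     let padding = [0i64, 0];
//     let dilation = [1i64, 1];
//     let mut out = [std::ptr::null_mut(); 1];
//     unsafe {
//         atg_conv2d(
//             out.as_mut_ptr(),
//             input,
//             weight,
//             std::ptr::null_mut(), // bias: None
//             stride.as_ptr(), 2,
//             padding.as_ptr(), 2,
//             dilation.as_ptr(), 2,
//             1, // groups
//         )
//     };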
pub fn atg_conv1d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv1d_padding(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_ptr: *const u8,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv2d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv2d_padding(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_ptr: *const u8,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv3d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv3d_padding(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_ptr: *const u8,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_conv_depthwise3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_conv_depthwise3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_conv_tbc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
pad_: i64,
);
pub fn atg_conv_tbc_backward(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
pad_: i64,
);
pub fn atg_conv_tbc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
pad_: i64,
);
pub fn atg_conv_transpose1d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_conv_transpose2d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_conv_transpose3d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_convolution(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
);
pub fn atg_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
);
pub fn atg_convolution_overrideable(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
);
pub fn atg_convolution_overrideable_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
transposed_: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
groups_: i64,
);
pub fn atg_copy_sparse_to_sparse(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
non_blocking_: c_int,
);
pub fn atg_copy_sparse_to_sparse_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
non_blocking_: c_int,
);
pub fn atg_copy_sparse_to_sparse_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
non_blocking_: c_int,
);
pub fn atg_copysign(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_copysign_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_copysign_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_copysign_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_copysign_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_copysign_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_corrcoef(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cos(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cos_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cos_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cosh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cosh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cosh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_cosine_embedding_loss(
out__: *mut *mut C_tensor,
input1_: *mut C_tensor,
input2_: *mut C_tensor,
target_: *mut C_tensor,
margin_: f64,
reduction_: i64,
);
pub fn atg_cosine_similarity(
out__: *mut *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
dim_: i64,
eps_: f64,
);
pub fn atg_count_nonzero(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_count_nonzero_dim_intlist(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_count_nonzero_dim_intlist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_count_nonzero_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_cov(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
correction_: i64,
fweights_: *mut C_tensor,
aweights_: *mut C_tensor,
);
pub fn atg_cross(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_cross_entropy_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
label_smoothing_: f64,
);
pub fn atg_cross_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_crow_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_crow_indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_crow_indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_ctc_loss(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_data: *const i64,
input_lengths_len: c_int,
target_lengths_data: *const i64,
target_lengths_len: c_int,
blank_: i64,
reduction_: i64,
zero_infinity_: c_int,
);
pub fn atg_ctc_loss_tensor(
out__: *mut *mut C_tensor,
log_probs_: *mut C_tensor,
targets_: *mut C_tensor,
input_lengths_: *mut C_tensor,
target_lengths_: *mut C_tensor,
blank_: i64,
reduction_: i64,
zero_infinity_: c_int,
);
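// The `atg_cudnn_*` bindings are only meaningful against a CUDA- and
// cuDNN-enabled libtorch; `atg_cudnn_is_acceptable` below reports
// (nonzero = yes) whether cuDNN can handle a given tensor.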
pub fn atg_cudnn_affine_grid_generator(
out__: *mut *mut C_tensor,
theta_: *mut C_tensor,
n_: i64,
c_: i64,
h_: i64,
w_: i64,
);
pub fn atg_cudnn_affine_grid_generator_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
n_: i64,
c_: i64,
h_: i64,
w_: i64,
);
pub fn atg_cudnn_affine_grid_generator_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_: *mut C_tensor,
n_: i64,
c_: i64,
h_: i64,
w_: i64,
);
pub fn atg_cudnn_affine_grid_generator_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
theta_: *mut C_tensor,
n_: i64,
c_: i64,
h_: i64,
w_: i64,
);
pub fn atg_cudnn_batch_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
exponential_average_factor_: f64,
epsilon_: f64,
);
pub fn atg_cudnn_batch_norm_backward(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_var_: *mut C_tensor,
epsilon_: f64,
reserve_space_: *mut C_tensor,
);
pub fn atg_cudnn_batch_norm_backward_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_var_: *mut C_tensor,
epsilon_: f64,
reserve_space_: *mut C_tensor,
);
pub fn atg_cudnn_batch_norm_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
exponential_average_factor_: f64,
epsilon_: f64,
);
pub fn atg_cudnn_convolution(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
allow_tf32_: c_int,
);
pub fn atg_cudnn_convolution_add_relu(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
z_: *mut C_tensor,
alpha_: *mut C_scalar,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_cudnn_convolution_add_relu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
z_: *mut C_tensor,
alpha_: *mut C_scalar,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_cudnn_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
allow_tf32_: c_int,
);
pub fn atg_cudnn_convolution_relu(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_cudnn_convolution_relu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_cudnn_convolution_transpose(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
allow_tf32_: c_int,
);
pub fn atg_cudnn_convolution_transpose_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
allow_tf32_: c_int,
);
pub fn atg_cudnn_grid_sampler(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
grid_: *mut C_tensor,
);
pub fn atg_cudnn_grid_sampler_backward(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
grid_: *mut C_tensor,
grad_output_: *mut C_tensor,
);
pub fn atg_cudnn_grid_sampler_backward_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
grid_: *mut C_tensor,
grad_output_: *mut C_tensor,
);
pub fn atg_cudnn_grid_sampler_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
grid_: *mut C_tensor,
);
pub fn atg_cudnn_is_acceptable(self_: *mut C_tensor) -> c_int;
pub fn atg_cummax(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_cummax_out(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_cummaxmin_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_: *mut C_tensor,
indices_: *mut C_tensor,
dim_: i64,
);
pub fn atg_cummin(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_cummin_out(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
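// `dtype_` is again an ATen ScalarType code; by the usual tch-rs
// convention a negative value means "no dtype override" (stated as an
// assumption), so cumprod/cumsum keep the input's type.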
pub fn atg_cumprod(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
pub fn atg_cumprod_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
pub fn atg_cumprod_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_: *mut C_tensor,
dim_: i64,
output_: *mut C_tensor,
);
pub fn atg_cumprod_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_cumsum(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
pub fn atg_cumsum_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
pub fn atg_cumsum_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_cumulative_trapezoid(out__: *mut *mut C_tensor, y_: *mut C_tensor, dim_: i64);
pub fn atg_cumulative_trapezoid_x(
out__: *mut *mut C_tensor,
y_: *mut C_tensor,
x_: *mut C_tensor,
dim_: i64,
);
pub fn atg_data(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_deg2rad(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_deg2rad_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_deg2rad_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_dense_dim(self_: *mut C_tensor) -> i64;
pub fn atg_dequantize(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_dequantize_self_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_dequantize_tensors(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_dequantize_tensors_out(
out_data: *const *mut C_tensor,
out_len: c_int,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_det(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_detach(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_detach_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_detach_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_detach_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_diag(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
pub fn atg_diag_embed(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diag_embed_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diag_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
diagonal_: i64,
);
pub fn atg_diagflat(out__: *mut *mut C_tensor, self_: *mut C_tensor, offset_: i64);
pub fn atg_diagonal(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diagonal_scatter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_diff(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_: i64,
dim_: i64,
prepend_: *mut C_tensor,
append_: *mut C_tensor,
);
pub fn atg_diff_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_: i64,
dim_: i64,
prepend_: *mut C_tensor,
append_: *mut C_tensor,
);
pub fn atg_digamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_digamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_digamma_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_dist(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_dist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
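// The `_mode` division variants expose ATen's rounding_mode: the string
// is "trunc" or "floor", passed as (ptr, len), with a null pointer
// selecting default true division (assumption: the crate's convention
// for optional strings).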
pub fn atg_div(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_div_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_div_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_div_out_mode(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_div_scalar(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_div_scalar_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_div_scalar_mode(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_div_scalar_mode_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_div_scalar_mode_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_div_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_div_tensor_mode(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_div_tensor_mode_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_divide(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_divide_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_divide_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_divide_out_mode(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_divide_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_divide_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_divide_scalar_mode(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_divide_scalar_mode_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_divide_tensor_mode(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_divide_tensor_mode_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rounding_mode_ptr: *const u8,
rounding_mode_len: c_int,
);
pub fn atg_dot(out__: *mut *mut C_tensor, self_: *mut C_tensor, tensor_: *mut C_tensor);
pub fn atg_dot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
tensor_: *mut C_tensor,
);
pub fn atg_dropout(out__: *mut *mut C_tensor, input_: *mut C_tensor, p_: f64, train_: c_int);
pub fn atg_dropout_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64, train_: c_int);
pub fn atg_dsplit(self_: *mut C_tensor, sections_: i64) -> *mut *mut C_tensor;
pub fn atg_dsplit_array(
self_: *mut C_tensor,
indices_data: *const i64,
indices_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_dstack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_dstack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
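// Tensor lists are (data, len) pairs of borrowed handles. An einsum
// sketch, assuming `a` and `b` are valid `*mut C_tensor`s; the optional
// contraction path is omitted with a null pointer and -1 length
// (assumption: the crate's encoding for a None int-list):
//
//     let eq = "ij,jk->ik";
//     let ts = [a, b];
//     let mut out = [std::ptr::null_mut(); 1];
//     unsafe {
//         atg_einsum(
//             out.as_mut_ptr(),
//             eq.as_ptr(), eq.len() as c_int,
//             ts.as_ptr(), ts.len() as c_int,
//             std::ptr::null(), -1, // path: None
//         )
//     };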
pub fn atg_einsum(
out__: *mut *mut C_tensor,
equation_ptr: *const u8,
equation_len: c_int,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
path_data: *const i64,
path_len: c_int,
);
pub fn atg_elu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_elu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_elu_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
alpha_: *mut C_scalar,
scale_: *mut C_scalar,
input_scale_: *mut C_scalar,
is_result_: c_int,
self_or_result_: *mut C_tensor,
);
pub fn atg_elu_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
alpha_: *mut C_scalar,
scale_: *mut C_scalar,
input_scale_: *mut C_scalar,
is_result_: c_int,
self_or_result_: *mut C_tensor,
);
pub fn atg_elu_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_embedding(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
sparse_: c_int,
);
pub fn atg_embedding_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
num_weights_: i64,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
sparse_: c_int,
);
pub fn atg_embedding_bag(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
);
pub fn atg_embedding_bag_padding_idx(
out__: *mut *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
scale_grad_by_freq_: c_int,
mode_: i64,
sparse_: c_int,
per_sample_weights_: *mut C_tensor,
include_last_offset_: c_int,
padding_idx_v: i64,
padding_idx_null: i8,
);
pub fn atg_embedding_dense_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
indices_: *mut C_tensor,
num_weights_: i64,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
);
pub fn atg_embedding_dense_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
indices_: *mut C_tensor,
num_weights_: i64,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
);
pub fn atg_embedding_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
weight_: *mut C_tensor,
indices_: *mut C_tensor,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
sparse_: c_int,
);
pub fn atg_embedding_renorm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
max_norm_: f64,
norm_type_: f64,
);
pub fn atg_embedding_renorm_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
max_norm_: f64,
norm_type_: f64,
);
pub fn atg_embedding_renorm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
max_norm_: f64,
norm_type_: f64,
);
pub fn atg_embedding_sparse_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
indices_: *mut C_tensor,
num_weights_: i64,
padding_idx_: i64,
scale_grad_by_freq_: c_int,
);
pub fn atg_empty(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_empty_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_empty_like_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_empty_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_empty_permuted(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
physical_layout_data: *const i64,
physical_layout_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_empty_permuted_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
physical_layout_data: *const i64,
physical_layout_len: c_int,
);
pub fn atg_empty_quantized(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
qtensor_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_empty_quantized_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
qtensor_: *mut C_tensor,
);
pub fn atg_empty_strided(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_empty_strided_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_eq(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_eq_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_eq_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_eq_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_eq_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_eq_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
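// Not everything routes through `out__`: predicates and accessors return
// plain values instead (`atg_equal` below yields 0/1, `atg_can_cast`
// above a c_int, `atg_dense_dim` an i64); no result tensor is allocated.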
pub fn atg_equal(self_: *mut C_tensor, other_: *mut C_tensor) -> c_int;
pub fn atg_erf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erf_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erf_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfc_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfc_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfinv(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfinv_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_erfinv_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp2(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp2_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp2_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exp_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_expand(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
implicit_: c_int,
);
pub fn atg_expand_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_expand_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
implicit_: c_int,
);
pub fn atg_expand_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
implicit_: c_int,
);
pub fn atg_expm1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_expm1_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_expm1_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_exponential(out__: *mut *mut C_tensor, self_: *mut C_tensor, lambd_: f64);
pub fn atg_exponential_(out__: *mut *mut C_tensor, self_: *mut C_tensor, lambd_: f64);
pub fn atg_exponential_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
lambd_: f64,
);
pub fn atg_eye(out__: *mut *mut C_tensor, n_: i64, options_kind: c_int, options_device: c_int);
pub fn atg_eye_m(
out__: *mut *mut C_tensor,
n_: i64,
m_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_eye_m_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, n_: i64, m_: i64);
pub fn atg_eye_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, n_: i64);
pub fn atg_fake_quantize_per_channel_affine(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_channel_affine_cachemask(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_channel_affine_cachemask_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg_fake_quantize_per_channel_affine_cachemask_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
axis_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_tensor_affine(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_tensor_affine_cachemask(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_tensor_affine_cachemask_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg_fake_quantize_per_tensor_affine_cachemask_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fake_quantize_per_tensor_affine_tensor_qparams(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
quant_min_: i64,
quant_max_: i64,
);
pub fn atg_fbgemm_linear_fp16_weight(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
packed_weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_fbgemm_linear_fp16_weight_fp32_activation(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
packed_weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_fbgemm_linear_int8_weight(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
packed_: *mut C_tensor,
col_offsets_: *mut C_tensor,
weight_scale_: *mut C_scalar,
weight_zero_point_: *mut C_scalar,
bias_: *mut C_tensor,
);
pub fn atg_fbgemm_linear_int8_weight_fp32_activation(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
packed_: *mut C_tensor,
col_offsets_: *mut C_tensor,
weight_scale_: *mut C_scalar,
weight_zero_point_: *mut C_scalar,
bias_: *mut C_tensor,
);
pub fn atg_fbgemm_pack_gemm_matrix_fp16(out__: *mut *mut C_tensor, input_: *mut C_tensor);
pub fn atg_fbgemm_pack_quantized_matrix(out__: *mut *mut C_tensor, input_: *mut C_tensor);
pub fn atg_fbgemm_pack_quantized_matrix_kn(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
k_: i64,
n_: i64,
);
pub fn atg_feature_alpha_dropout(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_feature_alpha_dropout_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_feature_dropout(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_feature_dropout_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
train_: c_int,
);
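// The fft family shares one shape: an optional length as an
// (n_v, n_null) pair, a dim (or a (data, len) dim list for the nd
// variants), and a normalization string ("backward", "forward", or
// "ortho"). A 1-d sketch over the last dim, assuming `signal` is valid:
//
//     let norm = "ortho";
//     let mut out = [std::ptr::null_mut(); 1];
//     unsafe {
//         atg_fft_fft(
//             out.as_mut_ptr(),
//             signal,
//             0, 1, // n: None (null flag set)
//             -1,   // dim: last
//             norm.as_ptr(), norm.len() as c_int,
//         )
//     };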
pub fn atg_fft_fft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fftfreq(
out__: *mut *mut C_tensor,
n_: i64,
d_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_fft_fftfreq_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, n_: i64, d_: f64);
pub fn atg_fft_fftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_fftshift(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_fft_hfft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_hfft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_hfft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_hfft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_hfftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_hfftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ifftshift(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_fft_ihfft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ihfft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ihfft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ihfft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ihfftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_ihfftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_irfftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfft2(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfft2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfft_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_v: i64,
n_null: i8,
dim_: i64,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfftfreq(
out__: *mut *mut C_tensor,
n_: i64,
d_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_fft_rfftfreq_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, n_: i64, d_: f64);
pub fn atg_fft_rfftn(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fft_rfftn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
s_data: *const i64,
s_len: c_int,
dim_data: *const i64,
dim_len: c_int,
norm_ptr: *const u8,
norm_len: c_int,
);
pub fn atg_fill(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_scalar);
pub fn atg_fill_(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_scalar);
pub fn atg_fill_diagonal_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
fill_value_: *mut C_scalar,
wrap_: c_int,
);
pub fn atg_fill_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_fill_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_tensor);
pub fn atg_fill_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, value_: *mut C_tensor);
pub fn atg_fill_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_fix(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_fix_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_fix_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_flatten(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
start_dim_: i64,
end_dim_: i64,
);
pub fn atg_flatten_dense_tensors(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_flip(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_flip_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_fliplr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_flipud(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_float_power(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_tensor,
);
pub fn atg_float_power_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_scalar,
);
pub fn atg_float_power_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
exponent_: *mut C_tensor,
);
pub fn atg_float_power_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
exponent_: *mut C_tensor,
);
pub fn atg_float_power_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_tensor,
);
pub fn atg_float_power_tensor_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_scalar,
);
pub fn atg_float_power_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_scalar,
);
pub fn atg_float_power_tensor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_tensor,
);
pub fn atg_floor(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_floor_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_floor_divide(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_floor_divide_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_floor_divide_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_floor_divide_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_floor_divide_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_floor_divide_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_floor_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_fmax(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_fmax_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_fmin(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_fmin_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_fmod(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_fmod_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_fmod_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_fmod_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_fmod_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_fmod_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_frac(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_frac_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_frac_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_fractional_max_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
random_samples_: *mut C_tensor,
);
pub fn atg_fractional_max_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
indices_: *mut C_tensor,
);
pub fn atg_fractional_max_pool2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
indices_: *mut C_tensor,
);
pub fn atg_fractional_max_pool2d_output(
out__: *mut *mut C_tensor,
output_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
random_samples_: *mut C_tensor,
);
pub fn atg_fractional_max_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
random_samples_: *mut C_tensor,
);
pub fn atg_fractional_max_pool3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
indices_: *mut C_tensor,
);
pub fn atg_fractional_max_pool3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
indices_: *mut C_tensor,
);
pub fn atg_fractional_max_pool3d_output(
out__: *mut *mut C_tensor,
output_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
output_size_data: *const i64,
output_size_len: c_int,
random_samples_: *mut C_tensor,
);
pub fn atg_frexp(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_frexp_tensor_out(
out__: *mut *mut C_tensor,
mantissa_: *mut C_tensor,
exponent_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_frobenius_norm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_frobenius_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_from_file(
out__: *mut *mut C_tensor,
filename_ptr: *const u8,
filename_len: c_int,
shared_: c_int,
size_v: i64,
size_null: i8,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_from_file_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
filename_ptr: *const u8,
filename_len: c_int,
shared_: c_int,
size_v: i64,
size_null: i8,
);
pub fn atg_full(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
fill_value_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_full_like(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
fill_value_: *mut C_scalar,
);
pub fn atg_full_like_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
fill_value_: *mut C_scalar,
);
pub fn atg_full_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
fill_value_: *mut C_scalar,
);
pub fn atg_fused_moving_avg_obs_fake_quant(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
observer_on_: *mut C_tensor,
fake_quant_on_: *mut C_tensor,
running_min_: *mut C_tensor,
running_max_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
averaging_const_: f64,
quant_min_: i64,
quant_max_: i64,
ch_axis_: i64,
per_row_fake_quant_: c_int,
symmetric_quant_: c_int,
);
pub fn atg_gather(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
sparse_grad_: c_int,
);
pub fn atg_gather_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
sparse_grad_: c_int,
);
pub fn atg_gather_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
sparse_grad_: c_int,
);
pub fn atg_gcd(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_gcd_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_gcd_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_ge(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_ge_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_ge_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_ge_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ge_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ge_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_gelu(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
approximate_ptr: *const u8,
approximate_len: c_int,
);
pub fn atg_gelu_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
approximate_ptr: *const u8,
approximate_len: c_int,
);
pub fn atg_gelu_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
approximate_ptr: *const u8,
approximate_len: c_int,
);
pub fn atg_gelu_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
approximate_ptr: *const u8,
approximate_len: c_int,
);
pub fn atg_gelu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
approximate_ptr: *const u8,
approximate_len: c_int,
);
pub fn atg_geometric(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg_geometric_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg_geometric_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: f64,
);
pub fn atg_geqrf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_geqrf_a(
out__: *mut *mut C_tensor,
a_: *mut C_tensor,
tau_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_ger(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec2_: *mut C_tensor);
pub fn atg_ger_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
vec2_: *mut C_tensor,
);
pub fn atg_glu(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_glu_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_backward_jvp(
out__: *mut *mut C_tensor,
grad_x_: *mut C_tensor,
grad_glu_: *mut C_tensor,
x_: *mut C_tensor,
dgrad_glu_: *mut C_tensor,
dx_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_backward_jvp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_x_: *mut C_tensor,
grad_glu_: *mut C_tensor,
x_: *mut C_tensor,
dgrad_glu_: *mut C_tensor,
dx_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_jvp(
out__: *mut *mut C_tensor,
glu_: *mut C_tensor,
x_: *mut C_tensor,
dx_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_jvp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
glu_: *mut C_tensor,
x_: *mut C_tensor,
dx_: *mut C_tensor,
dim_: i64,
);
pub fn atg_glu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_grad(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_greater(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_greater_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_greater_equal(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_greater_equal_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_greater_equal_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_greater_equal_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_greater_equal_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_greater_equal_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_greater_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_greater_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_greater_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_greater_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_grid_sampler(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg_grid_sampler_2d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg_grid_sampler_2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg_grid_sampler_3d(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg_grid_sampler_3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
grid_: *mut C_tensor,
interpolation_mode_: i64,
padding_mode_: i64,
align_corners_: c_int,
);
pub fn atg_group_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
num_groups_: i64,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
eps_: f64,
cudnn_enabled_: c_int,
);
pub fn atg_gru(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg_gru_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
);
pub fn atg_gru_data(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
batch_sizes_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
);
pub fn atg_gt(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_gt_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_gt_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_gt_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_gt_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_gt_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_hamming_window(
out__: *mut *mut C_tensor,
window_length_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hamming_window_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
);
pub fn atg_hamming_window_periodic(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hamming_window_periodic_alpha(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
alpha_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hamming_window_periodic_alpha_beta(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
alpha_: f64,
beta_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hamming_window_periodic_alpha_beta_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
alpha_: f64,
beta_: f64,
);
pub fn atg_hamming_window_periodic_alpha_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
alpha_: f64,
);
pub fn atg_hamming_window_periodic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
);
pub fn atg_hann_window(
out__: *mut *mut C_tensor,
window_length_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hann_window_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, window_length_: i64);
pub fn atg_hann_window_periodic(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_hann_window_periodic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
);
pub fn atg_hardshrink(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardshrink_backward(
out__: *mut *mut C_tensor,
grad_out_: *mut C_tensor,
self_: *mut C_tensor,
lambd_: *mut C_scalar,
);
pub fn atg_hardshrink_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_out_: *mut C_tensor,
self_: *mut C_tensor,
lambd_: *mut C_scalar,
);
pub fn atg_hardshrink_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardsigmoid(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardsigmoid_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardsigmoid_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_hardsigmoid_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_hardsigmoid_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_hardswish(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardswish_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardswish_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_hardswish_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_hardswish_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardtanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardtanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_hardtanh_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
min_val_: *mut C_scalar,
max_val_: *mut C_scalar,
);
pub fn atg_hardtanh_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
min_val_: *mut C_scalar,
max_val_: *mut C_scalar,
);
pub fn atg_hardtanh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_heaviside(out__: *mut *mut C_tensor, self_: *mut C_tensor, values_: *mut C_tensor);
pub fn atg_heaviside_(out__: *mut *mut C_tensor, self_: *mut C_tensor, values_: *mut C_tensor);
pub fn atg_heaviside_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
values_: *mut C_tensor,
);
pub fn atg_hinge_embedding_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
margin_: f64,
reduction_: i64,
);
pub fn atg_histc(out__: *mut *mut C_tensor, self_: *mut C_tensor, bins_: i64);
pub fn atg_histc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
bins_: i64,
);
pub fn atg_histogram(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
bins_: *mut C_tensor,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg_histogram_bin_ct(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
bins_: i64,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg_histogram_bin_ct_out(
out__: *mut *mut C_tensor,
hist_: *mut C_tensor,
bin_edges_: *mut C_tensor,
self_: *mut C_tensor,
bins_: i64,
range_data: *const f64,
range_len: c_int,
weight_: *mut C_tensor,
density_: c_int,
);
pub fn atg_histogram_bins_tensor_out(
out__: *mut *mut C_tensor,
hist_: *mut C_tensor,
bin_edges_: *mut C_tensor,
self_: *mut C_tensor,
bins_: *mut C_tensor,
weight_: *mut C_tensor,
density_: c_int,
);
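// `atg_hsplit` and `atg_hsplit_array` below differ from the `out__`
// pattern: functions whose number of result tensors is data-dependent
// return a heap-allocated array of tensor handles instead. A hypothetical
// draining sketch, assuming the array is null-terminated (`t` is a
// caller-owned tensor handle):
//
//     let ptr = unsafe { atg_hsplit(t, 2) };
//     let mut parts = Vec::new();
//     let mut i = 0;
//     unsafe {
//         while !(*ptr.add(i)).is_null() {
//             parts.push(*ptr.add(i));
//             i += 1;
//         }
//     }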
pub fn atg_hsplit(self_: *mut C_tensor, sections_: i64) -> *mut *mut C_tensor;
pub fn atg_hsplit_array(
self_: *mut C_tensor,
indices_data: *const i64,
indices_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_hspmm(out__: *mut *mut C_tensor, mat1_: *mut C_tensor, mat2_: *mut C_tensor);
pub fn atg_hspmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_hstack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_hstack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_huber_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
delta_: f64,
);
pub fn atg_huber_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
delta_: f64,
);
pub fn atg_huber_loss_backward_out(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
delta_: f64,
);
pub fn atg_huber_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
delta_: f64,
);
pub fn atg_hypot(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_hypot_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_hypot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_i0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_i0_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_i0_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_igamma(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_igamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_igamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_igammac(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_igammac_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_igammac_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_im2col(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_im2col_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_imag(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_index(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
);
pub fn atg_index_add(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_add_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_add_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_copy_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_index_fill(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_index_fill_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_index_fill_int_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_index_fill_int_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_index_fill_int_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_index_fill_int_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_index_put(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg_index_put_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg_index_put_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
values_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg_index_reduce(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg_index_reduce_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg_index_reduce_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
source_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
include_self_: c_int,
);
pub fn atg_index_select(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
);
pub fn atg_index_select_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_sizes_data: *const i64,
self_sizes_len: c_int,
dim_: i64,
index_: *mut C_tensor,
);
pub fn atg_index_select_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
);
pub fn atg_index_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_data: *const *mut C_tensor,
indices_len: c_int,
);
pub fn atg_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_infinitely_differentiable_gelu_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_inner(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_inner_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_instance_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
use_input_stats_: c_int,
momentum_: f64,
eps_: f64,
cudnn_enabled_: c_int,
);
pub fn atg_int_repr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_int_repr_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_inverse(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_inverse_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
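// The `atg_is_*` predicates below return their result directly as a
// `c_int` (nonzero meaning true) rather than through an `out__` array,
// since they produce a boolean instead of a tensor.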
pub fn atg_is_coalesced(self_: *mut C_tensor) -> c_int;
pub fn atg_is_complex(self_: *mut C_tensor) -> c_int;
pub fn atg_is_conj(self_: *mut C_tensor) -> c_int;
pub fn atg_is_distributed(self_: *mut C_tensor) -> c_int;
pub fn atg_is_floating_point(self_: *mut C_tensor) -> c_int;
pub fn atg_is_inference(self_: *mut C_tensor) -> c_int;
pub fn atg_is_leaf(self_: *mut C_tensor) -> c_int;
pub fn atg_is_neg(self_: *mut C_tensor) -> c_int;
pub fn atg_is_nonzero(self_: *mut C_tensor) -> c_int;
pub fn atg_is_pinned(self_: *mut C_tensor, device_: c_int) -> c_int;
pub fn atg_is_same_size(self_: *mut C_tensor, other_: *mut C_tensor) -> c_int;
pub fn atg_is_set_to(self_: *mut C_tensor, tensor_: *mut C_tensor) -> c_int;
pub fn atg_is_signed(self_: *mut C_tensor) -> c_int;
pub fn atg_is_vulkan_available() -> c_int;
pub fn atg_isclose(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
rtol_: f64,
atol_: f64,
equal_nan_: c_int,
);
pub fn atg_isfinite(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isin(
out__: *mut *mut C_tensor,
elements_: *mut C_tensor,
test_elements_: *mut C_tensor,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isin_scalar_tensor(
out__: *mut *mut C_tensor,
element_: *mut C_scalar,
test_elements_: *mut C_tensor,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isin_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
element_: *mut C_scalar,
test_elements_: *mut C_tensor,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isin_tensor_scalar(
out__: *mut *mut C_tensor,
elements_: *mut C_tensor,
test_element_: *mut C_scalar,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isin_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
elements_: *mut C_tensor,
test_element_: *mut C_scalar,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isin_tensor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
elements_: *mut C_tensor,
test_elements_: *mut C_tensor,
assume_unique_: c_int,
invert_: c_int,
);
pub fn atg_isinf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isinf_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isnan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isnan_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isneginf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isneginf_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isposinf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isposinf_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_isreal(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_istft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_fft_: i64,
hop_length_v: i64,
hop_length_null: i8,
win_length_v: i64,
win_length_null: i8,
window_: *mut C_tensor,
center_: c_int,
normalized_: c_int,
onesided_: c_int,
length_v: i64,
length_null: i8,
return_complex_: c_int,
);
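// Factory functions such as the window constructors below take an
// `options_kind`/`options_device` pair, understood here to be integer
// codes for the element type (ScalarType) and target device. The exact
// encoding is a detail of the generator, so treat this sketch as a
// hypothetical call with placeholder codes:
//
//     let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
//     unsafe {
//         // kind/device codes here are assumptions, not documented values
//         atg_kaiser_window(out.as_mut_ptr(), 128, /* kind */ 6, /* device */ -1);
//     }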
pub fn atg_kaiser_window(
out__: *mut *mut C_tensor,
window_length_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_kaiser_window_beta(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
beta_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_kaiser_window_beta_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
beta_: f64,
);
pub fn atg_kaiser_window_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
);
pub fn atg_kaiser_window_periodic(
out__: *mut *mut C_tensor,
window_length_: i64,
periodic_: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_kaiser_window_periodic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
window_length_: i64,
periodic_: c_int,
);
pub fn atg_kl_div(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
log_target_: c_int,
);
pub fn atg_kron(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_kron_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_kthvalue(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dim_: i64,
keepdim_: c_int,
);
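// `atg_kthvalue` above yields two tensors (values and indices), so the
// caller's `out__` array presumably needs room for both handles; the
// `atg_kthvalue_values` overload below instead writes into the
// caller-provided `values_`/`indices_` tensors. A minimal sketch,
// assuming the two-slot convention (`t` is a caller-owned handle):
//
//     let mut out: [*mut C_tensor; 2] = [std::ptr::null_mut(); 2];
//     unsafe { atg_kthvalue(out.as_mut_ptr(), t, /* k */ 1, /* dim */ 0, /* keepdim */ 0) };
//     let (values, indices) = (out[0], out[1]);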
pub fn atg_kthvalue_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_l1_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_layer_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
normalized_shape_data: *const i64,
normalized_shape_len: c_int,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
eps_: f64,
cudnn_enable_: c_int,
);
pub fn atg_lcm(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_lcm_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_lcm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_ldexp(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ldexp_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ldexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_le(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_le_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_le_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_le_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_le_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_le_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_leaky_relu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_leaky_relu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_leaky_relu_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
negative_slope_: *mut C_scalar,
self_is_result_: c_int,
);
pub fn atg_leaky_relu_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
negative_slope_: *mut C_scalar,
self_is_result_: c_int,
);
pub fn atg_leaky_relu_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lerp(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_scalar,
);
pub fn atg_lerp_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_scalar,
);
pub fn atg_lerp_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_scalar,
);
pub fn atg_lerp_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg_lerp_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg_lerp_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
end_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg_less(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_less_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_less_equal(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_less_equal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_less_equal_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_less_equal_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_less_equal_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_less_equal_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_less_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_less_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_less_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_less_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_lgamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lgamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lgamma_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lift(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lift_fresh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lift_fresh_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_lift_fresh_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_lift_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
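// In the `atg_linalg_*` group that follows, the `_ex` variants mirror
// torch.linalg's `*_ex` functions: they additionally produce an `info`
// tensor of LAPACK-style status codes, and a nonzero `check_errors_`
// asks the native side to raise on failure instead of leaving the check
// to the caller.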
pub fn atg_linalg_cholesky(out__: *mut *mut C_tensor, self_: *mut C_tensor, upper_: c_int);
pub fn atg_linalg_cholesky_ex(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
upper_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_cholesky_ex_l(
out__: *mut *mut C_tensor,
L_: *mut C_tensor,
info_: *mut C_tensor,
self_: *mut C_tensor,
upper_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_cholesky_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
upper_: c_int,
);
pub fn atg_linalg_cond(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: *mut C_scalar);
pub fn atg_linalg_cond_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
);
pub fn atg_linalg_cond_p_str(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_ptr: *const u8,
p_len: c_int,
);
pub fn atg_linalg_cond_p_str_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_ptr: *const u8,
p_len: c_int,
);
pub fn atg_linalg_cross(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dim_: i64,
);
pub fn atg_linalg_cross_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dim_: i64,
);
pub fn atg_linalg_det(out__: *mut *mut C_tensor, A_: *mut C_tensor);
pub fn atg_linalg_det_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, A_: *mut C_tensor);
pub fn atg_linalg_diagonal(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
offset_: i64,
dim1_: i64,
dim2_: i64,
);
pub fn atg_linalg_eig(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_linalg_eig_out(
out__: *mut *mut C_tensor,
eigenvalues_: *mut C_tensor,
eigenvectors_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_linalg_eigh(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
);
pub fn atg_linalg_eigh_eigvals(
out__: *mut *mut C_tensor,
eigvals_: *mut C_tensor,
eigvecs_: *mut C_tensor,
self_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
);
pub fn atg_linalg_eigvals(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_linalg_eigvals_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_linalg_eigvalsh(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
);
pub fn atg_linalg_eigvalsh_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
UPLO_ptr: *const u8,
UPLO_len: c_int,
);
pub fn atg_linalg_householder_product(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
tau_: *mut C_tensor,
);
pub fn atg_linalg_householder_product_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
tau_: *mut C_tensor,
);
pub fn atg_linalg_inv(out__: *mut *mut C_tensor, A_: *mut C_tensor);
pub fn atg_linalg_inv_ex(out__: *mut *mut C_tensor, A_: *mut C_tensor, check_errors_: c_int);
pub fn atg_linalg_inv_ex_inverse(
out__: *mut *mut C_tensor,
inverse_: *mut C_tensor,
info_: *mut C_tensor,
A_: *mut C_tensor,
check_errors_: c_int,
);
pub fn atg_linalg_inv_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, A_: *mut C_tensor);
pub fn atg_linalg_ldl_factor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_ldl_factor_ex(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
hermitian_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_ldl_factor_ex_out(
out__: *mut *mut C_tensor,
LD_: *mut C_tensor,
pivots_: *mut C_tensor,
info_: *mut C_tensor,
self_: *mut C_tensor,
hermitian_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_ldl_factor_out(
out__: *mut *mut C_tensor,
LD_: *mut C_tensor,
pivots_: *mut C_tensor,
self_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_ldl_solve(
out__: *mut *mut C_tensor,
LD_: *mut C_tensor,
pivots_: *mut C_tensor,
B_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_ldl_solve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
LD_: *mut C_tensor,
pivots_: *mut C_tensor,
B_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_lstsq(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
b_: *mut C_tensor,
rcond_v: f64,
rcond_null: i8,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_lstsq_out(
out__: *mut *mut C_tensor,
solution_: *mut C_tensor,
residuals_: *mut C_tensor,
rank_: *mut C_tensor,
singular_values_: *mut C_tensor,
self_: *mut C_tensor,
b_: *mut C_tensor,
rcond_v: f64,
rcond_null: i8,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_lu(out__: *mut *mut C_tensor, A_: *mut C_tensor, pivot_: c_int);
pub fn atg_linalg_lu_factor(out__: *mut *mut C_tensor, A_: *mut C_tensor, pivot_: c_int);
pub fn atg_linalg_lu_factor_ex(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
pivot_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_lu_factor_ex_out(
out__: *mut *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
info_: *mut C_tensor,
A_: *mut C_tensor,
pivot_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_lu_factor_out(
out__: *mut *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
A_: *mut C_tensor,
pivot_: c_int,
);
pub fn atg_linalg_lu_out(
out__: *mut *mut C_tensor,
P_: *mut C_tensor,
L_: *mut C_tensor,
U_: *mut C_tensor,
A_: *mut C_tensor,
pivot_: c_int,
);
pub fn atg_linalg_lu_solve(
out__: *mut *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
adjoint_: c_int,
);
pub fn atg_linalg_lu_solve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
LU_: *mut C_tensor,
pivots_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
adjoint_: c_int,
);
pub fn atg_linalg_matmul(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_linalg_matmul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_linalg_matrix_exp(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_linalg_matrix_exp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_linalg_matrix_power(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_: i64);
pub fn atg_linalg_matrix_power_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_: i64,
);
pub fn atg_linalg_matrix_rank(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
tol_: f64,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_atol_rtol_float(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
atol_v: f64,
atol_null: i8,
rtol_v: f64,
rtol_null: i8,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_atol_rtol_float_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
atol_v: f64,
atol_null: i8,
rtol_v: f64,
rtol_null: i8,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_atol_rtol_tensor(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
atol_: *mut C_tensor,
rtol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_atol_rtol_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
atol_: *mut C_tensor,
rtol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
tol_: f64,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_out_tol_tensor(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
tol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_matrix_rank_tol_tensor(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
tol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_multi_dot(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_linalg_multi_dot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_linalg_norm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
ord_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_linalg_norm_ord_str(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
ord_ptr: *const u8,
ord_len: c_int,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_linalg_norm_ord_str_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
ord_ptr: *const u8,
ord_len: c_int,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_linalg_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
ord_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_linalg_pinv(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
rcond_: f64,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_atol_rtol_float(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
atol_v: f64,
atol_null: i8,
rtol_v: f64,
rtol_null: i8,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_atol_rtol_float_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
atol_v: f64,
atol_null: i8,
rtol_v: f64,
rtol_null: i8,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_atol_rtol_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
atol_: *mut C_tensor,
rtol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_atol_rtol_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
atol_: *mut C_tensor,
rtol_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
rcond_: f64,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_out_rcond_tensor(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
rcond_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_pinv_rcond_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
rcond_: *mut C_tensor,
hermitian_: c_int,
);
pub fn atg_linalg_qr(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
mode_ptr: *const u8,
mode_len: c_int,
);
pub fn atg_linalg_qr_out(
out__: *mut *mut C_tensor,
Q_: *mut C_tensor,
R_: *mut C_tensor,
A_: *mut C_tensor,
mode_ptr: *const u8,
mode_len: c_int,
);
pub fn atg_linalg_slogdet(out__: *mut *mut C_tensor, A_: *mut C_tensor);
pub fn atg_linalg_slogdet_out(
out__: *mut *mut C_tensor,
sign_: *mut C_tensor,
logabsdet_: *mut C_tensor,
A_: *mut C_tensor,
);
pub fn atg_linalg_solve(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
);
pub fn atg_linalg_solve_ex(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_solve_ex_out(
out__: *mut *mut C_tensor,
result_: *mut C_tensor,
info_: *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
check_errors_: c_int,
);
pub fn atg_linalg_solve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
A_: *mut C_tensor,
B_: *mut C_tensor,
left_: c_int,
);
pub fn atg_linalg_solve_triangular(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
B_: *mut C_tensor,
upper_: c_int,
left_: c_int,
unitriangular_: c_int,
);
pub fn atg_linalg_solve_triangular_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
B_: *mut C_tensor,
upper_: c_int,
left_: c_int,
unitriangular_: c_int,
);
pub fn atg_linalg_svd(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
full_matrices_: c_int,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_svd_u(
out__: *mut *mut C_tensor,
U_: *mut C_tensor,
S_: *mut C_tensor,
Vh_: *mut C_tensor,
A_: *mut C_tensor,
full_matrices_: c_int,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_svdvals(
out__: *mut *mut C_tensor,
A_: *mut C_tensor,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_svdvals_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
A_: *mut C_tensor,
driver_ptr: *const u8,
driver_len: c_int,
);
pub fn atg_linalg_tensorinv(out__: *mut *mut C_tensor, self_: *mut C_tensor, ind_: i64);
pub fn atg_linalg_tensorinv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
ind_: i64,
);
pub fn atg_linalg_tensorsolve(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_linalg_tensorsolve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_linalg_vander(out__: *mut *mut C_tensor, x_: *mut C_tensor, n_v: i64, n_null: i8);
pub fn atg_linalg_vecdot(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
y_: *mut C_tensor,
dim_: i64,
);
pub fn atg_linalg_vecdot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
y_: *mut C_tensor,
dim_: i64,
);
pub fn atg_linear(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_linear_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_linspace(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
steps_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_linspace_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
steps_: i64,
);
pub fn atg_linspace_scalar_tensor(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_tensor,
steps_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_linspace_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_tensor,
steps_: i64,
);
pub fn atg_linspace_tensor_scalar(
out__: *mut *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_scalar,
steps_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_linspace_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_scalar,
steps_: i64,
);
pub fn atg_linspace_tensor_tensor(
out__: *mut *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_tensor,
steps_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_linspace_tensor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_tensor,
steps_: i64,
);
pub fn atg_log(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log10(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log10_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log10_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log1p(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log1p_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log1p_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log2(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log2_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log2_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log_normal(out__: *mut *mut C_tensor, self_: *mut C_tensor, mean_: f64, std_: f64);
pub fn atg_log_normal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mean_: f64, std_: f64);
pub fn atg_log_normal_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mean_: f64,
std_: f64,
);
pub fn atg_log_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log_sigmoid(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_log_sigmoid_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
buffer_: *mut C_tensor,
);
pub fn atg_log_sigmoid_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
buffer_: *mut C_tensor,
);
pub fn atg_log_sigmoid_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_log_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_log_softmax_int_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_logaddexp(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logaddexp2(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logaddexp2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_logaddexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_logcumsumexp(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_logcumsumexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_logdet(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_logical_and(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_and_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_and_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_logical_not(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_logical_not_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_logical_not_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_logical_or(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_or_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_or_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_logical_xor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_xor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_logical_xor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_logit(out__: *mut *mut C_tensor, self_: *mut C_tensor, eps_v: f64, eps_null: i8);
pub fn atg_logit_(out__: *mut *mut C_tensor, self_: *mut C_tensor, eps_v: f64, eps_null: i8);
pub fn atg_logit_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_logit_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_logit_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_logspace(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
steps_: i64,
base_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_logspace_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
steps_: i64,
base_: f64,
);
pub fn atg_logspace_scalar_tensor(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_tensor,
steps_: i64,
base_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_logspace_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_tensor,
steps_: i64,
base_: f64,
);
pub fn atg_logspace_tensor_scalar(
out__: *mut *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_scalar,
steps_: i64,
base_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_logspace_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_scalar,
steps_: i64,
base_: f64,
);
pub fn atg_logspace_tensor_tensor(
out__: *mut *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_tensor,
steps_: i64,
base_: f64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_logspace_tensor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_tensor,
end_: *mut C_tensor,
steps_: i64,
base_: f64,
);
pub fn atg_logsumexp(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_logsumexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
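// Tensor-list arguments are flattened into (`*_data: *const *mut C_tensor`,
// `*_len: c_int`) pointer/length pairs, and boolean flags travel as `c_int`
// (0 = false, non-zero = true). Multi-output ops such as `atg_lstm` write
// several handles into consecutive slots of `out__`, so the caller must size
// that buffer for the op's output count (three tensors for `atg_lstm`:
// output, h_n, c_n).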
pub fn atg_lstm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg_lstm_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
);
pub fn atg_lstm_data(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
batch_sizes_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
);
pub fn atg_lstm_mps_backward(
out0_: *mut C_tensor,
out1_data: *const *mut C_tensor,
out1_len: c_int,
out2_data: *const *mut C_tensor,
out2_len: c_int,
grad_y_: *mut C_tensor,
grad_hy_: *mut C_tensor,
grad_cy_: *mut C_tensor,
z_state_: *mut C_tensor,
cell_state_fwd_: *mut C_tensor,
input_: *mut C_tensor,
layersOutputs_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg_lt(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_lt_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_lt_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_lt_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_lt_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_lt_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_lu_solve(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
LU_data_: *mut C_tensor,
LU_pivots_: *mut C_tensor,
);
pub fn atg_lu_solve_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
LU_data_: *mut C_tensor,
LU_pivots_: *mut C_tensor,
);
pub fn atg_lu_unpack(
out__: *mut *mut C_tensor,
LU_data_: *mut C_tensor,
LU_pivots_: *mut C_tensor,
unpack_data_: c_int,
unpack_pivots_: c_int,
);
pub fn atg_lu_unpack_out(
out__: *mut *mut C_tensor,
P_: *mut C_tensor,
L_: *mut C_tensor,
U_: *mut C_tensor,
LU_data_: *mut C_tensor,
LU_pivots_: *mut C_tensor,
unpack_data_: c_int,
unpack_pivots_: c_int,
);
pub fn atg_margin_ranking_loss(
out__: *mut *mut C_tensor,
input1_: *mut C_tensor,
input2_: *mut C_tensor,
target_: *mut C_tensor,
margin_: f64,
reduction_: i64,
);
pub fn atg_masked_fill(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_masked_fill_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_masked_fill_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_masked_fill_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_masked_fill_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_masked_fill_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
value_: *mut C_tensor,
);
pub fn atg_masked_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_masked_scatter_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_masked_scatter_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
mask_: *mut C_tensor,
sizes_data: *const i64,
sizes_len: c_int,
);
pub fn atg_masked_scatter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_masked_select(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor);
pub fn atg_masked_select_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg_masked_select_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
);
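// A minimal sketch of driving one of these single-output bindings from Rust.
// Only declarations may appear in an `extern` block, so the example lives in
// this comment; `lhs` and `rhs` are assumed to be valid `*mut C_tensor`
// handles obtained from the tensor-construction bindings elsewhere in this
// crate:
//
//     let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
//     unsafe { atg_matmul(out.as_mut_ptr(), lhs, rhs) };
//     let product = out[0]; // fresh handle; the caller is responsible for freeing it
//
// Production wrappers would additionally consult the library's captured-error
// state after each call before trusting the returned handle.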
pub fn atg_matmul(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_matmul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_matrix_exp(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_matrix_exp_backward(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
grad_: *mut C_tensor,
);
pub fn atg_matrix_h(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_matrix_power(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_: i64);
pub fn atg_matrix_power_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
n_: i64,
);
pub fn atg_max(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_max_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
pub fn atg_max_dim_max(
out__: *mut *mut C_tensor,
max_: *mut C_tensor,
max_values_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_max_other(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_max_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
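// Pooling parameters (`kernel_size`, `stride`, `padding`, `dilation`) arrive
// as i64 array/length pairs whose lengths match the number of spatial
// dimensions; as in ATen, an empty `stride` array falls back to the kernel
// size.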
pub fn atg_max_pool1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool1d_with_indices(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool2d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool2d_with_indices(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool2d_with_indices_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
indices_: *mut C_tensor,
);
pub fn atg_max_pool2d_with_indices_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
indices_: *mut C_tensor,
);
pub fn atg_max_pool2d_with_indices_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool3d_with_indices(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_pool3d_with_indices_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
indices_: *mut C_tensor,
);
pub fn atg_max_pool3d_with_indices_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
indices_: *mut C_tensor,
);
pub fn atg_max_pool3d_with_indices_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_max_unary_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_max_unpool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_max_unpool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_max_unpool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_max_unpool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_maximum(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_maximum_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_mean(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_mean_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_mean_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_mean_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_median(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_median_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_median_dim_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_median_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
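// A few bindings return a `*mut *mut C_tensor` list directly instead of
// filling `out__`. The C glue appears to allocate the array with one extra
// null slot at the end, so callers can walk it until the null terminator
// (for `atg_meshgrid` the logical length also equals `tensors_len`).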
pub fn atg_meshgrid(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_meshgrid_indexing(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
indexing_ptr: *const u8,
indexing_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_mh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_min(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_min_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
pub fn atg_min_dim_min(
out__: *mut *mut C_tensor,
min_: *mut C_tensor,
min_indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_min_other(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_min_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_min_unary_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_minimum(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_minimum_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
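// The `atg_miopen_*` family mirrors ATen's MIOpen (ROCm) backend kernels;
// the signatures otherwise follow the same conventions as their cuDNN/CPU
// counterparts.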
pub fn atg_miopen_batch_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
exponential_average_factor_: f64,
epsilon_: f64,
);
pub fn atg_miopen_batch_norm_backward(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_var_: *mut C_tensor,
epsilon_: f64,
);
pub fn atg_miopen_batch_norm_backward_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_var_: *mut C_tensor,
epsilon_: f64,
);
pub fn atg_miopen_batch_norm_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
exponential_average_factor_: f64,
epsilon_: f64,
);
pub fn atg_miopen_convolution(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_convolution_add_relu(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
z_: *mut C_tensor,
alpha_: *mut C_scalar,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_miopen_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_convolution_relu(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_miopen_convolution_transpose(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_convolution_transpose_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_depthwise_convolution(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_depthwise_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
benchmark_: c_int,
deterministic_: c_int,
);
pub fn atg_miopen_rnn(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_data: *const *mut C_tensor,
weight_len: c_int,
weight_stride0_: i64,
hx_: *mut C_tensor,
cx_: *mut C_tensor,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
batch_first_: c_int,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
dropout_state_: *mut C_tensor,
);
pub fn atg_miopen_rnn_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
out4_: *mut C_tensor,
input_: *mut C_tensor,
weight_data: *const *mut C_tensor,
weight_len: c_int,
weight_stride0_: i64,
hx_: *mut C_tensor,
cx_: *mut C_tensor,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
batch_first_: c_int,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
dropout_state_: *mut C_tensor,
);
pub fn atg_mish(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_mish_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_mish_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_mish_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
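// Likewise, the `atg_mkldnn_*` bindings correspond to ATen's oneDNN
// (formerly MKL-DNN) CPU backend ops.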
pub fn atg_mkldnn_adaptive_avg_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_mkldnn_adaptive_avg_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_mkldnn_adaptive_avg_pool2d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_mkldnn_adaptive_avg_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_mkldnn_convolution(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_mkldnn_convolution_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
);
pub fn atg_mkldnn_linear(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_mkldnn_linear_backward_input(
out__: *mut *mut C_tensor,
input_size_data: *const i64,
input_size_len: c_int,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg_mkldnn_linear_backward_input_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_size_data: *const i64,
input_size_len: c_int,
grad_output_: *mut C_tensor,
weight_: *mut C_tensor,
);
pub fn atg_mkldnn_linear_backward_weights(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_defined_: c_int,
);
pub fn atg_mkldnn_linear_backward_weights_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
grad_output_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_defined_: c_int,
);
pub fn atg_mkldnn_linear_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
);
pub fn atg_mkldnn_max_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
input_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool2d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
input_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
input_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool3d_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
input_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_max_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_mkldnn_reorder_conv2d_weight(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
input_size_data: *const i64,
input_size_len: c_int,
);
pub fn atg_mkldnn_reorder_conv2d_weight_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
input_size_data: *const i64,
input_size_len: c_int,
);
pub fn atg_mkldnn_reorder_conv3d_weight(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
input_size_data: *const i64,
input_size_len: c_int,
);
pub fn atg_mkldnn_reorder_conv3d_weight_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
stride_data: *const i64,
stride_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
groups_: i64,
input_size_data: *const i64,
input_size_len: c_int,
);
pub fn atg_mkldnn_rnn_layer(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight0_: *mut C_tensor,
weight1_: *mut C_tensor,
weight2_: *mut C_tensor,
weight3_: *mut C_tensor,
hx__: *mut C_tensor,
cx__: *mut C_tensor,
reverse_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
has_biases_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
train_: c_int,
);
pub fn atg_mkldnn_rnn_layer_backward(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight1_: *mut C_tensor,
weight2_: *mut C_tensor,
weight3_: *mut C_tensor,
weight4_: *mut C_tensor,
hx__: *mut C_tensor,
cx_tmp_: *mut C_tensor,
output_: *mut C_tensor,
hy__: *mut C_tensor,
cy__: *mut C_tensor,
grad_output_: *mut C_tensor,
grad_hy_: *mut C_tensor,
grad_cy_: *mut C_tensor,
reverse_: c_int,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
has_biases_: c_int,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
batch_first_: c_int,
workspace_: *mut C_tensor,
);
pub fn atg_mkldnn_rnn_layer_backward_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
out4_: *mut C_tensor,
out5_: *mut C_tensor,
out6_: *mut C_tensor,
input_: *mut C_tensor,
weight1_: *mut C_tensor,
weight2_: *mut C_tensor,
weight3_: *mut C_tensor,
weight4_: *mut C_tensor,
hx__: *mut C_tensor,
cx_tmp_: *mut C_tensor,
output_: *mut C_tensor,
hy__: *mut C_tensor,
cy__: *mut C_tensor,
grad_output_: *mut C_tensor,
grad_hy_: *mut C_tensor,
grad_cy_: *mut C_tensor,
reverse_: c_int,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
has_biases_: c_int,
train_: c_int,
bidirectional_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
batch_first_: c_int,
workspace_: *mut C_tensor,
);
pub fn atg_mkldnn_rnn_layer_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
out3_: *mut C_tensor,
input_: *mut C_tensor,
weight0_: *mut C_tensor,
weight1_: *mut C_tensor,
weight2_: *mut C_tensor,
weight3_: *mut C_tensor,
hx__: *mut C_tensor,
cx__: *mut C_tensor,
reverse_: c_int,
batch_sizes_data: *const i64,
batch_sizes_len: c_int,
mode_: i64,
hidden_size_: i64,
num_layers_: i64,
has_biases_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
train_: c_int,
);
pub fn atg_mm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
pub fn atg_mm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_mode(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, keepdim_: c_int);
pub fn atg_mode_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_moveaxis(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_data: *const i64,
source_len: c_int,
destination_data: *const i64,
destination_len: c_int,
);
pub fn atg_moveaxis_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_: i64,
destination_: i64,
);
pub fn atg_movedim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_data: *const i64,
source_len: c_int,
destination_data: *const i64,
destination_len: c_int,
);
pub fn atg_movedim_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_: i64,
destination_: i64,
);
pub fn atg_mse_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_mse_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_mse_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_mse_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_msort(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_msort_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_mt(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_mul(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_mul_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_mul_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_mul_scalar(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_mul_scalar_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_mul_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_multi_margin_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
p_: *mut C_scalar,
margin_: *mut C_scalar,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_multi_margin_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
p_: *mut C_scalar,
margin_: *mut C_scalar,
weight_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_multilabel_margin_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_multilabel_margin_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
is_target_: *mut C_tensor,
);
pub fn atg_multilabel_margin_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
is_target_: *mut C_tensor,
);
pub fn atg_multilabel_margin_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_multinomial(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
num_samples_: i64,
replacement_: c_int,
);
pub fn atg_multinomial_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
num_samples_: i64,
replacement_: c_int,
);
pub fn atg_multiply(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_multiply_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_multiply_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_multiply_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_multiply_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_mv(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec_: *mut C_tensor);
pub fn atg_mv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
vec_: *mut C_tensor,
);
pub fn atg_mvlgamma(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: i64);
pub fn atg_mvlgamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: i64);
pub fn atg_mvlgamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: i64,
);
pub fn atg_nan_to_num(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
nan_v: f64,
nan_null: i8,
posinf_v: f64,
posinf_null: i8,
neginf_v: f64,
neginf_null: i8,
);
pub fn atg_nan_to_num_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
nan_v: f64,
nan_null: i8,
posinf_v: f64,
posinf_null: i8,
neginf_v: f64,
neginf_null: i8,
);
pub fn atg_nan_to_num_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
nan_v: f64,
nan_null: i8,
posinf_v: f64,
posinf_null: i8,
neginf_v: f64,
neginf_null: i8,
);
pub fn atg_nanmean(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_nanmean_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_nanmedian(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_nanmedian_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_nanmedian_dim_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
);
pub fn atg_nanmedian_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
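// String arguments such as `interpolation` are passed as an unterminated
// (`*const u8`, length) pair rather than as NUL-terminated C strings.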
pub fn atg_nanquantile(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
q_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_nanquantile_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
q_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_nanquantile_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
q_: f64,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_nanquantile_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
q_: f64,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_nansum(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_nansum_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_narrow(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_: i64,
length_: i64,
);
pub fn atg_narrow_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_: i64,
length_: i64,
);
pub fn atg_narrow_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_: i64,
length_: i64,
);
pub fn atg_narrow_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_: *mut C_tensor,
length_: i64,
);
pub fn atg_native_batch_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg_native_batch_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
save_mean_: *mut C_tensor,
save_invstd_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
running_mean_: *mut C_tensor,
running_var_: *mut C_tensor,
training_: c_int,
momentum_: f64,
eps_: f64,
);
pub fn atg_native_channel_shuffle(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
groups_: i64,
);
pub fn atg_native_dropout(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_native_dropout_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
mask_: *mut C_tensor,
scale_: f64,
);
pub fn atg_native_dropout_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
mask_: *mut C_tensor,
scale_: f64,
);
pub fn atg_native_dropout_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
input_: *mut C_tensor,
p_: f64,
train_: c_int,
);
pub fn atg_native_group_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
n_: i64,
C_: i64,
HxW_: i64,
group_: i64,
eps_: f64,
);
pub fn atg_native_group_norm_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
n_: i64,
C_: i64,
HxW_: i64,
group_: i64,
eps_: f64,
);
pub fn atg_native_layer_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
normalized_shape_data: *const i64,
normalized_shape_len: c_int,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
eps_: f64,
);
pub fn atg_native_layer_norm_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
input_: *mut C_tensor,
normalized_shape_data: *const i64,
normalized_shape_len: c_int,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
eps_: f64,
);
pub fn atg_native_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_native_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_native_norm_scalaropt_dim_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_native_norm_scalaropt_dim_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_ne(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_ne_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_ne_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_ne_tensor(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ne_tensor_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_ne_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_neg(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_neg_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_neg_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_negative(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_negative_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_negative_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_nested_to_padded_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_: f64,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_new_empty(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_new_empty_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_new_empty_strided(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_new_empty_strided_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_new_full(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
fill_value_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_new_full_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
fill_value_: *mut C_scalar,
);
pub fn atg_new_ones(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_new_ones_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_new_zeros(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_new_zeros_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_nextafter(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_nextafter_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_nextafter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_nll_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
);
pub fn atg_nll_loss2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
);
pub fn atg_nll_loss2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
total_weight_: *mut C_tensor,
);
pub fn atg_nll_loss2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
total_weight_: *mut C_tensor,
);
pub fn atg_nll_loss2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
);
pub fn atg_nll_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
total_weight_: *mut C_tensor,
);
pub fn atg_nll_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
total_weight_: *mut C_tensor,
);
pub fn atg_nll_loss_nd(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
);
pub fn atg_nll_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
weight_: *mut C_tensor,
reduction_: i64,
ignore_index_: i64,
);
pub fn atg_nonzero(out__: *mut *mut C_tensor, self_: *mut C_tensor);
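// `atg_nonzero_numpy` returns one index tensor per input dimension, using the
// same returned-list convention noted above for `atg_meshgrid`.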
pub fn atg_nonzero_numpy(self_: *mut C_tensor) -> *mut *mut C_tensor;
pub fn atg_nonzero_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_nonzero_static(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_: i64,
fill_value_: i64,
);
pub fn atg_nonzero_static_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_: i64,
fill_value_: i64,
);
pub fn atg_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_norm_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_norm_except_dim(out__: *mut *mut C_tensor, v_: *mut C_tensor, pow_: i64, dim_: i64);
pub fn atg_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_norm_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_norm_scalaropt_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_norm_scalaropt_dim_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_norm_scalaropt_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dtype_: c_int,
);
pub fn atg_norm_scalaropt_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dtype_: c_int,
);
pub fn atg_normal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, mean_: f64, std_: f64);
pub fn atg_normal_functional(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mean_: f64,
std_: f64,
);
pub fn atg_not_equal(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_not_equal_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_not_equal_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_not_equal_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_not_equal_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_not_equal_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_nuclear_norm(out__: *mut *mut C_tensor, self_: *mut C_tensor, keepdim_: c_int);
pub fn atg_nuclear_norm_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_nuclear_norm_dim_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_nuclear_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
keepdim_: c_int,
);
pub fn atg_numpy_t(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_one_hot(out__: *mut *mut C_tensor, self_: *mut C_tensor, num_classes_: i64);
pub fn atg_ones(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_ones_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ones_like_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_ones_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_orgqr(out__: *mut *mut C_tensor, self_: *mut C_tensor, input2_: *mut C_tensor);
pub fn atg_orgqr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
input2_: *mut C_tensor,
);
pub fn atg_ormqr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
input2_: *mut C_tensor,
input3_: *mut C_tensor,
left_: c_int,
transpose_: c_int,
);
pub fn atg_ormqr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
input2_: *mut C_tensor,
input3_: *mut C_tensor,
left_: c_int,
transpose_: c_int,
);
pub fn atg_outer(out__: *mut *mut C_tensor, self_: *mut C_tensor, vec2_: *mut C_tensor);
pub fn atg_outer_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
vec2_: *mut C_tensor,
);
pub fn atg_output_nr(self_: *mut C_tensor) -> i64;
pub fn atg_pad(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
pad_data: *const i64,
pad_len: c_int,
mode_ptr: *const u8,
mode_len: c_int,
value_v: f64,
value_null: i8,
);
pub fn atg_pad_sequence(
out__: *mut *mut C_tensor,
sequences_data: *const *mut C_tensor,
sequences_len: c_int,
batch_first_: c_int,
padding_value_: f64,
padding_side_ptr: *const u8,
padding_side_len: c_int,
);
pub fn atg_pairwise_distance(
out__: *mut *mut C_tensor,
x1_: *mut C_tensor,
x2_: *mut C_tensor,
p_: f64,
eps_: f64,
keepdim_: c_int,
);
pub fn atg_pdist(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: f64);
pub fn atg_permute(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_permute_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_permute_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_pin_memory(out__: *mut *mut C_tensor, self_: *mut C_tensor, device_: c_int);
pub fn atg_pinverse(out__: *mut *mut C_tensor, self_: *mut C_tensor, rcond_: f64);
pub fn atg_pixel_shuffle(out__: *mut *mut C_tensor, self_: *mut C_tensor, upscale_factor_: i64);
pub fn atg_pixel_shuffle_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
upscale_factor_: i64,
);
pub fn atg_pixel_unshuffle(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
downscale_factor_: i64,
);
pub fn atg_pixel_unshuffle_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
downscale_factor_: i64,
);
pub fn atg_poisson(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_poisson_nll_loss(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
target_: *mut C_tensor,
log_input_: c_int,
full_: c_int,
eps_: f64,
reduction_: i64,
);
pub fn atg_poisson_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_polar(out__: *mut *mut C_tensor, abs_: *mut C_tensor, angle_: *mut C_tensor);
pub fn atg_polar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
abs_: *mut C_tensor,
angle_: *mut C_tensor,
);
pub fn atg_polygamma(out__: *mut *mut C_tensor, n_: i64, self_: *mut C_tensor);
pub fn atg_polygamma_(out__: *mut *mut C_tensor, self_: *mut C_tensor, n_: i64);
pub fn atg_polygamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
n_: i64,
self_: *mut C_tensor,
);
pub fn atg_positive(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_pow(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_tensor);
pub fn atg_pow_(out__: *mut *mut C_tensor, self_: *mut C_tensor, exponent_: *mut C_scalar);
pub fn atg_pow_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
exponent_: *mut C_tensor,
);
pub fn atg_pow_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
exponent_: *mut C_tensor,
);
pub fn atg_pow_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_tensor,
);
pub fn atg_pow_tensor_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_scalar,
);
pub fn atg_pow_tensor_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_scalar,
);
pub fn atg_pow_tensor_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
exponent_: *mut C_tensor,
);
pub fn atg_prelu(out__: *mut *mut C_tensor, self_: *mut C_tensor, weight_: *mut C_tensor);
pub fn atg_prod(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_prod_dim_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_prod_int_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_prod_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_put(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
index_: *mut C_tensor,
source_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg_put_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
index_: *mut C_tensor,
source_: *mut C_tensor,
accumulate_: c_int,
);
pub fn atg_put_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
index_: *mut C_tensor,
source_: *mut C_tensor,
accumulate_: c_int,
);
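// Scalar accessors (`atg_q_per_channel_axis`, `atg_q_scale`,
// `atg_q_zero_point`, and `atg_output_nr` above) return plain values directly
// rather than writing tensor handles through `out__`.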
pub fn atg_q_per_channel_axis(self_: *mut C_tensor) -> i64;
pub fn atg_q_per_channel_scales(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_q_per_channel_scales_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_q_per_channel_zero_points(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_q_per_channel_zero_points_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_q_scale(self_: *mut C_tensor) -> f64;
pub fn atg_q_zero_point(self_: *mut C_tensor) -> i64;
pub fn atg_qr(out__: *mut *mut C_tensor, self_: *mut C_tensor, some_: c_int);
pub fn atg_qr_q(
out__: *mut *mut C_tensor,
Q_: *mut C_tensor,
R_: *mut C_tensor,
self_: *mut C_tensor,
some_: c_int,
);
pub fn atg_quantile(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
q_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_quantile_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
q_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_quantile_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
q_: f64,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
pub fn atg_quantile_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
q_: f64,
dim_v: i64,
dim_null: i8,
keepdim_: c_int,
interpolation_ptr: *const u8,
interpolation_len: c_int,
);
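// Quantization entry points: `dtype_` here selects the target quantized
// ScalarType (e.g. qint8), while scale and zero-point arrive either as plain
// f64/i64 values or as tensors in the `_tensor_qparams` variants.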
pub fn atg_quantize_per_channel(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
axis_: i64,
dtype_: c_int,
);
pub fn atg_quantize_per_channel_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
axis_: i64,
dtype_: c_int,
);
pub fn atg_quantize_per_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
dtype_: c_int,
);
pub fn atg_quantize_per_tensor_dynamic(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
reduce_range_: c_int,
);
pub fn atg_quantize_per_tensor_dynamic_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
reduce_range_: c_int,
);
pub fn atg_quantize_per_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: f64,
zero_point_: i64,
dtype_: c_int,
);
pub fn atg_quantize_per_tensor_tensor_qparams(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_quantize_per_tensor_tensor_qparams_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
scale_: *mut C_tensor,
zero_point_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_quantize_per_tensor_tensors(
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
dtype_: c_int,
) -> *mut *mut C_tensor;
pub fn atg_quantize_per_tensor_tensors_out(
out_data: *const *mut C_tensor,
out_len: c_int,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
scales_: *mut C_tensor,
zero_points_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_quantized_batch_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
mean_: *mut C_tensor,
var_: *mut C_tensor,
eps_: f64,
output_scale_: f64,
output_zero_point_: i64,
);
pub fn atg_quantized_batch_norm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
input_: *mut C_tensor,
weight_: *mut C_tensor,
bias_: *mut C_tensor,
mean_: *mut C_tensor,
var_: *mut C_tensor,
eps_: f64,
output_scale_: f64,
output_zero_point_: i64,
);
pub fn atg_quantized_gru_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
packed_ih_: *mut C_tensor,
packed_hh_: *mut C_tensor,
col_offsets_ih_: *mut C_tensor,
col_offsets_hh_: *mut C_tensor,
scale_ih_: *mut C_scalar,
scale_hh_: *mut C_scalar,
zero_point_ih_: *mut C_scalar,
zero_point_hh_: *mut C_scalar,
);
pub fn atg_quantized_lstm_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_data: *const *mut C_tensor,
hx_len: c_int,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
packed_ih_: *mut C_tensor,
packed_hh_: *mut C_tensor,
col_offsets_ih_: *mut C_tensor,
col_offsets_hh_: *mut C_tensor,
scale_ih_: *mut C_scalar,
scale_hh_: *mut C_scalar,
zero_point_ih_: *mut C_scalar,
zero_point_hh_: *mut C_scalar,
);
pub fn atg_quantized_max_pool1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_max_pool1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_max_pool2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_max_pool2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_max_pool3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_max_pool3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
ceil_mode_: c_int,
);
pub fn atg_quantized_rnn_relu_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
packed_ih_: *mut C_tensor,
packed_hh_: *mut C_tensor,
col_offsets_ih_: *mut C_tensor,
col_offsets_hh_: *mut C_tensor,
scale_ih_: *mut C_scalar,
scale_hh_: *mut C_scalar,
zero_point_ih_: *mut C_scalar,
zero_point_hh_: *mut C_scalar,
);
pub fn atg_quantized_rnn_tanh_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
packed_ih_: *mut C_tensor,
packed_hh_: *mut C_tensor,
col_offsets_ih_: *mut C_tensor,
col_offsets_hh_: *mut C_tensor,
scale_ih_: *mut C_scalar,
scale_hh_: *mut C_scalar,
zero_point_ih_: *mut C_scalar,
zero_point_hh_: *mut C_scalar,
);
pub fn atg_rad2deg(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rad2deg_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rad2deg_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rand(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
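// Factory functions such as `atg_rand` take no input tensor: the requested
// shape arrives as the usual `size_data` / `size_len` slice, while the target
// dtype and device are encoded as integer codes in `options_kind` /
// `options_device` (presumably the same `c_int` encoding used for the
// standalone `dtype_` arguments elsewhere in this file).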
pub fn atg_rand_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rand_like_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rand_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_randint(
out__: *mut *mut C_tensor,
high_: i64,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_randint_like(out__: *mut *mut C_tensor, self_: *mut C_tensor, high_: i64);
pub fn atg_randint_like_low_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
low_: i64,
high_: i64,
);
pub fn atg_randint_like_low_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
low_: i64,
high_: i64,
);
pub fn atg_randint_like_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
high_: i64,
);
pub fn atg_randint_low(
out__: *mut *mut C_tensor,
low_: i64,
high_: i64,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_randint_low_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
low_: i64,
high_: i64,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_randint_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
high_: i64,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_randn(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_randn_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_randn_like_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_randn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_random(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_random_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_random_from(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
from_: i64,
to_v: i64,
to_null: i8,
);
pub fn atg_random_from_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
from_: i64,
to_v: i64,
to_null: i8,
);
pub fn atg_random_from_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
from_: i64,
to_v: i64,
to_null: i8,
);
pub fn atg_random_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_random_to(out__: *mut *mut C_tensor, self_: *mut C_tensor, to_: i64);
pub fn atg_random_to_(out__: *mut *mut C_tensor, self_: *mut C_tensor, to_: i64);
pub fn atg_random_to_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
to_: i64,
);
pub fn atg_randperm(
out__: *mut *mut C_tensor,
n_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_randperm_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, n_: i64);
pub fn atg_range(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_range_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
);
pub fn atg_range_out_(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
);
pub fn atg_range_step(
out__: *mut *mut C_tensor,
start_: *mut C_scalar,
end_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_ravel(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_real(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_reciprocal(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_reciprocal_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_reciprocal_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_reflection_pad1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad1d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad1d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_reflection_pad3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_relu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_relu6(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_relu6_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_relu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_relu_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
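// Illustrative only, not part of the generated bindings: a minimal sketch of
// driving a single-output entry point such as `atg_relu`. The caller supplies
// a buffer of tensor pointers, the function writes its result into slot 0,
// and the safe wrapper is assumed to check libtorch's error state afterwards.
// Assuming `t: *mut C_tensor` is a valid tensor handle obtained elsewhere:
//
//     let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
//     unsafe { atg_relu(out.as_mut_ptr(), t) };
//     let relu_t: *mut C_tensor = out[0]; // owned result, freed via the C API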
pub fn atg_remainder(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_remainder_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_remainder_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_remainder_scalar_tensor(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_remainder_scalar_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_remainder_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_remainder_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_remainder_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_renorm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_: i64,
maxnorm_: *mut C_scalar,
);
pub fn atg_renorm_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_: i64,
maxnorm_: *mut C_scalar,
);
pub fn atg_renorm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: *mut C_scalar,
dim_: i64,
maxnorm_: *mut C_scalar,
);
pub fn atg_repeat(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
repeats_data: *const i64,
repeats_len: c_int,
);
pub fn atg_repeat_interleave(
out__: *mut *mut C_tensor,
repeats_: *mut C_tensor,
output_size_v: i64,
output_size_null: i8,
);
pub fn atg_repeat_interleave_self_int(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
repeats_: i64,
dim_v: i64,
dim_null: i8,
output_size_v: i64,
output_size_null: i8,
);
pub fn atg_repeat_interleave_self_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
repeats_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
output_size_v: i64,
output_size_null: i8,
);
pub fn atg_repeat_interleave_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
repeats_: *mut C_tensor,
output_size_v: i64,
output_size_null: i8,
);
pub fn atg_repeat_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
repeats_data: *const i64,
repeats_len: c_int,
);
pub fn atg_replication_pad1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad1d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad1d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_replication_pad3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_requires_grad_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
requires_grad_: c_int,
);
pub fn atg_reshape(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
shape_data: *const i64,
shape_len: c_int,
);
pub fn atg_reshape_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_resize(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_resize_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_resize_as(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_as_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_as_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_as_sparse(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_as_sparse_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_as_sparse_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
the_template_: *mut C_tensor,
);
pub fn atg_resize_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_resolve_conj(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_resolve_neg(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_retains_grad(self_: *mut C_tensor) -> c_int;
pub fn atg_rms_norm(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
normalized_shape_data: *const i64,
normalized_shape_len: c_int,
weight_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_rnn_relu(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg_rnn_relu_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
);
pub fn atg_rnn_relu_data(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
batch_sizes_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
);
pub fn atg_rnn_tanh(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
batch_first_: c_int,
);
pub fn atg_rnn_tanh_cell(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
hx_: *mut C_tensor,
w_ih_: *mut C_tensor,
w_hh_: *mut C_tensor,
b_ih_: *mut C_tensor,
b_hh_: *mut C_tensor,
);
pub fn atg_rnn_tanh_data(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
batch_sizes_: *mut C_tensor,
hx_: *mut C_tensor,
params_data: *const *mut C_tensor,
params_len: c_int,
has_biases_: c_int,
num_layers_: i64,
dropout_: f64,
train_: c_int,
bidirectional_: c_int,
);
pub fn atg_roll(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
shifts_data: *const i64,
shifts_len: c_int,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_roll_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
shifts_data: *const i64,
shifts_len: c_int,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_rot90(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_rot90_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dims_data: *const i64,
dims_len: c_int,
);
pub fn atg_round(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_round_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_round_decimals(out__: *mut *mut C_tensor, self_: *mut C_tensor, decimals_: i64);
pub fn atg_round_decimals_(out__: *mut *mut C_tensor, self_: *mut C_tensor, decimals_: i64);
pub fn atg_round_decimals_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
decimals_: i64,
);
pub fn atg_round_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_row_indices(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_row_indices_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_row_indices_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_row_stack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_row_stack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_rrelu(out__: *mut *mut C_tensor, self_: *mut C_tensor, training_: c_int);
pub fn atg_rrelu_(out__: *mut *mut C_tensor, self_: *mut C_tensor, training_: c_int);
pub fn atg_rrelu_with_noise(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
noise_: *mut C_tensor,
training_: c_int,
);
pub fn atg_rrelu_with_noise_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
noise_: *mut C_tensor,
training_: c_int,
);
pub fn atg_rrelu_with_noise_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
noise_: *mut C_tensor,
lower_: *mut C_scalar,
upper_: *mut C_scalar,
training_: c_int,
self_is_result_: c_int,
);
pub fn atg_rrelu_with_noise_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
noise_: *mut C_tensor,
lower_: *mut C_scalar,
upper_: *mut C_scalar,
training_: c_int,
self_is_result_: c_int,
);
pub fn atg_rrelu_with_noise_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
noise_: *mut C_tensor,
training_: c_int,
);
pub fn atg_rsqrt(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rsqrt_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rsqrt_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_rsub(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_rsub_scalar(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_rsub_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_rsub_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_scalar_tensor(
out__: *mut *mut C_tensor,
s_: *mut C_scalar,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_scalar_tensor_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, s_: *mut C_scalar);
pub fn atg_scaled_dot_product_attention(
out__: *mut *mut C_tensor,
query_: *mut C_tensor,
key_: *mut C_tensor,
value_: *mut C_tensor,
attn_mask_: *mut C_tensor,
dropout_p_: f64,
is_causal_: c_int,
scale_v: f64,
scale_null: i8,
enable_gqa_: c_int,
);
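// `atg_scaled_dot_product_attention` shows the same conventions at a larger
// scale: `attn_mask_` is presumably nullable (a null tensor pointer standing
// in for "no mask"), the booleans `is_causal_` / `enable_gqa_` travel as
// `c_int`, and the optional softmax scale is the usual value/flag pair
// (`scale_v` / `scale_null`).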
pub fn atg_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_add(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_add_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_add_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_reduce(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_scatter_reduce_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_scatter_reduce_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_scatter_src_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
src_: *mut C_tensor,
);
pub fn atg_scatter_value(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_scatter_value_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_scatter_value_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
);
pub fn atg_scatter_value_reduce(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_scatter_value_reduce_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_scatter_value_reduce_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: *mut C_tensor,
value_: *mut C_scalar,
reduce_ptr: *const u8,
reduce_len: c_int,
);
pub fn atg_searchsorted(
out__: *mut *mut C_tensor,
sorted_sequence_: *mut C_tensor,
self_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
side_ptr: *const u8,
side_len: c_int,
sorter_: *mut C_tensor,
);
pub fn atg_searchsorted_scalar(
out__: *mut *mut C_tensor,
sorted_sequence_: *mut C_tensor,
self_scalar_: *mut C_scalar,
out_int32_: c_int,
right_: c_int,
side_ptr: *const u8,
side_len: c_int,
sorter_: *mut C_tensor,
);
pub fn atg_searchsorted_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
sorted_sequence_: *mut C_tensor,
self_scalar_: *mut C_scalar,
out_int32_: c_int,
right_: c_int,
side_ptr: *const u8,
side_len: c_int,
sorter_: *mut C_tensor,
);
pub fn atg_searchsorted_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
sorted_sequence_: *mut C_tensor,
self_: *mut C_tensor,
out_int32_: c_int,
right_: c_int,
side_ptr: *const u8,
side_len: c_int,
sorter_: *mut C_tensor,
);
pub fn atg_segment_reduce(
out__: *mut *mut C_tensor,
data_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
lengths_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
axis_: i64,
unsafe_: c_int,
initial_: *mut C_scalar,
);
pub fn atg_segment_reduce_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
data_: *mut C_tensor,
reduce_ptr: *const u8,
reduce_len: c_int,
lengths_: *mut C_tensor,
indices_: *mut C_tensor,
offsets_: *mut C_tensor,
axis_: i64,
unsafe_: c_int,
initial_: *mut C_scalar,
);
pub fn atg_select(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: i64);
pub fn atg_select_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
index_: i64,
);
pub fn atg_select_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
index_: i64,
);
pub fn atg_select_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, index_: i64);
pub fn atg_select_copy_int_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
index_: i64,
);
pub fn atg_select_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
dim_: i64,
index_: i64,
);
pub fn atg_select_scatter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
dim_: i64,
index_: i64,
);
pub fn atg_selu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_selu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_set(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_set_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_set_data(self_: *mut C_tensor, new_data_: *mut C_tensor);
pub fn atg_set_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_set_requires_grad(out__: *mut *mut C_tensor, self_: *mut C_tensor, r_: c_int);
pub fn atg_set_source_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_set_source_tensor_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_set_source_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
source_: *mut C_tensor,
);
pub fn atg_set_source_tensor_storage_offset_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
source_: *mut C_tensor,
storage_offset_: i64,
size_data: *const i64,
size_len: c_int,
stride_data: *const i64,
stride_len: c_int,
);
pub fn atg_sgn(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sgn_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sgn_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sigmoid(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sigmoid_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sigmoid_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg_sigmoid_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg_sigmoid_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sign(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sign_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sign_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_signbit(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_signbit_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_silu(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_silu_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_silu_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_silu_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_silu_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sin(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sin_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sin_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinc_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinc_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sinh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_slice(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slice_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
start_: i64,
end_: i64,
step_: i64,
);
pub fn atg_slice_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_output_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
start_: i64,
end_: i64,
step_: i64,
);
pub fn atg_slice_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slice_copy_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slice_inverse(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slice_scatter(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slice_scatter_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
src_: *mut C_tensor,
dim_: i64,
start_v: i64,
start_null: i8,
end_v: i64,
end_null: i8,
step_: i64,
);
pub fn atg_slogdet(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_slogdet_out(
out__: *mut *mut C_tensor,
sign_: *mut C_tensor,
logabsdet_: *mut C_tensor,
self_: *mut C_tensor,
);
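// Multi-result operations reuse the single `out__` buffer: judging by the
// named slots of `atg_slogdet_out` (`sign_`, `logabsdet_`), `atg_slogdet`
// writes two pointers into consecutive positions, so the buffer must be sized
// for the op's full output count. A sketch under that assumption, again with
// `t: *mut C_tensor` valid:
//
//     let mut out: [*mut C_tensor; 2] = [std::ptr::null_mut(); 2];
//     unsafe { atg_slogdet(out.as_mut_ptr(), t) };
//     let (sign, logabsdet) = (out[0], out[1]);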
pub fn atg_slow_conv3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_slow_conv3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
);
pub fn atg_slow_conv_dilated2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_dilated2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_dilated3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_dilated3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_transpose2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_transpose2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_transpose3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_slow_conv_transpose3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
weight_: *mut C_tensor,
kernel_size_data: *const i64,
kernel_size_len: c_int,
bias_: *mut C_tensor,
stride_data: *const i64,
stride_len: c_int,
padding_data: *const i64,
padding_len: c_int,
output_padding_data: *const i64,
output_padding_len: c_int,
dilation_data: *const i64,
dilation_len: c_int,
);
pub fn atg_smm(out__: *mut *mut C_tensor, self_: *mut C_tensor, mat2_: *mut C_tensor);
pub fn atg_smooth_l1_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
beta_: f64,
);
pub fn atg_smooth_l1_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
beta_: f64,
);
pub fn atg_smooth_l1_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
beta_: f64,
);
pub fn atg_smooth_l1_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
beta_: f64,
);
pub fn atg_soft_margin_loss(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_soft_margin_loss_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_soft_margin_loss_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_soft_margin_loss_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
target_: *mut C_tensor,
reduction_: i64,
);
pub fn atg_softmax(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, dtype_: c_int);
pub fn atg_softmax_int_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_softplus(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_softplus_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
beta_: *mut C_scalar,
threshold_: *mut C_scalar,
);
pub fn atg_softplus_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
beta_: *mut C_scalar,
threshold_: *mut C_scalar,
);
pub fn atg_softplus_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_softshrink(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_softshrink_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
lambd_: *mut C_scalar,
);
pub fn atg_softshrink_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
lambd_: *mut C_scalar,
);
pub fn atg_softshrink_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sort(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64, descending_: c_int);
pub fn atg_sort_stable(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
stable_: c_int,
dim_: i64,
descending_: c_int,
);
pub fn atg_sort_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
descending_: c_int,
);
pub fn atg_sort_values_stable(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
stable_: c_int,
dim_: i64,
descending_: c_int,
);
pub fn atg_sparse_bsc_tensor(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_bsc_tensor_ccol_row_value_size(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_bsr_tensor(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_bsr_tensor_crow_col_value_size(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_compressed_tensor(
out__: *mut *mut C_tensor,
compressed_indices_: *mut C_tensor,
plain_indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_compressed_tensor_comp_plain_value_size(
out__: *mut *mut C_tensor,
compressed_indices_: *mut C_tensor,
plain_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_coo_tensor(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_coo_tensor_indices(
out__: *mut *mut C_tensor,
indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
is_coalesced_: c_int,
);
pub fn atg_sparse_coo_tensor_indices_size(
out__: *mut *mut C_tensor,
indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
is_coalesced_: c_int,
);
pub fn atg_sparse_coo_tensor_size_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_sparse_csc_tensor(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_csc_tensor_ccol_row_value_size(
out__: *mut *mut C_tensor,
ccol_indices_: *mut C_tensor,
row_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_csr_tensor(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_csr_tensor_crow_col_value_size(
out__: *mut *mut C_tensor,
crow_indices_: *mut C_tensor,
col_indices_: *mut C_tensor,
values_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_sparse_dim(self_: *mut C_tensor) -> i64;
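// Not every binding produces tensors: accessors such as `atg_sparse_dim` here
// (and `atg_retains_grad` further up) return their scalar answer directly as
// the function's return value, with no `out__` buffer involved.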
pub fn atg_sparse_mask(out__: *mut *mut C_tensor, self_: *mut C_tensor, mask_: *mut C_tensor);
pub fn atg_sparse_mask_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mask_: *mut C_tensor,
);
pub fn atg_sparse_resize(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_resize_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_resize_and_clear(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_resize_and_clear_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_resize_and_clear_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_resize_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
sparse_dim_: i64,
dense_dim_: i64,
);
pub fn atg_sparse_sampled_addmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_sparse_sampled_addmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_special_airy_ai(out__: *mut *mut C_tensor, x_: *mut C_tensor);
pub fn atg_special_airy_ai_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
);
pub fn atg_special_bessel_j0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_bessel_j0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_bessel_j1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_bessel_j1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_bessel_y0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_bessel_y0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_bessel_y1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_bessel_y1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_t(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_t_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_t_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_t_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_t_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_t_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_u(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_u_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_u_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_u_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_u_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_u_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_v(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_v_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_v_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_v_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_v_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_v_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_w(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_w_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_w_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_chebyshev_polynomial_w_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_w_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_chebyshev_polynomial_w_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_digamma(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_digamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_entr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_entr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_erf(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_erf_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_erfc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_erfc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_erfcx(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_erfcx_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_erfinv(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_erfinv_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_exp2(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_exp2_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_expit(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_expit_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_expm1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_expm1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_gammainc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_gammainc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_gammaincc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_gammaincc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_gammaln(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_gammaln_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_h(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_h_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_hermite_polynomial_h_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_hermite_polynomial_h_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_h_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_h_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_he(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_he_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_hermite_polynomial_he_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_hermite_polynomial_he_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_he_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_hermite_polynomial_he_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_i0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i0_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i0e(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i0e_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_i1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i1_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i1e(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_i1e_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_laguerre_polynomial_l(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_laguerre_polynomial_l_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_laguerre_polynomial_l_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_laguerre_polynomial_l_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_laguerre_polynomial_l_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_laguerre_polynomial_l_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_legendre_polynomial_p(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_legendre_polynomial_p_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_legendre_polynomial_p_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_legendre_polynomial_p_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_legendre_polynomial_p_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_legendre_polynomial_p_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_log1p(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_log1p_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_log_ndtr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_log_ndtr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_log_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_special_logit(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_special_logit_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
eps_v: f64,
eps_null: i8,
);
pub fn atg_special_logsumexp(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_special_logsumexp_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
);
pub fn atg_special_modified_bessel_i0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_modified_bessel_i0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_modified_bessel_i1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_modified_bessel_i1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_modified_bessel_k0(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_modified_bessel_k0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_modified_bessel_k1(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_modified_bessel_k1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_multigammaln(out__: *mut *mut C_tensor, self_: *mut C_tensor, p_: i64);
pub fn atg_special_multigammaln_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
p_: i64,
);
pub fn atg_special_ndtr(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_ndtr_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_ndtri(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_ndtri_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_polygamma(out__: *mut *mut C_tensor, n_: i64, self_: *mut C_tensor);
pub fn atg_special_polygamma_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
n_: i64,
self_: *mut C_tensor,
);
pub fn atg_special_psi(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_psi_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_round(out__: *mut *mut C_tensor, self_: *mut C_tensor, decimals_: i64);
pub fn atg_special_round_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
decimals_: i64,
);
pub fn atg_special_scaled_modified_bessel_k0(out__: *mut *mut C_tensor, x_: *mut C_tensor);
pub fn atg_special_scaled_modified_bessel_k0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
);
pub fn atg_special_scaled_modified_bessel_k1(out__: *mut *mut C_tensor, x_: *mut C_tensor);
pub fn atg_special_scaled_modified_bessel_k1_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_t(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_t_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_t_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_t_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_t_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_t_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_u(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_u_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_u_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_u_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_u_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_u_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_v(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_v_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_v_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_v_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_v_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_v_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_w(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_w_n_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_w_n_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_scalar,
);
pub fn atg_special_shifted_chebyshev_polynomial_w_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_w_x_scalar(
out__: *mut *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_shifted_chebyshev_polynomial_w_x_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_scalar,
n_: *mut C_tensor,
);
pub fn atg_special_sinc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_special_sinc_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_special_softmax(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
dtype_: c_int,
);
pub fn atg_special_spherical_bessel_j0(out__: *mut *mut C_tensor, x_: *mut C_tensor);
pub fn atg_special_spherical_bessel_j0_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
x_: *mut C_tensor,
);
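// Overload naming in the `atg_special_xlog1py` / `atg_special_xlogy` /
// `atg_special_zeta` families below appears to mirror the PyTorch overloads:
// the bare name takes two tensor operands, `_other_scalar` / `_self_scalar`
// replace one operand with a `C_scalar`, and `_out` variants add a
// preallocated destination tensor `out_` while still reporting the result
// handle through `out__`.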
pub fn atg_special_xlog1py(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_xlog1py_other_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_xlog1py_other_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_xlog1py_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_xlog1py_self_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_special_xlog1py_self_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_special_xlogy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_xlogy_other_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_xlogy_other_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_xlogy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_xlogy_self_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_special_xlogy_self_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_special_zeta(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_special_zeta_other_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_zeta_other_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_special_zeta_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_special_zeta_self_scalar(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_special_zeta_self_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
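// From here on, several functions (`atg_split`, `atg_unbind`,
// `atg_tensor_split`, ...) return `*mut *mut C_tensor` instead of filling an
// `out__` slot: the result is a list whose length is not known up front.
// Assumed convention: the returned array is null-terminated and is freed by
// the caller once the handles have been collected (see the sketch after the
// extern block). Integer-array arguments are passed throughout as
// `*_data` / `*_len` pointer-and-length pairs.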
pub fn atg_split(self_: *mut C_tensor, split_size_: i64, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_split_copy(self_: *mut C_tensor, split_size_: i64, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_split_copy_tensor_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
split_size_: i64,
dim_: i64,
);
pub fn atg_split_sizes(
self_: *mut C_tensor,
split_size_data: *const i64,
split_size_len: c_int,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_split_with_sizes(
self_: *mut C_tensor,
split_sizes_data: *const i64,
split_sizes_len: c_int,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_split_with_sizes_copy(
self_: *mut C_tensor,
split_sizes_data: *const i64,
split_sizes_len: c_int,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_split_with_sizes_copy_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
split_sizes_data: *const i64,
split_sizes_len: c_int,
dim_: i64,
);
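// Suffix convention, following the PyTorch op names: a trailing `_`
// (`atg_sqrt_`) is the in-place variant mutating `self_`, while `_out`
// (`atg_sqrt_out`) writes into the caller-supplied tensor `out_`.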
pub fn atg_sqrt(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sqrt_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_sqrt_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_square(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_square_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_square_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_squeeze(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_squeeze_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_squeeze_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_squeeze_copy_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_squeeze_copy_dim_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_squeeze_copy_dims(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_squeeze_copy_dims_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_squeeze_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_squeeze_dim(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_squeeze_dim_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_squeeze_dims(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_squeeze_dims_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
);
pub fn atg_sspaddmm(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_sspaddmm_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
mat1_: *mut C_tensor,
mat2_: *mut C_tensor,
);
pub fn atg_stack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_stack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
dim_: i64,
);
pub fn atg_std(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
pub fn atg_std_correction(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_std_correction_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_std_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
pub fn atg_std_mean(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
pub fn atg_std_mean_correction(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
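// Ops returning a tuple (here `std_mean`: standard deviation and mean) write
// every result handle through `out__` in order; their `_out` variants take
// one preallocated destination per tuple element (`out0_`, `out1_`, ...).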
pub fn atg_std_mean_correction_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_std_mean_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
pub fn atg_std_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
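// `atg_stft` and `atg_stft_center` show two more encodings used throughout
// this file: an optional integer or float arrives as a `*_v` / `*_null` pair
// (the value is presumably ignored when `*_null` is non-zero), and a string
// argument such as `pad_mode` arrives as a raw byte pointer plus explicit
// length rather than a NUL-terminated C string.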
pub fn atg_stft(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_fft_: i64,
hop_length_v: i64,
hop_length_null: i8,
win_length_v: i64,
win_length_null: i8,
window_: *mut C_tensor,
normalized_: c_int,
onesided_: c_int,
return_complex_: c_int,
);
pub fn atg_stft_center(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
n_fft_: i64,
hop_length_v: i64,
hop_length_null: i8,
win_length_v: i64,
win_length_null: i8,
window_: *mut C_tensor,
center_: c_int,
pad_mode_ptr: *const u8,
pad_mode_len: c_int,
normalized_: c_int,
onesided_: c_int,
return_complex_: c_int,
);
pub fn atg_sub(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_sub_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_sub_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_sub_scalar(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_sub_scalar_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_scalar);
pub fn atg_sub_scalar_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_subtract(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_subtract_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_subtract_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_subtract_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_subtract_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_sum(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_sum_dim_intlist(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_sum_intlist_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
keepdim_: c_int,
dtype_: c_int,
);
pub fn atg_sum_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_sum_to_size(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
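// `atg_svd` writes three handles (U, S, V) through `out__`; `atg_svd_u` is
// its destination-passing form, taking preallocated `U_`, `S_`, `V_` tensors.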
pub fn atg_svd(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
some_: c_int,
compute_uv_: c_int,
);
pub fn atg_svd_u(
out__: *mut *mut C_tensor,
U_: *mut C_tensor,
S_: *mut C_tensor,
V_: *mut C_tensor,
self_: *mut C_tensor,
some_: c_int,
compute_uv_: c_int,
);
pub fn atg_swapaxes(out__: *mut *mut C_tensor, self_: *mut C_tensor, axis0_: i64, axis1_: i64);
pub fn atg_swapaxes_(out__: *mut *mut C_tensor, self_: *mut C_tensor, axis0_: i64, axis1_: i64);
pub fn atg_swapdims(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
pub fn atg_swapdims_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
pub fn atg_t(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_t_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_t_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_t_copy_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_take(out__: *mut *mut C_tensor, self_: *mut C_tensor, index_: *mut C_tensor);
pub fn atg_take_along_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_take_along_dim_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
indices_: *mut C_tensor,
dim_v: i64,
dim_null: i8,
);
pub fn atg_take_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
index_: *mut C_tensor,
);
pub fn atg_tan(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tan_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tan_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tanh(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tanh_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tanh_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg_tanh_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_: *mut C_tensor,
);
pub fn atg_tanh_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_tensor_split(self_: *mut C_tensor, sections_: i64, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_tensor_split_indices(
self_: *mut C_tensor,
indices_data: *const i64,
indices_len: c_int,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_tensor_split_tensor_indices_or_sections(
self_: *mut C_tensor,
tensor_indices_or_sections_: *mut C_tensor,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_tensordot(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dims_self_data: *const i64,
dims_self_len: c_int,
dims_other_data: *const i64,
dims_other_len: c_int,
);
pub fn atg_tensordot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
dims_self_data: *const i64,
dims_self_len: c_int,
dims_other_data: *const i64,
dims_other_len: c_int,
);
pub fn atg_threshold(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
threshold_: *mut C_scalar,
value_: *mut C_scalar,
);
pub fn atg_threshold_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
threshold_: *mut C_scalar,
value_: *mut C_scalar,
);
pub fn atg_threshold_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
threshold_: *mut C_scalar,
);
pub fn atg_threshold_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
self_: *mut C_tensor,
threshold_: *mut C_scalar,
);
pub fn atg_threshold_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
threshold_: *mut C_scalar,
value_: *mut C_scalar,
);
pub fn atg_tile(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dims_data: *const i64,
dims_len: c_int,
);
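// In the `atg_to*` family below, `device_`, `dtype_`, `options_kind`, and
// `options_device` are plain integer codes (presumably the device-index and
// scalar-type encodings defined by the C shim); `non_blocking_` and `copy_`
// are C-style booleans.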
pub fn atg_to(out__: *mut *mut C_tensor, self_: *mut C_tensor, device_: c_int);
pub fn atg_to_dense(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
masked_grad_: c_int,
);
pub fn atg_to_dense_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_: *mut C_tensor,
masked_grad_: c_int,
);
pub fn atg_to_device(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
device_: c_int,
dtype_: c_int,
non_blocking_: c_int,
copy_: c_int,
);
pub fn atg_to_dtype(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
non_blocking_: c_int,
copy_: c_int,
);
pub fn atg_to_dtype_layout(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
options_kind: c_int,
options_device: c_int,
non_blocking_: c_int,
copy_: c_int,
);
pub fn atg_to_mkldnn(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_to_mkldnn_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
input_: *mut C_tensor,
);
pub fn atg_to_mkldnn_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_to_other(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
non_blocking_: c_int,
copy_: c_int,
);
pub fn atg_to_padded_tensor(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
padding_: f64,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_to_padded_tensor_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
padding_: f64,
output_size_data: *const i64,
output_size_len: c_int,
);
pub fn atg_to_sparse(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
layout_: i8,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg_to_sparse_bsc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg_to_sparse_bsr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
blocksize_data: *const i64,
blocksize_len: c_int,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg_to_sparse_csc(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg_to_sparse_csr(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dense_dim_v: i64,
dense_dim_null: i8,
);
pub fn atg_to_sparse_sparse_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
sparse_dim_: i64,
);
pub fn atg_topk(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dim_: i64,
largest_: c_int,
sorted_: c_int,
);
pub fn atg_topk_values(
out__: *mut *mut C_tensor,
values_: *mut C_tensor,
indices_: *mut C_tensor,
self_: *mut C_tensor,
k_: i64,
dim_: i64,
largest_: c_int,
sorted_: c_int,
);
pub fn atg_totype(out__: *mut *mut C_tensor, self_: *mut C_tensor, scalar_type_: c_int);
pub fn atg_trace(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_trace_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
sizes_data: *const i64,
sizes_len: c_int,
);
pub fn atg_trace_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_transpose(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
pub fn atg_transpose_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim0_: i64, dim1_: i64);
pub fn atg_transpose_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim0_: i64,
dim1_: i64,
);
pub fn atg_transpose_copy_int_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim0_: i64,
dim1_: i64,
);
pub fn atg_trapezoid(out__: *mut *mut C_tensor, y_: *mut C_tensor, dim_: i64);
pub fn atg_trapezoid_x(
out__: *mut *mut C_tensor,
y_: *mut C_tensor,
x_: *mut C_tensor,
dim_: i64,
);
pub fn atg_trapz(out__: *mut *mut C_tensor, y_: *mut C_tensor, x_: *mut C_tensor, dim_: i64);
pub fn atg_trapz_dx(out__: *mut *mut C_tensor, y_: *mut C_tensor, dx_: f64, dim_: i64);
pub fn atg_triangular_solve(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
A_: *mut C_tensor,
upper_: c_int,
transpose_: c_int,
unitriangular_: c_int,
);
pub fn atg_triangular_solve_x(
out__: *mut *mut C_tensor,
X_: *mut C_tensor,
M_: *mut C_tensor,
self_: *mut C_tensor,
A_: *mut C_tensor,
upper_: c_int,
transpose_: c_int,
unitriangular_: c_int,
);
pub fn atg_tril(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
pub fn atg_tril_(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
pub fn atg_tril_indices(
out__: *mut *mut C_tensor,
row_: i64,
col_: i64,
offset_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_tril_indices_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
row_: i64,
col_: i64,
offset_: i64,
);
pub fn atg_tril_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
diagonal_: i64,
);
pub fn atg_triplet_margin_loss(
out__: *mut *mut C_tensor,
anchor_: *mut C_tensor,
positive_: *mut C_tensor,
negative_: *mut C_tensor,
margin_: f64,
p_: f64,
eps_: f64,
swap_: c_int,
reduction_: i64,
);
pub fn atg_triu(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
pub fn atg_triu_(out__: *mut *mut C_tensor, self_: *mut C_tensor, diagonal_: i64);
pub fn atg_triu_indices(
out__: *mut *mut C_tensor,
row_: i64,
col_: i64,
offset_: i64,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_triu_indices_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
row_: i64,
col_: i64,
offset_: i64,
);
pub fn atg_triu_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
diagonal_: i64,
);
pub fn atg_true_divide(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_true_divide_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_true_divide_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_true_divide_scalar(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_true_divide_scalar_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_trunc(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_trunc_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_trunc_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_type_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_unbind(self_: *mut C_tensor, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_unbind_copy(self_: *mut C_tensor, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_unbind_copy_int_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
dim_: i64,
);
pub fn atg_unflatten(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
sizes_data: *const i64,
sizes_len: c_int,
);
pub fn atg_unflatten_dense_tensors(
flat_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_unfold(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dimension_: i64,
size_: i64,
step_: i64,
);
pub fn atg_unfold_backward(
out__: *mut *mut C_tensor,
grad_in_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
size_: i64,
step_: i64,
);
pub fn atg_unfold_backward_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
grad_in_: *mut C_tensor,
input_sizes_data: *const i64,
input_sizes_len: c_int,
dim_: i64,
size_: i64,
step_: i64,
);
pub fn atg_unfold_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dimension_: i64,
size_: i64,
step_: i64,
);
pub fn atg_unfold_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dimension_: i64,
size_: i64,
step_: i64,
);
pub fn atg_uniform(out__: *mut *mut C_tensor, self_: *mut C_tensor, from_: f64, to_: f64);
pub fn atg_uniform_(out__: *mut *mut C_tensor, self_: *mut C_tensor, from_: f64, to_: f64);
pub fn atg_uniform_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
from_: f64,
to_: f64,
);
pub fn atg_unique_consecutive(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
return_inverse_: c_int,
return_counts_: c_int,
dim_v: i64,
dim_null: i8,
);
pub fn atg_unique_consecutive_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
self_: *mut C_tensor,
return_inverse_: c_int,
return_counts_: c_int,
dim_v: i64,
dim_null: i8,
);
pub fn atg_unique_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
sorted_: c_int,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg_unique_dim_consecutive(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg_unique_dim_consecutive_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg_unique_dim_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
out2_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
sorted_: c_int,
return_inverse_: c_int,
return_counts_: c_int,
);
pub fn atg_unsafe_chunk(self_: *mut C_tensor, chunks_: i64, dim_: i64) -> *mut *mut C_tensor;
pub fn atg_unsafe_split(
self_: *mut C_tensor,
split_size_: i64,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_unsafe_split_tensor_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
split_size_: i64,
dim_: i64,
);
pub fn atg_unsafe_split_with_sizes(
self_: *mut C_tensor,
split_sizes_data: *const i64,
split_sizes_len: c_int,
dim_: i64,
) -> *mut *mut C_tensor;
pub fn atg_unsafe_split_with_sizes_out(
out_data: *const *mut C_tensor,
out_len: c_int,
self_: *mut C_tensor,
split_sizes_data: *const i64,
split_sizes_len: c_int,
dim_: i64,
);
pub fn atg_unsqueeze(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_unsqueeze_(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_unsqueeze_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor, dim_: i64);
pub fn atg_unsqueeze_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_: i64,
);
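// The `atg_upsample_*` block repeats one shape: a target `output_size`
// slice, optional per-axis scales as `*_v` / `*_null` pairs, and, for the
// linear/cubic modes, an `align_corners_` flag. The `_vec` variants instead
// take an `input_` tensor plus a `scale_factors` float slice.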
pub fn atg_upsample_bicubic2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bicubic2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bicubic2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bicubic2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bicubic2d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_bilinear2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bilinear2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bilinear2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bilinear2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_bilinear2d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_linear1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_linear1d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_linear1d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_linear1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_linear1d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_nearest1d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_nearest1d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_nearest1d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_nearest1d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_v: f64,
scales_null: i8,
);
pub fn atg_upsample_nearest1d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_nearest2d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest2d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest2d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest2d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest2d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_nearest3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_nearest3d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_upsample_trilinear3d(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_trilinear3d_backward(
out__: *mut *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_trilinear3d_backward_grad_input(
out__: *mut *mut C_tensor,
grad_input_: *mut C_tensor,
grad_output_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
input_size_data: *const i64,
input_size_len: c_int,
align_corners_: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_trilinear3d_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scales_d_v: f64,
scales_d_null: i8,
scales_h_v: f64,
scales_h_null: i8,
scales_w_v: f64,
scales_w_null: i8,
);
pub fn atg_upsample_trilinear3d_vec(
out__: *mut *mut C_tensor,
input_: *mut C_tensor,
output_size_data: *const i64,
output_size_len: c_int,
align_corners_: c_int,
scale_factors_data: *const f64,
scale_factors_len: c_int,
);
pub fn atg_value_selecting_reduction_backward(
out__: *mut *mut C_tensor,
grad_: *mut C_tensor,
dim_: i64,
indices_: *mut C_tensor,
sizes_data: *const i64,
sizes_len: c_int,
keepdim_: c_int,
);
pub fn atg_values(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_values_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_values_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_vander(
out__: *mut *mut C_tensor,
x_: *mut C_tensor,
n_v: i64,
n_null: i8,
increasing_: c_int,
);
pub fn atg_var(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
pub fn atg_var_correction(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_var_correction_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_var_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
pub fn atg_var_mean(out__: *mut *mut C_tensor, self_: *mut C_tensor, unbiased_: c_int);
pub fn atg_var_mean_correction(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_var_mean_correction_out(
out__: *mut *mut C_tensor,
out0_: *mut C_tensor,
out1_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
correction_: *mut C_scalar,
keepdim_: c_int,
);
pub fn atg_var_mean_dim(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
pub fn atg_var_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dim_data: *const i64,
dim_len: c_int,
unbiased_: c_int,
keepdim_: c_int,
);
pub fn atg_vdot(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_vdot_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_view(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_view_as(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_view_as_complex(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_view_as_complex_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_view_as_complex_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_view_as_real(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_view_as_real_copy(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_view_as_real_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
);
pub fn atg_view_copy(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_view_copy_dtype(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_view_copy_dtype_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
dtype_: c_int,
);
pub fn atg_view_copy_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
pub fn atg_view_dtype(out__: *mut *mut C_tensor, self_: *mut C_tensor, dtype_: c_int);
pub fn atg_vsplit(self_: *mut C_tensor, sections_: i64) -> *mut *mut C_tensor;
pub fn atg_vsplit_array(
self_: *mut C_tensor,
indices_data: *const i64,
indices_len: c_int,
) -> *mut *mut C_tensor;
pub fn atg_vstack(
out__: *mut *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
pub fn atg_vstack_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
tensors_data: *const *mut C_tensor,
tensors_len: c_int,
);
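// `atg_where` with only a condition is another list-returning form, matching
// `torch.where(condition)`'s tuple of index tensors (one per dimension); the
// `atg_where_*` variants below are the ternary select.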
pub fn atg_where(condition_: *mut C_tensor) -> *mut *mut C_tensor;
pub fn atg_where_scalar(
out__: *mut *mut C_tensor,
condition_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_scalar,
);
pub fn atg_where_scalarother(
out__: *mut *mut C_tensor,
condition_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_where_scalarself(
out__: *mut *mut C_tensor,
condition_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_where_self(
out__: *mut *mut C_tensor,
condition_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_where_self_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
condition_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_xlogy(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_xlogy_(out__: *mut *mut C_tensor, self_: *mut C_tensor, other_: *mut C_tensor);
pub fn atg_xlogy_outscalar_other(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_xlogy_outscalar_self(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_xlogy_outtensor(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_tensor,
);
pub fn atg_xlogy_scalar_other(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_xlogy_scalar_other_(
out__: *mut *mut C_tensor,
self_: *mut C_tensor,
other_: *mut C_scalar,
);
pub fn atg_xlogy_scalar_self(
out__: *mut *mut C_tensor,
self_scalar_: *mut C_scalar,
other_: *mut C_tensor,
);
pub fn atg_zero(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_zero_(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_zero_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_zeros(
out__: *mut *mut C_tensor,
size_data: *const i64,
size_len: c_int,
options_kind: c_int,
options_device: c_int,
);
pub fn atg_zeros_like(out__: *mut *mut C_tensor, self_: *mut C_tensor);
pub fn atg_zeros_like_out(out__: *mut *mut C_tensor, out_: *mut C_tensor, self_: *mut C_tensor);
pub fn atg_zeros_out(
out__: *mut *mut C_tensor,
out_: *mut C_tensor,
size_data: *const i64,
size_len: c_int,
);
}
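
// A minimal caller-side sketch (illustration only, not part of the generated
// bindings): how the two result conventions above look in use.
// `example_sqrt_handle` and `example_split_handles` are hypothetical names;
// real safe wrappers live in the higher-level crate. Assumes the list
// returned by `atg_split` is null-terminated and owned by the C side's
// malloc allocator.
#[allow(dead_code)]
unsafe fn example_sqrt_handle(t: *mut C_tensor) -> *mut C_tensor {
    // Single-result op: `out__` points at a one-slot array that receives
    // the new tensor handle.
    let mut out: [*mut C_tensor; 1] = [std::ptr::null_mut()];
    atg_sqrt(out.as_mut_ptr(), t);
    out[0]
}

#[allow(dead_code)]
unsafe fn example_split_handles(
    t: *mut C_tensor,
    split_size: i64,
    dim: i64,
) -> Vec<*mut C_tensor> {
    // List-result op: walk the returned array until the null sentinel, then
    // release the array itself (the tensor handles stay live).
    let list = atg_split(t, split_size, dim);
    let mut handles = Vec::new();
    let mut i = 0;
    loop {
        let h = *list.add(i);
        if h.is_null() {
            break;
        }
        handles.push(h);
        i += 1;
    }
    libc::free(list as *mut libc::c_void);
    handles
}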