// async_openai_wasm/types/thread.rs
use std::collections::HashMap;

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

use super::{
    AssistantToolResources, AssistantTools, AssistantsApiResponseFormatOption,
    AssistantsApiToolChoiceOption, CreateAssistantToolResources, CreateMessageRequest,
    TruncationObject,
};
/// Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages).
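///
/// A minimal deserialization sketch (field values are illustrative, and the
/// import path assumes this type is re-exported at `async_openai_wasm::types`
/// as in upstream `async-openai`):
///
/// ```
/// use async_openai_wasm::types::ThreadObject;
///
/// let json = r#"{
///     "id": "thread_abc123",
///     "object": "thread",
///     "created_at": 1699012949,
///     "tool_resources": null,
///     "metadata": null
/// }"#;
/// let thread: ThreadObject = serde_json::from_str(json).unwrap();
/// assert_eq!(thread.object, "thread");
/// ```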
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)]
pub struct ThreadObject {
    /// The identifier, which can be referenced in API endpoints.
    pub id: String,

    /// The object type, which is always `thread`.
    pub object: String,

    /// The Unix timestamp (in seconds) for when the thread was created.
    pub created_at: i32,

    /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    pub tool_resources: Option<AssistantToolResources>,

    /// Set of up to 16 key-value pairs that can be attached to the object. Useful for storing additional information about the object in a structured format.
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}
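
/// A request to create a [thread](https://platform.openai.com/docs/api-reference/threads).
///
/// A minimal builder sketch (assumes the generated `CreateThreadRequestArgs`
/// is re-exported at `async_openai_wasm::types`, matching upstream
/// `async-openai`; the metadata key and value are placeholders):
///
/// ```no_run
/// use std::collections::HashMap;
///
/// use async_openai_wasm::types::CreateThreadRequestArgs;
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let mut metadata = HashMap::new();
/// metadata.insert("user_id".to_string(), serde_json::json!("user_123"));
///
/// let request = CreateThreadRequestArgs::default()
///     .metadata(metadata)
///     .build()?;
/// # Ok(())
/// # }
/// ```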
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CreateThreadRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateThreadRequest {
    /// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<CreateMessageRequest>>,

    /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<CreateAssistantToolResources>,

    /// Set of up to 16 key-value pairs that can be attached to the object. Useful for storing additional information about the object in a structured format.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}

/// A request to modify an existing thread's `metadata` and `tool_resources`.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct ModifyThreadRequest {
    /// Set of up to 16 key-value pairs that can be attached to the object. Useful for storing additional information about the object in a structured format.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,

    /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<AssistantToolResources>,
}

/// Response returned when a thread is deleted.
#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)]
pub struct DeleteThreadResponse {
    pub id: String,
    pub deleted: bool,
    pub object: String,
}
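
/// A request to create a thread and run it in a single call.
///
/// A minimal builder sketch (assumes the generated
/// `CreateThreadAndRunRequestArgs` is re-exported at
/// `async_openai_wasm::types`, matching upstream `async-openai`; the
/// assistant ID and model name are placeholders):
///
/// ```no_run
/// use async_openai_wasm::types::CreateThreadAndRunRequestArgs;
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let request = CreateThreadAndRunRequestArgs::default()
///     .assistant_id("asst_abc123")
///     .model("gpt-4o")
///     .instructions("You are a helpful assistant.")
///     .temperature(0.2)
///     .build()?;
/// # Ok(())
/// # }
/// ```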
#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
#[builder(name = "CreateThreadAndRunRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateThreadAndRunRequest {
    /// The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run.
    pub assistant_id: String,

    /// If no thread is provided, an empty thread will be created.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thread: Option<CreateThreadRequest>,

    /// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<AssistantTools>>,

    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_resources: Option<AssistantToolResources>,

    /// Set of up to 16 key-value pairs that can be attached to the object. Useful for storing additional information about the object in a structured format.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or temperature but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_prompt_tokens: Option<u32>,

    /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_completion_tokens: Option<u32>,

    /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation_strategy: Option<TruncationObject>,

    /// Controls which (if any) tool is called by the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<AssistantsApiToolChoiceOption>,

    /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// Specifies the format that the model must output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<AssistantsApiResponseFormatOption>,
}
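
#[cfg(test)]
mod tests {
    use super::*;

    // Sketch: because every optional field is marked
    // `#[serde(skip_serializing_if = "Option::is_none")]`, unset fields are
    // omitted from the request body entirely rather than serialized as `null`.
    #[test]
    fn none_fields_are_omitted_from_json() {
        let request = ModifyThreadRequest::default();
        let json = serde_json::to_string(&request).unwrap();
        assert_eq!(json, "{}");
    }

    // Sketch: the generated builder fills unspecified fields from `Default`
    // (per `#[builder(default)]`), so only the required `assistant_id`
    // appears in the serialized output. The ID is a placeholder.
    #[test]
    fn builder_defaults_serialize_minimally() {
        let request = CreateThreadAndRunRequestArgs::default()
            .assistant_id("asst_abc123")
            .build()
            .unwrap();
        let json = serde_json::to_string(&request).unwrap();
        assert_eq!(json, r#"{"assistant_id":"asst_abc123"}"#);
    }
}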