async_openai_wasm/lib.rs

//! Rust library for OpenAI on WASM
//!
//! ## Creating a client
//!
//! ```
//! use async_openai_wasm::{Client, config::OpenAIConfig};
//!
//! // Create an OpenAI client with the API key from the env var OPENAI_API_KEY and the default base URL.
//! let client = Client::new();
//!
//! // The above is a shortcut for:
//! let config = OpenAIConfig::default();
//! let client = Client::with_config(config);
//!
//! // OR use an API key from a different source and a non-default organization
//! let api_key = "sk-..."; // This secret could come from a file or an environment variable.
//! let config = OpenAIConfig::new()
//!     .with_api_key(api_key)
//!     .with_org_id("the-continental");
//!
//! let client = Client::with_config(config);
//!
//! // Use a custom reqwest client
//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai-wasm").build().unwrap();
//! let client = Client::new().with_http_client(http_client);
//! ```
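//!
//! The config can also target any OpenAI-compatible endpoint via `with_api_base`.
//! A minimal sketch; the base URL below is only a placeholder:
//!
//! ```
//! use async_openai_wasm::{Client, config::OpenAIConfig};
//!
//! // Point the client at an OpenAI-compatible server (placeholder URL).
//! let config = OpenAIConfig::new()
//!     .with_api_base("http://localhost:8080/v1")
//!     .with_api_key("api-key-if-required");
//! let client = Client::with_config(config);
//! ```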
//!
//! ## Microsoft Azure Endpoints
//!
//! ```
//! use async_openai_wasm::{Client, config::AzureConfig};
//!
//! let config = AzureConfig::new()
//!     .with_api_base("https://my-resource-name.openai.azure.com")
//!     .with_api_version("2023-03-15-preview")
//!     .with_deployment_id("deployment-id")
//!     .with_api_key("...");
//!
//! let client = Client::with_config(config);
//!
//! // Note that `async-openai-wasm` only implements the OpenAI spec
//! // and doesn't maintain parity with the Azure OpenAI service spec.
//!
//! ```
//!
//! ## Making requests
//!
//!```
//!# tokio_test::block_on(async {
//!
//! use async_openai_wasm::{Client, types::{CreateCompletionRequestArgs}};
//!
//! // Create client
//! let client = Client::new();
//!
//! // Create a request using the builder pattern.
//! // Every request struct has a companion builder struct with the same name plus an `Args` suffix.
//! let request = CreateCompletionRequestArgs::default()
//!     .model("gpt-3.5-turbo-instruct")
//!     .prompt("Tell me the recipe of alfredo pasta")
//!     .max_tokens(40_u32)
//!     .build()
//!     .unwrap();
//!
//! // Call API
//! let response = client
//!     .completions()      // Get the API "group" (completions, images, etc.) from the client
//!     .create(request)    // Make the API call in that "group"
//!     .await
//!     .unwrap();
//!
//! println!("{}", response.choices.first().unwrap().text);
//! # });
//!```
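//!
//! Chat completions follow the same pattern: pick the API "group" (`chat()`), build the
//! request with its `Args` builder, then make the call. A minimal sketch, assuming the
//! chat types mirror upstream `async-openai` (`CreateChatCompletionRequestArgs`,
//! `ChatCompletionRequestUserMessageArgs`):
//!
//!```
//!# tokio_test::block_on(async {
//!
//! use async_openai_wasm::{Client, types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs}};
//!
//! let client = Client::new();
//!
//! // Build a chat request with a single user message
//! let request = CreateChatCompletionRequestArgs::default()
//!     .model("gpt-4o-mini")
//!     .messages([ChatCompletionRequestUserMessageArgs::default()
//!         .content("Say hello in one short sentence")
//!         .build()
//!         .unwrap()
//!         .into()])
//!     .build()
//!     .unwrap();
//!
//! // Same shape as above: API "group", then the call
//! let response = client.chat().create(request).await.unwrap();
//!
//! println!("{:?}", response.choices.first().unwrap().message.content);
//! # });
//!```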
//!
//! ## Examples
//! For full working examples of all supported features in the original `async-openai`, see the [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in that repository.
//! Also see the [wasm examples](https://github.com/ifsheldon/async-openai-wasm/tree/main/examples) in this repository.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]
mod assistant_files;
mod assistants;
mod audio;
mod audit_logs;
mod batches;
mod chat;
mod client;
mod completion;
pub mod config;
mod embedding;
pub mod error;
mod file;
mod fine_tuning;
mod image;
mod invites;
mod message_files;
mod messages;
mod model;
mod moderation;
mod project_api_keys;
mod project_service_accounts;
mod project_users;
mod projects;
mod runs;
mod steps;
mod threads;
pub mod types;
mod uploads;
mod users;
mod util;
mod vector_store_file_batches;
mod vector_store_files;
mod vector_stores;

pub use assistant_files::AssistantFiles;
pub use assistants::Assistants;
pub use audio::Audio;
pub use audit_logs::AuditLogs;
pub use batches::Batches;
pub use chat::Chat;
pub use client::Client;
pub use completion::Completions;
pub use embedding::Embeddings;
pub use file::Files;
pub use fine_tuning::FineTuning;
pub use image::Images;
pub use invites::Invites;
pub use message_files::MessageFiles;
pub use messages::Messages;
pub use model::Models;
pub use moderation::Moderations;
pub use project_api_keys::ProjectAPIKeys;
pub use project_service_accounts::ProjectServiceAccounts;
pub use project_users::ProjectUsers;
pub use projects::Projects;
pub use runs::Runs;
pub use steps::Steps;
pub use threads::Threads;
pub use uploads::Uploads;
pub use users::Users;
pub use vector_store_file_batches::VectorStoreFileBatches;
pub use vector_store_files::VectorStoreFiles;
pub use vector_stores::VectorStores;