pub struct FfmpegCommand { /* private fields */ }
Expand description
A wrapper around std::process::Command
with some convenient preset
argument sets and customization for ffmpeg
specifically.
The rustdoc
on each method includes relevant information from the FFmpeg
documentation: https://ffmpeg.org/ffmpeg.html. Refer there for the
exhaustive list of possible arguments.
Implementations§
Source§impl FfmpegCommand
impl FfmpegCommand
Alias for the -hide_banner
argument.
Suppress printing banner.
All FFmpeg tools will normally show a copyright notice, build options and library versions. This option can be used to suppress printing this information.
Examples found in repository?
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demonstrates multiplexing three FFmpeg outputs (video, audio, subtitles)
/// over a single local TCP port while reacting to FFmpeg log events.
///
/// NOTE(review): relies on `listen_for_connections`, `channel`, and the
/// ffmpeg-sidecar imports defined elsewhere in this example file.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));

  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));

  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });

  // Signal the listener thread to shut down, then propagate any error it returned.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
More examples
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/// Renders a live console loudness meter for the default audio input device.
///
/// Step 1 discovers the first `dshow` audio device from FFmpeg's stderr;
/// step 2 captures it and parses the `ebur128` filter's momentary-loudness
/// metadata out of the FFmpeg log stream. Windows-only as written (dshow).
pub fn main() -> Result<()> {
  if cfg!(not(windows)) {
    eprintln!("Note: Methods for capturing audio are platform-specific and this demo is intended for Windows.");
    eprintln!("On Linux or Mac, you need to switch from the `dshow` format to a different one supported on your platform.");
    eprintln!("Make sure to also include format-specific arguments such as `-audio_buffer_size`.");
    eprintln!("Pull requests are welcome to make this demo cross-platform!");
  }

  // First step: find default audio input device
  // Runs an `ffmpeg -list_devices` command and selects the first one found
  // Sample log output: [dshow @ 000001c9babdb000] "Headset Microphone (Arctis 7 Chat)" (audio)
  let audio_device = FfmpegCommand::new()
    .hide_banner()
    .args(&["-list_devices", "true"])
    .format("dshow")
    .input("dummy")
    .spawn()?
    .iter()?
    .into_ffmpeg_stderr()
    .find(|line| line.contains("(audio)"))
    // The device name is the text between the first pair of double quotes.
    .map(|line| line.split('\"').nth(1).map(|s| s.to_string()))
    .context("No audio device found")?
    .context("Failed to parse audio device")?;
  println!("Listening to audio device: {}", audio_device);

  // Second step: Capture audio and analyze w/ `ebur128` audio filter
  // Loudness metadata will be printed to the FFmpeg logs
  // Docs: <https://ffmpeg.org/ffmpeg-filters.html#ebur128-1>
  let iter = FfmpegCommand::new()
    .format("dshow")
    .args("-audio_buffer_size 50".split(' ')) // reduces latency to 50ms (dshow-specific)
    .input(format!("audio={audio_device}"))
    .args("-af ebur128=metadata=1,ametadata=print".split(' '))
    .format("null")
    .output("-")
    .spawn()?
    .iter()?;

  // Note: even though the audio device name may have spaces, it should *not* be
  // in quotes (""). Quotes are only needed on the command line to separate
  // different arguments. Since Rust invokes the command directly without a
  // shell interpreter, args are already divided up correctly. Any quotes
  // would be included in the device name instead and the command would fail.
  // <https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/648#issuecomment-866242144>
  let mut first_volume_event = true;
  for event in iter {
    match event {
      FfmpegEvent::Error(e) | FfmpegEvent::Log(LogLevel::Error | LogLevel::Fatal, e) => {
        eprintln!("{e}");
      }
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.contains("lavfi.r128.M=") => {
        if let Some(volume) = msg.split("lavfi.r128.M=").last() {
          // Sample log output: [Parsed_ametadata_1 @ 0000024c27effdc0] [info] lavfi.r128.M=-120.691
          // M = "momentary loudness"; a sliding time window of 400ms
          // Volume scale is roughly -70 to 0 LUFS. Anything below -70 is silence.
          // See <https://en.wikipedia.org/wiki/EBU_R_128#Metering>
          let volume_f32 = volume.parse::<f32>().context("Failed to parse volume")?;
          // Map LUFS onto 0..=14 meter segments (one segment per 5 LUFS),
          // clamped at zero for silence.
          let volume_normalized: usize = max(((volume_f32 / 5.0).round() as i32) + 14, 0) as usize;
          let volume_percent = ((volume_normalized as f32 / 14.0) * 100.0).round();

          // Clear previous line of output
          if !first_volume_event {
            print!("\x1b[1A\x1b[2K");
          } else {
            first_volume_event = false;
          }

          // Blinking red dot to indicate recording
          let time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();
          let recording_indicator = if time % 2 == 0 { "🔴" } else { " " };
          println!(
            "{} {} {}%",
            recording_indicator,
            repeat('█').take(volume_normalized).collect::<String>(),
            volume_percent
          );
        }
      }
      _ => {}
    }
  }
  Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demultiplexes one FFmpeg invocation into three named pipes (video, audio,
/// subtitles), each drained by its own reader thread. Reader threads wait for
/// a ready signal sent on the first FFmpeg progress event before reading.
fn main() -> anyhow::Result<()> {
  use anyhow::Result;
  use ffmpeg_sidecar::command::FfmpegCommand;
  use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
  use ffmpeg_sidecar::named_pipes::NamedPipe;
  use ffmpeg_sidecar::pipe_name;
  use std::io::Read;
  use std::sync::mpsc;
  use std::thread;

  // `pipe_name!` produces a platform-appropriate pipe path for each stream.
  const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
  const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
  const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");

  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  let mut command = FfmpegCommand::new();
  command
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(VIDEO_PIPE_NAME)
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(AUDIO_PIPE_NAME)
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(SUBTITLES_PIPE_NAME);

  // Create a separate thread for each output pipe
  let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
    .iter()
    .cloned()
    .map(|pipe_name| {
      // It's important to create the named pipe on the main thread before
      // sending it elsewhere so that any errors are caught at the top level.
      let mut pipe = NamedPipe::new(pipe_name)?;
      println!("[{pipe_name}] pipe created");
      let (ready_sender, ready_receiver) = mpsc::channel::<()>();
      let thread = thread::spawn(move || -> Result<()> {
        // Wait for FFmpeg to start writing
        // Only needed for Windows, since Unix will block until a writer has connected
        println!("[{pipe_name}] waiting for ready signal");
        ready_receiver.recv()?;

        // Read continuously until finished
        // Note that if the stream of output is interrupted or paused,
        // you may need additional logic to keep the read loop alive.
        println!("[{pipe_name}] reading from pipe");
        let mut buf = vec![0; 1920 * 1080 * 3];
        let mut total_bytes_read = 0;

        // In the case of subtitles, we'll decode the string contents directly
        let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
          Some("".to_string())
        } else {
          None
        };

        loop {
          match pipe.read(&mut buf) {
            Ok(bytes_read) => {
              total_bytes_read += bytes_read;

              // read bytes into string
              if let Some(cur_str) = &mut text_content {
                let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
                text_content = Some(format!("{}{}", cur_str, s));
              }

              if bytes_read == 0 {
                // A zero-byte read signals end of stream.
                break;
              }
            }
            Err(err) => {
              // A broken pipe just means the writer (FFmpeg) has finished;
              // any other I/O error is propagated.
              if err.kind() != std::io::ErrorKind::BrokenPipe {
                return Err(err.into());
              } else {
                break;
              }
            }
          }
        }

        // Log how many bytes were received over this pipe.
        // You can visually compare this to the FFmpeg log output to confirm
        // that all the expected bytes were captured.
        let size_str = if total_bytes_read < 1024 {
          format!("{}B", total_bytes_read)
        } else {
          format!("{}KiB", total_bytes_read / 1024)
        };

        if let Some(text_content) = text_content {
          println!("[{pipe_name}] subtitle text content: ");
          println!("{}", text_content.trim());
        }

        println!("[{pipe_name}] done reading ({size_str} total)");
        Ok(())
      });

      Ok((thread, ready_sender))
    })
    .collect::<Result<Vec<_>>>()?;

  // Start FFmpeg
  let mut ready_signal_sent = false;
  command
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Signal threads when output is ready
      FfmpegEvent::Progress(_) if !ready_signal_sent => {
        threads.iter().for_each(|(_, sender)| {
          sender.send(()).ok();
        });
        ready_signal_sent = true;
      }
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      _ => {}
    });

  // Join each reader thread and propagate any error it returned.
  for (thread, _) in threads {
    thread.join().unwrap()?;
  }
  Ok(())
}
Sourcepub fn format<S: AsRef<str>>(&mut self, format: S) -> &mut Self
pub fn format<S: AsRef<str>>(&mut self, format: S) -> &mut Self
Alias for -f
argument, the format name.
Force input or output file format. The format is normally auto detected for input files and guessed from the file extension for output files, so this option is not needed in most cases.
Examples found in repository?
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Streams an AVI-wrapped rawvideo test pattern from FFmpeg's stdout into
/// ffplay's stdin, manually copying bytes between the two child processes.
///
/// NOTE(review): requires `ffplay` on PATH; relies on `Command`, `Stdio`, and
/// Read/Write imports defined elsewhere in this example file.
fn main() {
  let mut ffmpeg = FfmpegCommand::new()
    .realtime()
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60")
    .codec_video("rawvideo")
    .format("avi")
    .output("-")
    .spawn()
    .unwrap();

  // ffplay reads the muxed stream from its own stdin ("-i -").
  let mut ffplay = Command::new("ffplay")
    .args("-i -".split(' '))
    .stdin(Stdio::piped())
    .spawn()
    .unwrap();

  let mut ffmpeg_stdout = ffmpeg.take_stdout().unwrap();
  let mut ffplay_stdin = ffplay.stdin.take().unwrap();

  // pipe from ffmpeg stdout to ffplay stdin
  let buf = &mut [0u8; 4096];
  loop {
    let n = ffmpeg_stdout.read(buf).unwrap();
    if n == 0 {
      // Zero-byte read: FFmpeg has closed its stdout.
      break;
    }
    ffplay_stdin.write_all(&buf[..n]).unwrap();
  }
}
More examples
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
/// Renders a live FFmpeg test pattern in the terminal by mapping each pixel of
/// every raw frame to an ANSI 24-bit background color escape code.
///
/// NOTE(review): `OUTPUT_WIDTH`/`OUTPUT_HEIGHT`/`OUTPUT_FRAMERATE` are
/// constants defined elsewhere in this example file.
fn main() -> Result<()> {
  let iter = FfmpegCommand::new()
    .format("lavfi")
    .arg("-re") // "realtime"
    .input(format!(
      "testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
    ))
    .rawvideo()
    .spawn()?
    .iter()?
    .filter_frames();

  for frame in iter {
    // clear the previous frame
    if frame.frame_num > 0 {
      for _ in 0..frame.height {
        // Move the cursor up one line per row of the previous frame.
        print!("\x1B[{}A", 1);
      }
    }

    // Print the pixels colored with ANSI codes
    for y in 0..frame.height {
      for x in 0..frame.width {
        // rawvideo frames are packed RGB: three bytes per pixel, row-major.
        let idx = (y * frame.width + x) as usize * 3;
        let r = frame.data[idx] as u32;
        let g = frame.data[idx + 1] as u32;
        let b = frame.data[idx + 2] as u32;
        print!("\x1B[48;2;{r};{g};{b}m ");
      }
      // Reset the color at the end of each row.
      println!("\x1B[0m");
    }
  }
  Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demonstrates multiplexing three FFmpeg outputs (video, audio, subtitles)
/// over a single local TCP port while reacting to FFmpeg log events.
///
/// NOTE(review): relies on `listen_for_connections`, `channel`, and the
/// ffmpeg-sidecar imports defined elsewhere in this example file.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));

  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));

  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });

  // Signal the listener thread to shut down, then propagate any error it returned.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/// Renders a live console loudness meter for the default audio input device.
///
/// Step 1 discovers the first `dshow` audio device from FFmpeg's stderr;
/// step 2 captures it and parses the `ebur128` filter's momentary-loudness
/// metadata out of the FFmpeg log stream. Windows-only as written (dshow).
pub fn main() -> Result<()> {
  if cfg!(not(windows)) {
    eprintln!("Note: Methods for capturing audio are platform-specific and this demo is intended for Windows.");
    eprintln!("On Linux or Mac, you need to switch from the `dshow` format to a different one supported on your platform.");
    eprintln!("Make sure to also include format-specific arguments such as `-audio_buffer_size`.");
    eprintln!("Pull requests are welcome to make this demo cross-platform!");
  }

  // First step: find default audio input device
  // Runs an `ffmpeg -list_devices` command and selects the first one found
  // Sample log output: [dshow @ 000001c9babdb000] "Headset Microphone (Arctis 7 Chat)" (audio)
  let audio_device = FfmpegCommand::new()
    .hide_banner()
    .args(&["-list_devices", "true"])
    .format("dshow")
    .input("dummy")
    .spawn()?
    .iter()?
    .into_ffmpeg_stderr()
    .find(|line| line.contains("(audio)"))
    // The device name is the text between the first pair of double quotes.
    .map(|line| line.split('\"').nth(1).map(|s| s.to_string()))
    .context("No audio device found")?
    .context("Failed to parse audio device")?;
  println!("Listening to audio device: {}", audio_device);

  // Second step: Capture audio and analyze w/ `ebur128` audio filter
  // Loudness metadata will be printed to the FFmpeg logs
  // Docs: <https://ffmpeg.org/ffmpeg-filters.html#ebur128-1>
  let iter = FfmpegCommand::new()
    .format("dshow")
    .args("-audio_buffer_size 50".split(' ')) // reduces latency to 50ms (dshow-specific)
    .input(format!("audio={audio_device}"))
    .args("-af ebur128=metadata=1,ametadata=print".split(' '))
    .format("null")
    .output("-")
    .spawn()?
    .iter()?;

  // Note: even though the audio device name may have spaces, it should *not* be
  // in quotes (""). Quotes are only needed on the command line to separate
  // different arguments. Since Rust invokes the command directly without a
  // shell interpreter, args are already divided up correctly. Any quotes
  // would be included in the device name instead and the command would fail.
  // <https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/648#issuecomment-866242144>
  let mut first_volume_event = true;
  for event in iter {
    match event {
      FfmpegEvent::Error(e) | FfmpegEvent::Log(LogLevel::Error | LogLevel::Fatal, e) => {
        eprintln!("{e}");
      }
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.contains("lavfi.r128.M=") => {
        if let Some(volume) = msg.split("lavfi.r128.M=").last() {
          // Sample log output: [Parsed_ametadata_1 @ 0000024c27effdc0] [info] lavfi.r128.M=-120.691
          // M = "momentary loudness"; a sliding time window of 400ms
          // Volume scale is roughly -70 to 0 LUFS. Anything below -70 is silence.
          // See <https://en.wikipedia.org/wiki/EBU_R_128#Metering>
          let volume_f32 = volume.parse::<f32>().context("Failed to parse volume")?;
          // Map LUFS onto 0..=14 meter segments (one segment per 5 LUFS),
          // clamped at zero for silence.
          let volume_normalized: usize = max(((volume_f32 / 5.0).round() as i32) + 14, 0) as usize;
          let volume_percent = ((volume_normalized as f32 / 14.0) * 100.0).round();

          // Clear previous line of output
          if !first_volume_event {
            print!("\x1b[1A\x1b[2K");
          } else {
            first_volume_event = false;
          }

          // Blinking red dot to indicate recording
          let time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();
          let recording_indicator = if time % 2 == 0 { "🔴" } else { " " };
          println!(
            "{} {} {}%",
            recording_indicator,
            repeat('█').take(volume_normalized).collect::<String>(),
            volume_percent
          );
        }
      }
      _ => {}
    }
  }
  Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demultiplexes one FFmpeg invocation into three named pipes (video, audio,
/// subtitles), each drained by its own reader thread. Reader threads wait for
/// a ready signal sent on the first FFmpeg progress event before reading.
fn main() -> anyhow::Result<()> {
  use anyhow::Result;
  use ffmpeg_sidecar::command::FfmpegCommand;
  use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
  use ffmpeg_sidecar::named_pipes::NamedPipe;
  use ffmpeg_sidecar::pipe_name;
  use std::io::Read;
  use std::sync::mpsc;
  use std::thread;

  // `pipe_name!` produces a platform-appropriate pipe path for each stream.
  const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
  const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
  const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");

  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  let mut command = FfmpegCommand::new();
  command
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(VIDEO_PIPE_NAME)
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(AUDIO_PIPE_NAME)
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(SUBTITLES_PIPE_NAME);

  // Create a separate thread for each output pipe
  let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
    .iter()
    .cloned()
    .map(|pipe_name| {
      // It's important to create the named pipe on the main thread before
      // sending it elsewhere so that any errors are caught at the top level.
      let mut pipe = NamedPipe::new(pipe_name)?;
      println!("[{pipe_name}] pipe created");
      let (ready_sender, ready_receiver) = mpsc::channel::<()>();
      let thread = thread::spawn(move || -> Result<()> {
        // Wait for FFmpeg to start writing
        // Only needed for Windows, since Unix will block until a writer has connected
        println!("[{pipe_name}] waiting for ready signal");
        ready_receiver.recv()?;

        // Read continuously until finished
        // Note that if the stream of output is interrupted or paused,
        // you may need additional logic to keep the read loop alive.
        println!("[{pipe_name}] reading from pipe");
        let mut buf = vec![0; 1920 * 1080 * 3];
        let mut total_bytes_read = 0;

        // In the case of subtitles, we'll decode the string contents directly
        let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
          Some("".to_string())
        } else {
          None
        };

        loop {
          match pipe.read(&mut buf) {
            Ok(bytes_read) => {
              total_bytes_read += bytes_read;

              // read bytes into string
              if let Some(cur_str) = &mut text_content {
                let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
                text_content = Some(format!("{}{}", cur_str, s));
              }

              if bytes_read == 0 {
                // A zero-byte read signals end of stream.
                break;
              }
            }
            Err(err) => {
              // A broken pipe just means the writer (FFmpeg) has finished;
              // any other I/O error is propagated.
              if err.kind() != std::io::ErrorKind::BrokenPipe {
                return Err(err.into());
              } else {
                break;
              }
            }
          }
        }

        // Log how many bytes were received over this pipe.
        // You can visually compare this to the FFmpeg log output to confirm
        // that all the expected bytes were captured.
        let size_str = if total_bytes_read < 1024 {
          format!("{}B", total_bytes_read)
        } else {
          format!("{}KiB", total_bytes_read / 1024)
        };

        if let Some(text_content) = text_content {
          println!("[{pipe_name}] subtitle text content: ");
          println!("{}", text_content.trim());
        }

        println!("[{pipe_name}] done reading ({size_str} total)");
        Ok(())
      });

      Ok((thread, ready_sender))
    })
    .collect::<Result<Vec<_>>>()?;

  // Start FFmpeg
  let mut ready_signal_sent = false;
  command
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Signal threads when output is ready
      FfmpegEvent::Progress(_) if !ready_signal_sent => {
        threads.iter().for_each(|(_, sender)| {
          sender.send(()).ok();
        });
        ready_signal_sent = true;
      }
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      _ => {}
    });

  // Join each reader thread and propagate any error it returned.
  for (thread, _) in threads {
    thread.join().unwrap()?;
  }
  Ok(())
}
Sourcepub fn input<S: AsRef<str>>(&mut self, path_or_url: S) -> &mut Self
pub fn input<S: AsRef<str>>(&mut self, path_or_url: S) -> &mut Self
Alias for -i
argument, the input file path or URL.
To take input from stdin, use the value `-` or `pipe:0`.
Examples found in repository?
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Streams an AVI-wrapped rawvideo test pattern from FFmpeg's stdout into
/// ffplay's stdin, manually copying bytes between the two child processes.
///
/// NOTE(review): requires `ffplay` on PATH; relies on `Command`, `Stdio`, and
/// Read/Write imports defined elsewhere in this example file.
fn main() {
  let mut ffmpeg = FfmpegCommand::new()
    .realtime()
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60")
    .codec_video("rawvideo")
    .format("avi")
    .output("-")
    .spawn()
    .unwrap();

  // ffplay reads the muxed stream from its own stdin ("-i -").
  let mut ffplay = Command::new("ffplay")
    .args("-i -".split(' '))
    .stdin(Stdio::piped())
    .spawn()
    .unwrap();

  let mut ffmpeg_stdout = ffmpeg.take_stdout().unwrap();
  let mut ffplay_stdin = ffplay.stdin.take().unwrap();

  // pipe from ffmpeg stdout to ffplay stdin
  let buf = &mut [0u8; 4096];
  loop {
    let n = ffmpeg_stdout.read(buf).unwrap();
    if n == 0 {
      // Zero-byte read: FFmpeg has closed its stdout.
      break;
    }
    ffplay_stdin.write_all(&buf[..n]).unwrap();
  }
}
More examples
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
/// Renders a live FFmpeg test pattern in the terminal by mapping each pixel of
/// every raw frame to an ANSI 24-bit background color escape code.
///
/// NOTE(review): `OUTPUT_WIDTH`/`OUTPUT_HEIGHT`/`OUTPUT_FRAMERATE` are
/// constants defined elsewhere in this example file.
fn main() -> Result<()> {
  let iter = FfmpegCommand::new()
    .format("lavfi")
    .arg("-re") // "realtime"
    .input(format!(
      "testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
    ))
    .rawvideo()
    .spawn()?
    .iter()?
    .filter_frames();

  for frame in iter {
    // clear the previous frame
    if frame.frame_num > 0 {
      for _ in 0..frame.height {
        // Move the cursor up one line per row of the previous frame.
        print!("\x1B[{}A", 1);
      }
    }

    // Print the pixels colored with ANSI codes
    for y in 0..frame.height {
      for x in 0..frame.width {
        // rawvideo frames are packed RGB: three bytes per pixel, row-major.
        let idx = (y * frame.width + x) as usize * 3;
        let r = frame.data[idx] as u32;
        let g = frame.data[idx + 1] as u32;
        let b = frame.data[idx + 2] as u32;
        print!("\x1B[48;2;{r};{g};{b}m ");
      }
      // Reset the color at the end of each row.
      println!("\x1B[0m");
    }
  }
  Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demonstrates multiplexing three FFmpeg outputs (video, audio, subtitles)
/// over a single local TCP port while reacting to FFmpeg log events.
///
/// NOTE(review): relies on `listen_for_connections`, `channel`, and the
/// ffmpeg-sidecar imports defined elsewhere in this example file.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));

  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));

  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });

  // Signal the listener thread to shut down, then propagate any error it returned.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
/// Decodes an H265 video to raw frames with one FFmpeg instance, passes the
/// frames through Rust (a no-op here, but any per-frame processing could be
/// inserted), and re-encodes them to H265 with a second instance.
///
/// NOTE(review): relies on `create_h265_source`, `Path`, `thread`, and the
/// Write import defined elsewhere in this example file.
fn main() {
  // Create an H265 source video as a starting point
  let input_path = "output/h265.mp4";
  if !Path::new(input_path).exists() {
    create_h265_source(input_path);
  }

  // One instance decodes H265 to raw frames
  let mut input = FfmpegCommand::new()
    .input(input_path)
    .rawvideo()
    .spawn()
    .unwrap();

  // Frames can be transformed by Iterator `.map()`.
  // This example is a no-op, with frames passed through unaltered.
  let transformed_frames = input.iter().unwrap().filter_frames();
  // You could easily add some "middleware" processing here:
  // - overlay or composite another RGB image (or even another Ffmpeg Iterator)
  // - apply a filter like blur or convolution
  // Note: some of these operations are also possible with FFmpeg's (somewhat arcane)
  // `filtergraph` API, but doing it in Rust gives you much finer-grained
  // control, debuggability, and modularity -- you can pull in any Rust crate
  // you need.

  // A second instance encodes the updated frames back to H265
  let mut output = FfmpegCommand::new()
    .args([
      "-f", "rawvideo", "-pix_fmt", "rgb24", "-s", "600x800", "-r", "30",
    ]) // note: should be possible to infer these params from the source input stream
    .input("-")
    .args(["-c:v", "libx265"])
    .args(["-y", "output/h265_overlay.mp4"])
    .spawn()
    .unwrap();

  // Connect the two instances
  let mut stdin = output.take_stdin().unwrap();
  thread::spawn(move || {
    // `for_each` blocks through the end of the iterator,
    // so we run it in another thread.
    transformed_frames.for_each(|f| {
      stdin.write_all(&f.data).ok();
    });
  });

  // On the main thread, run the output instance to completion
  output.iter().unwrap().for_each(|e| match e {
    FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
    FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
    _ => {}
  });
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/// Renders a live console loudness meter for the default audio input device.
///
/// Step 1 discovers the first `dshow` audio device from FFmpeg's stderr;
/// step 2 captures it and parses the `ebur128` filter's momentary-loudness
/// metadata out of the FFmpeg log stream. Windows-only as written (dshow).
pub fn main() -> Result<()> {
  if cfg!(not(windows)) {
    eprintln!("Note: Methods for capturing audio are platform-specific and this demo is intended for Windows.");
    eprintln!("On Linux or Mac, you need to switch from the `dshow` format to a different one supported on your platform.");
    eprintln!("Make sure to also include format-specific arguments such as `-audio_buffer_size`.");
    eprintln!("Pull requests are welcome to make this demo cross-platform!");
  }

  // First step: find default audio input device
  // Runs an `ffmpeg -list_devices` command and selects the first one found
  // Sample log output: [dshow @ 000001c9babdb000] "Headset Microphone (Arctis 7 Chat)" (audio)
  let audio_device = FfmpegCommand::new()
    .hide_banner()
    .args(&["-list_devices", "true"])
    .format("dshow")
    .input("dummy")
    .spawn()?
    .iter()?
    .into_ffmpeg_stderr()
    .find(|line| line.contains("(audio)"))
    // The device name is the text between the first pair of double quotes.
    .map(|line| line.split('\"').nth(1).map(|s| s.to_string()))
    .context("No audio device found")?
    .context("Failed to parse audio device")?;
  println!("Listening to audio device: {}", audio_device);

  // Second step: Capture audio and analyze w/ `ebur128` audio filter
  // Loudness metadata will be printed to the FFmpeg logs
  // Docs: <https://ffmpeg.org/ffmpeg-filters.html#ebur128-1>
  let iter = FfmpegCommand::new()
    .format("dshow")
    .args("-audio_buffer_size 50".split(' ')) // reduces latency to 50ms (dshow-specific)
    .input(format!("audio={audio_device}"))
    .args("-af ebur128=metadata=1,ametadata=print".split(' '))
    .format("null")
    .output("-")
    .spawn()?
    .iter()?;

  // Note: even though the audio device name may have spaces, it should *not* be
  // in quotes (""). Quotes are only needed on the command line to separate
  // different arguments. Since Rust invokes the command directly without a
  // shell interpreter, args are already divided up correctly. Any quotes
  // would be included in the device name instead and the command would fail.
  // <https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/648#issuecomment-866242144>
  let mut first_volume_event = true;
  for event in iter {
    match event {
      FfmpegEvent::Error(e) | FfmpegEvent::Log(LogLevel::Error | LogLevel::Fatal, e) => {
        eprintln!("{e}");
      }
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.contains("lavfi.r128.M=") => {
        if let Some(volume) = msg.split("lavfi.r128.M=").last() {
          // Sample log output: [Parsed_ametadata_1 @ 0000024c27effdc0] [info] lavfi.r128.M=-120.691
          // M = "momentary loudness"; a sliding time window of 400ms
          // Volume scale is roughly -70 to 0 LUFS. Anything below -70 is silence.
          // See <https://en.wikipedia.org/wiki/EBU_R_128#Metering>
          let volume_f32 = volume.parse::<f32>().context("Failed to parse volume")?;
          // Map LUFS onto 0..=14 meter segments (one segment per 5 LUFS),
          // clamped at zero for silence.
          let volume_normalized: usize = max(((volume_f32 / 5.0).round() as i32) + 14, 0) as usize;
          let volume_percent = ((volume_normalized as f32 / 14.0) * 100.0).round();

          // Clear previous line of output
          if !first_volume_event {
            print!("\x1b[1A\x1b[2K");
          } else {
            first_volume_event = false;
          }

          // Blinking red dot to indicate recording
          let time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();
          let recording_indicator = if time % 2 == 0 { "🔴" } else { " " };
          println!(
            "{} {} {}%",
            recording_indicator,
            repeat('█').take(volume_normalized).collect::<String>(),
            volume_percent
          );
        }
      }
      _ => {}
    }
  }
  Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demuxes a single FFmpeg invocation into three named pipes (raw video,
/// PCM audio, SRT subtitles), reads each pipe on its own thread, and
/// reports how many bytes arrived on each.
fn main() -> anyhow::Result<()> {
use anyhow::Result;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use ffmpeg_sidecar::named_pipes::NamedPipe;
use ffmpeg_sidecar::pipe_name;
use std::io::Read;
use std::sync::mpsc;
use std::thread;
// NOTE(review): `pipe_name!` presumably expands to a platform-specific pipe
// path (Windows `\\.\pipe\...` vs. a Unix FIFO) — confirm in ffmpeg_sidecar.
const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
let mut command = FfmpegCommand::new();
command
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(VIDEO_PIPE_NAME)
// Audio output
.map("1:a")
.format("s16le")
.output(AUDIO_PIPE_NAME)
// Subtitles output
.map("2:s")
.format("srt")
.output(SUBTITLES_PIPE_NAME);
// Create a separate thread for each output pipe
let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
.iter()
.cloned()
.map(|pipe_name| {
// It's important to create the named pipe on the main thread before
// sending it elsewhere so that any errors are caught at the top level.
let mut pipe = NamedPipe::new(pipe_name)?;
println!("[{pipe_name}] pipe created");
let (ready_sender, ready_receiver) = mpsc::channel::<()>();
let thread = thread::spawn(move || -> Result<()> {
// Wait for FFmpeg to start writing
// Only needed for Windows, since Unix will block until a writer has connected
println!("[{pipe_name}] waiting for ready signal");
ready_receiver.recv()?;
// Read continuously until finished
// Note that if the stream of output is interrupted or paused,
// you may need additional logic to keep the read loop alive.
println!("[{pipe_name}] reading from pipe");
// Buffer sized to one raw rgb24 video frame (1920 * 1080 pixels * 3 bytes).
let mut buf = vec![0; 1920 * 1080 * 3];
let mut total_bytes_read = 0;
// In the case of subtitles, we'll decode the string contents directly
let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
Some("".to_string())
} else {
None
};
loop {
match pipe.read(&mut buf) {
Ok(bytes_read) => {
total_bytes_read += bytes_read;
// read bytes into string
if let Some(cur_str) = &mut text_content {
let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
text_content = Some(format!("{}{}", cur_str, s));
}
// A zero-length read means the writer closed the pipe (EOF).
if bytes_read == 0 {
break;
}
}
Err(err) => {
// BrokenPipe is the normal end-of-stream on Windows; anything
// else is a real error and is propagated.
if err.kind() != std::io::ErrorKind::BrokenPipe {
return Err(err.into());
} else {
break;
}
}
}
}
// Log how many bytes were received over this pipe.
// You can visually compare this to the FFmpeg log output to confirm
// that all the expected bytes were captured.
let size_str = if total_bytes_read < 1024 {
format!("{}B", total_bytes_read)
} else {
format!("{}KiB", total_bytes_read / 1024)
};
if let Some(text_content) = text_content {
println!("[{pipe_name}] subtitle text content: ");
println!("{}", text_content.trim());
}
println!("[{pipe_name}] done reading ({size_str} total)");
Ok(())
});
Ok((thread, ready_sender))
})
.collect::<Result<Vec<_>>>()?;
// Start FFmpeg
let mut ready_signal_sent = false;
command
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Signal threads when output is ready
FfmpegEvent::Progress(_) if !ready_signal_sent => {
threads.iter().for_each(|(_, sender)| {
sender.send(()).ok();
});
ready_signal_sent = true;
}
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
_ => {}
});
// Join every reader thread: `unwrap` surfaces panics, `?` surfaces the
// thread's own `Result` (e.g. an unexpected pipe read error).
for (thread, _) in threads {
thread.join().unwrap()?;
}
Ok(())
}
Sourcepub fn output<S: AsRef<str>>(&mut self, path_or_url: S) -> &mut Self
pub fn output<S: AsRef<str>>(&mut self, path_or_url: S) -> &mut Self
Alias for the output file path or URL.
To send output to stdout, use the value `-` or `pipe:1`.
Since this is the last argument in the command and has no `-` flag
preceding it, it is equivalent to calling `.arg()` directly. However,
using this method helps label the purpose of the argument, and makes the
code more readable at a glance.
Examples found in repository?
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/// Renders a test video to `output/metadata.mp4` with a custom `title`
/// metadata tag, printing progress frames and raw FFmpeg log lines.
fn main() {
// Build and launch the FFmpeg child process.
let mut child = FfmpegCommand::new()
.testsrc()
.args(["-metadata", "title=some cool title"])
.overwrite() // -y
.output("output/metadata.mp4")
.print_command()
.spawn()
.unwrap();
// Drain the event stream with a plain `for` loop instead of `for_each`.
for event in child.iter().unwrap() {
match event {
FfmpegEvent::Progress(FfmpegProgress { frame, .. }) => {
println!("Current frame: {frame}");
}
FfmpegEvent::Log(_level, msg) => {
println!("[ffmpeg] {msg}");
}
_ => {}
}
}
}
More examples
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Generates a realtime test video with FFmpeg and previews it by piping
/// raw AVI bytes from FFmpeg's stdout into an `ffplay` child's stdin.
fn main() {
// Producer: FFmpeg rendering the test source as raw video in an AVI container.
let mut producer = FfmpegCommand::new()
.realtime()
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60")
.codec_video("rawvideo")
.format("avi")
.output("-")
.spawn()
.unwrap();
// Consumer: ffplay reading the stream from its stdin.
let mut preview = Command::new("ffplay")
.args("-i -".split(' '))
.stdin(Stdio::piped())
.spawn()
.unwrap();
let mut source = producer.take_stdout().unwrap();
let mut sink = preview.stdin.take().unwrap();
// pipe from ffmpeg stdout to ffplay stdin
let mut chunk = [0u8; 4096];
loop {
let count = source.read(&mut chunk).unwrap();
if count == 0 {
break; // EOF: ffmpeg closed its stdout
}
sink.write_all(&chunk[..count]).unwrap();
}
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Streams three FFmpeg outputs (raw video, PCM audio, SRT subtitles) to a
/// local TCP listener (started on a background thread via the out-of-view
/// helper `listen_for_connections`), logging FFmpeg events as it runs.
fn main() -> Result<()> {
// Set up a TCP listener
const TCP_PORT: u32 = 3000;
// Channel used to tell the listener thread to shut down once FFmpeg exits.
let (exit_sender, exit_receiver) = channel::<()>();
let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
// Wait for the listener to start
// NOTE(review): a fixed 1s sleep is a startup race; an explicit readiness
// signal from the listener thread would be more robust.
thread::sleep(Duration::from_millis(1000));
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
FfmpegCommand::new()
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Audio output
.map("1:a")
.format("s16le")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Subtitles output
.map("2:s")
.format("srt")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
// _ => {}
// Echo every other event for demonstration purposes.
e => {
println!("{:?}", e);
}
});
// FFmpeg has exited; tell the listener thread to stop accepting connections.
exit_sender.send(())?;
// `unwrap` surfaces a panic in the listener thread; `?` surfaces its Result.
listener_thread.join().unwrap()?;
Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/// Live terminal audio-level meter: finds the first `dshow` audio capture
/// device, runs FFmpeg's `ebur128` loudness filter on it, and renders the
/// momentary loudness as a bar graph that updates in place.
pub fn main() -> Result<()> {
if cfg!(not(windows)) {
eprintln!("Note: Methods for capturing audio are platform-specific and this demo is intended for Windows.");
eprintln!("On Linux or Mac, you need to switch from the `dshow` format to a different one supported on your platform.");
eprintln!("Make sure to also include format-specific arguments such as `-audio_buffer_size`.");
eprintln!("Pull requests are welcome to make this demo cross-platform!");
}
// First step: find default audio input device
// Runs an `ffmpeg -list_devices` command and selects the first one found
// Sample log output: [dshow @ 000001c9babdb000] "Headset Microphone (Arctis 7 Chat)" (audio)
let audio_device = FfmpegCommand::new()
.hide_banner()
.args(&["-list_devices", "true"])
.format("dshow")
.input("dummy")
.spawn()?
.iter()?
.into_ffmpeg_stderr()
.find(|line| line.contains("(audio)"))
// The device name is the text between the first pair of double quotes.
.map(|line| line.split('\"').nth(1).map(|s| s.to_string()))
.context("No audio device found")?
.context("Failed to parse audio device")?;
println!("Listening to audio device: {}", audio_device);
// Second step: Capture audio and analyze w/ `ebur128` audio filter
// Loudness metadata will be printed to the FFmpeg logs
// Docs: <https://ffmpeg.org/ffmpeg-filters.html#ebur128-1>
let iter = FfmpegCommand::new()
.format("dshow")
.args("-audio_buffer_size 50".split(' ')) // reduces latency to 50ms (dshow-specific)
.input(format!("audio={audio_device}"))
.args("-af ebur128=metadata=1,ametadata=print".split(' '))
.format("null")
.output("-")
.spawn()?
.iter()?;
// Note: even though the audio device name may have spaces, it should *not* be
// in quotes (""). Quotes are only needed on the command line to separate
// different arguments. Since Rust invokes the command directly without a
// shell interpreter, args are already divided up correctly. Any quotes
// would be included in the device name instead and the command would fail.
// <https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/648#issuecomment-866242144>
let mut first_volume_event = true;
for event in iter {
match event {
FfmpegEvent::Error(e) | FfmpegEvent::Log(LogLevel::Error | LogLevel::Fatal, e) => {
eprintln!("{e}");
}
FfmpegEvent::Log(LogLevel::Info, msg) if msg.contains("lavfi.r128.M=") => {
// `split(...).last()` yields the text after the marker (the dB value).
if let Some(volume) = msg.split("lavfi.r128.M=").last() {
// Sample log output: [Parsed_ametadata_1 @ 0000024c27effdc0] [info] lavfi.r128.M=-120.691
// M = "momentary loudness"; a sliding time window of 400ms
// Volume scale is roughly -70 to 0 LUFS. Anything below -70 is silence.
// See <https://en.wikipedia.org/wiki/EBU_R_128#Metering>
let volume_f32 = volume.parse::<f32>().context("Failed to parse volume")?;
// Map [-70, 0] LUFS onto 0..=14 bar segments (5 LUFS per segment),
// clamping anything quieter than -70 to zero.
let volume_normalized: usize = max(((volume_f32 / 5.0).round() as i32) + 14, 0) as usize;
let volume_percent = ((volume_normalized as f32 / 14.0) * 100.0).round();
// Clear previous line of output
// ANSI escapes: `\x1b[1A` moves the cursor up one line, `\x1b[2K` clears it.
if !first_volume_event {
print!("\x1b[1A\x1b[2K");
} else {
first_volume_event = false;
}
// Blinking red dot to indicate recording
let time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
let recording_indicator = if time % 2 == 0 { "🔴" } else { " " };
println!(
"{} {} {}%",
recording_indicator,
repeat('█').take(volume_normalized).collect::<String>(),
volume_percent
);
}
}
_ => {}
}
}
Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demuxes a single FFmpeg invocation into three named pipes (raw video,
/// PCM audio, SRT subtitles), reads each pipe on its own thread, and
/// reports how many bytes arrived on each.
fn main() -> anyhow::Result<()> {
use anyhow::Result;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use ffmpeg_sidecar::named_pipes::NamedPipe;
use ffmpeg_sidecar::pipe_name;
use std::io::Read;
use std::sync::mpsc;
use std::thread;
// NOTE(review): `pipe_name!` presumably expands to a platform-specific pipe
// path (Windows `\\.\pipe\...` vs. a Unix FIFO) — confirm in ffmpeg_sidecar.
const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
let mut command = FfmpegCommand::new();
command
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(VIDEO_PIPE_NAME)
// Audio output
.map("1:a")
.format("s16le")
.output(AUDIO_PIPE_NAME)
// Subtitles output
.map("2:s")
.format("srt")
.output(SUBTITLES_PIPE_NAME);
// Create a separate thread for each output pipe
let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
.iter()
.cloned()
.map(|pipe_name| {
// It's important to create the named pipe on the main thread before
// sending it elsewhere so that any errors are caught at the top level.
let mut pipe = NamedPipe::new(pipe_name)?;
println!("[{pipe_name}] pipe created");
let (ready_sender, ready_receiver) = mpsc::channel::<()>();
let thread = thread::spawn(move || -> Result<()> {
// Wait for FFmpeg to start writing
// Only needed for Windows, since Unix will block until a writer has connected
println!("[{pipe_name}] waiting for ready signal");
ready_receiver.recv()?;
// Read continuously until finished
// Note that if the stream of output is interrupted or paused,
// you may need additional logic to keep the read loop alive.
println!("[{pipe_name}] reading from pipe");
// Buffer sized to one raw rgb24 video frame (1920 * 1080 pixels * 3 bytes).
let mut buf = vec![0; 1920 * 1080 * 3];
let mut total_bytes_read = 0;
// In the case of subtitles, we'll decode the string contents directly
let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
Some("".to_string())
} else {
None
};
loop {
match pipe.read(&mut buf) {
Ok(bytes_read) => {
total_bytes_read += bytes_read;
// read bytes into string
if let Some(cur_str) = &mut text_content {
let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
text_content = Some(format!("{}{}", cur_str, s));
}
// A zero-length read means the writer closed the pipe (EOF).
if bytes_read == 0 {
break;
}
}
Err(err) => {
// BrokenPipe is the normal end-of-stream on Windows; anything
// else is a real error and is propagated.
if err.kind() != std::io::ErrorKind::BrokenPipe {
return Err(err.into());
} else {
break;
}
}
}
}
// Log how many bytes were received over this pipe.
// You can visually compare this to the FFmpeg log output to confirm
// that all the expected bytes were captured.
let size_str = if total_bytes_read < 1024 {
format!("{}B", total_bytes_read)
} else {
format!("{}KiB", total_bytes_read / 1024)
};
if let Some(text_content) = text_content {
println!("[{pipe_name}] subtitle text content: ");
println!("{}", text_content.trim());
}
println!("[{pipe_name}] done reading ({size_str} total)");
Ok(())
});
Ok((thread, ready_sender))
})
.collect::<Result<Vec<_>>>()?;
// Start FFmpeg
let mut ready_signal_sent = false;
command
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Signal threads when output is ready
FfmpegEvent::Progress(_) if !ready_signal_sent => {
threads.iter().for_each(|(_, sender)| {
sender.send(()).ok();
});
ready_signal_sent = true;
}
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
_ => {}
});
// Join every reader thread: `unwrap` surfaces panics, `?` surfaces the
// thread's own `Result` (e.g. an unexpected pipe read error).
for (thread, _) in threads {
thread.join().unwrap()?;
}
Ok(())
}
Sourcepub fn overwrite(&mut self) -> &mut Self
pub fn overwrite(&mut self) -> &mut Self
Alias for -y
argument: overwrite output files without asking.
Examples found in repository?
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/// Renders a test video to `output/metadata.mp4` with a custom `title`
/// metadata tag, printing progress frames and raw FFmpeg log lines.
fn main() {
let mut ffmpeg_runner = FfmpegCommand::new()
.testsrc()
.args(["-metadata", "title=some cool title"])
.overwrite() // -y
.output("output/metadata.mp4")
.print_command()
.spawn()
.unwrap();
// Drain the event iterator: report progress frames and echo log output.
ffmpeg_runner
.iter()
.unwrap()
.for_each(|e| {
match e {
FfmpegEvent::Progress(FfmpegProgress { frame, .. }) =>
println!("Current frame: {frame}"),
FfmpegEvent::Log(_level, msg) =>
println!("[ffmpeg] {msg}"),
_ => {}
}
});
}
More examples
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Streams three FFmpeg outputs (raw video, PCM audio, SRT subtitles) to a
/// local TCP listener (started on a background thread via the out-of-view
/// helper `listen_for_connections`), logging FFmpeg events as it runs.
fn main() -> Result<()> {
// Set up a TCP listener
const TCP_PORT: u32 = 3000;
// Channel used to tell the listener thread to shut down once FFmpeg exits.
let (exit_sender, exit_receiver) = channel::<()>();
let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
// Wait for the listener to start
// NOTE(review): a fixed 1s sleep is a startup race; an explicit readiness
// signal from the listener thread would be more robust.
thread::sleep(Duration::from_millis(1000));
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
FfmpegCommand::new()
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Audio output
.map("1:a")
.format("s16le")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Subtitles output
.map("2:s")
.format("srt")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
// _ => {}
// Echo every other event for demonstration purposes.
e => {
println!("{:?}", e);
}
});
// FFmpeg has exited; tell the listener thread to stop accepting connections.
exit_sender.send(())?;
// `unwrap` surfaces a panic in the listener thread; `?` surfaces its Result.
listener_thread.join().unwrap()?;
Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demuxes a single FFmpeg invocation into three named pipes (raw video,
/// PCM audio, SRT subtitles), reads each pipe on its own thread, and
/// reports how many bytes arrived on each.
fn main() -> anyhow::Result<()> {
use anyhow::Result;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use ffmpeg_sidecar::named_pipes::NamedPipe;
use ffmpeg_sidecar::pipe_name;
use std::io::Read;
use std::sync::mpsc;
use std::thread;
// NOTE(review): `pipe_name!` presumably expands to a platform-specific pipe
// path (Windows `\\.\pipe\...` vs. a Unix FIFO) — confirm in ffmpeg_sidecar.
const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
let mut command = FfmpegCommand::new();
command
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(VIDEO_PIPE_NAME)
// Audio output
.map("1:a")
.format("s16le")
.output(AUDIO_PIPE_NAME)
// Subtitles output
.map("2:s")
.format("srt")
.output(SUBTITLES_PIPE_NAME);
// Create a separate thread for each output pipe
let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
.iter()
.cloned()
.map(|pipe_name| {
// It's important to create the named pipe on the main thread before
// sending it elsewhere so that any errors are caught at the top level.
let mut pipe = NamedPipe::new(pipe_name)?;
println!("[{pipe_name}] pipe created");
let (ready_sender, ready_receiver) = mpsc::channel::<()>();
let thread = thread::spawn(move || -> Result<()> {
// Wait for FFmpeg to start writing
// Only needed for Windows, since Unix will block until a writer has connected
println!("[{pipe_name}] waiting for ready signal");
ready_receiver.recv()?;
// Read continuously until finished
// Note that if the stream of output is interrupted or paused,
// you may need additional logic to keep the read loop alive.
println!("[{pipe_name}] reading from pipe");
// Buffer sized to one raw rgb24 video frame (1920 * 1080 pixels * 3 bytes).
let mut buf = vec![0; 1920 * 1080 * 3];
let mut total_bytes_read = 0;
// In the case of subtitles, we'll decode the string contents directly
let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
Some("".to_string())
} else {
None
};
loop {
match pipe.read(&mut buf) {
Ok(bytes_read) => {
total_bytes_read += bytes_read;
// read bytes into string
if let Some(cur_str) = &mut text_content {
let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
text_content = Some(format!("{}{}", cur_str, s));
}
// A zero-length read means the writer closed the pipe (EOF).
if bytes_read == 0 {
break;
}
}
Err(err) => {
// BrokenPipe is the normal end-of-stream on Windows; anything
// else is a real error and is propagated.
if err.kind() != std::io::ErrorKind::BrokenPipe {
return Err(err.into());
} else {
break;
}
}
}
}
// Log how many bytes were received over this pipe.
// You can visually compare this to the FFmpeg log output to confirm
// that all the expected bytes were captured.
let size_str = if total_bytes_read < 1024 {
format!("{}B", total_bytes_read)
} else {
format!("{}KiB", total_bytes_read / 1024)
};
if let Some(text_content) = text_content {
println!("[{pipe_name}] subtitle text content: ");
println!("{}", text_content.trim());
}
println!("[{pipe_name}] done reading ({size_str} total)");
Ok(())
});
Ok((thread, ready_sender))
})
.collect::<Result<Vec<_>>>()?;
// Start FFmpeg
let mut ready_signal_sent = false;
command
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Signal threads when output is ready
FfmpegEvent::Progress(_) if !ready_signal_sent => {
threads.iter().for_each(|(_, sender)| {
sender.send(()).ok();
});
ready_signal_sent = true;
}
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
_ => {}
});
// Join every reader thread: `unwrap` surfaces panics, `?` surfaces the
// thread's own `Result` (e.g. an unexpected pipe read error).
for (thread, _) in threads {
thread.join().unwrap()?;
}
Ok(())
}
Sourcepub fn no_overwrite(&mut self) -> &mut Self
pub fn no_overwrite(&mut self) -> &mut Self
Alias for -n
argument: do not overwrite output files, and exit
immediately if a specified output file already exists.
Sourcepub fn codec_video<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
pub fn codec_video<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
Alias for -c:v
argument.
Select an encoder (when used before an output file) or a decoder (when
used before an input file) for one or more video streams. `codec` is the
name of a decoder/encoder or the special value `copy` (output only) to
indicate that the stream is not to be re-encoded.
Examples found in repository?
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Generates a realtime test video with FFmpeg and previews it by piping
/// raw AVI bytes from FFmpeg's stdout into an `ffplay` child's stdin.
fn main() {
// FFmpeg renders the test source as raw video in an AVI container to stdout ("-").
let mut ffmpeg = FfmpegCommand::new()
.realtime()
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60")
.codec_video("rawvideo")
.format("avi")
.output("-")
.spawn()
.unwrap();
// ffplay reads the stream from its piped stdin.
let mut ffplay = Command::new("ffplay")
.args("-i -".split(' '))
.stdin(Stdio::piped())
.spawn()
.unwrap();
let mut ffmpeg_stdout = ffmpeg.take_stdout().unwrap();
let mut ffplay_stdin = ffplay.stdin.take().unwrap();
// pipe from ffmpeg stdout to ffplay stdin
let buf = &mut [0u8; 4096];
loop {
let n = ffmpeg_stdout.read(buf).unwrap();
// A zero-length read means ffmpeg closed its stdout (EOF).
if n == 0 {
break;
}
ffplay_stdin.write_all(&buf[..n]).unwrap();
}
}
Sourcepub fn codec_audio<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
pub fn codec_audio<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
Alias for -c:a
argument.
Select an encoder (when used before an output file) or a decoder (when
used before an input file) for one or more audio streams. codec
is the
name of a decoder/encoder or a special value copy
(output only) to
indicate that the stream is not to be re-encoded.
Sourcepub fn codec_subtitle<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
pub fn codec_subtitle<S: AsRef<str>>(&mut self, codec: S) -> &mut Self
Alias for -c:s
argument.
Select an encoder (when used before an output file) or a decoder (when
used before an input file) for one or more subtitle streams. codec
is
the name of a decoder/encoder or a special value copy
(output only) to
indicate that the stream is not to be re-encoded.
Sourcepub fn duration<S: AsRef<str>>(&mut self, duration: S) -> &mut Self
pub fn duration<S: AsRef<str>>(&mut self, duration: S) -> &mut Self
Alias for -t
argument.
When used as an input option (before -i
), limit the duration of data
read from the input file.
When used as an output option (before an output url), stop writing the output after its duration reaches duration.
duration
must be a time duration specification, see (ffmpeg-utils)the
Time duration section in the ffmpeg-utils(1)
manual.
-to
and -t
are mutually exclusive and -t has priority.
Sourcepub fn to<S: AsRef<str>>(&mut self, position: S) -> &mut Self
pub fn to<S: AsRef<str>>(&mut self, position: S) -> &mut Self
Alias for -to
argument.
Stop writing the output or reading the input at position
. position
must be a time duration specification, see (ffmpeg-utils)the Time
duration section in the ffmpeg-utils(1)
manual.
-to
and -t
(aka duration()
) are mutually exclusive and -t
has
priority.
Sourcepub fn limit_file_size(&mut self, size_in_bytes: u32) -> &mut Self
pub fn limit_file_size(&mut self, size_in_bytes: u32) -> &mut Self
Alias for -fs
argument.
Set the file size limit, expressed in bytes. No further chunk of bytes is written after the limit is exceeded. The size of the output file is slightly more than the requested file size.
Sourcepub fn seek<S: AsRef<str>>(&mut self, position: S) -> &mut Self
pub fn seek<S: AsRef<str>>(&mut self, position: S) -> &mut Self
Alias for -ss
argument.
When used as an input option (before -i
), seeks in this input file to
position. Note that in most formats it is not possible to seek exactly, so
ffmpeg
will seek to the closest seek point before position
. When
transcoding and -accurate_seek
is enabled (the default), this extra
segment between the seek point and position
will be decoded and
discarded. When doing stream copy or when -noaccurate_seek
is used, it
will be preserved.
When used as an output option (before an output url), decodes but discards
input until the timestamps reach position
.
position
must be a time duration specification, see (ffmpeg-utils)the
Time duration section in the ffmpeg-utils(1)
manual.
Sourcepub fn seek_eof<S: AsRef<str>>(&mut self, position: S) -> &mut Self
pub fn seek_eof<S: AsRef<str>>(&mut self, position: S) -> &mut Self
Alias for -sseof
argument.
Like the -ss
option but relative to the “end of file”. That is negative
values are earlier in the file, 0 is at EOF.
Sourcepub fn filter<S: AsRef<str>>(&mut self, filtergraph: S) -> &mut Self
pub fn filter<S: AsRef<str>>(&mut self, filtergraph: S) -> &mut Self
Alias for -filter
argument.
Create the filtergraph specified by filtergraph
and use it to filter the
stream.
filtergraph
is a description of the filtergraph to apply to the stream,
and must have a single input and a single output of the same type of the
stream. In the filtergraph, the input is associated to the label in
, and
the output to the label out
. See the ffmpeg-filters manual for more
information about the filtergraph syntax.
See the -filter_complex
option if
you want to create filtergraphs with multiple inputs and/or outputs.
Sourcepub fn crf(&mut self, crf: u32) -> &mut Self
pub fn crf(&mut self, crf: u32) -> &mut Self
Alias for ‘-crf:v’ argument.
Set CRF (Constant Rate Factor) for quality-based VBR (Variable BitRate)
Use this rate control mode if you want to keep the best quality and care less about the file size. Lower values means better quality with bigger average bitrate (0 usually means lossless).
Possible values depend on codec:
- 0-51 for h264 (default is 23), see ffmpeg encoding guide for h264 for more details
- 0-51 for h265 (default is 28), see ffmpeg encoding guide for h265 for more details
- 0-63 for vp9 (no default, 31 is recommended for 1080p HD video), see ffmpeg encoding guide for vp9 for more details
- 0-63 for av1(libaom-av1) (no default), see ffmpeg encoding guide for libaom for more details
- 0-63 for av1(libsvtav1) (default is 30), see ffmpeg encoding guide for svt-av1 for mode details
Sourcepub fn frames(&mut self, framecount: u32) -> &mut Self
pub fn frames(&mut self, framecount: u32) -> &mut Self
Alias for -frames:v
argument.
Stop writing to the stream after framecount
frames.
See also: -frames:a
(audio), -frames:d
(data).
Sourcepub fn preset<S: AsRef<str>>(&mut self, preset: S) -> &mut Self
pub fn preset<S: AsRef<str>>(&mut self, preset: S) -> &mut Self
Alias for -preset:v
argument.
Set preset which is basically trade-off between encoding speed and compression ratio.
For h264 and h265 allowed values are:
- ultrafast
- superfast
- veryfast
- faster
- medium (default preset)
- slow
- slower
- veryslow
- placebo
For svt-av1 supported values 0-13 (higher number providing a higher encoding speed). Prior to version 0.9.0 valid values was 0-8.
For libaom supported values 0-11 (higher number providing a higher encoding speed)
VP9 has no presets
Sourcepub fn rate(&mut self, fps: f32) -> &mut Self
pub fn rate(&mut self, fps: f32) -> &mut Self
Alias for -r
argument.
Set frame rate (Hz value, fraction or abbreviation).
As an input option, ignore any timestamps stored in the file and instead
generate timestamps assuming constant frame rate fps
. This is not the
same as the -framerate
option used for some input formats like image2 or
v4l2 (it used to be the same in older versions of FFmpeg). If in doubt use
-framerate
instead of the input option -r
.
Sourcepub fn size(&mut self, width: u32, height: u32) -> &mut Self
pub fn size(&mut self, width: u32, height: u32) -> &mut Self
Alias for -s
argument.
Set frame size.
As an input option, this is a shortcut for the video_size
private
option, recognized by some demuxers for which the frame size is either not
stored in the file or is configurable – e.g. raw video or video grabbers.
As an output option, this inserts the scale
video filter to the end of
the corresponding filtergraph. Please use the scale
filter directly to
insert it at the beginning or some other place.
The format is 'wxh'
(default - same as source).
Sourcepub fn no_video(&mut self) -> &mut Self
pub fn no_video(&mut self) -> &mut Self
Alias for -vn
argument.
As an input option, blocks all video streams of a file from being filtered
or being automatically selected or mapped for any output. See -discard
option to disable streams individually.
As an output option, disables video recording i.e. automatic selection or
mapping of any video stream. For full manual control see the -map
option.
Sourcepub fn pix_fmt<S: AsRef<str>>(&mut self, format: S) -> &mut Self
pub fn pix_fmt<S: AsRef<str>>(&mut self, format: S) -> &mut Self
Alias for -pix_fmt
argument.
Set pixel format. Use -pix_fmts
to show all the supported pixel formats.
If the selected pixel format can not be selected, ffmpeg will print a
warning and select the best pixel format supported by the encoder. If
pix_fmt is prefixed by a +
, ffmpeg will exit with an error if the
requested pixel format can not be selected, and automatic conversions
inside filtergraphs are disabled. If pix_fmt is a single +
, ffmpeg
selects the same pixel format as the input (or graph output) and automatic
conversions are disabled.
Examples found in repository?
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Streams three FFmpeg outputs (raw video, PCM audio, SRT subtitles) to a
/// local TCP listener (started on a background thread via the out-of-view
/// helper `listen_for_connections`), logging FFmpeg events as it runs.
fn main() -> Result<()> {
// Set up a TCP listener
const TCP_PORT: u32 = 3000;
// Channel used to tell the listener thread to shut down once FFmpeg exits.
let (exit_sender, exit_receiver) = channel::<()>();
let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
// Wait for the listener to start
// NOTE(review): a fixed 1s sleep is a startup race; an explicit readiness
// signal from the listener thread would be more robust.
thread::sleep(Duration::from_millis(1000));
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
FfmpegCommand::new()
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Audio output
.map("1:a")
.format("s16le")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Subtitles output
.map("2:s")
.format("srt")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
// _ => {}
// Echo every other event for demonstration purposes.
e => {
println!("{:?}", e);
}
});
// FFmpeg has exited; tell the listener thread to stop accepting connections.
exit_sender.send(())?;
// `unwrap` surfaces a panic in the listener thread; `?` surfaces its Result.
listener_thread.join().unwrap()?;
Ok(())
}
More examples
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
fn main() -> anyhow::Result<()> {
use anyhow::Result;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use ffmpeg_sidecar::named_pipes::NamedPipe;
use ffmpeg_sidecar::pipe_name;
use std::io::Read;
use std::sync::mpsc;
use std::thread;
const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
let mut command = FfmpegCommand::new();
command
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(VIDEO_PIPE_NAME)
// Audio output
.map("1:a")
.format("s16le")
.output(AUDIO_PIPE_NAME)
// Subtitles output
.map("2:s")
.format("srt")
.output(SUBTITLES_PIPE_NAME);
// Create a separate thread for each output pipe
let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
.iter()
.cloned()
.map(|pipe_name| {
// It's important to create the named pipe on the main thread before
// sending it elsewhere so that any errors are caught at the top level.
let mut pipe = NamedPipe::new(pipe_name)?;
println!("[{pipe_name}] pipe created");
let (ready_sender, ready_receiver) = mpsc::channel::<()>();
let thread = thread::spawn(move || -> Result<()> {
// Wait for FFmpeg to start writing
// Only needed for Windows, since Unix will block until a writer has connected
println!("[{pipe_name}] waiting for ready signal");
ready_receiver.recv()?;
// Read continuously until finished
// Note that if the stream of output is interrupted or paused,
// you may need additional logic to keep the read loop alive.
println!("[{pipe_name}] reading from pipe");
let mut buf = vec![0; 1920 * 1080 * 3];
let mut total_bytes_read = 0;
// In the case of subtitles, we'll decode the string contents directly
let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
Some("".to_string())
} else {
None
};
loop {
match pipe.read(&mut buf) {
Ok(bytes_read) => {
total_bytes_read += bytes_read;
// read bytes into string
if let Some(cur_str) = &mut text_content {
let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
text_content = Some(format!("{}{}", cur_str, s));
}
if bytes_read == 0 {
break;
}
}
Err(err) => {
if err.kind() != std::io::ErrorKind::BrokenPipe {
return Err(err.into());
} else {
break;
}
}
}
}
// Log how many bytes were received over this pipe.
// You can visually compare this to the FFmpeg log output to confirm
// that all the expected bytes were captured.
let size_str = if total_bytes_read < 1024 {
format!("{}B", total_bytes_read)
} else {
format!("{}KiB", total_bytes_read / 1024)
};
if let Some(text_content) = text_content {
println!("[{pipe_name}] subtitle text content: ");
println!("{}", text_content.trim());
}
println!("[{pipe_name}] done reading ({size_str} total)");
Ok(())
});
Ok((thread, ready_sender))
})
.collect::<Result<Vec<_>>>()?;
// Start FFmpeg
let mut ready_signal_sent = false;
command
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Signal threads when output is ready
FfmpegEvent::Progress(_) if !ready_signal_sent => {
threads.iter().for_each(|(_, sender)| {
sender.send(()).ok();
});
ready_signal_sent = true;
}
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
_ => {}
});
for (thread, _) in threads {
thread.join().unwrap()?;
}
Ok(())
}
Sourcepub fn hwaccel<S: AsRef<str>>(&mut self, hwaccel: S) -> &mut Self
pub fn hwaccel<S: AsRef<str>>(&mut self, hwaccel: S) -> &mut Self
Alias for -hwaccel
argument.
Use hardware acceleration to decode the matching stream(s). The allowed values of hwaccel are:
- `none`: Do not use any hardware acceleration (the default).
- `auto`: Automatically select the hardware acceleration method.
- `vdpau`: Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
- `dxva2`: Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
- `d3d11va`: Use D3D11VA (DirectX Video Acceleration) hardware acceleration.
- `vaapi`: Use VAAPI (Video Acceleration API) hardware acceleration.
- `qsv`: Use the Intel QuickSync Video acceleration for video transcoding.
  - Unlike most other values, this option does not enable accelerated decoding (that is used automatically whenever a qsv decoder is selected), but accelerated transcoding, without copying the frames into the system memory.
  - For it to work, both the decoder and the encoder must support QSV acceleration and no filters must be used.
This option has no effect if the selected hwaccel is not available or not supported by the chosen decoder.
Note that most acceleration methods are intended for playback and will not
be faster than software decoding on modern CPUs. Additionally, ffmpeg
will usually need to copy the decoded frames from the GPU memory into the
system memory, resulting in further performance loss. This option is thus
mainly useful for testing.
Sourcepub fn no_audio(&mut self) -> &mut Self
pub fn no_audio(&mut self) -> &mut Self
Alias for -an
argument.
As an input option, blocks all audio streams of a file from being filtered
or being automatically selected or mapped for any output. See -discard
option to disable streams individually.
As an output option, disables audio recording i.e. automatic selection or
mapping of any audio stream. For full manual control see the -map
option.
Sourcepub fn map<S: AsRef<str>>(&mut self, map_string: S) -> &mut Self
pub fn map<S: AsRef<str>>(&mut self, map_string: S) -> &mut Self
Alias for -map
argument.
Create one or more streams in the output file. This option has two forms
for specifying the data source(s): the first selects one or more streams
from some input file (specified with -i
), the second takes an output
from some complex filtergraph (specified with -filter_complex
or
-filter_complex_script
).
In the first form, an output stream is created for every stream from the input file with the index input_file_id. If stream_specifier is given, only those streams that match the specifier are used (see the Stream specifiers section for the stream_specifier syntax).
A -
character before the stream identifier creates a “negative” mapping.
It disables matching streams from already created mappings.
A trailing ?
after the stream index will allow the map to be optional:
if the map matches no streams the map will be ignored instead of failing.
Note the map will still fail if an invalid input file index is used; such
as if the map refers to a non-existent input.
An alternative [linklabel]
form will map outputs from complex filter
graphs (see the -filter_complex
option) to the output file. linklabel
must correspond to a defined output link label in the graph.
This option may be specified multiple times, each adding more streams to
the output file. Any given input stream may also be mapped any number of
times as a source for different output streams, e.g. in order to use
different encoding options and/or filters. The streams are created in the
output in the same order in which the -map
options are given on the
commandline.
Using this option disables the default mappings for this output file.
Examples found in repository?
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
fn main() -> Result<()> {
// Set up a TCP listener
const TCP_PORT: u32 = 3000;
let (exit_sender, exit_receiver) = channel::<()>();
let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
// Wait for the listener to start
thread::sleep(Duration::from_millis(1000));
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
FfmpegCommand::new()
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Audio output
.map("1:a")
.format("s16le")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
// Subtitles output
.map("2:s")
.format("srt")
.output(format!("tcp://127.0.0.1:{TCP_PORT}"))
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
// _ => {}
e => {
println!("{:?}", e);
}
});
exit_sender.send(())?;
listener_thread.join().unwrap()?;
Ok(())
}
More examples
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
fn main() -> anyhow::Result<()> {
use anyhow::Result;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use ffmpeg_sidecar::named_pipes::NamedPipe;
use ffmpeg_sidecar::pipe_name;
use std::io::Read;
use std::sync::mpsc;
use std::thread;
const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
// Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
let mut command = FfmpegCommand::new();
command
// Global flags
.hide_banner()
.overwrite() // <- overwrite required on windows
// Generate test video
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60:duration=10")
// Generate test audio
.format("lavfi")
.input("sine=frequency=1000:duration=10")
// Generate test subtitles
.format("srt")
.input(
"data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
)
// Video output
.map("0:v")
.format("rawvideo")
.pix_fmt("rgb24")
.output(VIDEO_PIPE_NAME)
// Audio output
.map("1:a")
.format("s16le")
.output(AUDIO_PIPE_NAME)
// Subtitles output
.map("2:s")
.format("srt")
.output(SUBTITLES_PIPE_NAME);
// Create a separate thread for each output pipe
let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
.iter()
.cloned()
.map(|pipe_name| {
// It's important to create the named pipe on the main thread before
// sending it elsewhere so that any errors are caught at the top level.
let mut pipe = NamedPipe::new(pipe_name)?;
println!("[{pipe_name}] pipe created");
let (ready_sender, ready_receiver) = mpsc::channel::<()>();
let thread = thread::spawn(move || -> Result<()> {
// Wait for FFmpeg to start writing
// Only needed for Windows, since Unix will block until a writer has connected
println!("[{pipe_name}] waiting for ready signal");
ready_receiver.recv()?;
// Read continuously until finished
// Note that if the stream of output is interrupted or paused,
// you may need additional logic to keep the read loop alive.
println!("[{pipe_name}] reading from pipe");
let mut buf = vec![0; 1920 * 1080 * 3];
let mut total_bytes_read = 0;
// In the case of subtitles, we'll decode the string contents directly
let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
Some("".to_string())
} else {
None
};
loop {
match pipe.read(&mut buf) {
Ok(bytes_read) => {
total_bytes_read += bytes_read;
// read bytes into string
if let Some(cur_str) = &mut text_content {
let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
text_content = Some(format!("{}{}", cur_str, s));
}
if bytes_read == 0 {
break;
}
}
Err(err) => {
if err.kind() != std::io::ErrorKind::BrokenPipe {
return Err(err.into());
} else {
break;
}
}
}
}
// Log how many bytes were received over this pipe.
// You can visually compare this to the FFmpeg log output to confirm
// that all the expected bytes were captured.
let size_str = if total_bytes_read < 1024 {
format!("{}B", total_bytes_read)
} else {
format!("{}KiB", total_bytes_read / 1024)
};
if let Some(text_content) = text_content {
println!("[{pipe_name}] subtitle text content: ");
println!("{}", text_content.trim());
}
println!("[{pipe_name}] done reading ({size_str} total)");
Ok(())
});
Ok((thread, ready_sender))
})
.collect::<Result<Vec<_>>>()?;
// Start FFmpeg
let mut ready_signal_sent = false;
command
.print_command()
.spawn()?
.iter()?
.for_each(|event| match event {
// Signal threads when output is ready
FfmpegEvent::Progress(_) if !ready_signal_sent => {
threads.iter().for_each(|(_, sender)| {
sender.send(()).ok();
});
ready_signal_sent = true;
}
// Verify output size from FFmpeg logs (video/audio KiB)
FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
println!("{msg}");
}
// Log any unexpected errors
FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
eprintln!("{msg}");
}
_ => {}
});
for (thread, _) in threads {
thread.join().unwrap()?;
}
Ok(())
}
Sourcepub fn readrate(&mut self, speed: f32) -> &mut Self
pub fn readrate(&mut self, speed: f32) -> &mut Self
Alias for -readrate
argument.
Limit input read speed.
Its value is a floating-point positive number which represents the maximum
duration of media, in seconds, that should be ingested in one second of
wallclock time. Default value is zero and represents no imposed limitation
on speed of ingestion. Value 1
represents real-time speed and is
equivalent to -re
.
Mainly used to simulate a capture device or live input stream (e.g. when reading from a file). Should not be used with a low value when input is an actual capture device or live stream as it may cause packet loss.
It is useful for when flow speed of output packets is important, such as live streaming.
Sourcepub fn realtime(&mut self) -> &mut Self
pub fn realtime(&mut self) -> &mut Self
Alias for -re
.
Read input at native frame rate. This is equivalent to setting -readrate 1
.
Examples found in repository?
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
fn main() {
let mut ffmpeg = FfmpegCommand::new()
.realtime()
.format("lavfi")
.input("testsrc=size=1920x1080:rate=60")
.codec_video("rawvideo")
.format("avi")
.output("-")
.spawn()
.unwrap();
let mut ffplay = Command::new("ffplay")
.args("-i -".split(' '))
.stdin(Stdio::piped())
.spawn()
.unwrap();
let mut ffmpeg_stdout = ffmpeg.take_stdout().unwrap();
let mut ffplay_stdin = ffplay.stdin.take().unwrap();
// pipe from ffmpeg stdout to ffplay stdin
let buf = &mut [0u8; 4096];
loop {
let n = ffmpeg_stdout.read(buf).unwrap();
if n == 0 {
break;
}
ffplay_stdin.write_all(&buf[..n]).unwrap();
}
}
Sourcepub fn fps_mode<S: AsRef<str>>(&mut self, parameter: S) -> &mut Self
pub fn fps_mode<S: AsRef<str>>(&mut self, parameter: S) -> &mut Self
Alias for -fps_mode
argument.
Set video sync method / framerate mode. vsync is applied to all output video streams but can be overridden for a stream by setting fps_mode. vsync is deprecated and will be removed in the future.
For compatibility reasons some of the values for vsync can be specified as numbers (shown in parentheses in the following table).
- `passthrough` (`0`): Each frame is passed with its timestamp from the demuxer to the muxer.
- `cfr` (`1`): Frames will be duplicated and dropped to achieve exactly the requested constant frame rate.
- `vfr` (`2`): Frames are passed through with their timestamp or dropped so as to prevent 2 frames from having the same timestamp.
- `drop`: As passthrough but destroys all timestamps, making the muxer generate fresh timestamps based on frame-rate.
- `auto` (`-1`): Chooses between cfr and vfr depending on muxer capabilities. This is the default method.
Sourcepub fn bitstream_filter_video<S: AsRef<str>>(
&mut self,
bitstream_filters: S,
) -> &mut Self
pub fn bitstream_filter_video<S: AsRef<str>>( &mut self, bitstream_filters: S, ) -> &mut Self
Alias for -bsf:v
argument.
Set bitstream filters for matching streams. bitstream_filters
is a
comma-separated list of bitstream filters. Use the -bsfs
option to get
the list of bitstream filters.
See also: -bsf:s
(subtitles), -bsf:a
(audio), -bsf:d
(data)
Sourcepub fn filter_complex<S: AsRef<str>>(&mut self, filtergraph: S) -> &mut Self
pub fn filter_complex<S: AsRef<str>>(&mut self, filtergraph: S) -> &mut Self
Alias for -filter_complex
argument.
Define a complex filtergraph, i.e. one with arbitrary number of inputs
and/or outputs. For simple graphs – those with one input and one output of
the same type – see the -filter
options. filtergraph
is a description
of the filtergraph, as described in the “Filtergraph syntax” section of
the ffmpeg-filters manual.
Input link labels must refer to input streams using the
[file_index:stream_specifier]
syntax (i.e. the same as -map
uses). If
stream_specifier
matches multiple streams, the first one will be used.
An unlabeled input will be connected to the first unused input stream of
the matching type.
Output link labels are referred to with -map
. Unlabeled outputs are
added to the first output file.
Note that with this option it is possible to use only lavfi sources without normal input files.
Sourcepub fn testsrc(&mut self) -> &mut Self
pub fn testsrc(&mut self) -> &mut Self
Generate a procedural test video. Equivalent to ffmpeg -f lavfi -i testsrc=duration=10
. It also inherits defaults from the testsrc
filter
in FFmpeg: 320x240
size and 25
fps.
Examples found in repository?
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
fn main() {
let mut ffmpeg_runner = FfmpegCommand::new()
.testsrc()
.args(["-metadata", "title=some cool title"])
.overwrite() // -y
.output("output/metadata.mp4")
.print_command()
.spawn()
.unwrap();
ffmpeg_runner
.iter()
.unwrap()
.for_each(|e| {
match e {
FfmpegEvent::Progress(FfmpegProgress { frame, .. }) =>
println!("Current frame: {frame}"),
FfmpegEvent::Log(_level, msg) =>
println!("[ffmpeg] {msg}"),
_ => {}
}
});
}
More examples
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
fn main() -> anyhow::Result<()> {
// Run an FFmpeg command that generates a test video
let iter = FfmpegCommand::new() // <- Builder API like `std::process::Command`
.testsrc() // <- Discoverable aliases for FFmpeg args
.rawvideo() // <- Convenient argument presets
.spawn()? // <- Ordinary `std::process::Child`
.iter()?; // <- Blocking iterator over logs and output
// Use a regular "for" loop to read decoded video data
for frame in iter.filter_frames() {
println!("frame: {}x{}", frame.width, frame.height);
let _pixels: Vec<u8> = frame.data; // <- raw RGB pixels! 🎨
}
Ok(())
}
Sourcepub fn rawvideo(&mut self) -> &mut Self
pub fn rawvideo(&mut self) -> &mut Self
Preset for emitting raw decoded video frames on stdout. Equivalent to -f rawvideo -pix_fmt rgb24 -
.
Examples found in repository?
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
fn main() -> anyhow::Result<()> {
// Run an FFmpeg command that generates a test video
let iter = FfmpegCommand::new() // <- Builder API like `std::process::Command`
.testsrc() // <- Discoverable aliases for FFmpeg args
.rawvideo() // <- Convenient argument presets
.spawn()? // <- Ordinary `std::process::Child`
.iter()?; // <- Blocking iterator over logs and output
// Use a regular "for" loop to read decoded video data
for frame in iter.filter_frames() {
println!("frame: {}x{}", frame.width, frame.height);
let _pixels: Vec<u8> = frame.data; // <- raw RGB pixels! 🎨
}
Ok(())
}
More examples
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
fn main() -> Result<()> {
let iter = FfmpegCommand::new()
.format("lavfi")
.arg("-re") // "realtime"
.input(format!(
"testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
))
.rawvideo()
.spawn()?
.iter()?
.filter_frames();
for frame in iter {
// clear the previous frame
if frame.frame_num > 0 {
for _ in 0..frame.height {
print!("\x1B[{}A", 1);
}
}
// Print the pixels colored with ANSI codes
for y in 0..frame.height {
for x in 0..frame.width {
let idx = (y * frame.width + x) as usize * 3;
let r = frame.data[idx] as u32;
let g = frame.data[idx + 1] as u32;
let b = frame.data[idx + 2] as u32;
print!("\x1B[48;2;{r};{g};{b}m ");
}
println!("\x1B[0m");
}
}
Ok(())
}
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
fn main() {
// Create an H265 source video as a starting point
let input_path = "output/h265.mp4";
if !Path::new(input_path).exists() {
create_h265_source(input_path);
}
// One instance decodes H265 to raw frames
let mut input = FfmpegCommand::new()
.input(input_path)
.rawvideo()
.spawn()
.unwrap();
// Frames can be transformed by Iterator `.map()`.
// This example is a no-op, with frames passed through unaltered.
let transformed_frames = input.iter().unwrap().filter_frames();
// You could easily add some "middleware" processing here:
// - overlay or composite another RGB image (or even another Ffmpeg Iterator)
// - apply a filter like blur or convolution
// Note: some of these operations are also possible with FFmpeg's (somewhat arcane)
// `filtergraph` API, but doing it in Rust gives you much finer-grained
// control, debuggability, and modularity -- you can pull in any Rust crate
// you need.
// A second instance encodes the updated frames back to H265
let mut output = FfmpegCommand::new()
.args([
"-f", "rawvideo", "-pix_fmt", "rgb24", "-s", "600x800", "-r", "30",
]) // note: should be possible to infer these params from the source input stream
.input("-")
.args(["-c:v", "libx265"])
.args(["-y", "output/h265_overlay.mp4"])
.spawn()
.unwrap();
// Connect the two instances
let mut stdin = output.take_stdin().unwrap();
thread::spawn(move || {
// `for_each` blocks through the end of the iterator,
// so we run it in another thread.
transformed_frames.for_each(|f| {
stdin.write_all(&f.data).ok();
});
});
// On the main thread, run the output instance to completion
output.iter().unwrap().for_each(|e| match e {
FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
_ => {}
});
}
Sourcepub fn pipe_stdout(&mut self) -> &mut Self
pub fn pipe_stdout(&mut self) -> &mut Self
Configure the ffmpeg command to produce output on stdout.
Synchronizes two changes:
- Pass `pipe:1` to the ffmpeg command (“output on stdout”)
- Set the `stdout` field of the inner `Command` to `Stdio::piped()`
Sourcepub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Self
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Self
Adds an argument to pass to the program.
Identical to arg
in std::process::Command
.
Examples found in repository?
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
fn create_h265_source(path_str: &str) {
println!("Creating H265 source video: {}", path_str);
FfmpegCommand::new()
.args("-f lavfi -i testsrc=size=600x800:rate=30:duration=15 -c:v libx265".split(' '))
.arg(path_str)
.spawn()
.unwrap()
.iter()
.unwrap()
.for_each(|e| match e {
FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
_ => {}
});
println!("Created H265 source video: {}", path_str);
}
More examples
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
fn main() -> Result<()> {
let iter = FfmpegCommand::new()
.format("lavfi")
.arg("-re") // "realtime"
.input(format!(
"testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
))
.rawvideo()
.spawn()?
.iter()?
.filter_frames();
for frame in iter {
// clear the previous frame
if frame.frame_num > 0 {
for _ in 0..frame.height {
print!("\x1B[{}A", 1);
}
}
// Print the pixels colored with ANSI codes
for y in 0..frame.height {
for x in 0..frame.width {
let idx = (y * frame.width + x) as usize * 3;
let r = frame.data[idx] as u32;
let g = frame.data[idx + 1] as u32;
let b = frame.data[idx + 2] as u32;
print!("\x1B[48;2;{r};{g};{b}m ");
}
println!("\x1B[0m");
}
}
Ok(())
}
Sourcepub fn args<I, S>(&mut self, args: I) -> &mut Self
pub fn args<I, S>(&mut self, args: I) -> &mut Self
Adds multiple arguments to pass to the program.
Identical to args
in std::process::Command
.
Examples found in repository?
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
fn main() {
let fps = 60;
let duration = 10;
let total_frames = fps * duration;
let arg_string = format!(
"-f lavfi -i testsrc=duration={}:size=1920x1080:rate={} -y output/test.mp4",
duration, fps
);
FfmpegCommand::new()
.args(arg_string.split(' '))
.spawn()
.unwrap()
.iter()
.unwrap()
.filter_progress()
.for_each(|progress| println!("{}%", (progress.frame * 100) / total_frames));
}
More examples
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
fn main() {
let mut ffmpeg_runner = FfmpegCommand::new()
.testsrc()
.args(["-metadata", "title=some cool title"])
.overwrite() // -y
.output("output/metadata.mp4")
.print_command()
.spawn()
.unwrap();
ffmpeg_runner
.iter()
.unwrap()
.for_each(|e| {
match e {
FfmpegEvent::Progress(FfmpegProgress { frame, .. }) =>
println!("Current frame: {frame}"),
FfmpegEvent::Log(_level, msg) =>
println!("[ffmpeg] {msg}"),
_ => {}
}
});
}
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
fn main() {
// Create an H265 source video as a starting point
let input_path = "output/h265.mp4";
if !Path::new(input_path).exists() {
create_h265_source(input_path);
}
// One instance decodes H265 to raw frames
let mut input = FfmpegCommand::new()
.input(input_path)
.rawvideo()
.spawn()
.unwrap();
// Frames can be transformed by Iterator `.map()`.
// This example is a no-op, with frames passed through unaltered.
let transformed_frames = input.iter().unwrap().filter_frames();
// You could easily add some "middleware" processing here:
// - overlay or composite another RGB image (or even another Ffmpeg Iterator)
// - apply a filter like blur or convolution
// Note: some of these operations are also possible with FFmpeg's (somewhat arcane)
// `filtergraph` API, but doing it in Rust gives you much finer-grained
// control, debuggability, and modularity -- you can pull in any Rust crate
// you need.
// A second instance encodes the updated frames back to H265
let mut output = FfmpegCommand::new()
.args([
"-f", "rawvideo", "-pix_fmt", "rgb24", "-s", "600x800", "-r", "30",
]) // note: should be possible to infer these params from the source input stream
.input("-")
.args(["-c:v", "libx265"])
.args(["-y", "output/h265_overlay.mp4"])
.spawn()
.unwrap();
// Connect the two instances
let mut stdin = output.take_stdin().unwrap();
thread::spawn(move || {
// `for_each` blocks through the end of the iterator,
// so we run it in another thread.
transformed_frames.for_each(|f| {
stdin.write_all(&f.data).ok();
});
});
// On the main thread, run the output instance to completion
output.iter().unwrap().for_each(|e| match e {
FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
_ => {}
});
}
/// Create a H265 source video from scratch
fn create_h265_source(path_str: &str) {
println!("Creating H265 source video: {}", path_str);
FfmpegCommand::new()
.args("-f lavfi -i testsrc=size=600x800:rate=30:duration=15 -c:v libx265".split(' '))
.arg(path_str)
.spawn()
.unwrap()
.iter()
.unwrap()
.for_each(|e| match e {
FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
_ => {}
});
println!("Created H265 source video: {}", path_str);
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/// Live microphone loudness meter (Windows/dshow): find the default audio
/// input device, then analyze it with FFmpeg's `ebur128` filter and draw a
/// volume bar in the terminal. Returns an error if no device is found or the
/// loudness value cannot be parsed.
pub fn main() -> Result<()> {
  if cfg!(not(windows)) {
    eprintln!("Note: Methods for capturing audio are platform-specific and this demo is intended for Windows.");
    eprintln!("On Linux or Mac, you need to switch from the `dshow` format to a different one supported on your platform.");
    eprintln!("Make sure to also include format-specific arguments such as `-audio_buffer_size`.");
    eprintln!("Pull requests are welcome to make this demo cross-platform!");
  }
  // First step: find default audio input device
  // Runs an `ffmpeg -list_devices` command and selects the first one found
  // Sample log output: [dshow @ 000001c9babdb000] "Headset Microphone (Arctis 7 Chat)" (audio)
  let audio_device = FfmpegCommand::new()
    .hide_banner()
    .args(&["-list_devices", "true"])
    .format("dshow")
    .input("dummy")
    .spawn()?
    .iter()?
    .into_ffmpeg_stderr()
    // The device name is the first double-quoted token on the matching line.
    .find(|line| line.contains("(audio)"))
    .map(|line| line.split('\"').nth(1).map(|s| s.to_string()))
    .context("No audio device found")?
    .context("Failed to parse audio device")?;
  println!("Listening to audio device: {}", audio_device);
  // Second step: Capture audio and analyze w/ `ebur128` audio filter
  // Loudness metadata will be printed to the FFmpeg logs
  // Docs: <https://ffmpeg.org/ffmpeg-filters.html#ebur128-1>
  let iter = FfmpegCommand::new()
    .format("dshow")
    .args("-audio_buffer_size 50".split(' ')) // reduces latency to 50ms (dshow-specific)
    .input(format!("audio={audio_device}"))
    .args("-af ebur128=metadata=1,ametadata=print".split(' '))
    .format("null")
    .output("-")
    .spawn()?
    .iter()?;
  // Note: even though the audio device name may have spaces, it should *not* be
  // in quotes (""). Quotes are only needed on the command line to separate
  // different arguments. Since Rust invokes the command directly without a
  // shell interpreter, args are already divided up correctly. Any quotes
  // would be included in the device name instead and the command would fail.
  // <https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/648#issuecomment-866242144>
  let mut first_volume_event = true;
  for event in iter {
    match event {
      FfmpegEvent::Error(e) | FfmpegEvent::Log(LogLevel::Error | LogLevel::Fatal, e) => {
        eprintln!("{e}");
      }
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.contains("lavfi.r128.M=") => {
        if let Some(volume) = msg.split("lavfi.r128.M=").last() {
          // Sample log output: [Parsed_ametadata_1 @ 0000024c27effdc0] [info] lavfi.r128.M=-120.691
          // M = "momentary loudness"; a sliding time window of 400ms
          // Volume scale is roughly -70 to 0 LUFS. Anything below -70 is silence.
          // See <https://en.wikipedia.org/wiki/EBU_R_128#Metering>
          let volume_f32 = volume.parse::<f32>().context("Failed to parse volume")?;
          // Map LUFS into a 0..=14 bar length: one segment per 5 LUFS, floored at 0.
          let volume_normalized: usize = max(((volume_f32 / 5.0).round() as i32) + 14, 0) as usize;
          let volume_percent = ((volume_normalized as f32 / 14.0) * 100.0).round();
          // Clear previous line of output
          if !first_volume_event {
            print!("\x1b[1A\x1b[2K"); // ANSI: cursor up one line, then erase it
          } else {
            first_volume_event = false;
          }
          // Blinking red dot to indicate recording
          let time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();
          let recording_indicator = if time % 2 == 0 { "🔴" } else { " " };
          println!(
            "{} {} {}%",
            recording_indicator,
            repeat('█').take(volume_normalized).collect::<String>(),
            volume_percent
          );
        }
      }
      _ => {}
    }
  }
  Ok(())
}
Source § pub fn get_args(&self) -> CommandArgs<'_>
pub fn get_args(&self) -> CommandArgs<'_>
Returns an iterator of the arguments that will be passed to the program.
Identical to get_args
in std::process::Command
.
Source § pub fn spawn(&mut self) -> Result<FfmpegChild>
pub fn spawn(&mut self) -> Result<FfmpegChild>
Spawn the ffmpeg command as a child process, wrapping it in a
FfmpegChild
interface.
Please note that if wait()
is not called on the result, the process is not cleaned up correctly,
resulting in a zombie process until your main thread exits.
Identical to spawn
in std::process::Command
.
Examples found in repository?
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
/// Render a 10-second 1080p test video to disk, printing encoding progress
/// as a percentage of the total frame count.
fn main() {
  let fps = 60;
  let duration = 10;
  let total_frames = fps * duration;
  // Assemble the whole FFmpeg invocation as one string, split on spaces.
  let arg_string = format!(
    "-f lavfi -i testsrc=duration={}:size=1920x1080:rate={} -y output/test.mp4",
    duration, fps
  );
  let mut child = FfmpegCommand::new()
    .args(arg_string.split(' '))
    .spawn()
    .unwrap();
  // Keep only progress events and convert frame counts to percentages.
  for progress in child.iter().unwrap().filter_progress() {
    println!("{}%", (progress.frame * 100) / total_frames);
  }
}
More examples
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/// Encode a test source with a custom `title` metadata tag, then relay
/// FFmpeg's progress and log lines to stdout.
fn main() {
  let mut runner = FfmpegCommand::new()
    .testsrc()
    .args(["-metadata", "title=some cool title"])
    .overwrite() // -y
    .output("output/metadata.mp4")
    .print_command()
    .spawn()
    .unwrap();
  // Drain the event iterator with a plain for loop.
  for event in runner.iter().unwrap() {
    match event {
      FfmpegEvent::Progress(FfmpegProgress { frame, .. }) => {
        println!("Current frame: {frame}")
      }
      FfmpegEvent::Log(_level, msg) => println!("[ffmpeg] {msg}"),
      _ => {}
    }
  }
}
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/// Minimal end-to-end demo: spawn a test-source command emitting raw video
/// and read each decoded frame's dimensions and pixel buffer.
fn main() -> anyhow::Result<()> {
  // Builder API like `std::process::Command`, with FFmpeg-specific presets.
  let mut child = FfmpegCommand::new().testsrc().rawvideo().spawn()?;
  // Blocking iterator over logs and output, narrowed to decoded frames.
  for frame in child.iter()?.filter_frames() {
    println!("frame: {}x{}", frame.width, frame.height);
    let _pixels: Vec<u8> = frame.data; // raw RGB pixel bytes
  }
  Ok(())
}
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Generate realtime raw video in an AVI container on stdout and play it by
/// piping the bytes into ffplay's stdin.
fn main() {
  let mut ffmpeg = FfmpegCommand::new()
    .realtime()
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60")
    .codec_video("rawvideo")
    .format("avi")
    .output("-")
    .spawn()
    .unwrap();
  let mut ffplay = Command::new("ffplay")
    .args("-i -".split(' '))
    .stdin(Stdio::piped())
    .spawn()
    .unwrap();
  let mut source = ffmpeg.take_stdout().unwrap();
  let mut sink = ffplay.stdin.take().unwrap();
  // Copy ffmpeg stdout -> ffplay stdin in 4 KiB chunks until EOF (read of 0).
  let mut chunk = [0u8; 4096];
  loop {
    match source.read(&mut chunk).unwrap() {
      0 => break,
      n => sink.write_all(&chunk[..n]).unwrap(),
    }
  }
}
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
/// Stream a realtime test pattern and render each frame in the terminal using
/// 24-bit ANSI background colors, one character cell per pixel.
fn main() -> Result<()> {
  let iter = FfmpegCommand::new()
    .format("lavfi")
    .arg("-re") // "realtime"
    .input(format!(
      "testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
    ))
    .rawvideo()
    .spawn()?
    .iter()?
    .filter_frames();
  for frame in iter {
    // clear the previous frame: move the cursor up one line per drawn row
    if frame.frame_num > 0 {
      for _ in 0..frame.height {
        print!("\x1B[{}A", 1);
      }
    }
    // Print the pixels colored with ANSI codes
    for y in 0..frame.height {
      for x in 0..frame.width {
        // Packed RGB24 layout: 3 bytes per pixel, row-major order.
        let idx = (y * frame.width + x) as usize * 3;
        let r = frame.data[idx] as u32;
        let g = frame.data[idx + 1] as u32;
        let b = frame.data[idx + 2] as u32;
        // Set the background color, then print a space as the "pixel".
        print!("\x1B[48;2;{r};{g};{b}m ");
      }
      println!("\x1B[0m"); // reset colors at the end of each row
    }
  }
  Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demux a single FFmpeg invocation into three TCP streams (video, audio, and
/// subtitles) all sent to one local listener, then verify the output via logs.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  // Channel used to signal the listener thread to shut down once FFmpeg exits.
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));
  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  // Note: argument order matters — each `format`/`map` applies to the next
  // `input`/`output` in the chain.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles (base64-encoded SRT payload in a data URL)
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });
  // FFmpeg is done; tell the listener to stop and propagate any thread error.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
Source § pub fn print_command(&mut self) -> &mut Self
pub fn print_command(&mut self) -> &mut Self
Print a command that can be copy-pasted to run in the terminal. Requires
&mut self
so that it chains seamlessly with other methods in the
interface. Sample output:
ffmpeg \
-f lavfi \
-i testsrc=duration=10 output/test.mp4
Examples found in repository?
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/// Encode a test source with a custom `title` metadata tag and echo FFmpeg's
/// progress and log events to stdout.
fn main() {
  let mut ffmpeg_runner = FfmpegCommand::new()
    .testsrc()
    .args(["-metadata", "title=some cool title"])
    .overwrite() // -y
    .output("output/metadata.mp4")
    .print_command()
    .spawn()
    .unwrap();
  // Blocking iteration over parsed stderr/stdout events until FFmpeg exits.
  ffmpeg_runner
    .iter()
    .unwrap()
    .for_each(|e| {
      match e {
        FfmpegEvent::Progress(FfmpegProgress { frame, .. }) =>
          println!("Current frame: {frame}"),
        FfmpegEvent::Log(_level, msg) =>
          println!("[ffmpeg] {msg}"),
        _ => {}
      }
    });
}
More examples
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demux a single FFmpeg invocation into three TCP streams (video, audio, and
/// subtitles) all sent to one local listener, then verify the output via logs.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  // Channel used to signal the listener thread to shut down once FFmpeg exits.
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));
  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  // Note: argument order matters — each `format`/`map` applies to the next
  // `input`/`output` in the chain.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles (base64-encoded SRT payload in a data URL)
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });
  // FFmpeg is done; tell the listener to stop and propagate any thread error.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
/// Demux one FFmpeg invocation into three named pipes (video, audio, and
/// subtitles), with one reader thread per pipe. Demonstrates coordinating
/// pipe creation, FFmpeg startup, and reader readiness across threads.
fn main() -> anyhow::Result<()> {
  use anyhow::Result;
  use ffmpeg_sidecar::command::FfmpegCommand;
  use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
  use ffmpeg_sidecar::named_pipes::NamedPipe;
  use ffmpeg_sidecar::pipe_name;
  use std::io::Read;
  use std::sync::mpsc;
  use std::thread;
  // `pipe_name!` expands to a platform-appropriate pipe path (e.g. a
  // `\\.\pipe\` path on Windows) — TODO confirm exact expansion in crate docs.
  const VIDEO_PIPE_NAME: &str = pipe_name!("ffmpeg_video");
  const AUDIO_PIPE_NAME: &str = pipe_name!("ffmpeg_audio");
  const SUBTITLES_PIPE_NAME: &str = pipe_name!("ffmpeg_subtitles");
  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  let mut command = FfmpegCommand::new();
  command
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles (base64-encoded SRT payload in a data URL)
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(VIDEO_PIPE_NAME)
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(AUDIO_PIPE_NAME)
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(SUBTITLES_PIPE_NAME);
  // Create a separate thread for each output pipe
  let threads = [VIDEO_PIPE_NAME, AUDIO_PIPE_NAME, SUBTITLES_PIPE_NAME]
    .iter()
    .cloned()
    .map(|pipe_name| {
      // It's important to create the named pipe on the main thread before
      // sending it elsewhere so that any errors are caught at the top level.
      let mut pipe = NamedPipe::new(pipe_name)?;
      println!("[{pipe_name}] pipe created");
      // One-shot channel: the main thread signals when FFmpeg starts writing.
      let (ready_sender, ready_receiver) = mpsc::channel::<()>();
      let thread = thread::spawn(move || -> Result<()> {
        // Wait for FFmpeg to start writing
        // Only needed for Windows, since Unix will block until a writer has connected
        println!("[{pipe_name}] waiting for ready signal");
        ready_receiver.recv()?;
        // Read continuously until finished
        // Note that if the stream of output is interrupted or paused,
        // you may need additional logic to keep the read loop alive.
        println!("[{pipe_name}] reading from pipe");
        // Buffer sized for one full 1080p RGB24 frame.
        let mut buf = vec![0; 1920 * 1080 * 3];
        let mut total_bytes_read = 0;
        // In the case of subtitles, we'll decode the string contents directly
        let mut text_content = if pipe_name == SUBTITLES_PIPE_NAME {
          Some("".to_string())
        } else {
          None
        };
        loop {
          match pipe.read(&mut buf) {
            Ok(bytes_read) => {
              total_bytes_read += bytes_read;
              // read bytes into string
              if let Some(cur_str) = &mut text_content {
                let s = std::str::from_utf8(&buf[..bytes_read]).unwrap();
                text_content = Some(format!("{}{}", cur_str, s));
              }
              if bytes_read == 0 {
                break;
              }
            }
            Err(err) => {
              // A broken pipe just means the writer (FFmpeg) is done.
              if err.kind() != std::io::ErrorKind::BrokenPipe {
                return Err(err.into());
              } else {
                break;
              }
            }
          }
        }
        // Log how many bytes were received over this pipe.
        // You can visually compare this to the FFmpeg log output to confirm
        // that all the expected bytes were captured.
        let size_str = if total_bytes_read < 1024 {
          format!("{}B", total_bytes_read)
        } else {
          format!("{}KiB", total_bytes_read / 1024)
        };
        if let Some(text_content) = text_content {
          println!("[{pipe_name}] subtitle text content: ");
          println!("{}", text_content.trim());
        }
        println!("[{pipe_name}] done reading ({size_str} total)");
        Ok(())
      });
      Ok((thread, ready_sender))
    })
    .collect::<Result<Vec<_>>>()?;
  // Start FFmpeg
  let mut ready_signal_sent = false;
  command
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Signal threads when output is ready (first progress event means
      // FFmpeg has opened its outputs and begun writing).
      FfmpegEvent::Progress(_) if !ready_signal_sent => {
        threads.iter().for_each(|(_, sender)| {
          sender.send(()).ok();
        });
        ready_signal_sent = true;
      }
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      _ => {}
    });
  // Wait for all readers to drain their pipes and surface any errors.
  for (thread, _) in threads {
    thread.join().unwrap()?;
  }
  Ok(())
}
Source § pub fn create_no_window(&mut self) -> &mut Self
pub fn create_no_window(&mut self) -> &mut Self
Disable creating a new console window for the spawned process on Windows. Has no effect on other platforms. This can be useful when spawning a command from a GUI program.
This is called automatically in the constructor. To override, use
CommandExt::creation_flags()
directly on the inner Command
.
Source § pub fn new() -> Self
pub fn new() -> Self
Examples found in repository?
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
/// Render a 10-second 1080p test video to disk, printing encoding progress
/// as a percentage of the total frame count.
fn main() {
  let fps = 60;
  let duration = 10;
  let total_frames = fps * duration;
  // Assemble the whole FFmpeg invocation as one string, split on spaces.
  let arg_string = format!(
    "-f lavfi -i testsrc=duration={}:size=1920x1080:rate={} -y output/test.mp4",
    duration, fps
  );
  FfmpegCommand::new()
    .args(arg_string.split(' '))
    .spawn()
    .unwrap()
    .iter()
    .unwrap()
    // Keep only progress events; convert frame counts to percentages.
    .filter_progress()
    .for_each(|progress| println!("{}%", (progress.frame * 100) / total_frames));
}
More examples
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/// Encode a test source with a custom `title` metadata tag and echo FFmpeg's
/// progress and log events to stdout.
fn main() {
  let mut ffmpeg_runner = FfmpegCommand::new()
    .testsrc()
    .args(["-metadata", "title=some cool title"])
    .overwrite() // -y
    .output("output/metadata.mp4")
    .print_command()
    .spawn()
    .unwrap();
  // Blocking iteration over parsed stderr/stdout events until FFmpeg exits.
  ffmpeg_runner
    .iter()
    .unwrap()
    .for_each(|e| {
      match e {
        FfmpegEvent::Progress(FfmpegProgress { frame, .. }) =>
          println!("Current frame: {frame}"),
        FfmpegEvent::Log(_level, msg) =>
          println!("[ffmpeg] {msg}"),
        _ => {}
      }
    });
}
8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/// Minimal end-to-end demo: spawn a test-source command emitting raw video
/// and read each decoded frame's dimensions and pixel buffer.
fn main() -> anyhow::Result<()> {
  // Run an FFmpeg command that generates a test video
  let iter = FfmpegCommand::new() // <- Builder API like `std::process::Command`
    .testsrc() // <- Discoverable aliases for FFmpeg args
    .rawvideo() // <- Convenient argument presets
    .spawn()? // <- Ordinary `std::process::Child`
    .iter()?; // <- Blocking iterator over logs and output
  // Use a regular "for" loop to read decoded video data
  for frame in iter.filter_frames() {
    println!("frame: {}x{}", frame.width, frame.height);
    let _pixels: Vec<u8> = frame.data; // <- raw RGB pixels! 🎨
  }
  Ok(())
}
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/// Generate realtime raw video in an AVI container on stdout and play it by
/// piping the bytes into ffplay's stdin.
fn main() {
  let mut ffmpeg = FfmpegCommand::new()
    .realtime()
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60")
    .codec_video("rawvideo")
    .format("avi")
    .output("-")
    .spawn()
    .unwrap();
  // ffplay reads the AVI stream from its own stdin ("-i -").
  let mut ffplay = Command::new("ffplay")
    .args("-i -".split(' '))
    .stdin(Stdio::piped())
    .spawn()
    .unwrap();
  let mut ffmpeg_stdout = ffmpeg.take_stdout().unwrap();
  let mut ffplay_stdin = ffplay.stdin.take().unwrap();
  // pipe from ffmpeg stdout to ffplay stdin in 4 KiB chunks until EOF
  let buf = &mut [0u8; 4096];
  loop {
    let n = ffmpeg_stdout.read(buf).unwrap();
    if n == 0 {
      break;
    }
    ffplay_stdin.write_all(&buf[..n]).unwrap();
  }
}
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
/// Stream a realtime test pattern and render each frame in the terminal using
/// 24-bit ANSI background colors, one character cell per pixel.
fn main() -> Result<()> {
  let iter = FfmpegCommand::new()
    .format("lavfi")
    .arg("-re") // "realtime"
    .input(format!(
      "testsrc=size={OUTPUT_WIDTH}x{OUTPUT_HEIGHT}:rate={OUTPUT_FRAMERATE}"
    ))
    .rawvideo()
    .spawn()?
    .iter()?
    .filter_frames();
  for frame in iter {
    // clear the previous frame: move the cursor up one line per drawn row
    if frame.frame_num > 0 {
      for _ in 0..frame.height {
        print!("\x1B[{}A", 1);
      }
    }
    // Print the pixels colored with ANSI codes
    for y in 0..frame.height {
      for x in 0..frame.width {
        // Packed RGB24 layout: 3 bytes per pixel, row-major order.
        let idx = (y * frame.width + x) as usize * 3;
        let r = frame.data[idx] as u32;
        let g = frame.data[idx + 1] as u32;
        let b = frame.data[idx + 2] as u32;
        // Set the background color, then print a space as the "pixel".
        print!("\x1B[48;2;{r};{g};{b}m ");
      }
      println!("\x1B[0m"); // reset colors at the end of each row
    }
  }
  Ok(())
}
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/// Demux a single FFmpeg invocation into three TCP streams (video, audio, and
/// subtitles) all sent to one local listener, then verify the output via logs.
fn main() -> Result<()> {
  // Set up a TCP listener
  const TCP_PORT: u32 = 3000;
  // Channel used to signal the listener thread to shut down once FFmpeg exits.
  let (exit_sender, exit_receiver) = channel::<()>();
  let listener_thread = thread::spawn(|| listen_for_connections(TCP_PORT, exit_receiver));
  // Wait for the listener to start
  thread::sleep(Duration::from_millis(1000));
  // Prepare an FFmpeg command with separate outputs for video, audio, and subtitles.
  // Note: argument order matters — each `format`/`map` applies to the next
  // `input`/`output` in the chain.
  FfmpegCommand::new()
    // Global flags
    .hide_banner()
    .overwrite() // <- overwrite required on windows
    // Generate test video
    .format("lavfi")
    .input("testsrc=size=1920x1080:rate=60:duration=10")
    // Generate test audio
    .format("lavfi")
    .input("sine=frequency=1000:duration=10")
    // Generate test subtitles (base64-encoded SRT payload in a data URL)
    .format("srt")
    .input(
      "data:text/plain;base64,MQ0KMDA6MDA6MDAsMDAwIC0tPiAwMDowMDoxMCw1MDANCkhlbGxvIFdvcmxkIQ==",
    )
    // Video output
    .map("0:v")
    .format("rawvideo")
    .pix_fmt("rgb24")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Audio output
    .map("1:a")
    .format("s16le")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    // Subtitles output
    .map("2:s")
    .format("srt")
    .output(format!("tcp://127.0.0.1:{TCP_PORT}"))
    .print_command()
    .spawn()?
    .iter()?
    .for_each(|event| match event {
      // Verify output size from FFmpeg logs (video/audio KiB)
      FfmpegEvent::Log(LogLevel::Info, msg) if msg.starts_with("[out#") => {
        println!("{msg}");
      }
      // Log any unexpected errors
      FfmpegEvent::Log(LogLevel::Warning | LogLevel::Error | LogLevel::Fatal, msg) => {
        eprintln!("{msg}");
      }
      // _ => {}
      e => {
        println!("{:?}", e);
      }
    });
  // FFmpeg is done; tell the listener to stop and propagate any thread error.
  exit_sender.send(())?;
  listener_thread.join().unwrap()?;
  Ok(())
}
pub fn new_with_path<S: AsRef<OsStr>>(path_to_ffmpeg_binary: S) -> Self
Source § pub fn as_inner_mut(&mut self) -> &mut Command
pub fn as_inner_mut(&mut self) -> &mut Command
Escape hatch to mutably access the inner Command
.