diff --git a/API.md b/API.md index d4c5f0ac8a..cea3e7a0a6 100644 --- a/API.md +++ b/API.md @@ -114,6 +114,10 @@ Everything else inherited from [Uint8Array](https://developer.mozilla.org/en-US/ [spawn](https://nodejs.org/api/child_process.html#child_processspawncommand-args-options) +[exec](https://nodejs.org/api/child_process.html#child_processexeccommand-options-callback) + +[execFile](https://nodejs.org/api/child_process.html#child_processexecfilefile-args-options-callback) + ## console [Console](https://nodejs.org/api/console.html#class-console) diff --git a/modules/llrt_child_process/Cargo.toml b/modules/llrt_child_process/Cargo.toml index 691e0aeaf8..830bead498 100644 --- a/modules/llrt_child_process/Cargo.toml +++ b/modules/llrt_child_process/Cargo.toml @@ -20,10 +20,11 @@ llrt_utils = { version = "0.7.0-beta", path = "../../libs/llrt_utils", default-f rquickjs = { git = "https://github.com/DelSkayn/rquickjs.git", version = "0.10.0", features = [ "std", ], default-features = false } -tokio = { version = "1", features = ["process"], default-features = false } +tokio = { version = "1", features = ["process", "time"], default-features = false } [target.'cfg(unix)'.dependencies] libc = { version = "0.2", default-features = false } [dev-dependencies] llrt_test = { path = "../../libs/llrt_test" } +tokio = { version = "1", features = ["rt"] } diff --git a/modules/llrt_child_process/src/lib.rs b/modules/llrt_child_process/src/lib.rs index 65f2e04ba7..ebd00d6586 100644 --- a/modules/llrt_child_process/src/lib.rs +++ b/modules/llrt_child_process/src/lib.rs @@ -16,7 +16,9 @@ use std::{ collections::HashMap, io::Result as IoResult, process::{Command as StdCommand, Stdio}, + ptr::NonNull, sync::{Arc, RwLock}, + time::Duration, }; use llrt_context::CtxExtension; @@ -35,11 +37,11 @@ use rquickjs::{ convert::Coerced, module::{Declarations, Exports, ModuleDef}, prelude::{Func, Opt, Rest, This}, - Class, Ctx, Error, Exception, IntoJs, Result, Value, + qjs, Array, Class, Ctx, 
Error, Exception, Function, IntoJs, Object, Persistent, Result, Value, }; use tokio::{ - io::AsyncRead, - process::{Child, Command}, + io::{AsyncRead, AsyncReadExt}, + process::{Child, ChildStderr, ChildStdout, Command}, sync::{ broadcast::{channel as broadcast_channel, Receiver, Sender}, oneshot::Receiver as OneshotReceiver, @@ -102,13 +104,13 @@ fn prepare_shell_args( vec!["-c".into(), string_args] } -#[allow(dead_code)] #[rquickjs::class] #[derive(rquickjs::JsLifetime)] pub struct ChildProcess<'js> { emitter: EventEmitter<'js>, - args: Option>, - command: String, + /// Channel to signal process termination. On Windows, used in kill(). + /// The receiver is used by run_with_accumulation on all platforms. + #[cfg_attr(unix, allow(dead_code))] kill_tx: Option>, pid: Option, } @@ -146,6 +148,54 @@ impl StdioEnum { } } +/// Configuration for exec/execFile accumulation mode. +/// Uses Persistent to safely hold the callback across async boundaries. +/// This is the same pattern used by llrt_timers for setTimeout/setInterval. +struct ExecConfig { + /// The callback saved as Persistent for async safety + callback: Persistent>, + /// Raw context pointer for restoring the callback + raw_ctx: NonNull, + /// Maximum buffer size for stdout + stderr combined (default: 1MB) + max_buffer: usize, + /// Timeout in milliseconds (None = no timeout) + timeout_ms: Option, + /// Signal to send on timeout (default: "SIGTERM") + kill_signal: String, +} + +// SAFETY: The callback is only restored on the same JS runtime. +// LLRT is single-threaded, so this is safe. 
+unsafe impl Send for ExecConfig {} + +impl ExecConfig { + fn new<'js>( + ctx: &Ctx<'js>, + callback: Function<'js>, + max_buffer: usize, + timeout_ms: Option, + kill_signal: String, + ) -> Self { + Self { + callback: Persistent::::save(ctx, callback), + raw_ctx: ctx.as_raw(), + max_buffer, + timeout_ms, + kill_signal, + } + } +} + +/// Result of accumulated execution +struct AccumulatedResult { + stdout: Vec, + stderr: Vec, + exit_code: Option, + signal: Option, + timed_out: bool, + max_buffer_exceeded: bool, +} + #[rquickjs::methods] impl<'js> ChildProcess<'js> { #[qjs(get)] @@ -181,18 +231,11 @@ impl<'js> ChildProcess<'js> { } impl<'js> ChildProcess<'js> { - fn new( - ctx: Ctx<'js>, - command: String, - args: Option>, - child: IoResult, - ) -> Result> { + fn new(ctx: Ctx<'js>, command: String, child: IoResult) -> Result> { let (kill_tx, kill_rx) = broadcast_channel::<()>(1); let instance = Self { emitter: EventEmitter::new(), - command: command.clone(), - args, pid: None, kill_tx: Some(kill_tx), }; @@ -314,6 +357,301 @@ impl<'js> ChildProcess<'js> { } Ok(instance) } + + /// Create a ChildProcess with accumulation mode for exec/execFile. + /// Streams are set to null, and the callback is invoked with (error, stdout, stderr). 
+ fn new_with_callback( + ctx: Ctx<'js>, + command: String, + child: IoResult, + config: ExecConfig, + ) -> Result> { + let (kill_tx, kill_rx) = broadcast_channel::<()>(1); + + let instance = Self { + emitter: EventEmitter::new(), + pid: None, + kill_tx: Some(kill_tx), + }; + + let instance = Class::instance(ctx.clone(), instance)?; + + // Set streams to null for accumulation mode (matches Node.js exec behavior) + instance.set("stdout", rquickjs::Null)?; + instance.set("stderr", rquickjs::Null)?; + instance.set("stdin", rquickjs::Null)?; + + match child { + Ok(mut child) => { + instance.borrow_mut().pid = child.id(); + + // Take ownership of stdout/stderr for direct reading + let stdout = child.stdout.take(); + let stderr = child.stderr.take(); + + let instance2 = instance.clone(); + + ctx.spawn_exit(async move { + let result = Self::run_with_accumulation( + child, + stdout, + stderr, + config.max_buffer, + config.timeout_ms, + config.kill_signal, + kill_rx, + ) + .await; + + // Restore context from raw pointer + // SAFETY: ExecConfig has `unsafe impl Send` (see line ~169) which is safe + // because LLRT is single-threaded. The raw_ctx pointer was obtained from + // ctx.as_raw() before spawn_exit, and we're restoring it on the same + // JS runtime thread. This pattern matches llrt_timers. 
+ let ctx2 = unsafe { Ctx::from_raw(config.raw_ctx) }; + + // Invoke the callback + if let Ok(callback) = config.callback.restore(&ctx2) { + Self::invoke_exec_callback(&ctx2, callback, &result)?; + } + + // Emit exit and close events for consistency + let code = result.exit_code.unwrap_or_default().into_js(&ctx2)?; + let signal: Value = match &result.signal { + Some(s) => s.as_str().into_js(&ctx2)?, + None => rquickjs::Null.into_value(ctx2.clone()), + }; + + ChildProcess::emit_str( + This(instance2.clone()), + &ctx2, + "exit", + vec![code.clone(), signal.clone()], + false, + )?; + + ChildProcess::emit_str( + This(instance2), + &ctx2, + "close", + vec![code, signal], + false, + )?; + + Ok(()) + })?; + }, + Err(err) => { + let err_message = format!("Child process failed to spawn \"{}\". {}", command, err); + let instance3 = instance.clone(); + + ctx.spawn_exit(async move { + // SAFETY: See comment at line ~417 for full explanation. + // Same single-threaded runtime guarantees apply here. + let ctx2 = unsafe { Ctx::from_raw(config.raw_ctx) }; + + // Invoke callback with error + if let Ok(callback) = config.callback.restore(&ctx2) { + let error = Exception::from_message(ctx2.clone(), &err_message)?; + let error_obj = error.into_object(); + error_obj.set("code", "ENOENT")?; + callback.call::<_, ()>((error_obj.into_value(), "", ""))?; + } + + // Emit error event if there are listeners + if instance3.borrow().emitter.has_listener_str("error") { + let ex = Exception::from_message(ctx2.clone(), &err_message)?; + ChildProcess::emit_str( + This(instance3), + &ctx2, + "error", + vec![ex.into()], + false, + )?; + } + + Ok(()) + })?; + }, + } + + Ok(instance) + } + + /// Run the child process with output accumulation. + /// Preserves partial data on timeout by killing the process first, + /// then letting the stream readers complete naturally. 
+    async fn run_with_accumulation(
+        mut child: Child,
+        stdout: Option<ChildStdout>,
+        stderr: Option<ChildStderr>,
+        max_buffer: usize,
+        timeout_ms: Option<u64>,
+        kill_signal: String,
+        mut kill_rx: Receiver<()>,
+    ) -> AccumulatedResult {
+        // NOTE(review): these reader futures are lazy - they are not polled until
+        // the .awaits below; a child exceeding the pipe buffer may block. Verify.
+        let stdout_future = Self::read_stream(stdout);
+        let stderr_future = Self::read_stream(stderr);
+        tokio::pin!(stdout_future);
+        tokio::pin!(stderr_future);
+
+        let mut timed_out = false;
+        let mut exit_code = None;
+        let mut exit_signal: Option<String> = None;
+
+        // Set up deadline if timeout is configured
+        let deadline = timeout_ms.map(|ms| tokio::time::Instant::now() + Duration::from_millis(ms));
+
+        // Wait for process to exit, handling timeout and kill signals
+        loop {
+            tokio::select! {
+                biased;
+
+                // Handle timeout - kill process with configured signal
+                _ = async {
+                    if let Some(d) = deadline {
+                        tokio::time::sleep_until(d).await
+                    } else {
+                        std::future::pending::<()>().await
+                    }
+                }, if deadline.is_some() && !timed_out => {
+                    timed_out = true;
+                    exit_signal = Some(kill_signal.clone());
+                    Self::kill_with_signal(&mut child, &kill_signal).await;
+                }
+
+                // Handle kill from JS (.kill() method)
+                Ok(()) = kill_rx.recv() => {
+                    let _ = child.kill().await;
+                }
+
+                // Process exited
+                status = child.wait() => {
+                    if let Ok(status) = status {
+                        exit_code = status.code();
+                        #[cfg(unix)]
+                        {
+                            if let Some(sig) = status.signal() {
+                                exit_signal = signal_str_from_i32(sig).map(|s| s.to_string());
+                            }
+                        }
+                    }
+                    break;
+                }
+            }
+        }
+
+        // Now collect the data - streams will EOF after process dies.
+        // This preserves partial data even on timeout.
+ let stdout_data = stdout_future.await; + let stderr_data = stderr_future.await; + + // Check combined size + let total = stdout_data.len() + stderr_data.len(); + let max_buffer_exceeded = total > max_buffer; + + AccumulatedResult { + stdout: stdout_data, + stderr: stderr_data, + exit_code, + signal: exit_signal, + timed_out, + max_buffer_exceeded, + } + } + + /// Kill process with the specified signal + async fn kill_with_signal(child: &mut Child, signal: &str) { + #[cfg(unix)] + { + if let Some(pid) = child.id() { + // Try to send the specified signal + let sig_num = match signal { + "SIGTERM" | "15" => libc::SIGTERM, + "SIGKILL" | "9" => libc::SIGKILL, + "SIGINT" | "2" => libc::SIGINT, + "SIGHUP" | "1" => libc::SIGHUP, + "SIGQUIT" | "3" => libc::SIGQUIT, + _ => libc::SIGTERM, // Default to SIGTERM + }; + unsafe { + libc::kill(pid as i32, sig_num); + } + } + } + #[cfg(not(unix))] + { + // Windows doesn't have signals - just kill + let _ = signal; + let _ = child.kill().await; + } + } + + /// Read from a stream until EOF + async fn read_stream(reader: Option) -> Vec { + let Some(mut reader) = reader else { + return Vec::new(); + }; + + let mut buffer = Vec::new(); + let mut chunk = [0u8; 8192]; + + loop { + match reader.read(&mut chunk).await { + Ok(0) => break, // EOF + Ok(n) => buffer.extend_from_slice(&chunk[..n]), + Err(_) => break, + } + } + + buffer + } + + /// Invoke the exec callback with (error, stdout, stderr) + fn invoke_exec_callback( + ctx: &Ctx<'js>, + callback: Function<'js>, + result: &AccumulatedResult, + ) -> Result<()> { + // Build error object if needed + let error: Value<'js> = if result.max_buffer_exceeded { + let err = Exception::from_message(ctx.clone(), "stdout maxBuffer length exceeded")?; + let obj = err.into_object(); + obj.set("code", "ERR_CHILD_PROCESS_STDIO_MAXBUFFER")?; + obj.set("killed", true)?; + obj.into_value() + } else if result.timed_out { + let err = Exception::from_message(ctx.clone(), "Process timed out")?; + let obj = 
err.into_object(); + obj.set("code", "ETIMEDOUT")?; + obj.set("killed", true)?; + if let Some(sig) = &result.signal { + obj.set("signal", sig.as_str())?; + } + obj.into_value() + } else if result.exit_code.map(|c| c != 0).unwrap_or(false) { + let err = Exception::from_message(ctx.clone(), "Command failed")?; + let obj = err.into_object(); + obj.set("code", result.exit_code)?; + if let Some(sig) = &result.signal { + obj.set("signal", sig.as_str())?; + } + obj.into_value() + } else { + rquickjs::Null.into_value(ctx.clone()) + }; + + // Convert bytes to strings (UTF-8 lossy) + let stdout_str = String::from_utf8_lossy(&result.stdout).into_owned(); + let stderr_str = String::from_utf8_lossy(&result.stderr).into_owned(); + + // Call: callback(error, stdout, stderr) + callback.call::<_, ()>((error, stdout_str, stderr_str))?; + + Ok(()) + } } async fn wait_for_process( @@ -355,43 +693,18 @@ impl<'js> Emitter<'js> for ChildProcess<'js> { } } -fn spawn<'js>( +/// Core function that spawns a child process. +/// This is the shared implementation used by spawn, exec, and execFile. +/// +/// When `callback` is Some, uses accumulation mode (for exec/execFile with callback). +/// When `callback` is None, uses streaming mode (for spawn or exec/execFile without callback). 
+fn spawn_child_process<'js>( ctx: Ctx<'js>, cmd: String, - args_and_opts: Rest>, + command_args: Option>, + opts: Option>, + callback: Option>, ) -> Result>> { - let args_0 = args_and_opts.first(); - let args_1 = args_and_opts.get(1); - - let mut opts = None; - - if args_1.is_some() { - opts = args_1.and_then(|o| o.as_object()).map(|o| o.to_owned()); - } - - let mut command_args = if let Some(args_0) = args_0 { - if args_0.is_array() { - let args = args_0.clone().into_array().or_throw(&ctx)?; - let mut args_vec = Vec::with_capacity(args.len()); - for arg in args.iter() { - let arg: Value = arg?; - let arg = arg - .as_string() - .or_throw_msg(&ctx, "argument is not a string")?; - let arg = arg.to_string()?; - args_vec.push(arg); - } - Some(args_vec) - } else if args_0.is_object() { - opts = args_0.as_object().map(|o| o.to_owned()); - None - } else { - None - } - } else { - None - }; - let mut windows_verbatim_arguments = if let Some(opts) = &opts { opts.get_optional::<&str, bool>("windowsVerbatimArguments")? .unwrap_or_default() @@ -399,7 +712,8 @@ fn spawn<'js>( false }; - let cmd = if let Some(opts) = &opts { + // Handle shell option + let (cmd, command_args) = if let Some(opts) = &opts { if opts .get_optional::<&str, bool>("shell")? .unwrap_or_default() @@ -408,26 +722,18 @@ fn spawn<'js>( let shell = "cmd.exe".to_string(); #[cfg(not(windows))] let shell = "/bin/sh".to_string(); - command_args = Some(prepare_shell_args( - &shell, - &mut windows_verbatim_arguments, - cmd, - command_args, - )); - shell + let shell_args = + prepare_shell_args(&shell, &mut windows_verbatim_arguments, cmd, command_args); + (shell, Some(shell_args)) } else if let Some(shell) = opts.get_optional::<&str, String>("shell")? 
{ - command_args = Some(prepare_shell_args( - &shell, - &mut windows_verbatim_arguments, - cmd, - command_args, - )); - shell + let shell_args = + prepare_shell_args(&shell, &mut windows_verbatim_arguments, cmd, command_args); + (shell, Some(shell_args)) } else { - cmd + (cmd, command_args) } } else { - cmd + (cmd, command_args) }; let mut command = StdCommand::new(cmd.clone()); @@ -447,7 +753,7 @@ fn spawn<'js>( let mut stderr = StdioEnum::Piped; let mut detached = false; - if let Some(opts) = opts { + if let Some(opts) = &opts { #[cfg(unix)] { if let Some(gid) = opts.get_optional("gid")? { @@ -505,6 +811,12 @@ fn spawn<'js>( } } + // For callback mode, force piped stdout/stderr for accumulation + if callback.is_some() { + stdout = StdioEnum::Piped; + stderr = StdioEnum::Piped; + } + command.stdin(stdin.to_stdio()); command.stdout(stdout.to_stdio()); command.stderr(stderr.to_stdio()); @@ -521,10 +833,212 @@ fn spawn<'js>( } } - //tokio command does not have all std command features stabilized + // tokio command does not have all std command features stabilized let mut command = Command::from(command); + let spawn_result = command.spawn(); + + match callback { + Some(cb) => { + // With callback: use accumulation mode + let (max_buffer, timeout_ms, kill_signal) = extract_exec_options(opts.as_ref()); + let config = ExecConfig::new(&ctx, cb, max_buffer, timeout_ms, kill_signal); + ChildProcess::new_with_callback(ctx, cmd, spawn_result, config) + }, + None => { + // Without callback: use streaming mode + ChildProcess::new(ctx, cmd, spawn_result) + }, + } +} + +/// spawn(command[, args][, options]) +/// Spawns a new process using the given command. 
+fn spawn<'js>( + ctx: Ctx<'js>, + cmd: String, + args_and_opts: Rest>, +) -> Result>> { + let (command_args, opts) = parse_spawn_args(&ctx, &args_and_opts)?; + spawn_child_process(ctx, cmd, command_args, opts, None) +} + +/// Parse spawn's argument combinations: [args], [options] +fn parse_spawn_args<'js>( + ctx: &Ctx<'js>, + args: &Rest>, +) -> Result<(Option>, Option>)> { + let args_0 = args.first(); + let args_1 = args.get(1); + + let mut opts = None; + let mut command_args = None; + + if let Some(arg) = args_0 { + if let Some(arr) = arg.as_array() { + command_args = Some(array_to_vec_string(ctx, arr)?); + } else if let Some(o) = arg.as_object() { + opts = Some(o.clone()); + } + } + + if let Some(arg) = args_1 { + if let Some(o) = arg.as_object() { + opts = Some(o.clone()); + } + } + + Ok((command_args, opts)) +} + +/// execFile(file[, args][, options][, callback]) +/// Executes a file directly without spawning a shell (unless shell option is set). +/// This is a higher-level API that builds on spawn_child_process. +fn exec_file<'js>( + ctx: Ctx<'js>, + file: String, + rest: Rest>, +) -> Result>> { + let (command_args, opts, callback) = parse_exec_file_args(&ctx, &rest)?; + spawn_child_process(ctx, file, command_args, opts, callback) +} + +/// exec(command[, options][, callback]) +/// Executes a command in a shell. +/// This is the highest-level API: exec → execFile (with shell:true) → spawn_child_process. +fn exec<'js>( + ctx: Ctx<'js>, + command: String, + rest: Rest>, +) -> Result>> { + let (opts, callback) = parse_exec_args(&rest); + + // exec() always uses shell - ensure shell:true is set in options + let opts = ensure_shell_option(&ctx, opts)?; + + // Delegate to spawn_child_process with shell:true + // exec passes the command as a single string, no args + spawn_child_process(ctx, command, None, Some(opts), callback) +} + +/// Ensure shell option is set to true in options object. +/// Creates a new options object if none exists. 
+fn ensure_shell_option<'js>(ctx: &Ctx<'js>, opts: Option>) -> Result> { + match opts { + Some(opts) => { + // Only set shell if not already set + if opts.get::<_, Value>("shell")?.is_undefined() { + opts.set("shell", true)?; + } + Ok(opts) + }, + None => { + let opts = Object::new(ctx.clone())?; + opts.set("shell", true)?; + Ok(opts) + }, + } +} + +/// Parse exec's argument combinations: [options], [callback] +fn parse_exec_args<'js>(args: &Rest>) -> (Option>, Option>) { + let args_0 = args.first(); + let args_1 = args.get(1); + + let mut opts = None; + let mut callback = None; + + if let Some(arg) = args_0 { + if let Some(f) = arg.as_function() { + callback = Some(f.clone()); + } else if let Some(o) = arg.as_object().filter(|o| !o.is_null()) { + opts = Some(o.clone()); + } + } + + if let Some(arg) = args_1 { + if let Some(f) = arg.as_function() { + callback = Some(f.clone()); + } + } + + (opts, callback) +} + +/// Extract exec/execFile options: maxBuffer, timeout, killSignal +fn extract_exec_options(opts: Option<&Object<'_>>) -> (usize, Option, String) { + let max_buffer = opts + .and_then(|o| o.get_optional("maxBuffer").ok().flatten()) + .unwrap_or(1024 * 1024); // 1MB default + + let timeout_ms = opts.and_then(|o| o.get_optional("timeout").ok().flatten()); + + let kill_signal = opts + .and_then(|o| o.get_optional::<_, String>("killSignal").ok().flatten()) + .unwrap_or_else(|| "SIGTERM".to_string()); + + (max_buffer, timeout_ms, kill_signal) +} + +/// Parse execFile's flexible argument combinations: [args], [options], [callback] +#[allow(clippy::type_complexity)] +fn parse_exec_file_args<'js>( + ctx: &Ctx<'js>, + args: &Rest>, +) -> Result<( + Option>, + Option>, + Option>, +)> { + let args_0 = args.first(); + let args_1 = args.get(1); + let args_2 = args.get(2); + + let mut command_args = None; + let mut opts = None; + let mut callback = None; + + // args_0: Array | Object | Function + if let Some(arg) = args_0 { + if let Some(arr) = arg.as_array() { + 
command_args = Some(array_to_vec_string(ctx, arr)?); + } else if let Some(f) = arg.as_function() { + callback = Some(f.clone()); + } else if let Some(o) = arg.as_object().filter(|o| !o.is_null()) { + opts = Some(o.clone()); + } + } + + // args_1: Object | Function + if let Some(arg) = args_1 { + if let Some(f) = arg.as_function() { + callback = Some(f.clone()); + } else if let Some(o) = arg.as_object().filter(|o| !o.is_null()) { + opts = Some(o.clone()); + } + } + + // args_2: Function + if let Some(arg) = args_2 { + if let Some(f) = arg.as_function() { + callback = Some(f.clone()); + } + } - ChildProcess::new(ctx.clone(), cmd, command_args, command.spawn()) + Ok((command_args, opts, callback)) +} + +/// Convert a JS array to a Vec +fn array_to_vec_string<'js>(ctx: &Ctx<'js>, arr: &Array<'js>) -> Result> { + let mut result = Vec::with_capacity(arr.len()); + for item in arr.iter::() { + let item = item?; + let s = item + .as_string() + .or_throw_msg(ctx, "argument must be a string")? + .to_string()?; + result.push(s); + } + Ok(result) } fn str_to_stdio(ctx: &Ctx<'_>, input: &str) -> Result { @@ -563,6 +1077,8 @@ pub struct ChildProcessModule; impl ModuleDef for ChildProcessModule { fn declare(declare: &Declarations) -> Result<()> { declare.declare("spawn")?; + declare.declare("exec")?; + declare.declare("execFile")?; declare.declare("default")?; Ok(()) } @@ -576,6 +1092,8 @@ impl ModuleDef for ChildProcessModule { export_default(ctx, exports, |default| { default.set("spawn", Func::from(spawn))?; + default.set("exec", Func::from(exec))?; + default.set("execFile", Func::from(exec_file))?; Ok(()) })?; @@ -681,4 +1199,470 @@ mod tests { }) .await; } + + #[tokio::test] + async fn test_exec_file() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} 
from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + execFile("echo", ["hello"], (error, stdout, stderr) => { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "hello"); + }) + }) + .await; + } + + #[tokio::test] + async fn test_exec_file_no_args() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + execFile("pwd", (error, stdout, stderr) => { + resolve(stdout.trim().length > 0 ? "has_output" : "no_output"); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "has_output"); + }) + }) + .await; + } + + #[tokio::test] + async fn test_exec() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {exec} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + exec("echo hello", (error, stdout, stderr) => { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "hello"); + }) + }) + .await; + } + + #[tokio::test] + #[cfg(unix)] // Uses Unix-specific paths and commands + async fn test_exec_with_options() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + 
ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {exec} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + exec("pwd", { cwd: "/tmp" }, (error, stdout, stderr) => { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + // On macOS, /tmp is a symlink to /private/tmp + assert!(message == "/tmp" || message == "/private/tmp"); + }) + }) + .await; + } + + #[tokio::test] + async fn test_exec_file_error() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let result: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + execFile("false", (error, stdout, stderr) => { + if (error) { + resolve("error_received"); + } else { + resolve("no_error"); + } + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(result, "error_received"); + }) + }) + .await; + } + + #[tokio::test] + async fn test_exec_file_with_shell() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + // Use shell to run a command with shell features (globbing, pipes) + execFile("echo", ["hello", "world"], { shell: true }, (error, stdout, stderr) 
=> { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "hello world"); + }) + }) + .await; + } + + #[tokio::test] + #[cfg(unix)] // Uses Unix shell variable syntax ($VAR) + async fn test_exec_with_env() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {exec} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + exec("echo $MY_TEST_VAR", { env: { MY_TEST_VAR: "custom_value" } }, (error, stdout, stderr) => { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "custom_value"); + }) + }) + .await; + } + + #[tokio::test] + #[cfg(unix)] // Uses sh and Unix shell variable syntax + async fn test_exec_file_with_env() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let message: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + // Use shell: true to enable variable expansion + execFile("sh", ["-c", "echo $MY_VAR"], { env: { MY_VAR: "env_test" } }, (error, stdout, stderr) => { + resolve(stdout.trim()); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(message, "env_test"); + }) + }) + .await; + } + + #[tokio::test] + #[cfg(unix)] // Uses sleep and && shell operator + async fn 
test_exec_timeout_preserves_partial_data() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let result: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {exec} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + // Process that outputs data then sleeps - we should get partial data on timeout + exec("echo partial_output && sleep 10", { timeout: 100 }, (error, stdout, stderr) => { + const hasError = error !== null; + const hasPartialData = stdout.includes("partial_output"); + resolve(hasError && hasPartialData ? "timeout_with_data" : "unexpected:" + stdout); + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(result, "timeout_with_data"); + }) + }) + .await; + } + + #[tokio::test] + #[cfg(unix)] // Uses sh, yes, and head commands + async fn test_exec_file_max_buffer() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let result: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {execFile} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + // Generate output larger than maxBuffer + execFile("sh", ["-c", "yes | head -c 200"], { maxBuffer: 100 }, (error, stdout, stderr) => { + if (error && error.code === "ERR_CHILD_PROCESS_STDIO_MAXBUFFER") { + resolve("maxbuffer_error"); + } else if (error) { + resolve("other_error:" + error.code); + } else { + resolve("no_error"); + } + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(result, "maxbuffer_error"); + }) + }) + .await; + } + + 
#[tokio::test] + async fn test_exec_error_nonexistent_command() { + test_async_with(|ctx| { + Box::pin(async move { + buffer::init(&ctx).unwrap(); + + ModuleEvaluator::eval_rust::(ctx.clone(), "node:child_process") + .await + .unwrap(); + + let result: String = ModuleEvaluator::eval_js( + ctx.clone(), + "test", + r#" + import {exec} from "node:child_process"; + + let resolve = null; + const deferred = new Promise(res => { + resolve = res; + }); + + exec("this_command_does_not_exist_xyz123", (error, stdout, stderr) => { + if (error) { + resolve("error_received"); + } else { + resolve("no_error"); + } + }); + + export default await deferred; + "#, + ) + .await + .catch(&ctx) + .unwrap() + .get("default") + .unwrap(); + + assert_eq!(result, "error_received"); + }) + }) + .await; + } } diff --git a/tests/unit/child_process.test.ts b/tests/unit/child_process.test.ts index c69f21ac0c..af4c39d022 100644 --- a/tests/unit/child_process.test.ts +++ b/tests/unit/child_process.test.ts @@ -9,7 +9,7 @@ it("node:child_process should be the same as child_process", () => { expect(defaultImport).toStrictEqual(legacyImport); }); -const { spawn } = defaultImport; +const { spawn, exec, execFile } = defaultImport; describe("spawn", () => { it("should spawn a child process", (done) => { @@ -227,3 +227,205 @@ describe("spawn", () => { }); }); }); + +describe("exec", () => { + it("should execute a shell command and return stdout", (done) => { + exec("echo Hello", (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + expect(stdout.trim()).toEqual("Hello"); + expect(stderr).toEqual(""); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should execute a command with options", (done) => { + const cwd = IS_WINDOWS ? process.cwd() : "/tmp"; + const pwdCmd = IS_WINDOWS ? 
"cd" : "pwd"; + exec(pwdCmd, { cwd }, (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + if (IS_WINDOWS) { + expect(stdout.trim().toLowerCase()).toContain( + process.cwd().toLowerCase() + ); + } else { + expect(stdout.trim()).toContain("/tmp"); + } + expect(stderr).toEqual(""); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should capture stderr on command failure", (done) => { + if (process.env._VIRTUAL_ENV) { + // QEMU may handle errors differently + return done(); + } + // Use a command that writes to stderr and exits with error + const cmd = IS_WINDOWS + ? "cmd /c exit 1" + : "sh -c 'echo error >&2 && exit 1'"; + exec(cmd, (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + expect(error!.code).toEqual(1); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should handle nonexistent commands", (done) => { + if (process.env._VIRTUAL_ENV) { + // QEMU may handle errors differently + return done(); + } + exec("nonexistent_command_12345", (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should respect maxBuffer option", (done) => { + // Generate output larger than maxBuffer + const cmd = IS_WINDOWS + ? 'cmd /c "echo AAAAAAAAAA"' + : "echo AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; + exec(cmd, { maxBuffer: 10 }, (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + expect(error!.message).toContain("maxBuffer"); + done(); + } catch (err) { + done(err); + } + }); + }); +}); + +describe("execFile", () => { + it("should execute a file and return stdout", (done) => { + execFile("echo", ["Hello", "World"], (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + expect(stdout.trim()).toEqual("Hello World"); + expect(stderr).toEqual(""); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should execute a file without arguments", (done) => { + const cmd = IS_WINDOWS ? 
"cmd" : "pwd"; + const args = IS_WINDOWS ? ["/c", "cd"] : []; + execFile(cmd, args, (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + expect(stdout.length).toBeGreaterThan(0); + expect(stderr).toEqual(""); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should execute with options", (done) => { + const cwd = IS_WINDOWS ? process.cwd() : "/tmp"; + const cmd = IS_WINDOWS ? "cmd" : "pwd"; + const args = IS_WINDOWS ? ["/c", "cd"] : []; + execFile(cmd, args, { cwd }, (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + if (IS_WINDOWS) { + expect(stdout.trim().toLowerCase()).toContain( + process.cwd().toLowerCase() + ); + } else { + expect(stdout.trim()).toContain("/tmp"); + } + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should handle nonexistent executable", (done) => { + if (process.env._VIRTUAL_ENV) { + // QEMU may handle errors differently + return done(); + } + execFile("nonexistent_command_12345", [], (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should handle executable failure with exit code", (done) => { + if (process.env._VIRTUAL_ENV) { + // QEMU may handle errors differently + return done(); + } + const cmd = IS_WINDOWS ? "cmd" : "sh"; + const args = IS_WINDOWS ? 
["/c", "exit 42"] : ["-c", "exit 42"]; + execFile(cmd, args, (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + expect(error!.code).toEqual(42); + done(); + } catch (err) { + done(err); + } + }); + }); + + it("should respect maxBuffer option", (done) => { + execFile( + "echo", + ["AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"], + { maxBuffer: 10 }, + (error, stdout, stderr) => { + try { + expect(error).not.toBeNull(); + expect(error!.message).toContain("maxBuffer"); + done(); + } catch (err) { + done(err); + } + } + ); + }); + + it("should execute with shell option", (done) => { + // When shell is true, execFile should use shell to execute + const cmd = IS_WINDOWS ? "echo %PATH%" : "echo $PATH"; + execFile(cmd, [], { shell: true }, (error, stdout, stderr) => { + try { + expect(error).toBeNull(); + expect(stdout.length).toBeGreaterThan(0); + done(); + } catch (err) { + done(err); + } + }); + }); +}); diff --git a/types/child_process.d.ts b/types/child_process.d.ts index ce8e8709aa..284112abfd 100644 --- a/types/child_process.d.ts +++ b/types/child_process.d.ts @@ -444,4 +444,161 @@ declare module "child_process" { args: readonly string[], options: SpawnOptions ): ChildProcess; + + interface ExecOptions extends ProcessEnvOptions { + /** + * Current working directory of the child process. + * @default undefined + */ + cwd?: string | undefined; + /** + * Environment key-value pairs. + */ + env?: Record | undefined; + /** + * Shell to execute the command with. + * @default '/bin/sh' on Unix, process.env.ComSpec on Windows + */ + shell?: string | undefined; + /** + * In milliseconds the maximum amount of time the process is allowed to run. + * @default undefined + */ + timeout?: number | undefined; + /** + * Largest amount of data in bytes allowed on stdout or stderr. + * If exceeded, the child process is terminated and any output is truncated. 
+ * @default 1024 * 1024 (1MB)
+ */
+ maxBuffer?: number | undefined;
+ /**
+ * Signal value to be used when the spawned process will be killed by timeout or buffer overflow.
+ * @default 'SIGTERM'
+ */
+ killSignal?: QuickJS.Signals | number | undefined;
+ }
+
+ interface ExecFileOptions extends ProcessEnvOptions {
+ /**
+ * Current working directory of the child process.
+ * @default undefined
+ */
+ cwd?: string | undefined;
+ /**
+ * Environment key-value pairs.
+ */
+ env?: Record<string, string> | undefined;
+ /**
+ * Shell to execute the command with. If true, uses default shell.
+ * @default false
+ */
+ shell?: boolean | string | undefined;
+ /**
+ * In milliseconds the maximum amount of time the process is allowed to run.
+ * @default undefined
+ */
+ timeout?: number | undefined;
+ /**
+ * Largest amount of data in bytes allowed on stdout or stderr.
+ * If exceeded, the child process is terminated and any output is truncated.
+ * @default 1024 * 1024 (1MB)
+ */
+ maxBuffer?: number | undefined;
+ /**
+ * Signal value to be used when the spawned process will be killed by timeout or buffer overflow.
+ * @default 'SIGTERM'
+ */
+ killSignal?: QuickJS.Signals | number | undefined;
+ }
+
+ interface ExecException extends Error {
+ cmd?: string | undefined;
+ killed?: boolean | undefined;
+ code?: number | undefined;
+ signal?: QuickJS.Signals | undefined;
+ }
+
+ type ExecCallback = (
+ error: ExecException | null,
+ stdout: string,
+ stderr: string
+ ) => void;
+
+ type ExecFileCallback = (
+ error: ExecException | null,
+ stdout: string,
+ stderr: string
+ ) => void;
+
+ /**
+ * Spawns a shell then executes the `command` within that shell, buffering any
+ * generated output. The `command` string passed to the exec function is processed
+ * directly by the shell and special characters (vary based on shell) need to be
+ * dealt with accordingly.
+ *
+ * **Never pass unsanitized user input to this function. 
Any input containing shell** + * **metacharacters may be used to trigger arbitrary command execution.** + * + * ```js + * const { exec } = require('child_process'); + * exec('cat *.js missing_file | wc -l', (error, stdout, stderr) => { + * if (error) { + * console.error(`exec error: ${error}`); + * return; + * } + * console.log(`stdout: ${stdout}`); + * console.error(`stderr: ${stderr}`); + * }); + * ``` + * + * @param command The command to run, with space-separated arguments. + * @param callback Called with the output when process terminates. + */ + function exec(command: string, callback: ExecCallback): ChildProcess; + function exec( + command: string, + options: ExecOptions, + callback: ExecCallback + ): ChildProcess; + + /** + * The `child_process.execFile()` function is similar to `exec()` except + * that it does not spawn a shell by default. Rather, the specified executable + * `file` is spawned directly as a new process making it slightly more efficient + * than `exec()`. + * + * The same options as `exec()` are supported. Since a shell is not spawned, + * behaviors such as I/O redirection and file globbing are not supported. + * + * ```js + * const { execFile } = require('child_process'); + * const child = execFile('node', ['--version'], (error, stdout, stderr) => { + * if (error) { + * throw error; + * } + * console.log(stdout); + * }); + * ``` + * + * @param file The name or path of the executable file to run. + * @param args List of string arguments. + * @param callback Called with the output when process terminates. 
+ */ + function execFile(file: string, callback: ExecFileCallback): ChildProcess; + function execFile( + file: string, + args: readonly string[], + callback: ExecFileCallback + ): ChildProcess; + function execFile( + file: string, + options: ExecFileOptions, + callback: ExecFileCallback + ): ChildProcess; + function execFile( + file: string, + args: readonly string[], + options: ExecFileOptions, + callback: ExecFileCallback + ): ChildProcess; }