Skip to content

fix: handle launch errors early in client-runtime to prevent unnecessary timeout delays. #19

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 26, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 39 additions & 38 deletions crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,10 +85,47 @@ impl McpClient for ClientRuntime {
self.set_message_sender(sender).await;

let self_clone = Arc::clone(&self);
self_clone.initialize_request().await?;

let self_clone_err = Arc::clone(&self);

let err_task = tokio::spawn(async move {
let self_ref = &*self_clone_err;

if let IoStream::Readable(error_input) = error_io {
let mut reader = BufReader::new(error_input).lines();
loop {
tokio::select! {
should_break = self_ref.transport.is_shut_down() =>{
if should_break {
break;
}
}
line = reader.next_line() =>{
match line {
Ok(Some(error_message)) => {
self_ref
.handler
.handle_process_error(error_message, self_ref)
.await?;
}
Ok(None) => {
// end of input
break;
}
Err(e) => {
eprintln!("Error reading from std_err: {}", e);
break;
}
}
}
}
}
}
Ok::<(), McpSdkError>(())
});

// send initialize request to the MCP server
self_clone.initialize_request().await?;

let main_task = tokio::spawn(async move {
let sender = self_clone.sender().await.read().await;
let sender = sender.as_ref().ok_or(crate::error::McpSdkError::SdkError(
Expand Down Expand Up @@ -132,42 +169,6 @@ impl McpClient for ClientRuntime {
Ok::<(), McpSdkError>(())
});

let err_task = tokio::spawn(async move {
let self_ref = &*self_clone_err;

if let IoStream::Readable(error_input) = error_io {
let mut reader = BufReader::new(error_input).lines();
loop {
tokio::select! {
should_break = self_ref.transport.is_shut_down() =>{
if should_break {
break;
}
}
line = reader.next_line() =>{
match line {
Ok(Some(error_message)) => {
self_ref
.handler
.handle_process_error(error_message, self_ref)
.await?;
}
Ok(None) => {
// end of input
break;
}
Err(e) => {
eprintln!("Error reading from std_err: {}", e);
break;
}
}
}
}
}
}
Ok::<(), McpSdkError>(())
});

let mut lock = self.handlers.lock().await;
lock.push(main_task);
lock.push(err_task);
Expand Down
2 changes: 1 addition & 1 deletion crates/rust-mcp-transport/src/mcp_stream.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ impl MCPStream {
readable: Pin<Box<dyn tokio::io::AsyncRead + Send + Sync>>,
writable: Mutex<Pin<Box<dyn tokio::io::AsyncWrite + Send + Sync>>>,
error_io: IoStream,
pending_requests: Arc<Mutex<HashMap<RequestId, tokio::sync::oneshot::Sender<R>>>>,
timeout_msec: u64,
shutdown_rx: Receiver<bool>,
) -> (
Expand All @@ -44,7 +45,6 @@ impl MCPStream {
R: RPCMessage + Clone + Send + Sync + serde::de::DeserializeOwned + 'static,
{
let (tx, rx) = tokio::sync::broadcast::channel::<R>(CHANNEL_CAPACITY);
let pending_requests = Arc::new(Mutex::new(HashMap::new()));

#[allow(clippy::let_underscore_future)]
let _ = Self::spawn_reader(readable, tx, pending_requests.clone(), shutdown_rx);
Expand Down
13 changes: 10 additions & 3 deletions crates/rust-mcp-transport/src/message_dispatcher.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use async_trait::async_trait;
use rust_mcp_schema::schema_utils::{
ClientMessage, FromMessage, MCPMessage, MessageFromClient, MessageFromServer, ServerMessage,
self, ClientMessage, FromMessage, MCPMessage, MessageFromClient, MessageFromServer,
ServerMessage,
};
use rust_mcp_schema::{RequestId, RpcError};
use std::collections::HashMap;
Expand All @@ -12,7 +13,7 @@ use tokio::io::AsyncWriteExt;
use tokio::sync::oneshot;
use tokio::sync::Mutex;

use crate::error::TransportResult;
use crate::error::{TransportError, TransportResult};
use crate::utils::await_timeout;
use crate::McpDispatch;

Expand Down Expand Up @@ -146,9 +147,15 @@ impl McpDispatch<ServerMessage, MessageFromClient> for MessageDispatcher<ServerM
writable_std.flush().await?;

if let Some(rx) = rx_response {
// Wait for the response with timeout
match await_timeout(rx, Duration::from_millis(self.timeout_msec)).await {
Ok(response) => Ok(Some(response)),
Err(error) => Err(error),
Err(error) => match error {
TransportError::OneshotRecvError(_) => {
Err(schema_utils::SdkError::connection_closed().into())
}
_ => Err(error),
},
}
} else {
Ok(None)
Expand Down
35 changes: 17 additions & 18 deletions crates/rust-mcp-transport/src/stdio.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
use async_trait::async_trait;
use futures::Stream;
use rust_mcp_schema::schema_utils::{MCPMessage, RPCMessage};
use rust_mcp_schema::RequestId;
use std::collections::HashMap;
use std::pin::Pin;
use tokio::process::{Child, Command};
use std::sync::Arc;
use tokio::process::Command;
use tokio::sync::watch::Sender;
use tokio::sync::{watch, Mutex};

Expand All @@ -24,7 +26,6 @@ pub struct StdioTransport {
command: Option<String>,
args: Option<Vec<String>>,
env: Option<HashMap<String, String>>,
process: Mutex<Option<Child>>,
options: TransportOptions,
shutdown_tx: tokio::sync::RwLock<Option<Sender<bool>>>,
is_shut_down: Mutex<bool>,
Expand All @@ -49,7 +50,6 @@ impl StdioTransport {
args: None,
command: None,
env: None,
process: Mutex::new(None),
options,
shutdown_tx: tokio::sync::RwLock::new(None),
is_shut_down: Mutex::new(false),
Expand Down Expand Up @@ -81,20 +81,12 @@ impl StdioTransport {
args: Some(args),
command: Some(command.into()),
env,
process: Mutex::new(None),
options,
shutdown_tx: tokio::sync::RwLock::new(None),
is_shut_down: Mutex::new(false),
})
}

/// Sets the subprocess handle for the transport.
async fn set_process(&self, value: Child) -> TransportResult<()> {
let mut process = self.process.lock().await;
*process = Some(value);
Ok(())
}

/// Retrieves the command and arguments for launching the subprocess.
///
/// Adjusts the command based on the platform: on Windows, wraps it with `cmd.exe /c`.
Expand Down Expand Up @@ -188,22 +180,35 @@ where
.take()
.ok_or_else(|| TransportError::FromString("Unable to retrieve stderr.".into()))?;

self.set_process(process).await.unwrap();
let pending_requests: Arc<Mutex<HashMap<RequestId, tokio::sync::oneshot::Sender<R>>>> =
Arc::new(Mutex::new(HashMap::new()));
let pending_requests_clone = Arc::clone(&pending_requests);

tokio::spawn(async move {
let _ = process.wait().await;
// clean up pending requests to cancel waiting tasks
let mut pending_requests = pending_requests.lock().await;
pending_requests.clear();
});

let (stream, sender, error_stream) = MCPStream::create(
Box::pin(stdout),
Mutex::new(Box::pin(stdin)),
IoStream::Readable(Box::pin(stderr)),
pending_requests_clone,
self.options.timeout,
shutdown_rx,
);

Ok((stream, sender, error_stream))
} else {
let pending_requests: Arc<Mutex<HashMap<RequestId, tokio::sync::oneshot::Sender<R>>>> =
Arc::new(Mutex::new(HashMap::new()));
let (stream, sender, error_stream) = MCPStream::create(
Box::pin(tokio::io::stdin()),
Mutex::new(Box::pin(tokio::io::stdout())),
IoStream::Writable(Box::pin(tokio::io::stderr())),
pending_requests,
self.options.timeout,
shutdown_rx,
);
Expand Down Expand Up @@ -234,12 +239,6 @@ where
let mut lock = self.is_shut_down.lock().await;
*lock = true
}

let mut process = self.process.lock().await;
if let Some(p) = process.as_mut() {
p.kill().await?;
p.wait().await?;
}
Ok(())
}
}