From daf11d303ff4620eecc1b256f56eec6f1338298a Mon Sep 17 00:00:00 2001 From: Emmanuel Bosquet Date: Mon, 8 Jan 2024 15:06:04 +0100 Subject: [PATCH 01/11] handle backend hangup when responses is still transferring this solves an error where payload was partially transmitted Co-Authored-By: Eloi DEMOLIS --- lib/src/logs.rs | 3 +++ lib/src/protocol/kawa_h1/mod.rs | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/lib/src/logs.rs b/lib/src/logs.rs index bd676b985..cd585c83a 100644 --- a/lib/src/logs.rs +++ b/lib/src/logs.rs @@ -5,6 +5,7 @@ use time::Duration; use crate::{protocol::http::parser::Method, SessionMetrics}; +#[derive(Debug)] pub struct LogContext<'a> { pub request_id: Ulid, pub cluster_id: Option<&'a str>, @@ -92,6 +93,7 @@ impl fmt::Display for LogDuration { } } +#[derive(Debug)] pub enum Endpoint<'a> { Http { method: Option<&'a Method>, @@ -127,6 +129,7 @@ impl fmt::Display for Endpoint<'_> { } } +#[derive(Debug)] pub struct RequestRecord<'a> { pub error: Option<&'a str>, pub context: LogContext<'a>, diff --git a/lib/src/protocol/kawa_h1/mod.rs b/lib/src/protocol/kawa_h1/mod.rs index 3137cf0d9..94437eee4 100644 --- a/lib/src/protocol/kawa_h1/mod.rs +++ b/lib/src/protocol/kawa_h1/mod.rs @@ -9,7 +9,7 @@ use std::{ rc::{Rc, Weak}, }; -use kawa; +use kawa::{self, debug_kawa, ParsingPhase}; use mio::{net::TcpStream, Interest, Token}; use rusty_ulid::Ulid; use sozu_command::{ @@ -926,6 +926,7 @@ impl Http StateResult { + trace!("==============writable_default_answer"); let res = match self.status { SessionStatus::DefaultAnswer(status, ref buf, mut index) => { let len = buf.len(); @@ -1479,7 +1480,18 @@ impl Http { From c9f275eab0351c04a98c7aa55995606cf90a123a Mon Sep 17 00:00:00 2001 From: Emmanuel Bosquet Date: Mon, 8 Jan 2024 15:33:10 +0100 Subject: [PATCH 02/11] add missing access logs Co-Authored-By: Eloi DEMOLIS --- lib/src/protocol/kawa_h1/mod.rs | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/lib/src/protocol/kawa_h1/mod.rs b/lib/src/protocol/kawa_h1/mod.rs index 94437eee4..127b9eccc 100644 --- a/lib/src/protocol/kawa_h1/mod.rs +++ b/lib/src/protocol/kawa_h1/mod.rs @@ -9,7 +9,7 @@ use std::{ rc::{Rc, Weak}, }; -use kawa::{self, debug_kawa, ParsingPhase}; +use kawa; use mio::{net::TcpStream, Interest, Token}; use rusty_ulid::Ulid; use sozu_command::{ @@ -357,7 +357,7 @@ impl Http StateResult { + pub fn readable_parse(&mut self, metrics: &mut SessionMetrics) -> StateResult { trace!("==============readable_parse"); let was_initial = self.request_stream.is_initial(); let was_not_proxying = !self.request_stream.is_main_phase(); @@ -399,6 +399,7 @@ impl Http Http StateResult { trace!("==============writable_default_answer"); - let res = match self.status { + let socket_result = match self.status { SessionStatus::DefaultAnswer(status, ref buf, mut index) => { let len = buf.len(); let mut sz = 0usize; - let mut res = SocketResult::Continue; - while res == SocketResult::Continue && index < len { + let mut socket_result = SocketResult::Continue; + while socket_result == SocketResult::Continue && index < len { let (current_sz, current_res) = self.frontend_socket.socket_write(&buf[index..]); - res = current_res; + socket_result = current_res; sz += current_sz; index += current_sz; } @@ -944,7 +945,7 @@ impl Http Http return StateResult::CloseSession, }; - if res == SocketResult::Error { + if socket_result == SocketResult::Error { self.frontend_socket.write_error(); self.log_request_error( metrics, @@ -1457,7 
+1458,7 @@ impl Http StateResult { + pub fn backend_hup(&mut self, metrics: &mut SessionMetrics) -> StateResult { // there might still data we can read on the socket if self.backend_readiness.event.is_readable() && self.backend_readiness.interest.is_readable() @@ -1482,11 +1483,15 @@ impl Http Http Date: Tue, 16 Jan 2024 15:23:52 +0100 Subject: [PATCH 03/11] fix timeout issue in the CLI create Channel::set_timeout() Co-Authored-By: Eloi DEMOLIS --- bin/src/ctl/mod.rs | 3 +++ command/src/channel.rs | 52 ++++++++++++++++++++++++++++++------------ 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index e8b23f4f1..7568dbc18 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -44,6 +44,9 @@ pub fn ctl(args: cli::Args) -> anyhow::Result<()> { })?; let timeout = Duration::from_millis(args.timeout.unwrap_or(config.ctl_command_timeout)); + if !args.json { + debug!("applying timeout {:?}", timeout); + } let mut command_manager = CommandManager { channel, diff --git a/command/src/channel.rs b/command/src/channel.rs index ebd696c4e..b85788041 100644 --- a/command/src/channel.rs +++ b/command/src/channel.rs @@ -40,7 +40,9 @@ pub enum ChannelError { InvalidCharSet(String), #[error("Error deserializing message")] Serde(serde_json::error::Error), - #[error("Could not change the blocking status ef the unix stream with file descriptor {fd}: {error}")] + #[error("could not set the timeout of the unix stream with file descriptor {fd}: {error}")] + SetTimeout { fd: i32, error: String }, + #[error("Could not change the blocking status of the unix stream with file descriptor {fd}: {error}")] BlockingStatus { fd: i32, error: String }, #[error("Connection error: {0:?}")] Connection(Option), @@ -120,6 +122,22 @@ impl Channel { Ok(()) } + /// set the read_timeout of the unix stream. This works only temporary, be sure to set the timeout to None afterwards. + fn set_timeout(&mut self, timeout: Option) -> Result<(), ChannelError> { + unsafe { + let fd = self.sock.as_raw_fd(); + let stream = StdUnixStream::from_raw_fd(fd); + stream + .set_read_timeout(timeout) + .map_err(|error| ChannelError::SetTimeout { + fd, + error: error.to_string(), + })?; + let _fd = stream.into_raw_fd(); + } + Ok(()) + } + /// set the channel to be blocking pub fn blocking(&mut self) -> Result<(), ChannelError> { self.set_nonblocking(false) @@ -288,35 +306,41 @@ impl Channel { ) -> Result { let now = std::time::Instant::now(); - loop { + // set a very small timeout, to repeat the loop often + self.set_timeout(Some(Duration::from_millis(10)))?; + + let status = loop { if let Some(timeout) = timeout { if now.elapsed() >= timeout { - return Err(ChannelError::TimeoutReached(timeout)); + break Err(ChannelError::TimeoutReached(timeout)); } } match self.front_buf.data().iter().position(|&x| x == 0) { - Some(position) => return self.read_and_parse_from_front_buffer(position), + Some(position) => break self.read_and_parse_from_front_buffer(position), None => { if self.front_buf.available_space() == 0 { if self.front_buf.capacity() == self.max_buffer_size { - return Err(ChannelError::BufferFull); + break Err(ChannelError::BufferFull); } let new_size = min(self.front_buf.capacity() + 5000, self.max_buffer_size); self.front_buf.grow(new_size); } - - match self - .sock - .read(self.front_buf.space()) - .map_err(ChannelError::Read)? 
- { - 0 => return Err(ChannelError::NoByteToRead), - bytes_read => self.front_buf.fill(bytes_read), + match self.sock.read(self.front_buf.space()) { + Ok(0) => break Err(ChannelError::NoByteToRead), + Ok(bytes_read) => self.front_buf.fill(bytes_read), + Err(io_error) => match io_error.kind() { + ErrorKind::WouldBlock => continue, // ignore 10 millisecond timeouts + _ => break Err(ChannelError::Read(io_error)), + }, }; } } - } + }; + + self.set_timeout(None)?; + + status } fn read_and_parse_from_front_buffer(&mut self, position: usize) -> Result { From 60dd1c2cf8dedea1af58be6957b5f8f8113fb8f3 Mon Sep 17 00:00:00 2001 From: Emmanuel Bosquet Date: Mon, 20 Nov 2023 18:21:10 +0100 Subject: [PATCH 04/11] add no-clusters option on metrics query --- bin/src/cli.rs | 5 +++++ bin/src/ctl/command.rs | 2 ++ bin/src/ctl/mod.rs | 3 ++- command/src/command.proto | 2 ++ lib/src/metrics/local_drain.rs | 9 +++++++++ 5 files changed, 20 insertions(+), 1 deletion(-) diff --git a/bin/src/cli.rs b/bin/src/cli.rs index 28497a978..cff0293ba 100644 --- a/bin/src/cli.rs +++ b/bin/src/cli.rs @@ -220,6 +220,11 @@ pub enum MetricsCmd { // parse(try_from_str = split_slash) )] backends: Vec, + #[clap( + long = "no-clusters", + help = "get only the metrics of main process and workers (no cluster metrics)" + )] + no_clusters: bool, }, } diff --git a/bin/src/ctl/command.rs b/bin/src/ctl/command.rs index 43822c4b4..518eea513 100644 --- a/bin/src/ctl/command.rs +++ b/bin/src/ctl/command.rs @@ -176,12 +176,14 @@ impl CommandManager { metric_names: Vec, cluster_ids: Vec, backend_ids: Vec, + no_clusters: bool, ) -> Result<(), anyhow::Error> { let request: Request = RequestType::QueryMetrics(QueryMetricsOptions { list, cluster_ids, backend_ids, metric_names, + no_clusters, }) .into(); diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index 7568dbc18..ce360fa4e 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -81,7 +81,8 @@ impl CommandManager { names, clusters, backends, - } => self.get_metrics(list, refresh, names, clusters, backends), + no_clusters, + } => self.get_metrics(list, refresh, names, clusters, backends, no_clusters), _ => self.configure_metrics(cmd), }, SubCmd::Logging { level } => self.logging_filter(&level), diff --git a/command/src/command.proto b/command/src/command.proto index f69db483d..dcdc048a7 100644 --- a/command/src/command.proto +++ b/command/src/command.proto @@ -412,6 +412,8 @@ message QueryMetricsOptions { repeated string backend_ids = 3; // query only these metrics repeated string metric_names = 4; + // query only worker and main process metrics (no cluster metrics) + required bool no_clusters = 5; } // options to configure metrics collection diff --git a/lib/src/metrics/local_drain.rs b/lib/src/metrics/local_drain.rs index faf010915..ea29ef009 100644 --- a/lib/src/metrics/local_drain.rs +++ b/lib/src/metrics/local_drain.rs @@ -257,12 +257,21 @@ impl LocalDrain { cluster_ids, backend_ids, list, + no_clusters, } = options; if *list { return self.list_all_metric_names(); } + if *no_clusters { + let proxy_metrics = self.dump_proxy_metrics(metric_names); + return Ok(ContentType::WorkerMetrics(WorkerMetrics { + proxy: proxy_metrics, + clusters: BTreeMap::new(), + }).into()); + } + let worker_metrics = match (cluster_ids.is_empty(), backend_ids.is_empty()) { (false, _) => self.query_clusters(cluster_ids, metric_names)?, (true, false) => self.query_backends(backend_ids, metric_names)?, From 74df9643e212d83c7a97622d2a229a96c06b991a Mon Sep 17 00:00:00 2001 From: Eloi DEMOLIS Date: Wed, 24 Jan 
2024 18:49:14 +0100 Subject: [PATCH 05/11] Sanitize user-agent in access logs Due to how access logs are parsed inside CleverCloud, we can't have spaces in tags nor can a tag value end with a comma. This commit sanitizes the user-agent which is the only tag with client data. This is a temporary fix necessary until we make the logs binary. Signed-off-by: Eloi DEMOLIS --- lib/src/logs.rs | 68 +++++++++++++++++++-------------- lib/src/protocol/kawa_h1/mod.rs | 9 +++-- 2 files changed, 44 insertions(+), 33 deletions(-) diff --git a/lib/src/logs.rs b/lib/src/logs.rs index cd585c83a..f28bfce04 100644 --- a/lib/src/logs.rs +++ b/lib/src/logs.rs @@ -141,11 +141,11 @@ pub struct RequestRecord<'a> { pub client_rtt: Option, pub server_rtt: Option, pub metrics: &'a SessionMetrics, - pub user_agent: Option<&'a str>, + pub user_agent: Option, } impl RequestRecord<'_> { - pub fn log(&self) { + pub fn log(self) { let context = &self.context; let cluster_id = context.cluster_id; let tags = self.tags; @@ -154,7 +154,7 @@ impl RequestRecord<'_> { let session_address = self.session_address; let backend_address = self.backend_address; let endpoint = &self.endpoint; - let user_agent = &self.user_agent; + let mut user_agent = self.user_agent; let metrics = self.metrics; // let backend_response_time = metrics.backend_response_time(); @@ -195,10 +195,23 @@ impl RequestRecord<'_> { } } + let (tags, ua_sep, user_agent) = match (tags, &mut user_agent) { + (None, None) => ("-", "", ""), + (Some(tags), None) => (tags, "", ""), + (None, Some(ua)) => { + prepare_user_agent(ua); + ("", "user-agent=", ua.as_str()) + } + (Some(tags), Some(ua)) => { + prepare_user_agent(ua); + (tags, ", user-agent=", ua.as_str()) + } + }; + match self.error { None => { info_access!( - "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{} {} {}", + "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{}{} {} {}", context, session_address.as_str_or("X"), backend_address.as_str_or("X"), @@ -208,18 +221,9 @@ impl RequestRecord<'_> { LogDuration(server_rtt), metrics.bin, metrics.bout, - match user_agent { - Some(_) => tags.as_str_or(""), - None => tags.as_str_or("-"), - }, - match tags { - Some(tags) if !tags.is_empty() => user_agent - .map(|ua| format!(", user-agent={ua}")) - .unwrap_or_default(), - Some(_) | None => user_agent - .map(|ua| format!("user-agent={ua}")) - .unwrap_or_default(), - }, + tags, + ua_sep, + user_agent, protocol, endpoint ); @@ -230,7 +234,7 @@ impl RequestRecord<'_> { ); } Some(message) => error_access!( - "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{} {} {} | {}", + "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{}{} {} {} | {}", context, session_address.as_str_or("X"), backend_address.as_str_or("X"), @@ -240,18 +244,9 @@ impl RequestRecord<'_> { LogDuration(server_rtt), metrics.bin, metrics.bout, - match user_agent { - Some(_) => tags.as_str_or(""), - None => tags.as_str_or("-"), - }, - match tags { - Some(tags) if !tags.is_empty() => user_agent - .map(|ua| format!(", user-agent={ua}")) - .unwrap_or_default(), - Some(_) | None => user_agent - .map(|ua| format!("user-agent={ua}")) - .unwrap_or_default(), - }, + tags, + ua_sep, + user_agent, protocol, endpoint, message @@ -259,3 +254,18 @@ impl RequestRecord<'_> { } } } + +fn prepare_user_agent(ua: &mut String) { + let mut ua_bytes = std::mem::take(ua).into_bytes(); + for c in &mut ua_bytes { + if *c == b' ' { + *c = b'_'; + } + } + if let Some(last) = ua_bytes.last_mut() { + if *last == b',' { + *last = b'!' 
+ } + } + *ua = unsafe { String::from_utf8_unchecked(ua_bytes) }; +} diff --git a/lib/src/protocol/kawa_h1/mod.rs b/lib/src/protocol/kawa_h1/mod.rs index 127b9eccc..3d2387573 100644 --- a/lib/src/protocol/kawa_h1/mod.rs +++ b/lib/src/protocol/kawa_h1/mod.rs @@ -810,7 +810,7 @@ impl Http) { + pub fn log_request(&mut self, metrics: &SessionMetrics, message: Option<&str>) { let listener = self.listener.borrow(); let tags = self.context.authority.as_ref().and_then(|host| { let hostname = match host.split_once(':') { @@ -824,6 +824,7 @@ impl Http Some(answers.into()), }; + let user_agent = self.context.user_agent.take(); RequestRecord { error: message, context: self.log_context(), @@ -841,15 +842,15 @@ impl Http Date: Thu, 25 Jan 2024 16:11:39 +0100 Subject: [PATCH 06/11] chore: update dependencies Signed-off-by: Florentin Dubois --- Cargo.lock | 330 ++++++++++++++++++++++----------------------- bin/Cargo.toml | 22 +-- command/Cargo.toml | 20 +-- e2e/Cargo.toml | 12 +- lib/Cargo.toml | 26 ++-- 5 files changed, 203 insertions(+), 207 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9eb80f47..1e1b1e391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,9 +28,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.5" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "ascii" @@ -143,7 +143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" dependencies = [ "concurrent-queue", - "event-listener 4.0.0", + "event-listener 4.0.3", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -155,7 +155,7 @@ version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c2886ab563af5038f79ec016dd7b87947ed138b794e8dd64992962c9cca0411" dependencies = [ - "async-lock 3.2.0", + "async-lock 3.3.0", "futures-io", ] @@ -165,11 +165,11 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" dependencies = [ - "async-lock 3.2.0", + "async-lock 3.3.0", "async-task", "concurrent-queue", "fastrand 2.0.1", - "futures-lite 2.1.0", + "futures-lite 2.2.0", "slab", ] @@ -207,18 +207,18 @@ dependencies = [ [[package]] name = "async-io" -version = "2.2.2" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +checksum = "fb41eb19024a91746eba0773aa5e16036045bbf45733766661099e182ea6a744" dependencies = [ - "async-lock 3.2.0", + "async-lock 3.3.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.1.0", + "futures-lite 2.2.0", "parking", - "polling 3.3.1", - "rustix 0.38.28", + "polling 3.3.2", + "rustix 0.38.30", "slab", "tracing", "windows-sys 0.52.0", @@ -235,11 +235,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.2.0" +version = "3.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.3", "event-listener-strategy", "pin-project-lite", ] @@ -268,7 +268,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.28", + "rustix 0.38.30", "windows-sys 0.48.0", ] @@ -278,13 +278,13 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" dependencies = [ - "async-io 2.2.2", + "async-io 2.3.0", "async-lock 2.8.0", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix 0.38.28", + "rustix 0.38.30", "signal-hook-registry", "slab", "windows-sys 0.48.0", @@ -292,9 +292,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.5.0" +version = "4.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "atomic-waker" @@ -325,9 +325,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "bitflags" @@ -337,9 +337,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "block-buffer" @@ -357,11 +357,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ "async-channel 2.1.1", - "async-lock 3.2.0", + "async-lock 3.3.0", "async-task", "fastrand 2.0.1", "futures-io", - "futures-lite 2.1.0", + "futures-lite 2.2.0", "piper", "tracing", ] @@ -401,9 +401,9 @@ checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" [[package]] name = "clap" -version = "4.4.11" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" dependencies = [ "clap_builder", "clap_derive", @@ -411,9 +411,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" dependencies = [ "anstream", "anstyle", @@ -430,7 +430,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -462,9 +462,9 @@ checksum = "396de984970346b0d9e93d1415082923c679e5ae5c3ee3dcbd104f5610af126b" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -480,22 +480,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -542,9 +538,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -588,7 +584,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -648,9 +644,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "4.0.0" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", @@ -663,7 +659,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.3", "pin-project-lite", ] @@ -706,9 +702,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -721,9 +717,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -731,15 +727,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -748,9 +744,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -769,9 +765,9 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeee267a1883f7ebef3700f262d2d54de95dfaf38189015a74fdc4e0c7ad8143" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" dependencies = [ "fastrand 2.0.1", "futures-core", @@ -782,32 +778,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -833,9 +829,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -876,9 +872,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hex" @@ -888,11 +884,11 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ 
-940,9 +936,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -954,7 +950,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -979,9 +975,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1019,13 +1015,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi", - "rustix 0.38.28", - "windows-sys 0.48.0", + "rustix 0.38.30", + "windows-sys 0.52.0", ] [[package]] @@ -1065,9 +1061,9 @@ dependencies = [ [[package]] name = "kawa" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284ac2fc8ad9653f5f8ebdc0cb8e0932002b1c025bfc045cb1e9f933f6b2cb50" +checksum = "db85695c73b185d8f126377bcb663cede61f31c980cefe7bd0d711b2f949b9e0" dependencies = [ "nom", ] @@ -1080,9 +1076,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" [[package]] name = "libredox" @@ -1090,7 +1086,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "redox_syscall", ] @@ -1101,7 +1097,7 @@ version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3af92c55d7d839293953fcd0fda5ecfe93297cfde6ffbdec13b41d99c0ba6607" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "libc", "redox_syscall", ] @@ -1114,9 +1110,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -1145,9 +1141,9 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = 
"523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -1197,7 +1193,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cfg-if", "libc", "memoffset", @@ -1261,9 +1257,9 @@ checksum = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef" [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -1390,14 +1386,14 @@ dependencies = [ [[package]] name = "polling" -version = "3.3.1" +version = "3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +checksum = "545c980a3880efd47b2e262f6a4bb6daad6555cf3367aa9c4e52895f69537a41" dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.28", + "rustix 0.38.30", "tracing", "windows-sys 0.52.0", ] @@ -1431,12 +1427,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1454,9 +1450,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -1488,7 +1484,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.41", + "syn 2.0.48", "tempfile", "which", ] @@ -1503,7 +1499,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1528,9 +1524,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -1593,9 +1589,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", @@ -1605,9 +1601,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" dependencies = [ "aho-corasick", "memchr", @@ -1665,14 +1661,14 @@ dependencies = [ [[package]] name = "rustix" -version = 
"0.38.28" +version = "0.38.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys 0.4.12", + "linux-raw-sys 0.4.13", "windows-sys 0.52.0", ] @@ -1690,14 +1686,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6b63262c9fcac8659abfaa96cac103d28166d3ff3eaf8f412e19f3ae9e5a48" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log 0.4.20", "ring", "rustls-pki-types", - "rustls-webpki 0.102.0", + "rustls-webpki 0.102.1", "subtle", "zeroize", ] @@ -1714,9 +1710,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" +checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" [[package]] name = "rustls-webpki" @@ -1730,9 +1726,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.0" +version = "0.102.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" dependencies = [ "ring", "rustls-pki-types", @@ -1780,29 +1776,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.195" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" dependencies = [ "itoa", "ryu", @@ -1811,18 +1807,18 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] [[package]] name = "serial_test" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" dependencies = [ "dashmap", "futures", @@ -1834,13 +1830,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "2.0.0" +version = "3.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1874,9 +1870,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "smol" @@ -2008,7 +2004,7 @@ dependencies = [ "quickcheck", "rand", "regex", - "rustls 0.22.1", + "rustls 0.22.2", "rustls-pemfile", "rusty_ulid", "serial_test", @@ -2052,9 +2048,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -2075,15 +2071,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand 2.0.1", "redox_syscall", - "rustix 0.38.28", - "windows-sys 0.48.0", + "rustix 0.38.30", + "windows-sys 0.52.0", ] [[package]] @@ -2099,9 +2095,9 @@ dependencies = [ [[package]] name = "termion" -version = "2.0.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4648c7def6f2043b2568617b9f9b75eae88ca185dbc1f1fda30e95a85d49d7d" +checksum = "417813675a504dfbbf21bfde32c03e5bf9f2413999962b479023c02848c1c7a5" dependencies = [ "libc", "libredox 0.0.2", @@ -2111,29 +2107,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -2151,9 +2147,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ 
-2187,9 +2183,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "libc", @@ -2289,9 +2285,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -2374,7 +2370,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.28", + "rustix 0.38.30", ] [[package]] @@ -2533,9 +2529,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.28" +version = "0.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c830786f7720c2fd27a1a0e27a709dbd3c4d009b56d098fc742d4f4eab91fe2" +checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" dependencies = [ "memchr", ] diff --git a/bin/Cargo.toml b/bin/Cargo.toml index 3c7cbe438..2e011f17b 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -23,27 +23,27 @@ include = [ ] [dependencies] -anyhow = "^1.0.75" -async-dup = "^1.2.2" +anyhow = "^1.0.79" +async-dup = "^1.2.4" async-io = "^1.13.0" -clap = { version = "^4.4.6", features = ["derive"] } -futures = "^0.3.28" +clap = { version = "^4.4.18", features = ["derive"] } +futures = "^0.3.30" futures-lite = "^1.13.0" hex = "^0.4.3" jemallocator = { version = "^0.5.4", optional = true } -libc = "^0.2.149" +libc = "^0.2.152" log = "^0.4.20" -mio = { version = "^0.8.8", features = ["os-poll", "net"] } +mio = { version = "^0.8.10", features = ["os-poll", "net"] } nix = { version = "^0.27.1", features = ["signal", "fs"] } nom = "^7.1.3" paw = "^1.0.0" -serde = { version = "^1.0.188", features = ["derive"] } -serde_json = "^1.0.107" +serde = { version = "^1.0.195", features = ["derive"] } +serde_json = "^1.0.111" time = "^0.3.29" -regex = "^1.10.0" +regex = "^1.10.3" smol = "^1.3.0" -tempfile = "^3.8.0" -termion = "^2.0.1" +tempfile = "^3.9.0" +termion = "^3.0.0" sozu-command-lib = { path = "../command", version = "^0.15.18" } sozu-lib = { path = "../lib", version = "^0.15.18" } diff --git a/command/Cargo.toml b/command/Cargo.toml index 3af52d58a..c8a5b1eb3 100644 --- a/command/Cargo.toml +++ b/command/Cargo.toml @@ -28,24 +28,24 @@ include = [ [dependencies] hex = "^0.4.3" -libc = "^0.2.149" +libc = "^0.2.152" log = "^0.4.20" -time = "^0.3.29" -toml = "^0.8.2" -memchr = "^2.6.4" -mio = { version = "^0.8.8", features = ["os-poll", "net"] } +time = "^0.3.31" +toml = "^0.8.8" +memchr = "^2.7.1" +mio = { version = "^0.8.10", features = ["os-poll", "net"] } nix = { version = "^0.27.1", features = ["socket", "uio"] } nom = "^7.1.3" -prost = "^0.12.1" +prost = "^0.12.3" rand = "^0.8.5" -serde = { version = "^1.0.188", features = ["derive"] } -serde_json = "^1.0.107" +serde = { version = "^1.0.195", features = ["derive"] } +serde_json = "^1.0.111" sha2 = "^0.10.8" trailer = "^0.1.2" prettytable-rs = { version = "^0.10.0", default-features = false } pool = "^0.1.4" poule = "^0.3.2" -thiserror = 
"^1.0.49" +thiserror = "^1.0.56" x509-parser = "^0.15.1" [features] @@ -57,4 +57,4 @@ logs-trace = [] travis-ci = { repository = "sozu-proxy/sozu" } [build-dependencies] -prost-build = { version = "^0.12.1" } +prost-build = { version = "^0.12.3" } diff --git a/e2e/Cargo.toml b/e2e/Cargo.toml index 4397d87e5..b8e7dcb96 100644 --- a/e2e/Cargo.toml +++ b/e2e/Cargo.toml @@ -5,14 +5,14 @@ rust-version = "1.70.0" edition = "2021" [dependencies] -futures = "^0.3.28" +futures = "^0.3.30" hyper = { version = "^0.14.27", features = ["client", "http1"] } -hyper-rustls = { version = "^0.24.1", default-features = false, features = ["webpki-tokio", "http1", "tls12", "logging"] } -libc = "^0.2.149" -mio = "^0.8.8" +hyper-rustls = { version = "^0.24.2", default-features = false, features = ["webpki-tokio", "http1", "tls12", "logging"] } +libc = "^0.2.152" +mio = "^0.8.10" rustls = { version = "^0.21.10", features = ["dangerous_configuration"] } -time = "^0.3.29" -tokio = { version = "1.33.0", features = ["net", "rt-multi-thread"] } +time = "^0.3.31" +tokio = { version = "1.35.1", features = ["net", "rt-multi-thread"] } sozu-command-lib = { path = "../command", version = "^0.15.18" } sozu-lib = { path = "../lib", version = "^0.15.18" } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 4aa94e312..b504fefeb 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -29,36 +29,36 @@ include = [ ] [dependencies] -anyhow = "^1.0.75" +anyhow = "^1.0.79" cookie-factory = "^0.3.2" -hdrhistogram = "^7.5.2" +hdrhistogram = "^7.5.4" hex = "^0.4.3" hpack = "^0.3.0" -idna = "^0.4.0" +idna = "^0.5.0" kawa = { version = "^0.6.4", default-features = false } -libc = "^0.2.149" -memchr = "^2.6.4" -mio = { version = "^0.8.8", features = ["os-poll", "os-ext", "net"] } +libc = "^0.2.152" +memchr = "^2.7.1" +mio = { version = "^0.8.10", features = ["os-poll", "os-ext", "net"] } nom = { version = "^7.1.3", default-features = true, features = ["std"] } poule = "^0.3.2" rand = "^0.8.5" -regex = "^1.10.0" -rustls = "^0.22.1" +regex = "^1.10.3" +rustls = "^0.22.2" rustls-pemfile = "^2.0.0" rusty_ulid = "^2.0.0" sha2 = "^0.10.8" slab = "^0.4.9" -socket2 = { version = "^0.5.4", features = ["all"] } -thiserror = "^1.0.49" -time = "^0.3.29" -once_cell = "1.18.0" +socket2 = { version = "^0.5.5", features = ["all"] } +thiserror = "^1.0.56" +time = "^0.3.31" +once_cell = "1.19.0" sozu-command-lib = { path = "../command", version = "^0.15.18" } [dev-dependencies] quickcheck = "^1.0.3" rand = "^0.8.5" -serial_test = "^2.0.0" +serial_test = "^3.0.0" tiny_http = "^0.12.0" [features] From 8b42693124c3cd6f69d31e3b024003275d2e2959 Mon Sep 17 00:00:00 2001 From: Florentin Dubois Date: Thu, 25 Jan 2024 16:16:16 +0100 Subject: [PATCH 07/11] release: v0.15.19 Signed-off-by: Florentin Dubois --- Cargo.lock | 8 ++++---- bin/Cargo.toml | 6 +++--- command/Cargo.toml | 2 +- e2e/Cargo.toml | 6 +++--- lib/Cargo.toml | 4 ++-- os-build/archlinux/PKGBUILD | 2 +- os-build/linux-rpm/sozu.spec | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e1b1e391..d9218f28d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1913,7 +1913,7 @@ dependencies = [ [[package]] name = "sozu" -version = "0.15.18" +version = "0.15.19" dependencies = [ "anyhow", "async-dup", @@ -1943,7 +1943,7 @@ dependencies = [ [[package]] name = "sozu-command-lib" -version = "0.15.18" +version = "0.15.19" dependencies = [ "hex", "libc", @@ -1970,7 +1970,7 @@ dependencies = [ [[package]] name = "sozu-e2e" -version = "0.15.18" +version = "0.15.19" dependencies = 
[ "futures", "hyper", @@ -1986,7 +1986,7 @@ dependencies = [ [[package]] name = "sozu-lib" -version = "0.15.18" +version = "0.15.19" dependencies = [ "anyhow", "cookie-factory", diff --git a/bin/Cargo.toml b/bin/Cargo.toml index 2e011f17b..317b61f3a 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -5,7 +5,7 @@ repository = "https://github.com/sozu-proxy/sozu" readme = "README.md" documentation = "https://docs.rs/sozu" homepage = "https://sozu.io" -version = "0.15.18" +version = "0.15.19" license = "AGPL-3.0" authors = [ "Geoffroy Couprie ", @@ -45,8 +45,8 @@ smol = "^1.3.0" tempfile = "^3.9.0" termion = "^3.0.0" -sozu-command-lib = { path = "../command", version = "^0.15.18" } -sozu-lib = { path = "../lib", version = "^0.15.18" } +sozu-command-lib = { path = "../command", version = "^0.15.19" } +sozu-lib = { path = "../lib", version = "^0.15.19" } [target.'cfg(target_os="linux")'.dependencies] num_cpus = "^1.16.0" diff --git a/command/Cargo.toml b/command/Cargo.toml index c8a5b1eb3..c2a428f52 100644 --- a/command/Cargo.toml +++ b/command/Cargo.toml @@ -5,7 +5,7 @@ repository = "https://github.com/sozu-proxy/sozu" readme = "README.md" documentation = "https://docs.rs/sozu-command-lib" homepage = "https://sozu.io" -version = "0.15.18" +version = "0.15.19" license = "LGPL-3.0" authors = [ "Geoffroy Couprie ", diff --git a/e2e/Cargo.toml b/e2e/Cargo.toml index b8e7dcb96..9850f54e2 100644 --- a/e2e/Cargo.toml +++ b/e2e/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sozu-e2e" -version = "0.15.18" +version = "0.15.19" rust-version = "1.70.0" edition = "2021" @@ -14,5 +14,5 @@ rustls = { version = "^0.21.10", features = ["dangerous_configuration"] } time = "^0.3.31" tokio = { version = "1.35.1", features = ["net", "rt-multi-thread"] } -sozu-command-lib = { path = "../command", version = "^0.15.18" } -sozu-lib = { path = "../lib", version = "^0.15.18" } +sozu-command-lib = { path = "../command", version = "^0.15.19" } +sozu-lib = { path = "../lib", version = "^0.15.19" } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index b504fefeb..ffd3f5555 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -5,7 +5,7 @@ repository = "https://github.com/sozu-proxy/sozu" readme = "README.md" documentation = "https://docs.rs/sozu-lib" homepage = "https://sozu.io" -version = "0.15.18" +version = "0.15.19" license = "AGPL-3.0" authors = [ "Clément Delafargue ", @@ -53,7 +53,7 @@ thiserror = "^1.0.56" time = "^0.3.31" once_cell = "1.19.0" -sozu-command-lib = { path = "../command", version = "^0.15.18" } +sozu-command-lib = { path = "../command", version = "^0.15.19" } [dev-dependencies] quickcheck = "^1.0.3" diff --git a/os-build/archlinux/PKGBUILD b/os-build/archlinux/PKGBUILD index 4534e5fe2..a046afa91 100644 --- a/os-build/archlinux/PKGBUILD +++ b/os-build/archlinux/PKGBUILD @@ -1,7 +1,7 @@ # Maintainer: Jan-Erik Rediger pkgname=sozu-git -pkgver=0.15.18 +pkgver=0.15.19 pkgrel=1 pkgdesc="HTTP reverse proxy, configurable at runtime, fast and safe, built in Rust" arch=('i686' 'x86_64') diff --git a/os-build/linux-rpm/sozu.spec b/os-build/linux-rpm/sozu.spec index 6f42ebf9b..6c6cd3dfe 100755 --- a/os-build/linux-rpm/sozu.spec +++ b/os-build/linux-rpm/sozu.spec @@ -6,7 +6,7 @@ Summary: A lightweight, fast, always-up reverse proxy server. 
Name: sozu -Version: 0.15.18 +Version: 0.15.19 Release: 1%{?dist} Epoch: 1 License: AGPL-3.0 From 8d2f0d84c3eaa460b4ba27bcf70483421659e4fb Mon Sep 17 00:00:00 2001 From: Florentin Dubois Date: Thu, 25 Jan 2024 17:04:20 +0100 Subject: [PATCH 08/11] doc(changelog): add 0.15.19 entry Signed-off-by: Florentin Dubois --- CHANGELOG.md | 149 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 108 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2e91d5dd..684699ced 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,72 @@ See milestone [`v0.16.0`](https://github.com/sozu-proxy/sozu/projects/3?card_filter_query=milestone%3Av0.16.0) +## 0.15.19 - 2024-01-25 + +> This changelog merges modifications between 0.15.15 to 0.15.19 + +- We have reduced logging level and enhanced few logs, update the logger to better track issues that may occur, see [`582ab5b`](https://github.com/sozu-proxy/sozu/commit/582ab5be830684d416e1813d2d84c87456254a5a), [`17020fb`](https://github.com/sozu-proxy/sozu/commit/17020fb4032cf5f220075617c9b31a017df02722), [`04d3105`](https://github.com/sozu-proxy/sozu/commit/04d3105cfab506fa29467e1365abf31239a88c6d), [`730f0c3`](https://github.com/sozu-proxy/sozu/commit/730f0c329917a1da9a09d0dfcbc3799e9a2288d5), [`c887666`](https://github.com/sozu-proxy/sozu/commit/c88766694ff10a5ac9f1d7f17e7f7cb0ec919ff6), [`ef6e99a`](https://github.com/sozu-proxy/sozu/commit/ef6e99ad46cf302fa6a5fa9b67cc6908f3561d3b), [`63e76c7`](https://github.com/sozu-proxy/sozu/commit/63e76c7d1da2aeaa0ab8565772e88a089f0c36da), [`3c6ef35`](https://github.com/sozu-proxy/sozu/commit/3c6ef359d16d04e07b59df1878b9998f1f31205e), [`4d1500a`](https://github.com/sozu-proxy/sozu/commit/4d1500a0a5b70e4460e09a36aa27d8063dc7937e), [`72bfab9`](https://github.com/sozu-proxy/sozu/commit/72bfab997d4df991133d73c0ca1e9b7e70269385), [`18ddee3`](https://github.com/sozu-proxy/sozu/commit/18ddee36828e11ad80ca51c4b18fc515a8c7ef2c), [`b455bbf`](https://github.com/sozu-proxy/sozu/commit/b455bbfa78c7d1bb33f8f40492e6c331ff16eea2) and [`d864012`](https://github.com/sozu-proxy/sozu/commit/d864012ad6cdadbf94630c7fd195095968a6b17d). +- We have implemented the flag `--json` on every query command of Sōzu to be able to use it with software like `jq`, see [`95de156`](https://github.com/sozu-proxy/sozu/commit/95de156c533a67dde6e7135bf0e5bf96b7ea4cb6), [`0e62ff3`](https://github.com/sozu-proxy/sozu/commit/0e62ff3ca80bbb4b78c533ef9723b9fc19e8ce66) and [`822dcb9`](https://github.com/sozu-proxy/sozu/commit/822dcb9530419693c7bc6997a36576520a0e36e1). +- We have fixed behaviors when parsing HTTP 1.1 (mainly pipelining or streaming issues), see , [`58a7f03`](https://github.com/sozu-proxy/sozu/commit/58a7f03feac0e8ca23fac6529734f9ded14e724b), [`ae8c66d`](https://github.com/sozu-proxy/sozu/commit/ae8c66d9f12bae8d2e6aaa5071d961734ae2d446), [`6bd2d85`](https://github.com/sozu-proxy/sozu/commit/6bd2d85833ea9ef35ed14807bffe4afffcd6806d), [`707fbf3`](https://github.com/sozu-proxy/sozu/commit/707fbf3f168400a76828a9b348e2bb226609724a), [`1cb4d53`](https://github.com/sozu-proxy/sozu/commit/1cb4d53a162f8e5efbdc18937db9d7dddb4a2933) and [`1710f8a`](https://github.com/sozu-proxy/sozu/commit/1710f8a7f1fa4673676f2045a5cad6d3e89d194b). 
+ +### Changelog + +#### ➕ Added + +- [ [`72bfab9`](https://github.com/sozu-proxy/sozu/commit/72bfab997d4df991133d73c0ca1e9b7e70269385) ] setup logging in accept_clients() [`Emmanuel Bosquet`] (`2023-12-13`) +- [ [`18ddee3`](https://github.com/sozu-proxy/sozu/commit/18ddee36828e11ad80ca51c4b18fc515a8c7ef2c) ] add missing access logs [`Emmanuel Bosquet`] (`2024-01-08`) +- [ [`822dcb9`](https://github.com/sozu-proxy/sozu/commit/822dcb9530419693c7bc6997a36576520a0e36e1) ] CLI: all responses are displayable in JSON [`Emmanuel Bosquet`] (`2023-12-06`) +- [ [`1788fac`](https://github.com/sozu-proxy/sozu/commit/1788faca0193f4311f6e1def45b3cc28ab401bce) ] add remove_backend test in state module [`Emmanuel Bosquet`] (`2023-12-06`) +- [ [`161ca05`](https://github.com/sozu-proxy/sozu/commit/161ca051202f9edff4bdf88ce90c3dc89061d22f) ] introduce optional worker_timeout [`Emmanuel Bosquet`] (`2023-11-23`) +- [ [`b754391`](https://github.com/sozu-proxy/sozu/commit/b754391b6a148202e2622595a55b231103f8730e) ] create ConfigState::write_requests_to_file [`Emmanuel Bosquet`] (`2023-11-27`) + +#### ⛑️ Fixed + +- [ [`1cb4d53`](https://github.com/sozu-proxy/sozu/commit/1cb4d53a162f8e5efbdc18937db9d7dddb4a2933) ] handle backend hangup when responses is still transferring [`Emmanuel Bosquet`] (`2024-01-08`) +- [ [`1710f8a`](https://github.com/sozu-proxy/sozu/commit/1710f8a7f1fa4673676f2045a5cad6d3e89d194b) ] Fix TCP connection hanging on backend connection error [`Eloi DEMOLIS`] (`2024-01-23`) +- [ [`707fbf3`](https://github.com/sozu-proxy/sozu/commit/707fbf3f168400a76828a9b348e2bb226609724a) ] Update TCP states to use SessionResult when possible [`Eloi DEMOLIS`] (`2024-01-24`) +- [ [`6bd2d85`](https://github.com/sozu-proxy/sozu/commit/6bd2d85833ea9ef35ed14807bffe4afffcd6806d) ] fix(sozu): reset storage buffers on keep-alive requests [`Florentin Dubois`] (`2023-12-07`) +- [ [`bb1aa11`](https://github.com/sozu-proxy/sozu/commit/bb1aa112789ff7432a9a30eee36f8de0c5585055) ] fix(https): panic on failed https upgrade into wss [`Florentin Dubois`] (`2023-12-13`) +- [ [`ae8c66d`](https://github.com/sozu-proxy/sozu/commit/ae8c66d9f12bae8d2e6aaa5071d961734ae2d446) ] fix(http): panic on http upgrade into websocket [`Florentin Dubois`] (`2023-12-13`) +- [ [`58a7f03`](https://github.com/sozu-proxy/sozu/commit/58a7f03feac0e8ca23fac6529734f9ded14e724b) ] Fix: WouldBlock in SocketHandler::socket_write breaks properly [`Eloi DEMOLIS`] (`2023-11-23`) +- [ [`17020fb`](https://github.com/sozu-proxy/sozu/commit/17020fb4032cf5f220075617c9b31a017df02722) ] Fix panic in view [`Eloi DEMOLIS`] (`2023-11-23`) +- [ [`0d82323`](https://github.com/sozu-proxy/sozu/commit/0d8232317ba6cd84ac053038e47bd6f260107904) ] fix worker status command [`Emmanuel Bosquet`] (`2023-11-23`) +- [ [`582ab5b`](https://github.com/sozu-proxy/sozu/commit/582ab5be830684d416e1813d2d84c87456254a5a) ] Do not set RUST_LOG on logger setup [`Eloi DEMOLIS`] (`2023-12-07`) +- [ [`04d3105`](https://github.com/sozu-proxy/sozu/commit/04d3105cfab506fa29467e1365abf31239a88c6d) ] Sanitize user-agent in access logs [`Eloi DEMOLIS`] (`2024-01-24`) +- [ [`10f5433`](https://github.com/sozu-proxy/sozu/commit/10f54339f6301fa6b3cc365d8b6206513a44ddc9) ] fix timeout issue in the CLI [`Emmanuel Bosquet`] (`2024-01-24`) + +#### ✍️ Changed + +- [ [`23d8171`](https://github.com/sozu-proxy/sozu/commit/23d81715d0e19b09ef035b46d441cbc59f96382d) ] update rustls to 0.22.1 [`Emmanuel Bosquet`] (`2023-12-14`) +- [ 
[`b7ef38f`](https://github.com/sozu-proxy/sozu/commit/b7ef38f3784c3321ee13a8faa75f8485a214fc1a) ] add no-clusters option on metrics query [`Emmanuel Bosquet`] (`2024-01-24`) +- [ [`98b5783`](https://github.com/sozu-proxy/sozu/commit/98b5783b096f9f5d244a2add2e294a772b7fd848) ] chore: update dependencies [`Florentin Dubois`] (`2024-01-25`) +- [ [`3a4e4fd`](https://github.com/sozu-proxy/sozu/commit/3a4e4fd93d5451c43f822348e933af1894a2e917) ] pass Vec instead of ConfigState to new worker [`Emmanuel Bosquet`] (`2023-11-27`) +- [ [`b455bbf`](https://github.com/sozu-proxy/sozu/commit/b455bbfa78c7d1bb33f8f40492e6c331ff16eea2) ] Add SNI and peer address on handshake error logs [`Eloi DEMOLIS`] (`2023-11-28`) +- [ [`d864012`](https://github.com/sozu-proxy/sozu/commit/d864012ad6cdadbf94630c7fd195095968a6b17d) ] remove main logger [`Emmanuel Bosquet`] (`2023-12-01`) +- [ [`505d134`](https://github.com/sozu-proxy/sozu/commit/505d134ce2a1725250b56f24a8649f35694f859b) ] remove unused dependencies [`Emmanuel Bosquet`] (`2023-12-04`) +- [ [`0e62ff3`](https://github.com/sozu-proxy/sozu/commit/0e62ff3ca80bbb4b78c533ef9723b9fc19e8ce66) ] refactor cli display by creating Response::display [`Emmanuel Bosquet`] (`2023-12-06`) +- [ [`95de156`](https://github.com/sozu-proxy/sozu/commit/95de156c533a67dde6e7135bf0e5bf96b7ea4cb6) ] display no other lines than JSON [`Emmanuel Bosquet`] (`2023-12-06`) +- [ [`86303a2`](https://github.com/sozu-proxy/sozu/commit/86303a2183a555c5c5c55d08acd3310ceeff0a94) ] ConfigState::cluster_state return Option [`Emmanuel Bosquet`] (`2023-12-06`) +- [ [`4d1500a`](https://github.com/sozu-proxy/sozu/commit/4d1500a0a5b70e4460e09a36aa27d8063dc7937e) ] chore: reduce verbosity of a few logs [`Florentin Dubois`] (`2023-12-07`) +- [ [`3c6ef35`](https://github.com/sozu-proxy/sozu/commit/3c6ef359d16d04e07b59df1878b9998f1f31205e) ] Better logging for parsing errors [`Eloi DEMOLIS`] (`2023-12-07`) +- [ [`63e76c7`](https://github.com/sozu-proxy/sozu/commit/63e76c7d1da2aeaa0ab8565772e88a089f0c36da) ] chore: reduce logging level [`Florentin Dubois`] (`2023-12-08`) +- [ [`0f0ed1f`](https://github.com/sozu-proxy/sozu/commit/0f0ed1f80c37eb2ff0c2611e3d0654f9b7e418a9) ] workers return only one response when dispatching a request [`Emmanuel Bosquet`] (`2023-12-12`) +- [ [`ef6e99a`](https://github.com/sozu-proxy/sozu/commit/ef6e99ad46cf302fa6a5fa9b67cc6908f3561d3b) ] chore(http,https): update warning message with frontend token [`Florentin Dubois`] (`2023-12-13`) +- [ [`32d8e3a`](https://github.com/sozu-proxy/sozu/commit/32d8e3ac91025c764a27e54b461c717ae036bddf) ] chore: update rustls to 0.21.10 [`Florentin Dubois`] (`2023-12-13`) +- [ [`c887666`](https://github.com/sozu-proxy/sozu/commit/c88766694ff10a5ac9f1d7f17e7f7cb0ec919ff6) ] better logging of back error [`Emmanuel Bosquet`] (`2023-11-22`) +- [ [`4a444b1`](https://github.com/sozu-proxy/sozu/commit/4a444b14df4d09d75c6cad37a7c1235a86248d5a) ] Mutualize MAX_LOOP_ITERATIONS in config [`Eloi DEMOLIS`] (`2023-11-23`) +- [ [`730f0c3`](https://github.com/sozu-proxy/sozu/commit/730f0c329917a1da9a09d0dfcbc3799e9a2288d5) ] Adjust logging level [`Eloi DEMOLIS`] (`2023-11-23`) + +### 🥹 Contributors +* @keksoj +* @FlorentinDUBOIS +* @Wonshtrum + +**Full Changelog**: https://github.com/sozu-proxy/sozu/compare/0.15.15...0.15.19 + ## 0.15.15 - 2023-11-15 -> This changelog merge all modifications between versions 0.15.13 and 0.15.15 +> This changelog merges all modifications between versions 0.15.13 and 0.15.15 - Since the deployment of the version 0.15.x at Clever 
Cloud, we have seen some performance issues around tls handshake and we made several efforts to dig in and fix them, see [`8364454`](https://github.com/sozu-proxy/sozu/commit/8364454da2ac4df3ea8fae517f619431ac0c068e) and [`92a277c`](https://github.com/sozu-proxy/sozu/commit/92a277c79fa0d319a0f8ad1f192d62b72ffd52a1). - We have fix a bug when we replace a tls certificate that resolve the old onem once replaced, see [`50afe7a`](https://github.com/sozu-proxy/sozu/commit/50afe7aa0e33b5d583a301de40f17772eb72c213) @@ -18,37 +81,39 @@ See milestone [`v0.16.0`](https://github.com/sozu-proxy/sozu/projects/3?card_fil #### 🚀 Performance -- [ [`8364454`](https://github.com/sozu-proxy/sozu/commit/8364454da2ac4df3ea8fae517f619431ac0c068e) ] Use rustls::Writer::write_vectored to reduce writev syscalls [`Eloi DEMOLIS`] (`2023-11-08`) -- [ [`92a277c`](https://github.com/sozu-proxy/sozu/commit/92a277c79fa0d319a0f8ad1f192d62b72ffd52a1) ] store certificates in parsed form in CertificateResolver [`Eloi DEMOLIS`] (`2023-11-14`) +- [ [`8364454`](https://github.com/sozu-proxy/sozu/commit/8364454da2ac4df3ea8fae517f619431ac0c068e) ] Use rustls::Writer::write_vectored to reduce writev syscalls [`Eloi DEMOLIS`] (`2023-11-08`) +- [ [`92a277c`](https://github.com/sozu-proxy/sozu/commit/92a277c79fa0d319a0f8ad1f192d62b72ffd52a1) ] store certificates in parsed form in CertificateResolver [`Eloi DEMOLIS`] (`2023-11-14`) #### ⛑️ Fixed -- [ [`50afe7a`](https://github.com/sozu-proxy/sozu/commit/50afe7aa0e33b5d583a301de40f17772eb72c213) ] fix(tls): certificate replacement and remove is still-in-use security [`Florentin Dubois`] (`2023-11-14`) +- [ [`50afe7a`](https://github.com/sozu-proxy/sozu/commit/50afe7aa0e33b5d583a301de40f17772eb72c213) ] fix(tls): certificate replacement and remove is still-in-use security [`Florentin Dubois`] (`2023-11-14`) #### ✍️ Changed -- [ [`0c3c129`](https://github.com/sozu-proxy/sozu/commit/0c3c129647baae1f0972c7f8af78cbb1200dd78e) ] make send_tls13_tickets configurable [`Emmanuel Bosquet`] (`2023-11-09`) -- [ [`1406954`](https://github.com/sozu-proxy/sozu/commit/140695475a38afa6f461a82d46b19fb35778b4e9) ] Remove rustls backpressuring flag [`Eloi DEMOLIS`] (`2023-11-08`) -- [ [`9b29dcf`](https://github.com/sozu-proxy/sozu/commit/9b29dcfa98c95626f641013a0c7615529505e0f2) ] proper logging of RouterError::RouteNotFound [`Emmanuel Bosquet`] (`2023-11-13`) -- [ [`af5ea00`](https://github.com/sozu-proxy/sozu/commit/af5ea0025eeed64c8ccfafa8387f0a1a4aef8d88) ] distribution(systemd): set start limit interval and burst [`Florentin Dubois`] (`2023-11-14`) -- [ [`cc12789`](https://github.com/sozu-proxy/sozu/commit/cc12789f4516d217fb15a7d8b8dd7b5848fc211d) ] comments and renaming in lib::tls [`Emmanuel Bosquet`] (`2023-11-14`) +- [ [`0c3c129`](https://github.com/sozu-proxy/sozu/commit/0c3c129647baae1f0972c7f8af78cbb1200dd78e) ] make send_tls13_tickets configurable [`Emmanuel Bosquet`] (`2023-11-09`) +- [ [`1406954`](https://github.com/sozu-proxy/sozu/commit/140695475a38afa6f461a82d46b19fb35778b4e9) ] Remove rustls backpressuring flag [`Eloi DEMOLIS`] (`2023-11-08`) +- [ [`9b29dcf`](https://github.com/sozu-proxy/sozu/commit/9b29dcfa98c95626f641013a0c7615529505e0f2) ] proper logging of RouterError::RouteNotFound [`Emmanuel Bosquet`] (`2023-11-13`) +- [ [`af5ea00`](https://github.com/sozu-proxy/sozu/commit/af5ea0025eeed64c8ccfafa8387f0a1a4aef8d88) ] distribution(systemd): set start limit interval and burst [`Florentin Dubois`] (`2023-11-14`) +- [ 
[`cc12789`](https://github.com/sozu-proxy/sozu/commit/cc12789f4516d217fb15a7d8b8dd7b5848fc211d) ] comments and renaming in lib::tls [`Emmanuel Bosquet`] (`2023-11-14`) #### 📚 Documentation -- [ [`e754a15`](https://github.com/sozu-proxy/sozu/commit/e754a159dc9abf34285c2f33970e6ecbee765e6e) ] document benchmarking technique [`Emmanuel Bosquet`] (`2023-11-10`) +- [ [`e754a15`](https://github.com/sozu-proxy/sozu/commit/e754a159dc9abf34285c2f33970e6ecbee765e6e) ] document benchmarking technique [`Emmanuel Bosquet`] (`2023-11-10`) ### 🥹 Contributors * @keksoj * @FlorentinDUBOIS * @Wonshtrum +**Full Changelog**: https://github.com/sozu-proxy/sozu/compare/0.15.13...0.15.15 + ## 0.15.13 - 2023-10-27 > This changelog merge all modifications between versions 0.15.6 and 0.15.13 -- We have deployed the new release of Sōzu at Clever Cloud on production and find out some bugs during the deployment process, see [`5d2f3b9`](https://github.com/sozu-proxy/sozu/commit/5d2f3b9de024c538577baf3ef2c6f4ab9b60e236), [`7b61c04`](https://github.com/sozu-proxy/sozu/commit/7b61c043fb7d5e2cb63113627376b5eb85bcea1c), [`72e9d44`](https://github.com/sozu-proxy/sozu/commit/72e9d4497e9326c3538fed1088cb26b9524c0700), [`bf026ee`](https://github.com/sozu-proxy/sozu/commit/bf026ee8ecf9dda2dd75c672baf3b231bc1d3231), [`76e0e7d`](https://github.com/sozu-proxy/sozu/commit/76e0e7d6ce2a74e93d4c75ea2ab891aa2d92c45d), [`0bdf61d`](https://github.com/sozu-proxy/sozu/commit/0bdf61d235406868c770c0199167d10e59809e53), [`89bf73a`](https://github.com/sozu-proxy/sozu/commit/89bf73af57218260e3579a5004eebd61bca18196), [`1196a90`](https://github.com/sozu-proxy/sozu/commit/1196a900d3759fbc579bf4434e5f945b45980790), [`e562299`](https://github.com/sozu-proxy/sozu/commit/e562299d5e140f9ae133f6692f47eaf0f31ad343), [`4c47cfc`](https://github.com/sozu-proxy/sozu/commit/4c47cfc75ee125d942c33849e9107f4b879aec0f), [`cda2f01`](https://github.com/sozu-proxy/sozu/commit/cda2f01789b4abde2ef4441d85be636b6e589384), [`ea0b8af`](https://github.com/sozu-proxy/sozu/commit/ea0b8afefeaaafb11a9a9fb27fa1e8348378829f) and [`437eb12`](https://github.com/sozu-proxy/sozu/commit/437eb1252f4f999001dac7d162694dd455dfa057). 
+- We have deployed the new release of Sōzu at Clever Cloud on production and find out some bugs during the deployment process, see [`5d2f3b9`](https://github.com/sozu-proxy/sozu/commit/5d2f3b9de024c538577baf3ef2c6f4ab9b60e236), [`7b61c04`](https://github.com/sozu-proxy/sozu/commit/7b61c043fb7d5e2cb63113627376b5eb85bcea1c), [`72e9d44`](https://github.com/sozu-proxy/sozu/commit/72e9d4497e9326c3538fed1088cb26b9524c0700), [`bf026ee`](https://github.com/sozu-proxy/sozu/commit/bf026ee8ecf9dda2dd75c672baf3b231bc1d3231), [`76e0e7d`](https://github.com/sozu-proxy/sozu/commit/76e0e7d6ce2a74e93d4c75ea2ab891aa2d92c45d), [`0bdf61d`](https://github.com/sozu-proxy/sozu/commit/0bdf61d235406868c770c0199167d10e59809e53), [`89bf73a`](https://github.com/sozu-proxy/sozu/commit/89bf73af57218260e3579a5004eebd61bca18196), [`1196a90`](https://github.com/sozu-proxy/sozu/commit/1196a900d3759fbc579bf4434e5f945b45980790), [`e562299`](https://github.com/sozu-proxy/sozu/commit/e562299d5e140f9ae133f6692f47eaf0f31ad343), [`4c47cfc`](https://github.com/sozu-proxy/sozu/commit/4c47cfc75ee125d942c33849e9107f4b879aec0f), [`cda2f01`](https://github.com/sozu-proxy/sozu/commit/cda2f01789b4abde2ef4441d85be636b6e589384), [`ea0b8af`](https://github.com/sozu-proxy/sozu/commit/ea0b8afefeaaafb11a9a9fb27fa1e8348378829f) and [`437eb12`](https://github.com/sozu-proxy/sozu/commit/437eb1252f4f999001dac7d162694dd455dfa057). - We have added debug logging, see [`8854576`](https://github.com/sozu-proxy/sozu/commit/88545767284284c31b8c13dab90d581b39c07b56) and [`887babe`](https://github.com/sozu-proxy/sozu/commit/887babe4c0ec81d8c73f4054af837222acb2a076). -- We now retrieve subject alternative names for certificate, see [`ea6bacd`](https://github.com/sozu-proxy/sozu/commit/ea6bacd463d5fd085fa77e411c73ae9e2e94ebbe). +- We now retrieve subject alternative names for certificate, see [`ea6bacd`](https://github.com/sozu-proxy/sozu/commit/ea6bacd463d5fd085fa77e411c73ae9e2e94ebbe). - We have enable metrics of clusters by default and add some error status code, see [`9648cf0`](https://github.com/sozu-proxy/sozu/commit/9648cf0433df13bd84efbb14dfd8321b520a91e2) and [`6b53071`](https://github.com/sozu-proxy/sozu/commit/6b53071303eb56fe45e0242b756fa73bb1fb16d1). - We have updated sozu to hot reload logging level not only for the main processn but also workers, see [`641daa3`](https://github.com/sozu-proxy/sozu/commit/641daa3fc86b7883bd794c6dc9f0c601c9289d24)? 
@@ -56,44 +121,46 @@ See milestone [`v0.16.0`](https://github.com/sozu-proxy/sozu/projects/3?card_fil #### ➕ Added -- [ [`be2cfe6`](https://github.com/sozu-proxy/sozu/commit/be2cfe6da18d7098565b2526b3127651eb8384b9) ] Add 507 default answer [`Eloi DEMOLIS`] (`2023-10-24`) +- [ [`be2cfe6`](https://github.com/sozu-proxy/sozu/commit/be2cfe6da18d7098565b2526b3127651eb8384b9) ] Add 507 default answer [`Eloi DEMOLIS`] (`2023-10-24`) #### ⛑️ Fixed -- [ [`5d2f3b9`](https://github.com/sozu-proxy/sozu/commit/5d2f3b9de024c538577baf3ef2c6f4ab9b60e236) ] fix misleading CLI line on state saving [`Emmanuel Bosquet`] (`2023-10-27`) -- [ [`7b61c04`](https://github.com/sozu-proxy/sozu/commit/7b61c043fb7d5e2cb63113627376b5eb85bcea1c) ] build: add missing assets [`Florentin Dubois`] (`2023-10-27`) -- [ [`72e9d44`](https://github.com/sozu-proxy/sozu/commit/72e9d4497e9326c3538fed1088cb26b9524c0700) ] Don't override X-Forwarded-Proto and X-Forwarded-Port [`Eloi DEMOLIS`] (`2023-10-26`) -- [ [`bf026ee`](https://github.com/sozu-proxy/sozu/commit/bf026ee8ecf9dda2dd75c672baf3b231bc1d3231) ] Add a default certificate when none are found for a host [`Eloi DEMOLIS`] (`2023-10-27`) -- [ [`76e0e7d`](https://github.com/sozu-proxy/sozu/commit/76e0e7d6ce2a74e93d4c75ea2ab891aa2d92c45d) ] Fix early connect trials [`Eloi DEMOLIS`] (`2023-10-24`) -- [ [`0bdf61d`](https://github.com/sozu-proxy/sozu/commit/0bdf61d235406868c770c0199167d10e59809e53) ] fix cluster metrics [`Emmanuel Bosquet`] (`2023-10-24`) -- [ [`89bf73a`](https://github.com/sozu-proxy/sozu/commit/89bf73af57218260e3579a5004eebd61bca18196) ] fix(timeout): implements cancel on drop [`Florentin Dubois`] (`2023-10-23`) -- [ [`1196a90`](https://github.com/sozu-proxy/sozu/commit/1196a900d3759fbc579bf4434e5f945b45980790) ] include TCP clusters in command 'cluster list' [`Emmanuel Bosquet`] (`2023-09-19`) -- [ [`e562299`](https://github.com/sozu-proxy/sozu/commit/e562299d5e140f9ae133f6692f47eaf0f31ad343) ] Fix TrieNode wildcard and regexp management [`Eloi DEMOLIS`] (`2023-10-17`) -- [ [`4c47cfc`](https://github.com/sozu-proxy/sozu/commit/4c47cfc75ee125d942c33849e9107f4b879aec0f) ] fix the display of non-existing cluster information in cluster list [`Emmanuel Bosquet`] (`2023-10-13`) -- [ [`cda2f01`](https://github.com/sozu-proxy/sozu/commit/cda2f01789b4abde2ef4441d85be636b6e589384) ] Fix X-Forwarded-Port when not present [`Eloi DEMOLIS`] (`2023-10-20`) -- [ [`ea0b8af`](https://github.com/sozu-proxy/sozu/commit/ea0b8afefeaaafb11a9a9fb27fa1e8348378829f) ] fix(rustls): read buffer if we received a bufffer full error instead of processing new packets [`Florentin Dubois`] (`2023-10-21`) -- [ [`437eb12`](https://github.com/sozu-proxy/sozu/commit/437eb1252f4f999001dac7d162694dd455dfa057) ] fix: allow to read [`Florentin Dubois`] (`2023-10-21`) +- [ [`5d2f3b9`](https://github.com/sozu-proxy/sozu/commit/5d2f3b9de024c538577baf3ef2c6f4ab9b60e236) ] fix misleading CLI line on state saving [`Emmanuel Bosquet`] (`2023-10-27`) +- [ [`7b61c04`](https://github.com/sozu-proxy/sozu/commit/7b61c043fb7d5e2cb63113627376b5eb85bcea1c) ] build: add missing assets [`Florentin Dubois`] (`2023-10-27`) +- [ [`72e9d44`](https://github.com/sozu-proxy/sozu/commit/72e9d4497e9326c3538fed1088cb26b9524c0700) ] Don't override X-Forwarded-Proto and X-Forwarded-Port [`Eloi DEMOLIS`] (`2023-10-26`) +- [ [`bf026ee`](https://github.com/sozu-proxy/sozu/commit/bf026ee8ecf9dda2dd75c672baf3b231bc1d3231) ] Add a default certificate when none are found for a host [`Eloi DEMOLIS`] (`2023-10-27`) +- [ 
[`76e0e7d`](https://github.com/sozu-proxy/sozu/commit/76e0e7d6ce2a74e93d4c75ea2ab891aa2d92c45d) ] Fix early connect trials [`Eloi DEMOLIS`] (`2023-10-24`) +- [ [`0bdf61d`](https://github.com/sozu-proxy/sozu/commit/0bdf61d235406868c770c0199167d10e59809e53) ] fix cluster metrics [`Emmanuel Bosquet`] (`2023-10-24`) +- [ [`89bf73a`](https://github.com/sozu-proxy/sozu/commit/89bf73af57218260e3579a5004eebd61bca18196) ] fix(timeout): implements cancel on drop [`Florentin Dubois`] (`2023-10-23`) +- [ [`1196a90`](https://github.com/sozu-proxy/sozu/commit/1196a900d3759fbc579bf4434e5f945b45980790) ] include TCP clusters in command 'cluster list' [`Emmanuel Bosquet`] (`2023-09-19`) +- [ [`e562299`](https://github.com/sozu-proxy/sozu/commit/e562299d5e140f9ae133f6692f47eaf0f31ad343) ] Fix TrieNode wildcard and regexp management [`Eloi DEMOLIS`] (`2023-10-17`) +- [ [`4c47cfc`](https://github.com/sozu-proxy/sozu/commit/4c47cfc75ee125d942c33849e9107f4b879aec0f) ] fix the display of non-existing cluster information in cluster list [`Emmanuel Bosquet`] (`2023-10-13`) +- [ [`cda2f01`](https://github.com/sozu-proxy/sozu/commit/cda2f01789b4abde2ef4441d85be636b6e589384) ] Fix X-Forwarded-Port when not present [`Eloi DEMOLIS`] (`2023-10-20`) +- [ [`ea0b8af`](https://github.com/sozu-proxy/sozu/commit/ea0b8afefeaaafb11a9a9fb27fa1e8348378829f) ] fix(rustls): read buffer if we received a bufffer full error instead of processing new packets [`Florentin Dubois`] (`2023-10-21`) +- [ [`437eb12`](https://github.com/sozu-proxy/sozu/commit/437eb1252f4f999001dac7d162694dd455dfa057) ] fix: allow to read [`Florentin Dubois`] (`2023-10-21`) #### ✍️ Changed -- [ [`8854576`](https://github.com/sozu-proxy/sozu/commit/88545767284284c31b8c13dab90d581b39c07b56) ] Add log on suspicious X-Forwarded-Proto and Port [`Eloi DEMOLIS`] (`2023-10-27`) -- [ [`ea6bacd`](https://github.com/sozu-proxy/sozu/commit/ea6bacd463d5fd085fa77e411c73ae9e2e94ebbe) ] Get Subject Alternative Names from extensions [`Eloi DEMOLIS`] (`2023-10-25`) -- [ [`8595cf9`](https://github.com/sozu-proxy/sozu/commit/8595cf9e8aeae1d4c1fc5f8111f9c27d68dc3613) ] Remove early read on TLS upgrade [`Eloi DEMOLIS`] (`2023-10-24`) -- [ [`9648cf0`](https://github.com/sozu-proxy/sozu/commit/9648cf0433df13bd84efbb14dfd8321b520a91e2) ] enable cluster metrics by default [`Emmanuel Bosquet`] (`2023-10-23`) -- [ [`6b53071`](https://github.com/sozu-proxy/sozu/commit/6b53071303eb56fe45e0242b756fa73bb1fb16d1) ] save 4xx and 5xx status codes in cluster metrics [`Emmanuel Bosquet`] (`2023-10-23`) -- [ [`a1d60b2`](https://github.com/sozu-proxy/sozu/commit/a1d60b2fe3203fbf9b2c36c498c1c6f9629b7c20) ] more sensible CLI defaults params in config.toml [`Emmanuel Bosquet`] (`2023-09-21`) -- [ [`641daa3`](https://github.com/sozu-proxy/sozu/commit/641daa3fc86b7883bd794c6dc9f0c601c9289d24) ] send logging level change requests to workers [`Emmanuel Bosquet`] (`2023-10-18`) -- [ [`887babe`](https://github.com/sozu-proxy/sozu/commit/887babe4c0ec81d8c73f4054af837222acb2a076) ] chore: increase logs on access error [`Florentin Dubois`] (`2023-10-21`) +- [ [`8854576`](https://github.com/sozu-proxy/sozu/commit/88545767284284c31b8c13dab90d581b39c07b56) ] Add log on suspicious X-Forwarded-Proto and Port [`Eloi DEMOLIS`] (`2023-10-27`) +- [ [`ea6bacd`](https://github.com/sozu-proxy/sozu/commit/ea6bacd463d5fd085fa77e411c73ae9e2e94ebbe) ] Get Subject Alternative Names from extensions [`Eloi DEMOLIS`] (`2023-10-25`) +- [ [`8595cf9`](https://github.com/sozu-proxy/sozu/commit/8595cf9e8aeae1d4c1fc5f8111f9c27d68dc3613) 
] Remove early read on TLS upgrade [`Eloi DEMOLIS`] (`2023-10-24`) +- [ [`9648cf0`](https://github.com/sozu-proxy/sozu/commit/9648cf0433df13bd84efbb14dfd8321b520a91e2) ] enable cluster metrics by default [`Emmanuel Bosquet`] (`2023-10-23`) +- [ [`6b53071`](https://github.com/sozu-proxy/sozu/commit/6b53071303eb56fe45e0242b756fa73bb1fb16d1) ] save 4xx and 5xx status codes in cluster metrics [`Emmanuel Bosquet`] (`2023-10-23`) +- [ [`a1d60b2`](https://github.com/sozu-proxy/sozu/commit/a1d60b2fe3203fbf9b2c36c498c1c6f9629b7c20) ] more sensible CLI defaults params in config.toml [`Emmanuel Bosquet`] (`2023-09-21`) +- [ [`641daa3`](https://github.com/sozu-proxy/sozu/commit/641daa3fc86b7883bd794c6dc9f0c601c9289d24) ] send logging level change requests to workers [`Emmanuel Bosquet`] (`2023-10-18`) +- [ [`887babe`](https://github.com/sozu-proxy/sozu/commit/887babe4c0ec81d8c73f4054af837222acb2a076) ] chore: increase logs on access error [`Florentin Dubois`] (`2023-10-21`) #### 📚 Documentation -- [ [`9301048`](https://github.com/sozu-proxy/sozu/commit/9301048af9ec64517bf06a7d2f38181fbf1eeae8) ] doc(changelog): add 0.15.6 entry [`Florentin Dubois`] (`2023-10-11`) +- [ [`9301048`](https://github.com/sozu-proxy/sozu/commit/9301048af9ec64517bf06a7d2f38181fbf1eeae8) ] doc(changelog): add 0.15.6 entry [`Florentin Dubois`] (`2023-10-11`) ### 🥹 Contributors * @keksoj * @FlorentinDUBOIS * @Wonshtrum +**Full Changelog**: https://github.com/sozu-proxy/sozu/compare/0.15.6...0.15.13 + ## 0.15.6 - 2023-10-11 ### ⛑️ Fixed @@ -105,14 +172,14 @@ See milestone [`v0.16.0`](https://github.com/sozu-proxy/sozu/projects/3?card_fil #### ⛑️ Fixed -- [ [`c09e17a`](https://github.com/sozu-proxy/sozu/commit/c09e17a4bc5d8ff45694402dd7521e50320cb262) ] Fix X-Forwarded-Proto and X-Forwarded-Port (add them when not present) [`Eloi DEMOLIS`] (`2023-10-11`) -- [ [`7d89372`](https://github.com/sozu-proxy/sozu/commit/7d8937267462be3dea343fba76ec2c6ac1671da3) ] Fix responses to head requests (ignore body length) [`Eloi DEMOLIS`] (`2023-10-11`) +- [ [`c09e17a`](https://github.com/sozu-proxy/sozu/commit/c09e17a4bc5d8ff45694402dd7521e50320cb262) ] Fix X-Forwarded-Proto and X-Forwarded-Port (add them when not present) [`Eloi DEMOLIS`] (`2023-10-11`) +- [ [`7d89372`](https://github.com/sozu-proxy/sozu/commit/7d8937267462be3dea343fba76ec2c6ac1671da3) ] Fix responses to head requests (ignore body length) [`Eloi DEMOLIS`] (`2023-10-11`) #### ✍️ Changed -- [ [`a52e750`](https://github.com/sozu-proxy/sozu/commit/a52e750e3f5e1a95a2d29f13edbb27908a28e3ad) ] doc(changelog): add 0.15.5 entry [`Florentin Dubois`] (`2023-09-21`) -- [ [`4ffaf2b`](https://github.com/sozu-proxy/sozu/commit/4ffaf2b1feea57f2557722a7de9f63e58c673915) ] chore: update dependencies [`Florentin Dubois`] (`2023-10-11`) -- [ [`6de9cf5`](https://github.com/sozu-proxy/sozu/commit/6de9cf541368fca3d874b14df7e068a856d4d183) ] chore: update dependencies [`Florentin Dubois`] (`2023-09-21`) +- [ [`a52e750`](https://github.com/sozu-proxy/sozu/commit/a52e750e3f5e1a95a2d29f13edbb27908a28e3ad) ] doc(changelog): add 0.15.5 entry [`Florentin Dubois`] (`2023-09-21`) +- [ [`4ffaf2b`](https://github.com/sozu-proxy/sozu/commit/4ffaf2b1feea57f2557722a7de9f63e58c673915) ] chore: update dependencies [`Florentin Dubois`] (`2023-10-11`) +- [ [`6de9cf5`](https://github.com/sozu-proxy/sozu/commit/6de9cf541368fca3d874b14df7e068a856d4d183) ] chore: update dependencies [`Florentin Dubois`] (`2023-09-21`) ### 🥹 Contributors * @FlorentinDUBOIS @@ -130,7 +197,7 @@ We fix a bug that can occurs with pki using 
T.61 charset, see [`a5412b9`](https: #### ⛑️ Fixed -- [[`a5412b9`](https://github.com/sozu-proxy/sozu/commit/a5412b9764e860eedc2a206b16e81144946a8d7f) ] fix(command): retrieve name and san from slice [`Florentin Dubois`] (`2023-09-21`) +- [ [`a5412b9`](https://github.com/sozu-proxy/sozu/commit/a5412b9764e860eedc2a206b16e81144946a8d7f) ] fix(command): retrieve name and san from slice [`Florentin Dubois`] (`2023-09-21`) - [ [`24c4407`](https://github.com/sozu-proxy/sozu/commit/24c4407d654dfbcd7c490e3a23c46fe8289bce4e) ] chore: update changelog to add 0.15.4 [`Florentin Dubois`] (`2023-09-13`) - [ [`6de9cf5`](https://github.com/sozu-proxy/sozu/commit/6de9cf541368fca3d874b14df7e068a856d4d183) ] chore: update dependencies [`Florentin Dubois`] (`2023-09-21`) From db7b5a6d0882b369246d78071a79d02586840783 Mon Sep 17 00:00:00 2001 From: Eloi DEMOLIS Date: Thu, 14 Dec 2023 19:24:01 +0100 Subject: [PATCH 09/11] rewrite CommandServer with MIO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The multi-threaded, event-based architecture was too complicated and prevented introducing protobuf channels. The new CommandServer functions on the same principle as the proxy logic of Sōzu: listen to events with Mio in an event loop, dispatch the events accordingly by creating WorkerSessions, ClientSessions. This new CommandServer allows to create Tasks, that can scatter requests to all workers, gather their responses as the event loop goes, and invoke the task finish() when all responses are gathered. A worker whose unix socket is disconnected will be killed, and automatically restarted if configurated to. Conversely, if the main process dies, the orphaned workers will perform a hard stop. Sōzu channels used everywhere for communication with workers and clients. 
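
The scatter/gather behaviour described above can be pictured with a small, self-contained sketch. The names below (GatheringTask, Scatter, EventLoop, StatusTask) are illustrative assumptions and do not claim to match the types actually introduced in bin/src/command/server.rs; the sketch only mirrors the shape of the design: remember how many workers a request was scattered to, feed each response to the task as the event loop receives it, and call finish() once the expected count reaches zero.

```rust
// Illustrative sketch only: standalone, std-only model of "scatter a request,
// gather responses as the event loop goes, call finish() when all arrived".
use std::collections::HashMap;

type WorkerId = u32;

/// A response coming back from one worker, heavily simplified.
#[derive(Debug)]
struct WorkerResponse {
    worker_id: WorkerId,
    message: String,
}

/// A task that scatters one request and gathers the answers over time.
trait GatheringTask {
    /// called each time a worker answers
    fn on_response(&mut self, response: WorkerResponse);
    /// called once all expected responses have been gathered
    fn finish(self: Box<Self>);
}

/// Bookkeeping owned by the event loop: how many answers are still expected.
struct Scatter {
    task: Box<dyn GatheringTask>,
    expected_responses: usize,
}

#[derive(Default)]
struct EventLoop {
    /// in-flight tasks, keyed by request id
    in_flight: HashMap<String, Scatter>,
}

impl EventLoop {
    /// scatter: remember the task and how many workers were contacted
    fn scatter(&mut self, request_id: &str, worker_count: usize, task: Box<dyn GatheringTask>) {
        self.in_flight.insert(
            request_id.to_string(),
            Scatter { task, expected_responses: worker_count },
        );
    }

    /// gather: route one worker response to its task, finish when complete
    fn handle_worker_response(&mut self, request_id: &str, response: WorkerResponse) {
        let finished = match self.in_flight.get_mut(request_id) {
            Some(scatter) => {
                scatter.task.on_response(response);
                scatter.expected_responses -= 1;
                scatter.expected_responses == 0
            }
            None => false,
        };
        if finished {
            if let Some(scatter) = self.in_flight.remove(request_id) {
                scatter.task.finish();
            }
        }
    }
}

/// Example task: collect a status string from every worker.
#[derive(Default)]
struct StatusTask {
    answers: Vec<String>,
}

impl GatheringTask for StatusTask {
    fn on_response(&mut self, response: WorkerResponse) {
        self.answers
            .push(format!("worker {}: {}", response.worker_id, response.message));
    }
    fn finish(self: Box<Self>) {
        println!("all workers answered: {:?}", self.answers);
    }
}

fn main() {
    let mut event_loop = EventLoop::default();
    event_loop.scatter("status-1", 2, Box::new(StatusTask::default()));
    // in the real command server these responses arrive via Mio events on unix sockets
    event_loop.handle_worker_response("status-1", WorkerResponse { worker_id: 0, message: "OK".into() });
    event_loop.handle_worker_response("status-1", WorkerResponse { worker_id: 1, message: "OK".into() });
}
```

Counting expected responses per request id is what lets a single-threaded event loop replace the previous channel-per-task approach: no future needs to block waiting for workers, the loop simply completes the task whenever the last answer shows up.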
Automatic counting and respawning of workers, at each tick implement soft stop and hard stop improve CLI: - disable logging in CLI when displaying JSON - sort list of workers when displaying status error handling with thiserror remove obsolete documentation on managing workers remove unused dependencies: smol, futures, futures-lite, anyhow Co-Authored-By: Emmanuel BOSQUET --- Cargo.lock | 455 +------- bin/Cargo.toml | 19 +- bin/src/cli.rs | 5 +- bin/src/command/mod.rs | 1173 +++---------------- bin/src/command/requests.rs | 1974 +++++++++++--------------------- bin/src/command/server.rs | 860 ++++++++++++++ bin/src/command/sessions.rs | 292 +++++ bin/src/command/upgrade.rs | 350 ++++++ bin/src/ctl/command.rs | 256 ++--- bin/src/ctl/mod.rs | 87 +- bin/src/ctl/request_builder.rs | 136 ++- bin/src/main.rs | 170 +-- bin/src/upgrade.rs | 210 ++-- bin/src/util.rs | 169 ++- bin/src/worker.rs | 416 +++---- command/src/certificate.rs | 3 +- command/src/channel.rs | 22 +- command/src/command.proto | 5 +- command/src/logging.rs | 3 + command/src/proto/display.rs | 116 +- command/src/request.rs | 26 +- command/src/scm_socket.rs | 8 +- command/src/state.rs | 8 +- doc/managing_workers.md | 159 --- e2e/src/sozu/worker.rs | 16 +- e2e/src/tests/tests.rs | 5 +- lib/src/http.rs | 6 +- lib/src/metrics/local_drain.rs | 3 +- lib/src/server.rs | 98 +- lib/src/tcp.rs | 24 +- 30 files changed, 3248 insertions(+), 3826 deletions(-) create mode 100644 bin/src/command/server.rs create mode 100644 bin/src/command/sessions.rs create mode 100644 bin/src/command/upgrade.rs delete mode 100644 doc/managing_workers.md diff --git a/Cargo.lock b/Cargo.lock index d9218f28d..4811fe7c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,183 +125,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" -dependencies = [ - "concurrent-queue", - "event-listener 4.0.3", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-dup" -version = "1.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2886ab563af5038f79ec016dd7b87947ed138b794e8dd64992962c9cca0411" -dependencies = [ - "async-lock 3.3.0", - "futures-io", -] - -[[package]] -name = "async-executor" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" -dependencies = [ - "async-lock 3.3.0", - "async-task", - "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.2.0", - "slab", -] - -[[package]] -name = "async-fs" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", 
- "concurrent-queue", - "futures-lite 1.13.0", - "log 0.4.20", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb41eb19024a91746eba0773aa5e16036045bbf45733766661099e182ea6a744" -dependencies = [ - "async-lock 3.3.0", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite 2.2.0", - "parking", - "polling 3.3.2", - "rustix 0.38.30", - "slab", - "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" -dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy", - "pin-project-lite", -] - -[[package]] -name = "async-net" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0434b1ed18ce1cf5769b8ac540e33f01fa9471058b5e89da9e06f3c882a8c12f" -dependencies = [ - "async-io 1.13.0", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "async-process" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" -dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", - "async-signal", - "blocking", - "cfg-if", - "event-listener 3.1.0", - "futures-lite 1.13.0", - "rustix 0.38.30", - "windows-sys 0.48.0", -] - -[[package]] -name = "async-signal" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" -dependencies = [ - "async-io 2.3.0", - "async-lock 2.8.0", - "atomic-waker", - "cfg-if", - "futures-core", - "futures-io", - "rustix 0.38.30", - "signal-hook-registry", - "slab", - "windows-sys 0.48.0", -] - -[[package]] -name = "async-task" -version = "4.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "autocfg" version = "1.1.0" @@ -350,22 +173,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "blocking" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" -dependencies = [ - "async-channel 2.1.1", - "async-lock 3.3.0", - "async-task", - "fastrand 2.0.1", - "futures-io", - "futures-lite 2.2.0", - "piper", - "tracing", -] - [[package]] name = "byteorder" version = "1.5.0" @@ -445,15 +252,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - 
"crossbeam-utils", -] - [[package]] name = "cookie-factory" version = "0.3.2" @@ -625,53 +423,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -748,34 +499,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" -dependencies = [ - "fastrand 2.0.1", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - [[package]] name = "futures-macro" version = "0.3.30" @@ -950,7 +673,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", "tower-service", "tracing", @@ -985,34 +708,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" dependencies = [ "equivalent", "hashbrown", ] -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "is-terminal" version = "0.4.10" @@ -1020,7 +723,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi", - "rustix 0.38.30", + "rustix", "windows-sys 0.52.0", ] @@ -1102,12 +805,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -1279,12 +976,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.1" @@ -1357,47 +1048,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" -dependencies = [ - "atomic-waker", - "fastrand 2.0.1", - "futures-io", -] - -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log 0.4.20", - "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c980a3880efd47b2e262f6a4bb6daad6555cf3367aa9c4e52895f69537a41" -dependencies = [ - "cfg-if", - "concurrent-queue", - "pin-project-lite", - "rustix 0.38.30", - "tracing", - "windows-sys 0.52.0", -] - [[package]] name = "pool" version = "0.1.4" @@ -1601,9 +1251,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -1645,20 +1295,6 @@ dependencies = [ "nom", ] -[[package]] -name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.30" @@ -1668,7 +1304,7 @@ dependencies = [ "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -1776,18 +1412,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", @@ -1796,9 +1432,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -1850,15 +1486,6 @@ dependencies = [ "digest", ] -[[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - [[package]] name = "slab" version = "0.4.9" @@ -1874,33 +1501,6 @@ version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" -[[package]] -name = "smol" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" -dependencies = [ - "async-channel 1.9.0", - "async-executor", - "async-fs", - "async-io 1.13.0", - "async-lock 2.8.0", - "async-net", - "async-process", - "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.5" @@ -1915,13 +1515,7 @@ dependencies = [ name = "sozu" version = "0.15.19" dependencies = [ - "anyhow", - "async-dup", - "async-io 1.13.0", "clap", - "futures", - "futures-lite 1.13.0", - "hex", "jemallocator", "libc", "log 0.4.20", @@ -1930,14 +1524,13 @@ dependencies = [ "nom", "num_cpus", "paw", - "regex", "serde", "serde_json", - "smol", "sozu-command-lib", "sozu-lib", "tempfile", "termion", + "thiserror", "time", ] @@ -2010,7 +1603,7 @@ dependencies = [ "serial_test", "sha2", "slab", - "socket2 0.5.5", + "socket2", "sozu-command-lib", "thiserror", "time", @@ -2076,9 +1669,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", - "fastrand 2.0.1", + "fastrand", "redox_syscall", - "rustix 0.38.30", + "rustix", "windows-sys 0.52.0", ] @@ -2192,7 +1785,7 @@ dependencies = [ "mio", "num_cpus", "pin-project-lite", - "socket2 0.5.5", + "socket2", "windows-sys 0.48.0", ] @@ -2334,12 +1927,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" -[[package]] -name = "waker-fn" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" - [[package]] name = "want" version = "0.3.1" @@ -2370,7 +1957,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.30", + "rustix", ] [[package]] @@ -2529,9 +2116,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.34" +version = "0.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" +checksum = "1931d78a9c73861da0134f453bb1f790ce49b2e30eba8410b4b79bac72b46a2d" dependencies = [ "memchr", ] diff --git a/bin/Cargo.toml b/bin/Cargo.toml index 317b61f3a..b66b4da1b 100644 --- a/bin/Cargo.toml +++ b/bin/Cargo.toml @@ -14,22 +14,12 @@ authors = [ "Florentin Dubois ", ] categories = ["network-programming"] -edition="2021" +edition = "2021" rust-version = "1.70.0" -include = [ - "README.md", - "Cargo.toml", - "src/**/*", -] +include = ["README.md", "Cargo.toml", "src/**/*"] [dependencies] -anyhow = "^1.0.79" -async-dup = "^1.2.4" -async-io = "^1.13.0" clap = { version = "^4.4.18", features = ["derive"] } -futures = "^0.3.30" -futures-lite = "^1.13.0" -hex = "^0.4.3" jemallocator = { version = "^0.5.4", optional = true } libc = "^0.2.152" log = "^0.4.20" @@ -40,13 +30,12 @@ paw = "^1.0.0" serde = { version = "^1.0.195", features = ["derive"] } serde_json = "^1.0.111" time = "^0.3.29" -regex = "^1.10.3" -smol = "^1.3.0" tempfile = "^3.9.0" termion = "^3.0.0" - sozu-command-lib = { path = "../command", version = "^0.15.19" } sozu-lib = { path = "../lib", version = "^0.15.19" } +thiserror = "^1.0.49" + [target.'cfg(target_os="linux")'.dependencies] num_cpus = "^1.16.0" diff --git a/bin/src/cli.rs b/bin/src/cli.rs index cff0293ba..6fedf322a 100644 --- a/bin/src/cli.rs +++ b/bin/src/cli.rs @@ -109,7 +109,10 @@ pub enum SubCmd { #[clap(long = "hard", help = "do not wait for connections to finish")] hard: bool, }, - #[clap(name = "upgrade", about = "upgrade the proxy")] + #[clap( + name = "upgrade", + about = "upgrade the main process OR a specific worker. Specify a longer timeout." + )] Upgrade { #[clap(long = "worker", help = "upgrade a specific worker")] worker: Option, diff --git a/bin/src/command/mod.rs b/bin/src/command/mod.rs index 20cb0d609..e811235b6 100644 --- a/bin/src/command/mod.rs +++ b/bin/src/command/mod.rs @@ -1,1088 +1,193 @@ +mod requests; +pub mod server; +pub mod sessions; +pub mod upgrade; + use std::{ - collections::{HashMap, HashSet}, - fs, - os::unix::{ - fs::PermissionsExt, - io::{AsRawFd, FromRawFd, IntoRawFd}, - net::{UnixListener, UnixStream}, - }, - path::PathBuf, + fs, io::Error as IoError, num::ParseIntError, os::unix::fs::PermissionsExt, path::PathBuf, }; -use anyhow::{bail, Context}; -use async_dup::Arc; -use async_io::Async; -use futures::{ - channel::{ - mpsc::{channel, Receiver, Sender}, - oneshot, - }, - {SinkExt, StreamExt}, -}; -use futures_lite::{ - future, - io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, -}; -use nix::{ - sys::signal::{kill, Signal}, - unistd::Pid, -}; -use serde::{Deserialize, Serialize}; +use mio::net::UnixListener; use sozu_command_lib::{ - config::Config, + config::{Config, ConfigError}, logging::setup_logging_with_config, - proto::command::{ - request::RequestType, response_content::ContentType, MetricsConfiguration, Request, - Response, ResponseContent, ResponseStatus, RunState, Status, - }, - request::WorkerRequest, - response::WorkerResponse, - scm_socket::{Listeners, ScmSocket}, - state::ConfigState, }; use crate::{ - get_executable_path, - upgrade::{SerializedWorker, UpgradeData}, - util, - worker::{start_worker, Worker}, + cli::Args, + command::{requests::load_static_config, server::CommandHub}, + util::{get_config_file_path, get_executable_path, setup_metrics, write_pid_file, UtilError}, }; -mod requests; - -/// The CommandServer receives these CommandMessages, either from within Sōzu, -/// or from without, in which case they are ALWAYS of the 
Clientrequest variant. -#[derive(Debug)] -enum CommandMessage { - ClientNew { - client_id: String, - sender: Sender, // to send things back to the client - }, - ClientClose { - client_id: String, - }, - ClientRequest { - client_id: String, - request: Request, - }, - WorkerResponse { - worker_id: u32, - response: WorkerResponse, - }, - WorkerClose { - worker_id: u32, - }, - Advancement { - client_id: String, - advancement: Advancement, - }, - MasterStop, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Advancement { - Error(String), - Processing(String), - Ok(Success), +use self::server::{HubError, ServerError}; + +#[derive(thiserror::Error, Debug)] +pub enum StartError { + #[error("failed to load config: {0}")] + LoadConfig(ConfigError), + #[error("could not get path of config file: {0}")] + GetConfigPath(UtilError), + #[error("could not delete previous socket at {0}: {1}")] + RemoveSocket(PathBuf, IoError), + #[error("could not bind to listener: {0}")] + BindToListener(IoError), + #[error("could not write PID file of main process: {0}")] + WritePidFile(UtilError), + #[error("failed to set metrics on the main process: {0}")] + SetupMetrics(UtilError), + #[error("failed to get executable path: {0}")] + GetExecutablePath(UtilError), + #[error("could not get path to the command socket: {0}")] + GetSocketPath(ConfigError), + #[error("could not create command hub: {0}")] + CreateCommandHub(HubError), + #[error("could not load file: {0}")] + LoadProcFile(ConfigError), + #[error("could parse system max file descriptors: {0}")] + ParseSystemMaxFd(ParseIntError), + #[error("Too many allowed connection for a worker")] + TooManyAllowedConnections, + #[error("could not set the unix socket permissions: {0}")] + SetPermissions(IoError), + #[error("could not launch new worker: {0}")] + LaunchWorker(ServerError), } -/// Indicates success of either inner Sōzu logic and of handling the ClientRequest, -/// in which case Success caries the response data. -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Success { - CertificatesFromTheState(ResponseContent), - ClientClose(String), // the client id - ClientNew(String), // the client id - HandledClientRequest, - ListFrontends(ResponseContent), // the list of frontends - ListListeners(ResponseContent), // the list of listeners - ListWorkers(ResponseContent), - LoadState(String, usize, usize), // state path, oks, errors - Logging(String), // new logging level - Metrics(MetricsConfiguration), // enable / disable / clear metrics on the proxy - MasterStop, - // this should contain CommandResponseData but the logic does not return anything - // is this logic gone into sozu_command_lib::proxy::Query::Metrics(_) ? 
- // Metrics, - NotifiedClient(String), // client id - PropagatedWorkerEvent, - Query(ResponseContent), - ReloadConfiguration(usize, usize), // ok, errors - RequestCounts(ResponseContent), - SaveState(usize, String), // amount of written commands, path of the saved state - Status(ResponseContent), // Vec - SubscribeEvent(String), - UpgradeMain(i32), // pid of the new main process - UpgradeWorker(u32), // worker id - WorkerKilled(u32), // worker id - WorkerLaunched(u32), // worker id - WorkerRequest, - WorkerResponse, - WorkerRestarted(u32), // worker id - WorkerStopped(u32), // worker id -} - -// This is how success is logged on Sōzu, and, given the case, manifested to the client -impl std::fmt::Display for Success { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::CertificatesFromTheState(_) => { - write!(f, "Successfully queried certificates from the state") - } - Self::ClientClose(id) => write!(f, "Close client: {id}"), - Self::ClientNew(id) => write!(f, "New client successfully added: {id}"), - Self::HandledClientRequest => write!(f, "Successfully handled the client request"), - Self::ListFrontends(_) => write!(f, "Successfully gathered the list of frontends"), - Self::ListListeners(_) => write!(f, "Successfully listed all listeners"), - Self::ListWorkers(_) => write!(f, "Successfully listed all workers"), - Self::LoadState(path, ok, error) => write!( - f, - "Successfully loaded state from path {path}, {ok} ok messages, {error} errors" - ), - Self::Logging(logging_filter) => { - write!(f, "Successfully set the logging level to {logging_filter}") - } - Self::Metrics(metrics_cfg) => { - write!(f, "Successfully set the metrics to {metrics_cfg:?}") - } - Self::MasterStop => write!(f, "stopping main process"), - Self::NotifiedClient(id) => { - write!(f, "Successfully notified client {id} of the advancement") - } - Self::PropagatedWorkerEvent => { - write!(f, "Sent worker response to all subscribing clients") - } - Self::Query(_) => write!(f, "Ran the query successfully"), - Self::ReloadConfiguration(ok, error) => write!( - f, - "Successfully reloaded configuration, ok: {ok}, errors: {error}" - ), - Self::RequestCounts(_) => write!(f, "count requests"), - Self::SaveState(counter, path) => { - write!(f, "saved {counter} config messages to {path}") - } - Self::Status(_) => { - write!(f, "Sent a status response to client") - } - Self::SubscribeEvent(client_id) => { - write!(f, "Successfully Added {client_id} to subscribers") - } - Self::UpgradeMain(pid) => write!( - f, - "new main process launched with pid {pid}, closing the old one" - ), - Self::UpgradeWorker(id) => { - write!(f, "Successfully upgraded worker with new id: {id}") - } - Self::WorkerKilled(id) => write!(f, "Successfully killed worker {id}"), - Self::WorkerLaunched(id) => write!(f, "Successfully launched worker {id}"), - Self::WorkerRequest => write!(f, "Successfully executed the request on all workers"), - Self::WorkerResponse => write!(f, "Successfully handled worker response"), - Self::WorkerRestarted(id) => write!(f, "Successfully restarted worker {id}"), - Self::WorkerStopped(id) => write!(f, "Successfully stopped worker {id}"), - } - } -} - -#[derive(Deserialize, Serialize, Debug)] -pub struct ProxyConfiguration { - id: String, - state: ConfigState, -} - -pub struct CommandServer { - /// file descriptor of the unix listener socket, usually "sozu.sock" - unix_listener_fd: i32, - /// this sender is cloned and distributed around, to send messages back - command_tx: Sender, - /// where 
the main loop receives messages - command_rx: Receiver, - /// All client loops. id -> cloned command_tx - clients: HashMap>, - /// handles to the workers as seen from the main process - workers: Vec, - /// A map of requests sent to workers. - /// Any function requesting a worker will log the request id in here, associated - /// with a sender and the worker id. This sender will be used to notify the function of the worker's - /// response (and worker id). - /// In certain cases, the same response may need to be transmitted several - /// times over. Therefore, a number is recorded next to the sender in - /// the hashmap. - in_flight: HashMap< - String, // the request id - ( - futures::channel::mpsc::Sender<(WorkerResponse, u32)>, // (response, worker id) to notify whoever sent the Request - usize, // the number of expected responses - ), - >, - event_subscribers: HashSet, - state: ConfigState, - config: Config, - /// id of the next worker to be spawned - next_worker_id: u32, - /// the path to the sozu executable, used to spawn workers - executable_path: String, - /// caching the number of backends instead of going through the whole state.backends hashmap - backends_count: usize, - /// caching the number of frontends instead of going through the whole state.http/hhtps/tcp_fronts hashmaps - frontends_count: usize, - accept_cancel: Option>, -} - -impl CommandServer { - fn new( - fd: i32, - config: Config, - command_tx: Sender, - command_rx: Receiver, - mut workers: Vec, - accept_cancel: oneshot::Sender<()>, - ) -> anyhow::Result { - //FIXME - if config.metrics.is_some() { - /*METRICS.with(|metrics| { - if let Some(sock) = (*metrics.borrow_mut()).socket_mut() { - poll.registry().register(sock, Token(1), Interest::WRITABLE).expect("should register the metrics socket"); - } else { - error!("could not register metrics socket"); - } - });*/ - } - - let state: ConfigState = Default::default(); - - for worker in workers.iter_mut() { - let main_to_worker_channel = worker - .worker_channel - .take() - .with_context(|| format!("No channel present in worker {}", worker.id))? - .sock; - let (worker_tx, worker_rx) = channel(10000); - worker.sender = Some(worker_tx); - - let main_to_worker_stream = Async::new(unsafe { - let fd = main_to_worker_channel.into_raw_fd(); - UnixStream::from_raw_fd(fd) - }) - .with_context(|| "Could not get a unix stream from the file descriptor")?; - - let id = worker.id; - let command_tx = command_tx.clone(); - smol::spawn(async move { - worker_loop(id, main_to_worker_stream, command_tx, worker_rx).await; - }) - .detach(); - } - - let next_id = workers.len() as u32; - let executable_path = unsafe { get_executable_path()? 
}; - let backends_count = state.count_backends(); - let frontends_count = state.count_frontends(); - - Ok(CommandServer { - unix_listener_fd: fd, - config, - state, - command_tx, - command_rx, - clients: HashMap::new(), - workers, - event_subscribers: HashSet::new(), - in_flight: HashMap::new(), - next_worker_id: next_id, - executable_path, - backends_count, - frontends_count, - accept_cancel: Some(accept_cancel), - }) - } - - pub async fn run(&mut self) { - while let Some(command) = self.command_rx.next().await { - let result: anyhow::Result = match command { - CommandMessage::ClientNew { client_id, sender } => { - // this appears twice, which is weird - debug!("adding new client {}", client_id); - self.clients.insert(client_id.to_owned(), sender); - Ok(Success::ClientNew(client_id)) - } - CommandMessage::ClientClose { client_id } => { - debug!("removing client {}", client_id); - self.clients.remove(&client_id); - self.event_subscribers.remove(&client_id); - Ok(Success::ClientClose(client_id)) - } - CommandMessage::ClientRequest { client_id, request } => { - self.handle_client_request(client_id, request).await - } - CommandMessage::WorkerClose { worker_id } => self - .handle_worker_close(worker_id) - .await - .with_context(|| "Could not close worker"), - CommandMessage::WorkerResponse { - worker_id, - response, - } => self - .handle_worker_response(worker_id, response) - .await - .with_context(|| "Could not handle worker response"), - CommandMessage::Advancement { - client_id, - advancement: response, - } => { - let success_result = self - .notify_advancement_to_client(client_id, response.clone()) - .await; - if let Advancement::Ok(Success::UpgradeMain(_)) = response { - std::thread::sleep(std::time::Duration::from_secs(2)); - info!("shutting down old main"); - std::process::exit(0); - }; - success_result - } - CommandMessage::MasterStop => { - info!("stopping main process"); - Ok(Success::MasterStop) - } - }; - - match result { - Ok(request_success) => { - trace!("request OK: {}", request_success); - - // perform shutdowns - if request_success == Success::MasterStop { - // breaking the loop brings run() to return and ends Sōzu - // shouldn't we have the same break for both shutdowns? 
- break; - } - } - Err(error) => { - // log the error on the main process without stopping it - error!("Failed request: {:#?}", error); - } - } - } - } - - pub fn generate_upgrade_data(&self) -> UpgradeData { - let workers: Vec = self - .workers - .iter() - .map(SerializedWorker::from_worker) - .collect(); - //FIXME: ensure there's at least one worker - let state = self.state.clone(); - - UpgradeData { - command_socket_fd: self.unix_listener_fd, - config: self.config.clone(), - workers, - state, - next_id: self.next_worker_id, - //token_count: self.token_count, - } - } - - pub fn from_upgrade_data(upgrade_data: UpgradeData) -> anyhow::Result { - let UpgradeData { - command_socket_fd, - config, - workers: serialized_workers, - state, - next_id, - } = upgrade_data; - - debug!("listener is: {}", command_socket_fd); - let async_listener = Async::new(unsafe { UnixListener::from_raw_fd(command_socket_fd) })?; - - let (accept_cancel_tx, accept_cancel_rx) = oneshot::channel(); - let (command_tx, command_rx) = channel(10000); - let cloned_command_tx = command_tx.clone(); - let cloned_config = config.clone(); - - smol::spawn(accept_clients( - cloned_command_tx, - async_listener, - accept_cancel_rx, - cloned_config, - )) - .detach(); - - let tx = command_tx.clone(); - - let mut workers: Vec = Vec::new(); - - for serialized in serialized_workers.iter() { - if serialized.run_state == RunState::Stopped - || serialized.run_state == RunState::Stopping - { - continue; - } - - let (worker_tx, worker_rx) = channel(10000); - let sender = Some(worker_tx); - - debug!("deserializing worker: {:?}", serialized); - let worker_stream = Async::new(unsafe { UnixStream::from_raw_fd(serialized.fd) }) - .with_context(|| "Could not create an async unix stream to spawn the worker")?; - - let id = serialized.id; - let command_tx = tx.clone(); - //async fn worker(id: u32, sock: Async, tx: Sender, rx: Receiver<()>) -> std::io::Result<()> { - smol::spawn(async move { - worker_loop(id, worker_stream, command_tx, worker_rx).await; - }) - .detach(); - - let scm_socket = ScmSocket::new(serialized.scm) - .with_context(|| "Could not get scm to create worker")?; - - let worker = Worker { - worker_channel_fd: serialized.fd, - id: serialized.id, - worker_channel: None, - sender, - pid: serialized.pid, - run_state: serialized.run_state, - queue: serialized.queue.clone().into(), - scm_socket, - }; - workers.push(worker); - } - - let config_state = state.clone(); - - let backends_count = config_state.count_backends(); - let frontends_count = config_state.count_frontends(); - - let executable_path = unsafe { get_executable_path()? 
}; - - Ok(CommandServer { - unix_listener_fd: command_socket_fd, - config, - state, - command_tx, - command_rx, - clients: HashMap::new(), - workers, - event_subscribers: HashSet::new(), - in_flight: HashMap::new(), - next_worker_id: next_id, - executable_path, - backends_count, - frontends_count, - accept_cancel: Some(accept_cancel_tx), - }) - } - - pub fn disable_cloexec_before_upgrade(&mut self) -> anyhow::Result<()> { - for worker in self.workers.iter_mut() { - if worker.run_state == RunState::Running { - let _ = util::disable_close_on_exec(worker.worker_channel_fd).map_err(|e| { - error!( - "could not disable close on exec for worker {}: {}", - worker.id, e - ); - }); - } - } - trace!( - "disabling cloexec on listener with file descriptor: {}", - self.unix_listener_fd - ); - util::disable_close_on_exec(self.unix_listener_fd)?; - Ok(()) - } - - pub fn enable_cloexec_after_upgrade(&mut self) -> anyhow::Result<()> { - for worker in self.workers.iter_mut() { - if worker.run_state == RunState::Running { - let _ = util::enable_close_on_exec(worker.worker_channel_fd).map_err(|e| { - error!( - "could not enable close on exec for worker {}: {}", - worker.id, e - ); - }); - } - } - util::enable_close_on_exec(self.unix_listener_fd)?; - Ok(()) - } - - pub async fn load_static_cluster_configuration(&mut self) -> anyhow::Result<()> { - let (tx, mut rx) = futures::channel::mpsc::channel(self.workers.len() * 2); - - let mut total_message_count = 0usize; - - //FIXME: too many loops, this could be cleaner - for message in self.config.generate_config_messages()? { - let request = message.content; - if let Err(e) = self.state.dispatch(&request) { - error!("Could not execute request on state: {:#}", e); - } - - if let &Some(RequestType::AddCertificate(_)) = &request.request_type { - debug!("config generated AddCertificate( ... 
)"); - } else { - debug!("config generated {:?}", request); - } - - let mut count = 0usize; - for worker in self.workers.iter_mut().filter(|worker| worker.is_active()) { - worker.send(message.id.clone(), request.clone()).await; - count += 1; - } - - if count == 0 { - // FIXME: should send back error here - error!("no worker found"); - } else { - self.in_flight - .insert(message.id.clone(), (tx.clone(), count)); - total_message_count += count; - } - } - - self.backends_count = self.state.count_backends(); - self.frontends_count = self.state.count_frontends(); - gauge!("configuration.clusters", self.state.clusters.len()); - gauge!("configuration.backends", self.backends_count); - gauge!("configuration.frontends", self.frontends_count); - - smol::spawn(async move { - let mut ok = 0usize; - let mut error = 0usize; - - let mut i = 0; - while let Some((proxy_response, _)) = rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - ok += 1; - } - ResponseStatus::Processing => { - //info!("metrics processing"); - continue; - } - ResponseStatus::Failure => { - error!( - "error handling configuration message {}: {}", - proxy_response.id, proxy_response.message - ); - error += 1; - } - }; - - i += 1; - if i == total_message_count { - break; - } - } - - if error == 0 { - info!("loading state: {} ok messages, 0 errors", ok); - } else { - error!("loading state: {} ok messages, {} errors", ok, error); - } - }) - .detach(); - Ok(()) - } - - /// in case a worker has crashed while Running and automatic_worker_restart is set to true - pub async fn restart_worker(&mut self, worker_id: u32) -> anyhow::Result<()> { - let worker_to_upgrade = &mut (self - .workers - .get_mut(worker_id as usize) - .with_context(|| "there should be a worker at that token")?); - - match kill(Pid::from_raw(worker_to_upgrade.pid), None) { - Ok(_) => { - error!( - "worker process {} (PID = {}) is alive but the worker must have crashed. Killing and replacing", - worker_to_upgrade.id, worker_to_upgrade.pid - ); - } - Err(_) => { - error!( - "worker process {} (PID = {}) not answering, killing and replacing", - worker_to_upgrade.id, worker_to_upgrade.pid - ); - } - } - - kill(Pid::from_raw(worker_to_upgrade.pid), Signal::SIGKILL) - .with_context(|| "failed to kill the worker process")?; - - worker_to_upgrade.run_state = RunState::Stopped; - - incr!("worker_restart"); - - let new_worker_id = self.next_worker_id; - let listeners = Some(Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), - }); - - let mut new_worker = start_worker( - new_worker_id, - &self.config, - self.executable_path.clone(), - &self.state, - listeners, - ) - .with_context(|| format!("Could not start new worker {new_worker_id}"))?; +pub fn begin_main_process(args: &Args) -> Result<(), StartError> { + let config_file_path = get_config_file_path(args).map_err(StartError::GetConfigPath)?; - info!("created new worker: {}", new_worker_id); - self.next_worker_id += 1; + let config = Config::load_from_path(config_file_path).map_err(StartError::LoadConfig)?; - let sock = new_worker - .worker_channel - .take() - .with_context(|| { - format!( - "the new worker with id {} does not have a channel", - new_worker.id - ) - })? // this used to crash with unwrap(), do we still want to crash? 
- .sock; - let (worker_tx, worker_rx) = channel(10_000); - new_worker.sender = Some(worker_tx); - - let stream = Async::new(unsafe { - let fd = sock.into_raw_fd(); - UnixStream::from_raw_fd(fd) - })?; - - let new_worker_id = new_worker.id; - let command_tx = self.command_tx.clone(); - smol::spawn(async move { - worker_loop(new_worker_id, stream, command_tx, worker_rx).await; - }) - .detach(); - - let mut requests = self.state.generate_activate_requests(); - for (count, request) in requests.drain(..).enumerate() { - new_worker - .send(format!("RESTART-{new_worker_id}-ACTIVATE-{count}"), request) - .await; - } - - new_worker - .send( - format!("RESTART-{new_worker_id}-STATUS"), - RequestType::Status(Status {}).into(), - ) - .await; - - self.workers.push(new_worker); - - Ok(()) - } - - async fn handle_worker_close(&mut self, id: u32) -> anyhow::Result { - info!("removing worker {}", id); - - if let Some(worker) = self.workers.iter_mut().find(|w| w.id == id) { - // In case a worker crashes and should be restarted - if self.config.worker_automatic_restart && worker.run_state == RunState::Running { - info!("Automatically restarting worker {}", id); - match self.restart_worker(id).await { - Ok(()) => info!("Worker {} has automatically restarted!", id), - Err(e) => error!("Could not restart worker {}: {}", id, e), - } - return Ok(Success::WorkerRestarted(id)); - } - - info!("Closing the worker {}.", worker.id); - if !worker.the_pid_is_alive() { - info!("Worker {} is dead, setting to Stopped.", worker.id); - worker.run_state = RunState::Stopped; - return Ok(Success::WorkerStopped(id)); - } - - info!( - "Worker {} is not dead but should be. Let's kill it.", - worker.id - ); - - match kill(Pid::from_raw(worker.pid), Signal::SIGKILL) { - Ok(()) => { - info!("Worker {} was successfully killed", id); - worker.run_state = RunState::Stopped; - return Ok(Success::WorkerKilled(id)); - } - Err(e) => { - return Err(e).with_context(|| "failed to kill the worker process"); - } - } - } - bail!(format!("Could not find worker {id}")) - } - - async fn handle_worker_response( - &mut self, - worker_id: u32, - response: WorkerResponse, - ) -> anyhow::Result { - // Notify the client with Processing in case of a proxy event - if let Some(ResponseContent { - content_type: Some(ContentType::Event(event)), - }) = response.content - { - for client_id in self.event_subscribers.iter() { - if let Some(client_tx) = self.clients.get_mut(client_id) { - let event = Response::new( - ResponseStatus::Processing, - format!("{worker_id}"), - Some(ContentType::Event(event.clone()).into()), - ); - client_tx - .send(event) - .await - .with_context(|| format!("could not send message to client {client_id}"))? - } - } - return Ok(Success::PropagatedWorkerEvent); - } + setup_logging_with_config(&config, "MAIN"); + info!("Starting up"); + setup_metrics(&config).map_err(StartError::SetupMetrics)?; + write_pid_file(&config).map_err(StartError::WritePidFile)?; - // Notify the function that sent the request to which the worker responded. - // The in_flight map contains the id of each sent request, together with a sender - // we use to send the response to. - match self.in_flight.remove(&response.id) { - None => { - // FIXME: this message happens a lot at startup because AddCluster - // messages receive responses from each of the HTTP, HTTPS and TCP - // proxys. 
The clusters list should be merged - debug!("unknown response id: {}", response.id); - } - Some((mut requester_tx, mut expected_responses)) => { - let response_id = response.id.clone(); + update_process_limits(&config)?; - // if a worker returned Ok or Error, we're not expecting any more - // messages with this id from it - match response.status { - ResponseStatus::Ok | ResponseStatus::Failure => { - expected_responses -= 1; - } - _ => {} - }; + let executable_path = unsafe { get_executable_path().map_err(StartError::GetExecutablePath)? }; - if requester_tx - .send((response.clone(), worker_id)) - .await - .is_err() - { - error!("Failed to send worker response back: {}", response); - }; + let command_socket_path = config + .command_socket_path() + .map_err(StartError::GetSocketPath)?; - // reinsert the message_id and sender into the hashmap, for later reuse - if expected_responses > 0 { - self.in_flight - .insert(response_id, (requester_tx, expected_responses)); - } - } - } - Ok(Success::WorkerResponse) - } -} - -pub fn start_server( - config: Config, - command_socket_path: String, - workers: Vec, -) -> anyhow::Result<()> { let path = PathBuf::from(&command_socket_path); if fs::metadata(&path).is_ok() { info!("A socket is already present. Deleting..."); - fs::remove_file(&path) - .with_context(|| format!("could not delete previous socket at {path:?}"))?; + fs::remove_file(&path).map_err(|io_err| StartError::RemoveSocket(path.clone(), io_err))?; } - let unix_listener = match UnixListener::bind(&path) { - Ok(unix_listener) => unix_listener, - Err(e) => { - error!("could not create unix socket: {:?}", e); - // the workers did not even get the configuration, we can kill them right away - for worker in workers { - error!("killing worker n°{} (PID {})", worker.id, worker.pid); - let _ = kill(Pid::from_raw(worker.pid), Signal::SIGKILL).map_err(|e| { - error!("could not kill worker: {:?}", e); - }); - } - bail!("couldn't start server"); - } - }; + let unix_listener = UnixListener::bind(&path).map_err(StartError::BindToListener)?; - if let Err(e) = fs::set_permissions(&path, fs::Permissions::from_mode(0o600)) { - error!("could not set the unix socket permissions: {:?}", e); - let _ = fs::remove_file(&path).map_err(|e2| { - error!("could not remove the unix socket: {:?}", e2); - }); - // the workers did not even get the configuration, we can kill them right away - for worker in workers { - error!("killing worker n°{} (PID {})", worker.id, worker.pid); - let _ = kill(Pid::from_raw(worker.pid), Signal::SIGKILL).map_err(|e| { - error!("could not kill worker: {:?}", e); - }); - } - bail!("couldn't start server"); - } + fs::set_permissions(&path, fs::Permissions::from_mode(0o600)) + .map_err(StartError::SetPermissions)?; - future::block_on(async { - // Create a listener. 
- let listener_fd = unix_listener.as_raw_fd(); - let async_listener = Async::new(unix_listener)?; - info!("Listening on {:?}", async_listener.get_ref().local_addr()?); + // Create a copy of the state path to load state later + let saved_state_path = config.saved_state.clone(); + let worker_count = config.worker_count; - let (accept_cancel_tx, accept_cancel_rx) = oneshot::channel(); - let (command_tx, command_rx) = channel(10000); - let cloned_command_tx = command_tx.clone(); - let cloned_config = config.clone(); + info!("Creating command hub"); + let mut command_hub = CommandHub::new(unix_listener, config, executable_path) + .map_err(StartError::CreateCommandHub)?; - smol::spawn(accept_clients( - cloned_command_tx, - async_listener, - accept_cancel_rx, - cloned_config, - )) - .detach(); - - // Create a copy of the state path to load state later - let saved_state_path = config.saved_state.clone(); - - let mut server = CommandServer::new( - listener_fd, - config, - command_tx, - command_rx, - workers, - accept_cancel_tx, - )?; + info!("Launching workers"); + for _ in 0..worker_count { + command_hub + .launch_new_worker(None) + .map_err(StartError::LaunchWorker)?; + } - let _ = server - .load_static_cluster_configuration() - .await - .map_err(|load_error| { - error!( - "Error loading static cluster configuration: {:#}", - load_error - ) - }); + info!("Load static configuration"); + load_static_config(&mut command_hub.server, None, None); - if let Some(path) = saved_state_path { - server - .load_state(None, &path) - .await - .with_context(|| format!("Loading {:?} failed", &path))?; - } + if let Some(path) = saved_state_path { + requests::load_state(&mut command_hub.server, None, &path); + } - gauge!("configuration.clusters", server.state.clusters.len()); - gauge!("configuration.backends", server.backends_count); - gauge!("configuration.frontends", server.frontends_count); + command_hub.run(); - info!("waiting for configuration client connections"); - server.run().await; - info!("main process stopped"); - Ok(()) - }) + info!("main process stopped"); + Ok(()) } -/// spawns a client loop whenever a client connects to the socket -async fn accept_clients( - mut command_tx: Sender, - async_listener: Async, - accept_cancel_rx: oneshot::Receiver<()>, - config: Config, -) { - setup_logging_with_config(&config, "MAIN"); - let mut counter = 0usize; - let mut accept_cancel_rx = Some(accept_cancel_rx); - info!("Accepting client connections"); - loop { - let accept_client = async_listener.accept(); - futures::pin_mut!(accept_client); - let (stream, _) = - match futures::future::select(accept_cancel_rx.take().unwrap(), accept_client).await { - futures::future::Either::Left((_canceled, _)) => { - info!("stopping listener"); - break; - } - futures::future::Either::Right((stream_and_addr, cancel_rx)) => { - accept_cancel_rx = Some(cancel_rx); - stream_and_addr.expect("Can not get unix stream to create a client loop.") - } - }; - let (client_tx, client_rx) = channel(10000); - - let client_id = format!("CL-{counter}"); +#[cfg(target_os = "linux")] +/// We check the hard_limit. The soft_limit can be changed at runtime +/// by the process or any user. 
hard_limit can only be changed by root
+fn update_process_limits(config: &Config) -> Result<(), StartError> {
+    info!("Updating process limits");
+    let wanted_opened_files = (config.max_connections as u64) * 2;
 
-    smol::spawn(client_loop(
-        client_id.clone(),
-        stream,
-        command_tx.clone(),
-        client_rx,
-    ))
-    .detach();
+    let system_max_fd = get_system_max_fd("/proc/sys/fs/file-max")?;
 
-    command_tx
-        .send(CommandMessage::ClientNew {
-            client_id,
-            sender: client_tx,
-        })
-        .await
-        .expect("Failed at sending ClientNew message");
-    counter += 1;
+    if config.max_connections > system_max_fd {
+        error!(
+            "Proxies total max_connections can't be higher than system's file-max limit. \
+            Current limit: {}, current value: {}",
+            system_max_fd, config.max_connections
+        );
+        return Err(StartError::TooManyAllowedConnections);
     }
-}
-
-/// The client loop does two things:
-/// - write everything destined to the client onto the unix stream
-/// - parse CommandRequests from the unix stream and send them to the command server
-async fn client_loop(
-    client_id: String,
-    stream: Async<UnixStream>,
-    mut command_tx: Sender<CommandMessage>,
-    mut client_rx: Receiver<Response>,
-) {
-    let read_stream = Arc::new(stream);
-    let mut write_stream = read_stream.clone();
-
-    smol::spawn(async move {
-        while let Some(response) = client_rx.next().await {
-            trace!("sending back message to client: {:?}", response);
-            let mut message: Vec<u8> = serde_json::to_string(&response)
-                .map(|string| string.into_bytes())
-                .unwrap_or_else(|_| Vec::new());
-            // separate all messages with a 0 byte
-            message.push(0);
-            let _ = write_stream.write_all(&message).await;
-        }
-    })
-    .detach();
+    // Get the soft and hard limits for the current process
+    let mut limits = libc::rlimit {
+        rlim_cur: 0,
+        rlim_max: 0,
+    };
+    unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) };
 
-    debug!("will start receiving messages from client {}", client_id);
+    // Ensure we don't exceed the hard limit
+    if limits.rlim_max < wanted_opened_files {
+        error!(
+            "at least one worker can't have that many connections. 
\
+            current max file descriptor hard limit is: {}, \
+            configured max_connections is {} (the worker needs two file descriptors \
+            per client connection)",
+            limits.rlim_max, config.max_connections
+        );
+        return Err(StartError::TooManyAllowedConnections);
+    }
 
-    // Read the stream by splitting it on 0 bytes
-    let mut split_iterator = BufReader::new(read_stream).split(0);
-    while let Some(message) = split_iterator.next().await {
-        let message = match message {
-            Err(e) => {
-                error!("could not split message: {:?}", e);
-                break;
-            }
-            Ok(msg) => msg,
-        };
+    if limits.rlim_cur < wanted_opened_files && limits.rlim_cur != limits.rlim_max {
+        // Try to get twice what we need to be safe, or rlim_max if we exceed that
+        limits.rlim_cur = limits.rlim_max.min(wanted_opened_files * 2);
+        unsafe {
+            libc::setrlimit(libc::RLIMIT_NOFILE, &limits);
 
-        match serde_json::from_slice::<Request>(&message) {
-            Err(e) => {
-                error!("could not decode client message: {:?}", e);
-                break;
-            }
-            Ok(request) => {
-                debug!("got command request: {:?}", request);
-                let client_id = client_id.clone();
-                if let Err(e) = command_tx
-                    .send(CommandMessage::ClientRequest { client_id, request })
-                    .await
-                {
-                    error!("error sending client request to command server: {:?}", e);
-                }
-            }
+            // Refresh the data we have
+            libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits);
         }
     }
 
-    // If the loop breaks, request the command server to close the client
-    if let Err(send_error) = command_tx
-        .send(CommandMessage::ClientClose {
-            client_id: client_id.to_owned(),
-        })
-        .await
-    {
+    // Ensure we don't exceed the new soft limit
+    if limits.rlim_cur < wanted_opened_files {
         error!(
-            "The client loop {} could not send ClientClose to the command server: {:?}",
-            client_id, send_error
+            "at least one worker can't have that many connections. 
\
+            current max file descriptor soft limit is: {}, \
+            configured max_connections is {} (the worker needs two file descriptors \
+            per client connection)",
+            limits.rlim_cur, config.max_connections
         );
+        return Err(StartError::TooManyAllowedConnections);
     }
-}
-
-/// the worker loop does two things:
-/// - write everything destined to the worker onto the unix stream
-/// - parse ProxyResponses from the unix stream and send them to the CommandServer
-async fn worker_loop(
-    worker_id: u32,
-    stream: Async<UnixStream>,
-    mut command_tx: Sender<CommandMessage>,
-    mut worker_rx: Receiver<WorkerRequest>,
-) {
-    let read_stream = Arc::new(stream);
-    let mut write_stream = read_stream.clone();
-
-    smol::spawn(async move {
-        debug!("will start sending messages to worker {}", worker_id);
-        while let Some(worker_request) = worker_rx.next().await {
-            debug!("sending to worker {}: {:?}", worker_id, worker_request);
-            let mut message: Vec<u8> = serde_json::to_string(&worker_request)
-                .map(|string| string.into_bytes())
-                .unwrap_or_else(|_| Vec::new());
-
-            // separate all messages with a 0 byte
-            message.push(0);
-            let _ = write_stream.write_all(&message).await;
-        }
-    })
-    .detach();
-
-    debug!("will start receiving messages from worker {}", worker_id);
+    Ok(())
+}
 
-    // Read the stream by splitting it on 0 bytes
-    let mut split_iterator = BufReader::new(read_stream).split(0);
-    while let Some(message) = split_iterator.next().await {
-        let message = match message {
-            Err(e) => {
-                error!("could not split message: {:?}", e);
-                break;
-            }
-            Ok(msg) => msg,
-        };
+/// To ensure we don't exceed the system maximum capacity
+fn get_system_max_fd(max_file_path: &str) -> Result<usize, StartError> {
+    let max_file = Config::load_file(max_file_path).map_err(StartError::LoadProcFile)?;
 
-        match serde_json::from_slice::<WorkerResponse>(&message) {
-            Err(e) => {
-                error!("could not decode worker message: {:?}", e);
-                break;
-            }
-            Ok(response) => {
-                debug!("worker {} replied message: {:?}", worker_id, response);
-                let worker_id = worker_id;
-                if let Err(e) = command_tx
-                    .send(CommandMessage::WorkerResponse {
-                        worker_id,
-                        response,
-                    })
-                    .await
-                {
-                    error!("error sending worker response to command server: {:?}", e);
-                }
-            }
-        }
-    }
+    trace!("{}: '{}'", max_file_path, max_file);
 
-    error!("worker loop stopped, will close the worker {}", worker_id);
+    max_file
+        .trim()
+        .parse::<usize>()
+        .map_err(StartError::ParseSystemMaxFd)
+}
 
-    // if the loop breaks, request the command server to close the worker
-    if let Err(send_error) = command_tx
-        .send(CommandMessage::WorkerClose {
-            worker_id: worker_id.to_owned(),
-        })
-        .await
-    {
-        error!(
-            "The worker loop {} could not send WorkerClose to the CommandServer: {:?}",
-            worker_id, send_error
-        );
-    }
+#[cfg(not(target_os = "linux"))]
+fn update_process_limits(_: &Config) -> Result<(), StartError> {
+    Ok(())
 }
diff --git a/bin/src/command/requests.rs b/bin/src/command/requests.rs
index 5eae67c7b..ee8493e43 100644
--- a/bin/src/command/requests.rs
+++ b/bin/src/command/requests.rs
@@ -1,15 +1,10 @@
 use std::{
-    collections::{BTreeMap, HashSet},
+    collections::{BTreeMap, HashMap},
     fs::File,
     io::{ErrorKind, Read},
-    os::unix::io::{FromRawFd, IntoRawFd},
-    os::unix::net::UnixStream,
-    time::{Duration, Instant},
 };
 
-use anyhow::{bail, Context};
-use async_io::Async;
-use futures::{channel::mpsc::*, SinkExt, StreamExt};
+use mio::Token;
 use nom::{HexDisplay, Offset};
 
 use sozu_command_lib::{
@@ -20,1421 +15,826 @@ use sozu_command_lib::{
     proto::command::{
         request::RequestType, response_content::ContentType, AggregatedMetrics, AvailableMetrics,
CertificatesWithFingerprints, ClusterHashes, ClusterInformations, FrontendFilters, - MetricsConfiguration, QueryCertificatesFilters, Request, Response, ResponseContent, - ResponseStatus, ReturnListenSockets, RunState, SoftStop, Status, WorkerInfo, WorkerInfos, - WorkerResponses, + HardStop, QueryCertificatesFilters, QueryMetricsOptions, Request, ResponseContent, + ResponseStatus, RunState, SoftStop, Status, WorkerInfo, WorkerInfos, WorkerResponses, }, request::WorkerRequest, - scm_socket::Listeners, }; +use sozu_lib::metrics::METRICS; -use sozu::metrics::METRICS; - -use crate::{ - command::{Advancement, CommandMessage, CommandServer, Success}, - upgrade::fork_main_into_new_main, - worker::{start_worker, Worker}, +use crate::command::{ + server::{ + DefaultGatherer, Gatherer, GatheringTask, MessageClient, Server, ServerState, Timeout, + WorkerId, + }, + sessions::{ClientSession, OptionalClient}, + upgrade::{upgrade_main, upgrade_worker}, }; -impl CommandServer { - pub async fn handle_client_request( - &mut self, - client_id: String, - request: Request, - ) -> anyhow::Result { - trace!("Received request {:?}", request); - - let cloned_client_id = client_id.clone(); - let cloned_request = request.clone(); - - let result: anyhow::Result> = match request.request_type { - Some(RequestType::SaveState(path)) => self.save_state(&path).await, - Some(RequestType::ListWorkers(_)) => self.list_workers().await, - Some(RequestType::ListFrontends(filters)) => self.list_frontends(filters).await, - Some(RequestType::ListListeners(_)) => self.list_listeners(), - Some(RequestType::LoadState(path)) => self.load_state(Some(client_id), &path).await, - Some(RequestType::LaunchWorker(tag)) => self.launch_worker(client_id, &tag).await, - Some(RequestType::UpgradeMain(_)) => self.upgrade_main(client_id).await, - Some(RequestType::UpgradeWorker(worker_id)) => { - self.upgrade_worker(client_id, worker_id).await - } - Some(RequestType::ConfigureMetrics(config)) => { - match MetricsConfiguration::try_from(config) { - Ok(config) => self.configure_metrics(client_id, config).await, - Err(_) => Err(anyhow::Error::msg("wrong i32 for metrics configuration")), - } - } - Some(RequestType::Logging(logging_filter)) => { - self.set_logging_level(logging_filter, client_id).await +impl Server { + pub fn handle_client_request(&mut self, client: &mut ClientSession, request: Request) { + let request_type = match request.request_type { + Some(req) => req, + None => { + error!("empty request sent by client {:?}", client); + return; } - Some(RequestType::SubscribeEvents(_)) => { - self.event_subscribers.insert(client_id.clone()); - Ok(Some(Success::SubscribeEvent(client_id.clone()))) - } - Some(RequestType::ReloadConfiguration(path)) => { - self.reload_configuration(client_id, path).await - } - Some(RequestType::Status(_)) => self.status(client_id).await, - Some(RequestType::QueryCertificatesFromTheState(filters)) => { - self.query_certificates_from_the_state(filters) - } - Some(RequestType::CountRequests(_)) => self.query_request_count(), - Some(RequestType::QueryClusterById(_)) - | Some(RequestType::QueryCertificatesFromWorkers(_)) - | Some(RequestType::QueryClustersByDomain(_)) - | Some(RequestType::QueryClustersHashes(_)) - | Some(RequestType::QueryMetrics(_)) => self.query(client_id, request).await, - - // any other case is an request for the workers, except for SoftStop and HardStop. 
- // TODO: we should have something like: - // RequestContent::SoftStop => self.do_something(), - // RequestContent::HardStop => self.do_nothing_and_return_early(), - // but it goes in there instead: - Some(_request_for_workers) => self.worker_requests(client_id, cloned_request).await, - None => Err(anyhow::Error::msg("Empty request")), }; - - // Notify the command server by sending using his command_tx - match result { - Ok(Some(success)) => { - info!("{}", success); - trace!("details success of the client request: {:?}", success); - return_success(self.command_tx.clone(), cloned_client_id, success).await; - } - Err(anyhow_error) => { - let formatted = format!("{anyhow_error:#}"); - error!("{:#}", formatted); - return_error(self.command_tx.clone(), cloned_client_id, formatted).await; - } - Ok(None) => { - // do nothing here. Ok(None) means the function has already returned its result - // on its own to the command server - } + match request_type { + RequestType::SaveState(path) => save_state(self, client, &path), + RequestType::LoadState(path) => load_state(self, Some(client), &path), + RequestType::ListWorkers(_) => list_workers(self, client), + RequestType::ListFrontends(inner) => list_frontend_command(self, client, inner), + RequestType::ListListeners(_) => list_listeners(self, client), + RequestType::UpgradeMain(_) => upgrade_main(self, client), + RequestType::UpgradeWorker(worker_id) => upgrade_worker(self, client, worker_id), + RequestType::SubscribeEvents(_) => subscribe_client_to_events(self, client), + RequestType::ReloadConfiguration(path) => { + load_static_config(self, Some(client), Some(&path)) + } + RequestType::Status(_) => status(self, client), + RequestType::AddCluster(_) + | RequestType::ActivateListener(_) + | RequestType::AddBackend(_) + | RequestType::AddCertificate(_) + | RequestType::AddHttpFrontend(_) + | RequestType::AddHttpListener(_) + | RequestType::AddHttpsFrontend(_) + | RequestType::AddHttpsListener(_) + | RequestType::AddTcpFrontend(_) + | RequestType::AddTcpListener(_) + | RequestType::ConfigureMetrics(_) + | RequestType::DeactivateListener(_) + | RequestType::RemoveBackend(_) + | RequestType::RemoveCertificate(_) + | RequestType::RemoveCluster(_) + | RequestType::RemoveHttpFrontend(_) + | RequestType::RemoveHttpsFrontend(_) + | RequestType::RemoveListener(_) + | RequestType::RemoveTcpFrontend(_) + | RequestType::ReplaceCertificate(_) => { + worker_request(self, client, request_type); + } + RequestType::QueryClustersHashes(_) + | RequestType::QueryClustersByDomain(_) + | RequestType::QueryCertificatesFromWorkers(_) + | RequestType::QueryClusterById(_) => { + query_clusters(self, client, request_type); + } + RequestType::QueryMetrics(inner) => query_metrics(self, client, inner), + RequestType::SoftStop(_) => stop(self, client, false), + RequestType::HardStop(_) => stop(self, client, true), + RequestType::Logging(logging_filter) => set_logging_level(self, client, logging_filter), + RequestType::QueryCertificatesFromTheState(filters) => { + query_certificates_from_main(self, client, filters) + } + RequestType::CountRequests(_) => count_requests(self, client), + + RequestType::LaunchWorker(_) => {} // not yet implemented, nor used, anywhere + RequestType::ReturnListenSockets(_) => {} // This is only implemented by workers, } - - Ok(Success::HandledClientRequest) } - pub fn query_request_count(&mut self) -> anyhow::Result> { - let request_counts = self.state.get_request_counts(); - Ok(Some(Success::RequestCounts( - 
ContentType::RequestCounts(request_counts).into(), - ))) + /// get infos from the state of the main process + fn query_main(&self, request: RequestType) -> Option { + match request { + RequestType::QueryClusterById(cluster_id) => Some( + ContentType::Clusters(ClusterInformations { + vec: self.state.cluster_state(&cluster_id).into_iter().collect(), + }) + .into(), + ), + RequestType::QueryClustersByDomain(domain) => { + let cluster_ids = self + .state + .get_cluster_ids_by_domain(domain.hostname, domain.path); + let vec = cluster_ids + .iter() + .filter_map(|cluster_id| self.state.cluster_state(cluster_id)) + .collect(); + Some(ContentType::Clusters(ClusterInformations { vec }).into()) + } + RequestType::QueryClustersHashes(_) => Some( + ContentType::ClusterHashes(ClusterHashes { + map: self.state.hash_state(), + }) + .into(), + ), + _ => None, + } } +} - pub async fn save_state(&mut self, path: &str) -> anyhow::Result> { - let mut file = File::create(path) - .with_context(|| format!("could not open file at path: {}", &path))?; - - let counter = self - .state - .write_requests_to_file(&mut file) - .with_context(|| "failed writing state to file")?; +//=============================================== +// non-scattered commands - info!("wrote {} commands to {}", counter, path); +pub fn query_certificates_from_main( + server: &mut Server, + client: &mut ClientSession, + filters: QueryCertificatesFilters, +) { + debug!( + "querying certificates in the state with filters {}", + filters + ); - Ok(Some(Success::SaveState(counter, path.into()))) - } + let certs = server.state.get_certificates(filters); - pub async fn load_state( - &mut self, - client_id: Option, - path: &str, - ) -> anyhow::Result> { - let mut file = match File::open(path) { - Ok(file) => file, - Err(err) if matches!(err.kind(), ErrorKind::NotFound) => { - info!("The state file does not exists, skipping the loading."); - self.backends_count = self.state.count_backends(); - self.frontends_count = self.state.count_frontends(); - return Ok(None); - } - Err(err) => { - return Err(err).with_context(|| format!("Cannot open file at path {path}")); - } - }; - - let mut buffer = Buffer::with_capacity(200000); + client.finish_ok_with_content( + ContentType::CertificatesWithFingerprints(CertificatesWithFingerprints { certs }).into(), + "Successfully queried certificates from the state of main process", + ); +} - info!("starting to load state from {}", path); +/// return how many requests were received by Sōzu since startup +fn count_requests(server: &mut Server, client: &mut ClientSession) { + let request_counts = server.state.get_request_counts(); - let mut message_counter = 0usize; - let mut diff_counter = 0usize; + client.finish_ok_with_content( + ContentType::RequestCounts(request_counts).into(), + "Successfully counted requests received by the state", + ); +} - let (load_state_tx, mut load_state_rx) = futures::channel::mpsc::channel(10000); - loop { - let previous = buffer.available_data(); +pub fn list_frontend_command( + server: &mut Server, + client: &mut ClientSession, + filters: FrontendFilters, +) { + match server.query_main(RequestType::ListFrontends(filters)) { + Some(response) => client.finish_ok_with_content(response, "Successfully listed frontends"), + None => client.finish_failure("main process could not list frontends"), + } +} - //FIXME: we should read in streaming here - let bytes_read = file - .read(buffer.space()) - .with_context(|| "Error reading the saved state file")?; +fn list_workers(server: &mut Server, client: &mut 
ClientSession) { + let vec = server + .workers + .values() + .map(|worker| WorkerInfo { + id: worker.id, + pid: worker.pid, + run_state: worker.run_state as i32, + }) + .collect(); - buffer.fill(bytes_read); + debug!("workers: {:?}", vec); + client.finish_ok_with_content( + ContentType::Workers(WorkerInfos { vec }).into(), + "Successfully listed workers", + ); +} - if buffer.available_data() == 0 { - debug!("Empty buffer"); - break; - } +fn list_listeners(server: &mut Server, client: &mut ClientSession) { + let vec = server.state.list_listeners(); + client.finish_ok_with_content( + ContentType::ListenersList(vec).into(), + "Successfully listed listeners", + ); +} - let mut offset = 0usize; - match parse_several_requests::(buffer.data()) { - Ok((i, requests)) => { - if !i.is_empty() { - debug!("could not parse {} bytes", i.len()); - if previous == buffer.available_data() { - bail!("error consuming load state message"); - } - } - offset = buffer.data().offset(i); - - for request in requests { - message_counter += 1; - - if self.state.dispatch(&request.content).is_ok() { - diff_counter += 1; - - let mut found = false; - let id = format!("LOAD-STATE-{}-{diff_counter}", request.id); - - for worker in - self.workers.iter_mut().filter(|worker| worker.is_active()) - { - let worker_message_id = format!("{}-{}", id, worker.id); - worker - .send(worker_message_id.clone(), request.content.clone()) - .await; - self.in_flight - .insert(worker_message_id, (load_state_tx.clone(), 1)); - - found = true; - } - - if !found { - bail!("no worker found"); - } - } - } - } - Err(nom::Err::Incomplete(_)) => { - if buffer.available_data() == buffer.capacity() { - error!( - "message too big, stopping parsing:\n{}", - buffer.data().to_hex(16) - ); - break; - } - } - Err(parse_error) => { - bail!("saved state parse error: {:?}", parse_error); - } - } - buffer.consume(offset); +fn save_state(server: &mut Server, client: &mut ClientSession, path: &str) { + debug!("saving state to file {}", path); + let mut file = match File::create(path) { + Ok(file) => file, + Err(error) => { + client.finish_failure(format!("Cannot create file at path {path}: {error}")); + return; } + }; - info!( - "stopped loading data from file, remaining: {} bytes, saw {} messages, generated {} diff messages", - buffer.available_data(), message_counter, diff_counter - ); - - if diff_counter > 0 { - info!( - "state loaded from {}, will start sending {} messages to workers", - path, diff_counter - ); - - let command_tx = self.command_tx.to_owned(); - let path = path.to_owned(); - - smol::spawn(async move { - let mut ok = 0usize; - let mut error = 0usize; - while let Some((proxy_response, _)) = load_state_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - ok += 1; - } - ResponseStatus::Processing => {} - ResponseStatus::Failure => { - error!("{}", proxy_response.message); - error += 1; - } - }; - debug!("ok:{}, error: {}", ok, error); - } - - let client_id = match client_id { - Some(client_id) => client_id, - None => { - match error { - 0 => info!("loading state: {} ok messages, 0 errors", ok), - _ => error!("loading state: {} ok messages, {} errors", ok, error), - } - return; - } - }; - - // notify the command server - match error { - 0 => { - return_success( - command_tx, - client_id, - Success::LoadState(path.to_string(), ok, error), - ) - .await; - } - _ => { - return_error( - command_tx, - client_id, - format!("Loading state failed, ok: {ok}, error: {error}, path: {path}"), - ) - .await; - } - } - }) - .detach(); - } 
else { - info!("no messages sent to workers: local state already had those messages"); - if let Some(client_id) = client_id { - return_success( - self.command_tx.clone(), - client_id, - Success::LoadState(path.to_string(), 0, 0), - ) - .await; - } + match server.state.write_requests_to_file(&mut file) { + Ok(counter) => { + client.finish_ok(format!("Saved {counter} config messages to {path}")); + } + Err(error) => { + client.finish_failure(format!("Failed writing state to file: {error}")); } - - self.backends_count = self.state.count_backends(); - self.frontends_count = self.state.count_frontends(); - Ok(None) } +} - pub async fn list_frontends( - &mut self, - filters: FrontendFilters, - ) -> anyhow::Result> { - info!( - "Received a request to list frontends, along these filters: {:?}", - filters - ); - - let listed_frontends = self.state.list_frontends(filters); +/// change logging level on the main process, and on all workers +fn set_logging_level(server: &mut Server, client: &mut ClientSession, logging_filter: String) { + debug!("Changing main process log level to {}", logging_filter); + logging::LOGGER.with(|l| { + let directives = logging::parse_logging_spec(&logging_filter); + l.borrow_mut().set_directives(directives); + }); + + // also change / set the content of RUST_LOG so future workers / main thread + // will have the new logging filter value + ::std::env::set_var("RUST_LOG", &logging_filter); + debug!( + "Logging level now: {}", + ::std::env::var("RUST_LOG").unwrap_or("could get RUST_LOG from env".to_string()) + ); - Ok(Some(Success::ListFrontends( - ContentType::FrontendList(listed_frontends).into(), - ))) - } + worker_request(server, client, RequestType::Logging(logging_filter)); +} - fn list_listeners(&self) -> anyhow::Result> { - let listeners_list = self.state.list_listeners(); +fn subscribe_client_to_events(server: &mut Server, client: &mut ClientSession) { + info!("Subscribing client {:?} to listen to events", client.token); + server.event_subscribers.insert(client.token); +} - Ok(Some(Success::ListListeners( - ContentType::ListenersList(listeners_list).into(), - ))) - } +//=============================================== +// Query clusters - pub async fn list_workers(&mut self) -> anyhow::Result> { - let workers: Vec = self - .workers - .iter() - .map(|worker| WorkerInfo { - id: worker.id, - pid: worker.pid, - run_state: worker.run_state as i32, - }) - .collect(); +#[derive(Debug)] +pub struct QueryClustersTask { + pub client_token: Token, + pub request_type: RequestType, + pub gatherer: DefaultGatherer, + main_process_response: Option, +} - debug!("workers: {:#?}", workers); +pub fn query_clusters( + server: &mut Server, + client: &mut ClientSession, + request_content: RequestType, +) { + client.return_processing("Querying cluster..."); + + server.scatter( + request_content.clone().into(), + Box::new(QueryClustersTask { + client_token: client.token, + gatherer: DefaultGatherer::default(), + main_process_response: server.query_main(request_content.clone()), + request_type: request_content, + }), + Timeout::Default, + None, + ) +} - Ok(Some(Success::ListWorkers( - ContentType::Workers(WorkerInfos { vec: workers }).into(), - ))) +impl GatheringTask for QueryClustersTask { + fn client_token(&self) -> Option { + Some(self.client_token) } - pub fn query_certificates_from_the_state( - &self, - filters: QueryCertificatesFilters, - ) -> anyhow::Result> { - debug!( - "querying certificates in the state with filters {}", - filters - ); - - let certs = 
self.state.get_certificates(filters); - - Ok(Some(Success::CertificatesFromTheState( - ContentType::CertificatesWithFingerprints(CertificatesWithFingerprints { certs }) - .into(), - ))) + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer } - pub async fn launch_worker( - &mut self, - client_id: String, - _tag: &str, - ) -> anyhow::Result> { - let mut worker = start_worker( - self.next_worker_id, - &self.config, - self.executable_path.clone(), - &self.state, - None, - ) - .with_context(|| format!("Failed at creating worker {}", self.next_worker_id))?; - - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Sending configuration requests to the new worker...", - ) - .await; - - info!("created new worker: {}", worker.id); - - self.next_worker_id += 1; - - let sock = worker - .worker_channel - .take() - .expect("No channel on the worker being launched") - .sock; - let (worker_tx, worker_rx) = channel(10000); - worker.sender = Some(worker_tx); - - let stream = Async::new(unsafe { - let fd = sock.into_raw_fd(); - UnixStream::from_raw_fd(fd) - })?; - - let id = worker.id; - let command_tx = self.command_tx.clone(); - - smol::spawn(async move { - super::worker_loop(id, stream, command_tx, worker_rx).await; - }) - .detach(); - - info!( - "sending listeners: to the new worker: {:?}", - worker.scm_socket.send_listeners(&Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), + fn on_finish( + self: Box, + _server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { + let mut worker_responses: BTreeMap = self + .gatherer + .responses + .into_iter() + .filter_map(|(worker_id, proxy_response)| { + proxy_response + .content + .map(|response_content| (worker_id.to_string(), response_content)) }) - ); + .collect(); - let activate_requests = self.state.generate_activate_requests(); - for (count, request) in activate_requests.into_iter().enumerate() { - worker.send(format!("{id}-ACTIVATE-{count}"), request).await; + if let Some(main_response) = self.main_process_response { + worker_responses.insert(String::from("main"), main_response); } - self.workers.push(worker); - - return_success( - self.command_tx.clone(), - client_id, - Success::WorkerLaunched(id), - ) - .await; - Ok(None) + client.finish_ok_with_content( + ContentType::WorkerResponses(WorkerResponses { + map: worker_responses, + }) + .into(), + "Successfully queried clusters", + ); } +} - pub async fn upgrade_main(&mut self, client_id: String) -> anyhow::Result> { - self.disable_cloexec_before_upgrade()?; +//=============================================== +// Load static configuration - return_processing( - self.command_tx.clone(), - client_id, - "The proxy is processing the upgrade command.", - ) - .await; +#[derive(Debug)] +struct LoadStaticConfigTask { + gatherer: DefaultGatherer, + client_token: Option, +} - let upgrade_data = self.generate_upgrade_data(); +pub fn load_static_config(server: &mut Server, mut client: OptionalClient, path: Option<&str>) { + let task_id = server.new_task( + Box::new(LoadStaticConfigTask { + gatherer: DefaultGatherer::default(), + client_token: client.as_ref().map(|c| c.token), + }), + Timeout::None, + ); - let (new_main_pid, mut fork_confirmation_channel) = - fork_main_into_new_main(self.executable_path.clone(), upgrade_data) - .with_context(|| "Could not start a new main process")?; + let new_config; - if let Err(e) = fork_confirmation_channel.blocking() { - error!( - "Could not block the fork confirmation channel: {}. 
This is not normal, you may need to restart sozu", - e - ); + let config = match path { + Some(path) if !path.is_empty() => { + info!("loading static configuration at path {}", path); + new_config = Config::load_from_path(path) + .unwrap_or_else(|_| panic!("cannot load configuration from '{path}'")); + &new_config } - let received_ok_from_new_process = fork_confirmation_channel.read_message(); - debug!("upgrade channel sent {:?}", received_ok_from_new_process); - - // signaling the accept loop that it should stop - if let Err(e) = self - .accept_cancel - .take() // we should create a method on Self for this frequent procedure - .expect("No channel on the main process") - .send(()) - { - error!("could not close the accept loop: {:?}", e); + _ => { + info!("reloading static configuration"); + &server.config } + }; - if !received_ok_from_new_process - .with_context(|| "Did not receive fork confirmation from new worker")? - { - bail!("forking the new worker failed") - } - info!("wrote final message, closing"); - Ok(Some(Success::UpgradeMain(new_main_pid))) - } - - pub async fn upgrade_worker( - &mut self, - client_id: String, - worker_id: u32, - ) -> anyhow::Result> { - info!( - "client[{}] msg wants to upgrade worker {}", - client_id, worker_id - ); + client.return_processing(format!( + "Reloading static configuration at path {}", + config.config_path + )); - if !self - .workers - .iter() - .any(|worker| worker.id == worker_id && worker.is_active()) - { - bail!(format!( - "The worker {} does not exist, or is stopped / stopping.", - &worker_id - )); + let config_messages = match config.generate_config_messages() { + Ok(messages) => messages, + Err(config_err) => { + client.finish_failure(format!("could not generate new config: {}", config_err)); + return; } + }; - // same as launch_worker - let next_id = self.next_worker_id; - let mut new_worker = start_worker( - next_id, - &self.config, - self.executable_path.clone(), - &self.state, - None, - ) - .with_context(|| "failed at creating worker")?; - - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Sending configuration requests to the worker", - ) - .await; - - info!("created new worker: {}", next_id); - - self.next_worker_id += 1; - - let sock = new_worker - .worker_channel - .take() - .with_context(|| "No channel on new worker".to_string())? - .sock; - let (worker_tx, worker_rx) = channel(10000); - new_worker.sender = Some(worker_tx); - - new_worker - .sender - .as_mut() - .with_context(|| "No sender on new worker".to_string())? 
- .send(WorkerRequest { - id: format!("UPGRADE-{worker_id}-STATUS"), - content: RequestType::Status(Status {}).into(), - }) - .await - .with_context(|| { - format!( - "could not send status message to worker {:?}", - new_worker.id, - ) - })?; - - let mut listeners = None; - { - let old_worker: &mut Worker = self - .workers - .iter_mut() - .find(|worker| worker.id == worker_id) - .unwrap(); - - /* - old_worker.channel.set_blocking(true); - old_worker.channel.write_message(&ProxyRequest { id: String::from(message_id), request: RequestContent::ReturnListenSockets }); - info!("sent returnlistensockets message to worker"); - old_worker.channel.set_blocking(false); - */ - let (sockets_return_tx, mut sockets_return_rx) = futures::channel::mpsc::channel(3); - let id = format!("{client_id}-return-sockets"); - self.in_flight.insert(id.clone(), (sockets_return_tx, 1)); - old_worker - .send( - id.clone(), - RequestType::ReturnListenSockets(ReturnListenSockets {}).into(), - ) - .await; - - info!("sent ReturnListenSockets to old worker"); - - let cloned_command_tx = self.command_tx.clone(); - let cloned_req_id = client_id.clone(); - smol::spawn(async move { - while let Some((proxy_response, _)) = sockets_return_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - info!("returnsockets OK"); - break; - } - ResponseStatus::Processing => { - info!("returnsockets processing"); - } - ResponseStatus::Failure => { - return_error(cloned_command_tx, cloned_req_id, proxy_response.message) - .await; - break; - } - }; - } - }) - .detach(); - - let mut counter = 0usize; - - loop { - info!("waiting for listen sockets from the old worker"); - if let Err(e) = old_worker.scm_socket.set_blocking(true) { - error!("Could not set the old worker socket to blocking: {}", e); - }; - match old_worker.scm_socket.receive_listeners() { - Ok(l) => { - listeners = Some(l); - break; - } - Err(error) => { - error!( - "Could not receive listerners from scm socket with file descriptor {}:\n{:?}", - old_worker.scm_socket.fd, error - ); - counter += 1; - if counter == 50 { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - } - } - info!("got the listen sockets from the old worker"); - old_worker.run_state = RunState::Stopping; - - let (softstop_tx, mut softstop_rx) = futures::channel::mpsc::channel(10); - let softstop_id = format!("{client_id}-softstop"); - self.in_flight.insert(softstop_id.clone(), (softstop_tx, 1)); - old_worker - .send( - softstop_id.clone(), - RequestType::SoftStop(SoftStop {}).into(), - ) - .await; - - let mut command_tx = self.command_tx.clone(); - let cloned_client_id = client_id.clone(); - let worker_id = old_worker.id; - smol::spawn(async move { - while let Some((proxy_response, _)) = softstop_rx.next().await { - match proxy_response.status { - // should we send all this to the command server? 
- ResponseStatus::Ok => { - info!("softstop OK"); // this doesn't display :-( - if let Err(e) = command_tx - .send(CommandMessage::WorkerClose { worker_id }) - .await - { - error!( - "could not send worker close message to {}: {:?}", - worker_id, e - ); - } - break; - } - ResponseStatus::Processing => { - info!("softstop processing"); - } - ResponseStatus::Failure => { - info!("softstop error: {:?}", proxy_response.message); - break; - } - }; - } - return_processing( - command_tx.clone(), - cloned_client_id, - "Processing softstop responses from the workers...", - ) - .await; - }) - .detach(); + for (request_index, message) in config_messages.into_iter().enumerate() { + let request = message.content; + if let Err(error) = server.state.dispatch(&request) { + client.return_processing(format!("Could not execute request on state: {:#}", error)); + continue; } - match listeners { - Some(l) => { - info!( - "sending listeners: to the new worker: {:?}", - new_worker.scm_socket.send_listeners(&l) - ); - l.close(); - } - None => error!("could not get the list of listeners from the previous worker"), - }; - - let stream = Async::new(unsafe { - let fd = sock.into_raw_fd(); - UnixStream::from_raw_fd(fd) - })?; - - let id = new_worker.id; - let command_tx = self.command_tx.clone(); - smol::spawn(async move { - super::worker_loop(id, stream, command_tx, worker_rx).await; - }) - .detach(); - - let activate_requests = self.state.generate_activate_requests(); - for (count, request) in activate_requests.into_iter().enumerate() { - new_worker - .send(format!("{client_id}-ACTIVATE-{count}"), request) - .await; + if let &Some(RequestType::AddCertificate(_)) = &request.request_type { + debug!("config generated AddCertificate( ... )"); + } else { + debug!("config generated {:?}", request); } - info!("sent config messages to the new worker"); - self.workers.push(new_worker); - - info!("finished upgrade"); - Ok(Some(Success::UpgradeWorker(id))) + server.scatter_on(request, task_id, request_index, None); } +} - pub async fn reload_configuration( - &mut self, - client_id: String, - config_path: String, - ) -> anyhow::Result> { - // check that this works - let path = match config_path.is_empty() { - true => &self.config.config_path, - false => &config_path, - }; - // config_path.as_deref().unwrap_or(&self.config.config_path); - let new_config = Config::load_from_path(path) - .with_context(|| format!("cannot load configuration from '{path}'"))?; - - let mut diff_counter = 0usize; - - let (load_state_tx, mut load_state_rx) = futures::channel::mpsc::channel(10000); - - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Reloading configuration, sending config messages to workers...", - ) - .await; - - for request in new_config.generate_config_messages()? 
{ - if self.state.dispatch(&request.content).is_ok() { - diff_counter += 1; - - let mut found = false; - let id = format!("LOAD-STATE-{}-{}", &request.id, diff_counter); - - for worker in self.workers.iter_mut().filter(|worker| worker.is_active()) { - let worker_message_id = format!("{}-{}", id, worker.id); - worker - .send(worker_message_id.clone(), request.content.clone()) - .await; - self.in_flight - .insert(worker_message_id, (load_state_tx.clone(), 1)); +impl GatheringTask for LoadStaticConfigTask { + fn client_token(&self) -> Option { + self.client_token + } - found = true; - } + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer + } - if !found { - bail!("no worker found"); + fn on_finish( + self: Box, + server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { + let mut messages = vec![]; + for (worker_id, response) in self.gatherer.responses { + match response.status { + ResponseStatus::Ok => {} + ResponseStatus::Failure => { + messages.push(format!("worker {worker_id}: {}", response.message)) } + ResponseStatus::Processing => {} } } - // clone everything we will need in the detached thread - let command_tx = self.command_tx.clone(); - let cloned_identifier = client_id.clone(); - - if diff_counter > 0 { - info!( - "state loaded from {}, will start sending {} messages to workers", - new_config.config_path, diff_counter - ); - smol::spawn(async move { - let mut ok = 0usize; - let mut error = 0usize; - while let Some((proxy_response, _)) = load_state_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - ok += 1; - } - ResponseStatus::Processing => {} - ResponseStatus::Failure => { - error!("{}", proxy_response.message); - error += 1; - } - }; - debug!("ok:{}, error: {}", ok, error); - } - - if error == 0 { - return_success( - command_tx, - cloned_identifier, - Success::ReloadConfiguration(ok, error), - ) - .await; - } else { - return_error( - command_tx, - cloned_identifier, - format!( - "Reloading configuration failed. 
ok: {ok} messages, error: {error}" - ), - ) - .await; - } - }) - .detach(); + if self.gatherer.errors > 0 { + client.finish_failure(format!( + "\nloading static configuration failed: {} OK, {} errors:\n- {}", + self.gatherer.ok, + self.gatherer.errors, + messages.join("\n- ") + )); } else { - info!("no messages sent to workers: local state already had those messages"); + client.finish_ok(format!( + "Successfully loaded the config: {} ok, {} errors", + self.gatherer.ok, self.gatherer.errors, + )); } - self.backends_count = self.state.count_backends(); - self.frontends_count = self.state.count_frontends(); - gauge!("configuration.clusters", self.state.clusters.len()); - gauge!("configuration.backends", self.backends_count); - gauge!("configuration.frontends", self.frontends_count); - - self.config = new_config; - - Ok(None) + server.update_counts(); } +} - pub async fn status(&mut self, client_id: String) -> anyhow::Result> { - info!("Requesting the status of all workers."); - - let (status_tx, mut status_rx) = futures::channel::mpsc::channel(self.workers.len() * 2); - - // create a status list with the available info of the main process - let mut worker_info_map: BTreeMap = BTreeMap::new(); - - let prefix = format!("{client_id}-status-"); - - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Sending status requests to workers...", - ) - .await; - - let mut count = 0usize; - for worker in self.workers.iter_mut() { - info!("Worker {} is {}", worker.id, worker.run_state); - - // create request ids even if we don't send any request, as keys in the tree map - let worker_request_id = format!("{}{}", prefix, worker.id); - // send a status request to supposedly running workers to update the list afterwards - if worker.run_state == RunState::Running { - info!("Summoning status of worker {}", worker.id); - worker - .send( - worker_request_id.clone(), - RequestType::Status(Status {}).into(), - ) - .await; - count += 1; - self.in_flight - .insert(worker_request_id.clone(), (status_tx.clone(), 1)); - } - worker_info_map.insert(worker_request_id, worker.querying_info()); - } +// ========================================================= +// Worker request - let command_tx = self.command_tx.clone(); - let thread_client_id = client_id.clone(); - let worker_timeout = self.config.worker_timeout; - let now = Instant::now(); - - smol::spawn(async move { - let mut i = 0; - - while let Some((proxy_response, _)) = status_rx.next().await { - info!( - "received response with id {}: {:?}", - proxy_response.id, proxy_response - ); - let new_run_state = match proxy_response.status { - ResponseStatus::Ok => RunState::Running, - ResponseStatus::Processing => continue, - ResponseStatus::Failure => RunState::NotAnswering, - }; - worker_info_map - .entry(proxy_response.id) - .and_modify(|worker_info| worker_info.run_state = new_run_state as i32); - - i += 1; - if i == count || now.elapsed() > Duration::from_secs(worker_timeout as u64) { - break; - } - } +#[derive(Debug)] +struct WorkerTask { + pub client_token: Token, + pub gatherer: DefaultGatherer, +} - let worker_info_vec = WorkerInfos { - vec: worker_info_map - .values() - .map(|worker_info| worker_info.to_owned()) - .collect(), - }; +pub fn worker_request( + server: &mut Server, + client: &mut ClientSession, + request_content: RequestType, +) { + let request = request_content.into(); - return_success( - command_tx, - thread_client_id, - Success::Status(ContentType::Workers(worker_info_vec).into()), - ) - .await; - }) - .detach(); - Ok(None) + if let 
Err(error) = server.state.dispatch(&request) { + client.finish_failure(format!( + "could not dispatch request on the main process state: {error}", + )); + return; } + client.return_processing("Processing worker request..."); + + server.scatter( + request, + Box::new(WorkerTask { + client_token: client.token, + gatherer: DefaultGatherer::default(), + }), + Timeout::Default, + None, + ) +} - // This handles the CLI's "metrics enable", "metrics disable", "metrics clear" - // To get the proxy's metrics, the cli command is "metrics get", handled by the query() function - pub async fn configure_metrics( - &mut self, - client_id: String, - config: MetricsConfiguration, - ) -> anyhow::Result> { - let (metrics_tx, mut metrics_rx) = futures::channel::mpsc::channel(self.workers.len() * 2); - let mut count = 0usize; - for worker in self - .workers - .iter_mut() - .filter(|worker| worker.run_state != RunState::Stopped) - { - let req_id = format!("{}-metrics-{}", client_id, worker.id); - worker - .send( - req_id.clone(), - RequestType::ConfigureMetrics(config as i32).into(), - ) - .await; - count += 1; - self.in_flight.insert(req_id, (metrics_tx.clone(), 1)); - } - - let prefix = format!("{client_id}-metrics-"); - - let command_tx = self.command_tx.clone(); - let thread_client_id = client_id.clone(); - smol::spawn(async move { - let mut responses = Vec::new(); - let mut i = 0; - while let Some((proxy_response, _)) = metrics_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - let tag = proxy_response.id.trim_start_matches(&prefix).to_string(); - responses.push((tag, proxy_response)); - } - ResponseStatus::Processing => { - //info!("metrics processing"); - continue; - } - ResponseStatus::Failure => { - let tag = proxy_response.id.trim_start_matches(&prefix).to_string(); - responses.push((tag, proxy_response)); - } - }; +impl GatheringTask for WorkerTask { + fn client_token(&self) -> Option { + Some(self.client_token) + } - i += 1; - if i == count { - break; - } - } + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer + } - let mut messages = vec![]; - let mut has_error = false; - for response in responses.iter() { - match response.1.status { - ResponseStatus::Failure => { - messages.push(format!("{}: {}", response.0, response.1.message)); - has_error = true; - } - _ => messages.push(format!("{}: OK", response.0)), + fn on_finish( + self: Box, + _server: &mut Server, + client: &mut OptionalClient, + timed_out: bool, + ) { + let mut messages = vec![]; + + for (worker_id, response) in self.gatherer.responses { + match response.status { + ResponseStatus::Ok => messages.push(format!("{worker_id}: OK")), + ResponseStatus::Failure => { + messages.push(format!("{worker_id}: {}", response.message)) } + ResponseStatus::Processing => {} } + } - if has_error { - return_error(command_tx, thread_client_id, messages.join(", ")).await; - } else { - return_success(command_tx, thread_client_id, Success::Metrics(config)).await; - } - }) - .detach(); - Ok(None) + if self.gatherer.errors > 0 || timed_out { + client.finish_failure(messages.join(", ")); + } else { + client.finish_ok("Successfully applied request to all workers"); + } } +} - pub async fn query( - &mut self, - client_id: String, - request: Request, - ) -> anyhow::Result> { - debug!("Received this query: {:?}", request); - let (query_tx, mut query_rx) = futures::channel::mpsc::channel(self.workers.len() * 2); - let mut count = 0usize; - for worker in self - .workers - .iter_mut() - .filter(|worker| worker.run_state != 
RunState::Stopped) - { - let req_id = format!("{}-query-{}", client_id, worker.id); - worker.send(req_id.clone(), request.clone()).await; - count += 1; - self.in_flight.insert(req_id, (query_tx.clone(), 1)); - } +// ========================================================= +// Query Metrics - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Query was sent to the workers...", - ) - .await; +#[derive(Debug)] +struct QueryMetricsTask { + pub client_token: Token, + pub gatherer: DefaultGatherer, + options: QueryMetricsOptions, +} - let main_response_content = match &request.request_type { - Some(RequestType::QueryClustersHashes(_)) => Some( - ContentType::ClusterHashes(ClusterHashes { - map: self.state.hash_state(), - }) - .into(), - ), - Some(RequestType::QueryClusterById(cluster_id)) => Some( - ContentType::Clusters(ClusterInformations { - vec: self.state.cluster_state(cluster_id).into_iter().collect(), - }) - .into(), - ), - Some(RequestType::QueryClustersByDomain(domain)) => { - let cluster_ids = self - .state - .get_cluster_ids_by_domain(domain.hostname.clone(), domain.path.clone()); - let vec = cluster_ids - .iter() - .filter_map(|cluster_id| self.state.cluster_state(cluster_id)) - .collect(); - Some(ContentType::Clusters(ClusterInformations { vec }).into()) - } - _ => None, - }; +fn query_metrics(server: &mut Server, client: &mut ClientSession, options: QueryMetricsOptions) { + client.return_processing("Querrying metrics..."); + + server.scatter( + RequestType::QueryMetrics(options.clone()).into(), + Box::new(QueryMetricsTask { + client_token: client.token, + gatherer: DefaultGatherer::default(), + options, + }), + Timeout::Default, + None, + ); +} - // all these are passed to the thread - let command_tx = self.command_tx.clone(); - let cloned_identifier = client_id.clone(); +impl GatheringTask for QueryMetricsTask { + fn client_token(&self) -> Option { + Some(self.client_token) + } + + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer + } - // this may waste resources and time in case of queries others than Metrics + fn on_finish( + self: Box, + _server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { let main_metrics = METRICS.with(|metrics| (*metrics.borrow_mut()).dump_local_proxy_metrics()); - smol::spawn(async move { - let mut responses = Vec::new(); - let mut i = 0; - while let Some((proxy_response, worker_id)) = query_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - responses.push((worker_id, proxy_response)); - } - ResponseStatus::Processing => { - info!("metrics processing"); - continue; - } - ResponseStatus::Failure => { - responses.push((worker_id, proxy_response)); - } - }; - - i += 1; - if i == count { - break; + if self.options.list { + let mut summed_proxy_metrics = Vec::new(); + let mut summed_cluster_metrics = Vec::new(); + for (_, response) in self.gatherer.responses { + if let Some(ResponseContent { + content_type: + Some(ContentType::AvailableMetrics(AvailableMetrics { + proxy_metrics: listed_proxy_metrics, + cluster_metrics: listed_cluster_metrics, + })), + }) = response.content + { + summed_proxy_metrics.append(&mut listed_proxy_metrics.clone()); + summed_cluster_metrics.append(&mut listed_cluster_metrics.clone()); } } - - debug!("Received these worker responses: {:?}", responses); - - let mut worker_responses: BTreeMap = responses - .into_iter() - .filter_map(|(worker_id, proxy_response)| { - proxy_response - .content - .map(|response_content| (worker_id.to_string(), 
response_content)) + return client.finish_ok_with_content( + ContentType::AvailableMetrics(AvailableMetrics { + proxy_metrics: summed_proxy_metrics, + cluster_metrics: summed_cluster_metrics, }) - .collect(); - - let response_content = match &request.request_type { - &Some(RequestType::QueryClustersHashes(_)) - | &Some(RequestType::QueryClusterById(_)) - | &Some(RequestType::QueryClustersByDomain(_)) => { - if let Some(main_response) = main_response_content { - worker_responses.insert(String::from("main"), main_response); - } - ContentType::WorkerResponses(WorkerResponses { - map: worker_responses, - }) - .into() - } - &Some(RequestType::QueryCertificatesFromWorkers(_)) => { - info!( - "Received a response to the certificates query: {:?}", - worker_responses - ); - ContentType::WorkerResponses(WorkerResponses { - map: worker_responses, - }) - .into() - } - Some(RequestType::QueryMetrics(options)) => { - if options.list { - let mut summed_proxy_metrics = Vec::new(); - let mut summed_cluster_metrics = Vec::new(); - for (_, response) in worker_responses { - if let Some(ContentType::AvailableMetrics(AvailableMetrics { - proxy_metrics, - cluster_metrics, - })) = response.content_type - { - summed_proxy_metrics.append(&mut proxy_metrics.clone()); - summed_cluster_metrics.append(&mut cluster_metrics.clone()); - } - } - ContentType::AvailableMetrics(AvailableMetrics { - proxy_metrics: summed_proxy_metrics, - cluster_metrics: summed_cluster_metrics, - }) - .into() - } else { - let workers_metrics = worker_responses - .into_iter() - .filter_map(|(worker_id, worker_response)| match worker_response { - ResponseContent { - content_type: Some(ContentType::WorkerMetrics(worker_metrics)), - } => Some((worker_id, worker_metrics)), - _ => None, - }) - .collect(); - ContentType::Metrics(AggregatedMetrics { - main: main_metrics, - workers: workers_metrics, - }) - .into() - } - } - _ => return, // very very unlikely - }; + .into(), + "Successfully listed available metrics", + ); + } - return_success( - command_tx, - cloned_identifier, - Success::Query(response_content), + let workers_metrics = self + .gatherer + .responses + .into_iter() + .filter_map( + |(worker_id, worker_response)| match worker_response.content { + Some(ResponseContent { + content_type: Some(ContentType::WorkerMetrics(worker_metrics)), + }) => Some((worker_id.to_string(), worker_metrics)), + _ => None, + }, ) - .await; - }) - .detach(); + .collect(); - Ok(None) + client.finish_ok_with_content( + ContentType::Metrics(AggregatedMetrics { + main: main_metrics, + workers: workers_metrics, + }) + .into(), + "Successfully aggregated all metrics", + ); } +} - pub async fn set_logging_level( - &mut self, - logging_filter: String, - client_id: String, - ) -> anyhow::Result> { - debug!("Changing main process log level to {}", logging_filter); - logging::LOGGER.with(|l| { - let directives = logging::parse_logging_spec(&logging_filter); - l.borrow_mut().set_directives(directives); - }); - // also change / set the content of RUST_LOG so future workers / main thread - // will have the new logging filter value - ::std::env::set_var("RUST_LOG", &logging_filter); - debug!("Logging level now: {}", ::std::env::var("RUST_LOG")?); - - // notify the workers too - let _worker_success = self - .worker_requests( - client_id, - RequestType::Logging(logging_filter.clone()).into(), - ) - .await?; - Ok(Some(Success::Logging(logging_filter))) - } +// ========================================================= +// Load state - pub async fn worker_requests( - &mut self, - 
client_id: String, - request: Request, - ) -> anyhow::Result> { - if let &Some(RequestType::AddCertificate(_)) = &request.request_type { - debug!("workerconfig client request AddCertificate()"); - } else { - debug!("workerconfig client request {:?}", request); - } +#[derive(Debug)] +struct LoadStateTask { + /// this task may be called by the main process, without a client + pub client_token: Option, + pub gatherer: DefaultGatherer, + path: String, +} - self.state - .dispatch(&request) - .with_context(|| "Could not execute request on the state")?; - - if self.config.automatic_state_save & !request.is_a_stop() { - if let Some(path) = self.config.saved_state.clone() { - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Saving state to file", - ) - .await; - - let mut file = File::create(&path) - .with_context(|| "Could not create file to automatically save the state")?; - - self.state - .write_requests_to_file(&mut file) - .with_context(|| format!("could not save state automatically to {path}"))?; - } +pub fn load_state(server: &mut Server, mut client: OptionalClient, path: &str) { + info!("loading state at path {}", path); + + let mut file = match File::open(path) { + Ok(file) => file, + Err(err) if matches!(err.kind(), ErrorKind::NotFound) => { + client.finish_failure(format!("Cannot find file at path {path}")); + return; } + Err(error) => { + client.finish_failure(format!("Cannot open file at path {path}: {error}")); + return; + } + }; - return_processing( - self.command_tx.clone(), - client_id.clone(), - "Sending the request to all workers".to_owned(), - ) - .await; - - let (worker_request_tx, mut worker_request_rx) = - futures::channel::mpsc::channel(self.workers.len() * 2); - let mut found = false; - let mut stopping_workers = HashSet::new(); - let mut worker_count = 0usize; - for worker in self.workers.iter_mut().filter(|worker| worker.is_active()) { - if request.is_a_stop() { - worker.run_state = RunState::Stopping; - stopping_workers.insert(worker.id); - } + client.return_processing(format!("Parsing state file from {path}...")); + + let task_id = server.new_task( + Box::new(LoadStateTask { + client_token: client.as_ref().map(|c| c.token), + gatherer: DefaultGatherer::default(), + path: path.to_owned(), + }), + Timeout::None, + ); - let req_id = format!("{}-worker-{}", client_id, worker.id); - worker.send(req_id.clone(), request.clone()).await; - self.in_flight - .insert(req_id, (worker_request_tx.clone(), 1)); + let mut buffer = Buffer::with_capacity(200000); + let mut scatter_request_counter = 0usize; - found = true; - worker_count += 1; + let status = loop { + let previous = buffer.available_data(); + + match file.read(buffer.space()) { + Ok(bytes_read) => buffer.fill(bytes_read), + Err(error) => break Err(format!("Error reading the saved state file: {error}")), + }; + + if buffer.available_data() == 0 { + trace!("load_state: empty buffer"); + break Ok(()); } - let should_stop_main = request.is_a_stop(); - - let mut command_tx = self.command_tx.clone(); - let thread_client_id = client_id.clone(); - - smol::spawn(async move { - let mut responses = Vec::new(); - let mut response_count = 0usize; - while let Some((proxy_response, worker_id)) = worker_request_rx.next().await { - match proxy_response.status { - ResponseStatus::Ok => { - responses.push((worker_id, proxy_response)); - - if stopping_workers.contains(&worker_id) { - if let Err(e) = command_tx - .send(CommandMessage::WorkerClose { worker_id }) - .await - { - error!( - "could not send worker close message 
to {}: {:?}", - worker_id, e - ); - } - } - } - ResponseStatus::Processing => { - info!("request is processing"); - continue; + let mut offset = 0usize; + match parse_several_requests::(buffer.data()) { + Ok((i, requests)) => { + if !i.is_empty() { + debug!("load_state: could not parse {} bytes", i.len()); + if previous == buffer.available_data() { + break Err("Error consuming load state message".into()); } - ResponseStatus::Failure => { - responses.push((worker_id, proxy_response)); - } - }; - - response_count += 1; - if response_count == worker_count { - break; } - } + offset = buffer.data().offset(i); - // send the request to kill the main process only after all workers responded - if should_stop_main { - if let Err(e) = command_tx.send(CommandMessage::MasterStop).await { - error!("could not send main stop message: {:?}", e); + for request in requests { + if server.state.dispatch(&request.content).is_ok() { + scatter_request_counter += 1; + server.scatter_on(request.content, task_id, scatter_request_counter, None); + } } } - - let mut messages = vec![]; - let mut has_error = false; - for response in responses.iter() { - match response.1.status { - ResponseStatus::Failure => { - messages.push(format!("{}: {}", response.0, response.1.message)); - has_error = true; - } - _ => messages.push(format!("{}: OK", response.0)), + Err(nom::Err::Incomplete(_)) => { + if buffer.available_data() == buffer.capacity() { + break Err(format!( + "message too big, stopping parsing:\n{}", + buffer.data().to_hex(16) + )); } } - - if has_error { - return_error(command_tx, thread_client_id, messages.join(", ")).await; - } else { - return_success(command_tx, thread_client_id, Success::WorkerRequest).await; + Err(parse_error) => { + break Err(format!("saved state parse error: {:?}", parse_error)); } - }) - .detach(); + } + buffer.consume(offset); + }; - if !found { - bail!("no worker found"); + match status { + Ok(()) => { + client.return_processing("Applying state file..."); + } + Err(message) => { + client.finish_failure(message); + server.cancel_task(task_id); } + } +} - match request.request_type { - Some(RequestType::AddBackend(_)) | Some(RequestType::RemoveBackend(_)) => { - self.backends_count = self.state.count_backends() - } - Some(RequestType::AddHttpFrontend(_)) - | Some(RequestType::AddHttpsFrontend(_)) - | Some(RequestType::AddTcpFrontend(_)) - | Some(RequestType::RemoveHttpFrontend(_)) - | Some(RequestType::RemoveHttpsFrontend(_)) - | Some(RequestType::RemoveTcpFrontend(_)) => { - self.frontends_count = self.state.count_frontends() - } - _ => {} - }; +impl GatheringTask for LoadStateTask { + fn client_token(&self) -> Option { + self.client_token + } - gauge!("configuration.clusters", self.state.clusters.len()); - gauge!("configuration.backends", self.backends_count); - gauge!("configuration.frontends", self.frontends_count); + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer + } - Ok(None) + fn on_finish( + self: Box, + _server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { + let DefaultGatherer { ok, errors, .. 
} = self.gatherer; + if errors == 0 { + client.finish_ok(format!( + "Successfully loaded state from path {}, {} ok messages, {} errors", + self.path, ok, errors + )); + return; + } + client.finish_failure(format!("loading state: {ok} ok messages, {errors} errors")); } +} - pub async fn notify_advancement_to_client( - &mut self, - client_id: String, - response: Advancement, - ) -> anyhow::Result { - let command_response = match response { - Advancement::Ok(success) => { - let success_message = success.to_string(); - - let command_response_data = match success { - Success::ListFrontends(crd) - | Success::RequestCounts(crd) - | Success::ListWorkers(crd) - | Success::CertificatesFromTheState(crd) - | Success::Query(crd) - | Success::ListListeners(crd) - | Success::Status(crd) => Some(crd), - _ => None, - }; +// ========================================================== +// status - Response::new(ResponseStatus::Ok, success_message, command_response_data) - } - Advancement::Processing(processing_message) => { - Response::new(ResponseStatus::Processing, processing_message, None) - } - Advancement::Error(error_message) => { - Response::new(ResponseStatus::Failure, error_message, None) - } - }; +#[derive(Debug)] +struct StatusTask { + pub client_token: Token, + pub gatherer: DefaultGatherer, + worker_infos: HashMap, +} - trace!( - "Sending response to request sent by client {}: {:?}", - client_id, - command_response - ); +fn status(server: &mut Server, client: &mut ClientSession) { + client.return_processing("Querying status of workers..."); + + let worker_infos = server + .workers + .values() + .map(|worker| (worker.id, worker.querying_info())) + .collect(); + + server.scatter( + RequestType::Status(Status {}).into(), + Box::new(StatusTask { + client_token: client.token, + gatherer: DefaultGatherer::default(), + worker_infos, + }), + Timeout::Default, + None, + ); +} - match self.clients.get_mut(&client_id) { - Some(client_tx) => { - trace!("sending from main process to client loop"); - client_tx.send(command_response).await.with_context(|| { - format!("Could not notify client {client_id} about request") - })?; - } - None => bail!(format!("Could not find client {client_id}")), +impl GatheringTask for StatusTask { + fn client_token(&self) -> Option { + Some(self.client_token) + } + + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer + } + + fn on_finish( + mut self: Box, + _server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { + for (worker_id, response) in self.gatherer.responses { + let new_run_state = match response.status { + ResponseStatus::Ok => RunState::Running, + ResponseStatus::Processing => continue, + ResponseStatus::Failure => RunState::NotAnswering, + }; + + self.worker_infos + .entry(worker_id) + .and_modify(|worker_info| worker_info.run_state = new_run_state as i32); } - Ok(Success::NotifiedClient(client_id)) + let worker_info_vec = WorkerInfos { + vec: self.worker_infos.into_values().collect(), + }; + + client.finish_ok_with_content( + ContentType::Workers(worker_info_vec).into(), + "Successfully collected the status of workers", + ); } } -// Those return functions are meant to be called in detached threads -// to notify the command server of an request's advancement. 
-async fn return_error( - mut command_tx: Sender, - client_id: String, - error_message: T, -) where - T: ToString, -{ - let advancement = CommandMessage::Advancement { - client_id, - advancement: Advancement::Error(error_message.to_string()), - }; +// ========================================================== +// Soft stop and hard stop - trace!("return_error: sending event to the command server"); - if let Err(e) = command_tx.send(advancement).await { - error!("Error while return error to the command server: {}", e) +#[derive(Debug)] +struct StopTask { + pub client_token: Token, + pub gatherer: DefaultGatherer, + pub hardness: bool, +} + +/// stop the main process and workers, true for hard stop +fn stop(server: &mut Server, client: &mut ClientSession, hardness: bool) { + let task = Box::new(StopTask { + client_token: client.token, + gatherer: DefaultGatherer::default(), + hardness, + }); + + server.run_state = ServerState::WorkersStopping; + if hardness { + client.return_processing("Performing hard stop..."); + server.scatter( + RequestType::HardStop(HardStop {}).into(), + task, + Timeout::Default, + None, + ); + } else { + client.return_processing("Performing soft stop..."); + server.scatter( + RequestType::SoftStop(SoftStop {}).into(), + task, + Timeout::None, + None, + ); } } -async fn return_processing( - mut command_tx: Sender, - client_id: String, - processing_message: T, -) where - T: ToString, -{ - let advancement = CommandMessage::Advancement { - client_id, - advancement: Advancement::Processing(processing_message.to_string()), - }; +impl GatheringTask for StopTask { + fn client_token(&self) -> Option { + Some(self.client_token) + } - trace!("return_processing: sending event to the command server"); - if let Err(e) = command_tx.send(advancement).await { - error!( - "Error while returning processing to the command server: {}", - e - ) + fn get_gatherer(&mut self) -> &mut dyn Gatherer { + &mut self.gatherer } -} -async fn return_success( - mut command_tx: Sender, - client_id: String, - success: Success, -) { - let advancement = CommandMessage::Advancement { - client_id, - advancement: Advancement::Ok(success), - }; - trace!( - "return_success: sending event to the command server: {:?}", - advancement - ); - if let Err(e) = command_tx.send(advancement).await { - error!("Error while returning success to the command server: {}", e) + fn on_finish( + self: Box, + server: &mut Server, + client: &mut OptionalClient, + timed_out: bool, + ) { + if timed_out && self.hardness { + client.finish_failure(format!( + "Workers take too long to stop ({} ok, {} errors), stopping the main process to sever the link", + self.gatherer.ok, self.gatherer.errors + )); + } + server.run_state = ServerState::Stopping; + client.finish_ok(format!( + "Successfully closed {} workers, {} errors, stopping the main process...", + self.gatherer.ok, self.gatherer.errors + )); } } diff --git a/bin/src/command/server.rs b/bin/src/command/server.rs new file mode 100644 index 000000000..77e598543 --- /dev/null +++ b/bin/src/command/server.rs @@ -0,0 +1,860 @@ +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + io::Error as IoError, + ops::{Deref, DerefMut}, + os::fd::{AsRawFd, FromRawFd}, + time::{Duration, Instant}, +}; + +use libc::pid_t; +use mio::{ + net::{UnixListener, UnixStream}, + Events, Interest, Poll, Token, +}; +use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, +}; + +use sozu_command_lib::{ + channel::Channel, + config::Config, + proto::command::{ + request::RequestType, 
response_content::ContentType, Request, ResponseContent,
+        ResponseStatus, RunState, Status,
+    },
+    ready::Ready,
+    request::WorkerRequest,
+    response::WorkerResponse,
+    scm_socket::{Listeners, ScmSocket, ScmSocketError},
+    state::ConfigState,
+};
+
+use crate::{
+    command::{
+        sessions::{
+            wants_to_tick, ClientResult, ClientSession, OptionalClient, WorkerResult, WorkerSession,
+        },
+        upgrade::UpgradeData,
+    },
+    util::{disable_close_on_exec, enable_close_on_exec, get_executable_path, UtilError},
+    worker::{fork_main_into_worker, WorkerError},
+};
+
+use super::upgrade::SerializedWorkerSession;
+
+pub type ClientId = u32;
+pub type SessionId = usize;
+pub type TaskId = usize;
+pub type WorkerId = u32;
+pub type RequestId = String;
+
+/// Gathers messages and notifies when there are no more left to read.
+#[allow(unused)]
+pub trait Gatherer {
+    /// increment how many responses we expect
+    fn inc_expected_responses(&mut self, count: usize);
+
+    /// Return true if enough responses have been gathered
+    fn has_finished(&self) -> bool;
+
+    /// Aggregate a response
+    fn on_message(
+        &mut self,
+        server: &mut Server,
+        client: &mut OptionalClient,
+        worker_id: WorkerId,
+        message: WorkerResponse,
+    );
+}
+
+/// Must be satisfied by commands that need to wait for worker responses
+#[allow(unused)]
+pub trait GatheringTask: Debug {
+    /// get access to the client that sent the command (if any)
+    fn client_token(&self) -> Option<Token>;
+
+    /// get access to the gatherer for this task (each task can implement its own gathering strategy)
+    fn get_gatherer(&mut self) -> &mut dyn Gatherer;
+
+    /// This is called once every worker has answered.
+    /// It allows operating both on the server (launch workers...) and the client (send an answer...)
+    fn on_finish(
+        self: Box<Self>,
+        server: &mut Server,
+        client: &mut OptionalClient,
+        timed_out: bool,
+    );
+}
+
+/// Implemented by all objects that can behave like a client (for instance: notify of a processing request)
+pub trait MessageClient {
+    /// return an OK to the client
+    fn finish_ok<T: Into<String>>(&mut self, message: T);
+
+    /// return response content to the client
+    fn finish_ok_with_content<T: Into<String>>(&mut self, content: ResponseContent, message: T);
+
+    /// return failure to the client
+    fn finish_failure<T: Into<String>>(&mut self, message: T);
+
+    /// notify the client about an ongoing task
+    fn return_processing<T: Into<String>>(&mut self, message: T);
+
+    /// transmit response content to the client, even though a task is not finished
+    fn return_processing_with_content<S: Into<String>>(
+        &mut self,
+        message: S,
+        content: ResponseContent,
+    );
+}
+
+/// A timeout for the tasks of the main process server
+pub enum Timeout {
+    None,
+    Default,
+    #[allow(unused)]
+    Custom(Duration),
+}
+
+/// Contains a task and its execution timeout
+#[derive(Debug)]
+struct TaskContainer {
+    job: Box<dyn GatheringTask>,
+    timeout: Option<Instant>,
+}
+
+/// Default strategy when gathering responses from workers
+#[derive(Debug, Default)]
+pub struct DefaultGatherer {
+    /// number of OK responses received from workers
+    pub ok: usize,
+    /// number of failures received from workers
+    pub errors: usize,
+    /// worker responses are accumulated here
+    pub responses: Vec<(WorkerId, WorkerResponse)>,
+    /// number of expected responses, excluding processing responses
+    pub expected_responses: usize,
+}
+
+#[allow(unused)]
+impl Gatherer for DefaultGatherer {
+    fn inc_expected_responses(&mut self, count: usize) {
+        self.expected_responses += count;
+    }
+
+    fn has_finished(&self) -> bool {
+        self.ok + self.errors >= self.expected_responses
+    }
+
+    fn on_message(
+        &mut self,
server: &mut Server, + client: &mut OptionalClient, + worker_id: WorkerId, + message: WorkerResponse, + ) { + match message.status { + ResponseStatus::Ok => self.ok += 1, + ResponseStatus::Failure => self.errors += 1, + ResponseStatus::Processing => client.return_processing(format!( + "Worker {} is processing {}. {}", + worker_id, message.id, message.message + )), + } + self.responses.push((worker_id, message)); + } +} + +#[derive(thiserror::Error, Debug)] +pub enum HubError { + #[error("could not create main server: {0}")] + CreateServer(ServerError), + #[error("could not get executable path")] + GetExecutablePath(UtilError), + #[error("could not create SCM socket for worker {0}: {1}")] + CreateScmSocket(u32, ScmSocketError), +} + +/// A platform to receive client connections, pass orders to workers, +/// gather data, etc. +#[derive(Debug)] +pub struct CommandHub { + /// contains workers and the event loop + pub server: Server, + /// keeps track of agents that contacted Sōzu on the UNIX socket + clients: HashMap, + /// register tasks, for parallel execution + tasks: HashMap, +} + +impl Deref for CommandHub { + type Target = Server; + + fn deref(&self) -> &Self::Target { + &self.server + } +} +impl DerefMut for CommandHub { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.server + } +} + +impl CommandHub { + pub fn new( + unix_listener: UnixListener, + config: Config, + executable_path: String, + ) -> Result { + Ok(Self { + server: Server::new(unix_listener, config, executable_path) + .map_err(HubError::CreateServer)?, + clients: HashMap::new(), + tasks: HashMap::new(), + }) + } + + fn register_client(&mut self, mut stream: UnixStream) { + let token = self.next_session_token(); + if let Err(err) = self.register(token, &mut stream) { + error!("Could not register client: {}", err); + } + let channel = Channel::new(stream, 4096, usize::MAX); + let id = self.next_client_id(); + let session = ClientSession::new(channel, id, token); + info!("register new client: {}", id); + debug!("new client: {:?}", session); + self.clients.insert(token, session); + } + + fn get_client_mut(&mut self, token: &Token) -> Option<(&mut Server, &mut ClientSession)> { + self.clients + .get_mut(token) + .map(|client| (&mut self.server, client)) + } + + /// recreate the command hub when upgrading the main process + pub fn from_upgrade_data(upgrade_data: UpgradeData) -> Result { + let UpgradeData { + command_socket_fd, + config, + workers, + state, + next_client_id, + next_session_id, + next_task_id, + next_worker_id, + } = upgrade_data; + + let executable_path = + unsafe { get_executable_path().map_err(HubError::GetExecutablePath)? 
}; + + let unix_listener = unsafe { UnixListener::from_raw_fd(command_socket_fd) }; + + let command_buffer_size = config.command_buffer_size; + let max_command_buffer_size = config.max_command_buffer_size; + + let mut server = + Server::new(unix_listener, config, executable_path).map_err(HubError::CreateServer)?; + + server.state = state; + server.update_counts(); + server.next_client_id = next_client_id; + server.next_session_id = next_session_id; + server.next_task_id = next_task_id; + server.next_worker_id = next_worker_id; + + for worker in workers + .iter() + .filter(|w| w.run_state != RunState::Stopped && w.run_state != RunState::Stopping) + { + let worker_stream = unsafe { UnixStream::from_raw_fd(worker.channel_fd) }; + let channel: Channel = + Channel::new(worker_stream, command_buffer_size, max_command_buffer_size); + + let scm_socket = ScmSocket::new(worker.scm_fd) + .map_err(|scm_err| HubError::CreateScmSocket(worker.id, scm_err))?; + + if let Err(err) = server.register_worker(worker.id, worker.pid, channel, scm_socket) { + error!("could not register worker: {}", err); + } + } + + Ok(CommandHub { + server, + clients: HashMap::new(), + tasks: HashMap::new(), + }) + } + + /// contains the main event loop + /// - accept clients + /// - receive requests from clients and responses from workers + /// - dispatch these message to the [Server] + /// - manage timeouts of tasks + pub fn run(&mut self) { + let mut events = Events::with_capacity(100); + debug!("running the command hub: {:?}", self); + + loop { + let run_state = self.run_state; + let now = Instant::now(); + + let mut tasks = std::mem::take(&mut self.tasks); + let mut queued_tasks = std::mem::take(&mut self.server.queued_tasks); + self.tasks = tasks + .drain() + .chain(queued_tasks.drain()) + .filter_map(|(task_id, mut task)| { + if task.job.get_gatherer().has_finished() { + self.handle_finishing_task(task_id, task, false); + return None; + } + if let Some(timeout) = task.timeout { + if timeout < now { + self.handle_finishing_task(task_id, task, true); + return None; + } + } + Some((task_id, task)) + }) + .collect(); + + let next_timeout = self.tasks.values().filter_map(|t| t.timeout).max(); + let mut poll_timeout = next_timeout.map(|t| t.saturating_duration_since(now)); + + if self.run_state == ServerState::Stopping { + // when closing, close all ClientSession which are not transfering data + self.clients + .retain(|_, s| s.channel.back_buf.available_data() > 0); + // when all ClientSession are closed, the CommandServer stops + if self.clients.is_empty() { + break; + } + } + + let sessions_to_tick = self + .clients + .iter() + .filter_map(|(t, s)| { + if wants_to_tick(&s.channel) { + Some((*t, Ready::EMPTY, None)) + } else { + None + } + }) + .chain(self.workers.iter().filter_map(|(token, session)| { + if session.run_state != RunState::Stopped && wants_to_tick(&session.channel) { + Some((*token, Ready::EMPTY, None)) + } else { + None + } + })) + .collect::>(); + + let workers_to_spawn = self.workers_to_spawn(); + + // if we have sessions to tick or workers to spawn, we don't want to block on poll + if !sessions_to_tick.is_empty() || workers_to_spawn > 0 { + poll_timeout = Some(Duration::default()); + } + + events.clear(); + trace!("Tasks: {:?}", self.tasks); + trace!("Sessions to tick: {:?}", sessions_to_tick); + trace!("Polling timeout: {:?}", poll_timeout); + match self.poll.poll(&mut events, poll_timeout) { + Ok(()) => {} + Err(error) => error!("Error while polling: {:?}", error), + } + + 
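+            // after the poll returns, spawn however many workers were found missing
+            // before polling (automatic_worker_spawn is a no-op when the count is zero)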
self.automatic_worker_spawn(workers_to_spawn); + + let events = sessions_to_tick.into_iter().chain( + events + .into_iter() + .map(|event| (event.token(), Ready::from(event), Some(event))), + ); + for (token, ready, event) in events { + match token { + Token(0) => { + if run_state == ServerState::Stopping { + // do not accept new clients when stopping + continue; + } + if ready.is_readable() { + while let Ok((stream, _addr)) = self.unix_listener.accept() { + self.register_client(stream); + } + } + } + token => { + trace!("{:?} got event: {:?}", token, event); + if let Some((server, client)) = self.get_client_mut(&token) { + client.update_readiness(ready); + match client.ready() { + ClientResult::NothingToDo => {} + ClientResult::NewRequest(request) => { + debug!("Received new request: {:?}", request); + server.handle_client_request(client, request); + } + ClientResult::CloseSession => { + info!("Closing client {:#?}", client); + self.event_subscribers.remove(&token); + self.clients.remove(&token); + } + } + } else if let Some(worker) = self.workers.get_mut(&token) { + if run_state == ServerState::Stopping { + // do not read responses from workers when stopping + continue; + } + worker.update_readiness(ready); + let worker_id = worker.id; + match worker.ready() { + WorkerResult::NothingToDo => {} + WorkerResult::NewResponses(responses) => { + for response in responses { + self.handle_worker_response(worker_id, response); + } + } + WorkerResult::CloseSession => self.handle_worker_close(&token), + } + } + } + } + } + } + } + + fn handle_worker_response(&mut self, worker_id: WorkerId, response: WorkerResponse) { + // transmit backend events to subscribing clients + if let Some(ResponseContent { + content_type: Some(ContentType::Event(event)), + }) = response.content + { + for client_token in &self.server.event_subscribers { + if let Some(client) = self.clients.get_mut(client_token) { + client.return_processing_with_content( + format!("{worker_id}"), + ContentType::Event(event.clone()).into(), + ); + } + } + return; + } + + let Some(task_id) = self.in_flight.get(&response.id).copied() else { + error!("Got a response for an unknown task: {}", response); + return; + }; + + let task = match self.tasks.get_mut(&task_id) { + Some(task) => task, + None => { + error!("Got a response for an unknown task"); + return; + } + }; + + let client = &mut task + .job + .client_token() + .and_then(|token| self.clients.get_mut(&token)); + task.job + .get_gatherer() + .on_message(&mut self.server, client, worker_id, response); + } + + fn handle_finishing_task(&mut self, task_id: TaskId, task: TaskContainer, timed_out: bool) { + if timed_out { + debug!("Task timeout: {:?}", task); + } else { + debug!("Task finish: {:?}", task); + } + let client = &mut task + .job + .client_token() + .and_then(|token| self.clients.get_mut(&token)); + task.job.on_finish(&mut self.server, client, false); + self.in_flight + .retain(|_, in_flight_task_id| *in_flight_task_id != task_id); + } +} + +#[derive(thiserror::Error, Debug)] +pub enum ServerError { + #[error("Could not create Poll with MIO: {0:?}")] + CreatePoll(IoError), + #[error("Could not register channel in MIO registry: {0:?}")] + RegisterChannel(IoError), + #[error("Could not fork the main into a new worker: {0}")] + ForkMain(WorkerError), + #[error("Did not find worker. 
This should NOT happen.")]
+    WorkerNotFound,
+    #[error("could not enable cloexec: {0}")]
+    EnableCloexec(UtilError),
+    #[error("could not disable cloexec: {0}")]
+    DisableCloexec(UtilError),
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum ServerState {
+    Running,
+    WorkersStopping,
+    Stopping,
+}
+
+/// Manages workers
+/// Functions as an executor for tasks that have two steps:
+/// - scatter to workers
+/// - gather worker responses
+/// - trigger a finishing function when all responses are gathered
+#[derive(Debug)]
+pub struct Server {
+    pub config: Config,
+    /// Sōzu clients that subscribed to events
+    pub event_subscribers: HashSet<Token>,
+    /// path to the executable binary of Sōzu (for upgrading)
+    pub executable_path: String,
+    /// keep track of the tasks
+    in_flight: HashMap<RequestId, TaskId>,
+    next_client_id: ClientId,
+    next_session_id: SessionId,
+    next_task_id: TaskId,
+    next_worker_id: WorkerId,
+    /// the MIO structure that registers sockets and polls them all
+    poll: Poll,
+    /// all tasks created in one tick, to be propagated to the Hub at each tick
+    queued_tasks: HashMap<TaskId, TaskContainer>,
+    /// contains all business logic of Sōzu (frontends, backends, routing, etc.)
+    pub state: ConfigState,
+    /// used to shut down gracefully
+    pub run_state: ServerState,
+    /// the UNIX socket on which to receive clients
+    unix_listener: UnixListener,
+    /// the Sōzu processes running parallel to the main process.
+    /// The workers perform the whole business of proxying and must be
+    /// synchronized at all times.
+    pub workers: HashMap<Token, WorkerSession>,
+}
+
+impl Server {
+    fn new(
+        mut unix_listener: UnixListener,
+        config: Config,
+        executable_path: String,
+    ) -> Result<Self, ServerError> {
+        let poll = mio::Poll::new().map_err(ServerError::CreatePoll)?;
+        poll.registry()
+            .register(
+                &mut unix_listener,
+                Token(0),
+                Interest::READABLE | Interest::WRITABLE,
+            )
+            .map_err(ServerError::RegisterChannel)?;
+
+        Ok(Self {
+            config,
+            event_subscribers: HashSet::new(),
+            executable_path,
+            in_flight: HashMap::new(),
+            next_client_id: 0,
+            next_session_id: 1, // 0 is reserved for the UnixListener
+            next_task_id: 0,
+            next_worker_id: 0,
+            poll,
+            queued_tasks: HashMap::new(),
+            state: ConfigState::new(),
+            run_state: ServerState::Running,
+            unix_listener,
+            workers: HashMap::new(),
+        })
+    }
+
+    /// - fork the main process into a new worker
+    /// - register the worker in mio
+    /// - send a Status request to the new worker
+    pub fn launch_new_worker(
+        &mut self,
+        listeners: Option<Listeners>,
+    ) -> Result<&mut WorkerSession, ServerError> {
+        let worker_id = self.next_worker_id();
+        let (worker_pid, main_to_worker_channel, main_to_worker_scm) = fork_main_into_worker(
+            &worker_id.to_string(),
+            &self.config,
+            self.executable_path.clone(),
+            &self.state,
+            Some(listeners.unwrap_or_default()),
+        )
+        .map_err(ServerError::ForkMain)?;
+
+        let worker_session = self.register_worker(
+            worker_id,
+            worker_pid,
+            main_to_worker_channel,
+            main_to_worker_scm,
+        )?;
+
+        // TODO: make sure the worker is registered as NotAnswering,
+        // and create a task that will pass it to Running when it responds OK to this request:
+        worker_session.send(&WorkerRequest {
+            id: format!("INITIAL-STATUS-{worker_id}"),
+            content: RequestType::Status(Status {}).into(),
+        });
+
+        Ok(worker_session)
+    }
+
+    /// count backends and frontends in the cache, update gauge metrics
+    pub fn update_counts(&mut self) {
+        gauge!("configuration.clusters", self.state.clusters.len());
+        gauge!("configuration.backends", self.state.count_backends());
+        gauge!("configuration.frontends", 
self.state.count_frontends()); + } + + fn next_session_token(&mut self) -> Token { + let token = Token(self.next_session_id); + self.next_session_id += 1; + token + } + fn next_client_id(&mut self) -> ClientId { + let id = self.next_client_id; + self.next_client_id += 1; + id + } + + fn next_task_id(&mut self) -> TaskId { + let id = self.next_task_id; + self.next_task_id += 1; + id + } + + fn next_worker_id(&mut self) -> WorkerId { + let id = self.next_worker_id; + self.next_worker_id += 1; + id + } + + fn register(&mut self, token: Token, stream: &mut UnixStream) -> Result<(), ServerError> { + self.poll + .registry() + .register(stream, token, Interest::READABLE | Interest::WRITABLE) + .map_err(ServerError::RegisterChannel) + } + + /// returns None if the worker is not alive + pub fn get_active_worker_by_id(&self, id: WorkerId) -> Option<&WorkerSession> { + self.workers + .values() + .find(|worker| worker.id == id && worker.is_active()) + } + + /// register a worker session in the server, return the mutable worker session + pub fn register_worker( + &mut self, + worker_id: WorkerId, + pid: pid_t, + mut channel: Channel, + scm_socket: ScmSocket, + ) -> Result<&mut WorkerSession, ServerError> { + let token = self.next_session_token(); + self.register(token, &mut channel.sock)?; + self.workers.insert( + token, + WorkerSession::new(channel, worker_id, pid, token, scm_socket), + ); + self.workers + .get_mut(&token) + .ok_or(ServerError::WorkerNotFound) + } + + /// Add a task in a queue to make it accessible until the next tick + pub fn new_task(&mut self, job: Box, timeout: Timeout) -> TaskId { + let task_id = self.next_task_id(); + let timeout = match timeout { + Timeout::None => None, + Timeout::Default => Some(Duration::from_secs(self.config.worker_timeout as u64)), + Timeout::Custom(duration) => Some(duration), + } + .map(|duration| Instant::now() + duration); + self.queued_tasks + .insert(task_id, TaskContainer { job, timeout }); + task_id + } + + pub fn scatter( + &mut self, + request: Request, + job: Box, + timeout: Timeout, + target: Option, // if None, scatter to all workers + ) { + let task_id = self.new_task(job, timeout); + + self.scatter_on(request, task_id, 0, target); + } + + pub fn scatter_on( + &mut self, + request: Request, + task_id: TaskId, + request_id: usize, + target: Option, + ) { + let task = match self.queued_tasks.get_mut(&task_id) { + Some(task) => task, + None => { + error!("no task found with id {}", task_id); + return; + } + }; + + let mut worker_count = 0; + let mut worker_request = WorkerRequest { + id: String::new(), + content: request, + }; + + for worker in self.workers.values_mut().filter(|w| { + target + .map(|id| id == w.id && w.run_state != RunState::Stopped) + .unwrap_or(w.run_state != RunState::Stopped) + }) { + worker_count += 1; + worker_request.id = format!( + "{}-{}-{}-{}", + worker_request.content.short_name(), + worker.id, + task_id, + request_id, + ); + debug!("scattering to worker {}: {:?}", worker.id, worker_request); + worker.send(&worker_request); + self.in_flight.insert(worker_request.id, task_id); + } + task.job.get_gatherer().inc_expected_responses(worker_count); + } + + pub fn cancel_task(&mut self, task_id: TaskId) { + self.queued_tasks.remove(&task_id); + } + + /// Called when the main cannot communicate anymore with a worker (it's channel closed) + /// Calls Self::close_worker which makes sure the worker is killed to prevent it from + /// going rogue if it wasn't the case + pub fn handle_worker_close(&mut self, token: &Token) { + 
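+        // log which worker is being closed; bail out early if the token is unknown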
match self.workers.get(token) { + Some(worker) => { + info!("closing session of worker {}", worker.id); + trace!("closing worker session {:?}", worker); + } + None => { + error!("No worker exists with token {:?}", token); + return; + } + }; + + self.close_worker(token); + } + + /// returns how many workers should be started to reach config count + pub fn workers_to_spawn(&self) -> u16 { + if self.config.worker_automatic_restart && self.run_state == ServerState::Running { + self.config + .worker_count + .saturating_sub(self.alive_workers() as u16) + } else { + 0 + } + } + + /// spawn brand new workers + pub fn automatic_worker_spawn(&mut self, count: u16) { + if count == 0 { + return; + } + + info!("Automatically restarting {} workers", count); + for _ in 0..count { + if let Err(err) = self.launch_new_worker(None) { + error!("could not launch new worker: {}", err); + } + } + } + + fn alive_workers(&self) -> usize { + self.workers + .values() + .filter(|worker| worker.is_active()) + .count() + } + + /// kill the worker process + pub fn close_worker(&mut self, token: &Token) { + let worker = match self.workers.get_mut(token) { + Some(w) => w, + None => { + error!("No worker exists with token {:?}", token); + return; + } + }; + + match kill(Pid::from_raw(worker.pid), Signal::SIGKILL) { + Ok(()) => info!("Worker {} was successfully killed", worker.id), + Err(_) => info!("worker {} was already dead", worker.id), + } + worker.run_state = RunState::Stopped; + } + + /// Make the file descriptors of the channel survive the upgrade + pub fn disable_cloexec_before_upgrade(&mut self) -> Result { + trace!( + "disabling cloexec on listener with file descriptor: {}", + self.unix_listener.as_raw_fd() + ); + + disable_close_on_exec(self.unix_listener.as_raw_fd()).map_err(ServerError::DisableCloexec) + } + + /// This enables workers to be notified in case the main process dies + pub fn enable_cloexec_after_upgrade(&mut self) -> Result { + for worker in self.workers.values_mut() { + if worker.run_state == RunState::Running { + let _ = enable_close_on_exec(worker.channel.fd()).map_err(|e| { + error!( + "could not enable close on exec for worker {}: {}", + worker.id, e + ); + }); + } + } + enable_close_on_exec(self.unix_listener.as_raw_fd()).map_err(ServerError::EnableCloexec) + } + + /// summarize the server into what is needed to recreate it, when upgrading + pub fn generate_upgrade_data(&self) -> UpgradeData { + UpgradeData { + command_socket_fd: self.unix_listener.as_raw_fd(), + config: self.config.clone(), + workers: self + .workers + .values() + .filter_map(|session| match SerializedWorkerSession::try_from(session) { + Ok(serialized_session) => Some(serialized_session), + Err(err) => { + error!("failed to serialize worker session: {}", err); + None + } + }) + .collect(), + state: self.state.clone(), + next_client_id: self.next_client_id, + next_session_id: self.next_session_id, + next_task_id: self.next_task_id, + next_worker_id: self.next_worker_id, + } + } +} diff --git a/bin/src/command/sessions.rs b/bin/src/command/sessions.rs new file mode 100644 index 000000000..aa1bd305f --- /dev/null +++ b/bin/src/command/sessions.rs @@ -0,0 +1,292 @@ +use std::fmt::Debug; + +use libc::pid_t; +use mio::Token; +use serde::{de::DeserializeOwned, Serialize}; + +use sozu_command_lib::{ + channel::Channel, + proto::command::{Request, Response, ResponseContent, ResponseStatus, RunState, WorkerInfo}, + ready::Ready, + request::WorkerRequest, + response::WorkerResponse, + scm_socket::ScmSocket, +}; + +use 
crate::command::server::{ClientId, MessageClient, WorkerId}; + +/// Track a client from start to finish +#[derive(Debug)] +pub struct ClientSession { + pub channel: Channel, + pub id: ClientId, + pub token: Token, +} + +/// The return type of the ready method +#[derive(Debug)] +pub enum ClientResult { + NothingToDo, + NewRequest(Request), + CloseSession, +} + +impl ClientSession { + pub fn new(mut channel: Channel, id: ClientId, token: Token) -> Self { + channel.interest = Ready::READABLE | Ready::ERROR | Ready::HUP; + Self { channel, id, token } + } + + /// queue a response for the client (the event loop does the send) + fn send(&mut self, response: Response) { + if let Err(e) = self.channel.write_message(&response) { + error!("error writing on channel: {}", e); + self.channel.readiness = Ready::ERROR; + return; + } + self.channel.interest.insert(Ready::WRITABLE); + } + + pub fn update_readiness(&mut self, events: Ready) { + self.channel.handle_events(events); + } + + /// drive the channel read and write + pub fn ready(&mut self) -> ClientResult { + if self.channel.readiness.is_error() || self.channel.readiness.is_hup() { + return ClientResult::CloseSession; + } + + let status = self.channel.writable(); + trace!("client writable: {:?}", status); + let mut requests = extract_messages(&mut self.channel); + match requests.pop() { + Some(request) => { + if !requests.is_empty() { + error!("more than one request at a time"); + } + ClientResult::NewRequest(request) + } + None => ClientResult::NothingToDo, + } + } +} + +impl MessageClient for ClientSession { + fn finish_ok>(&mut self, message: T) { + let message = message.into(); + info!("{}", message); + self.send(Response { + status: ResponseStatus::Ok.into(), + message, + content: None, + }) + } + + fn finish_ok_with_content>(&mut self, content: ResponseContent, message: T) { + let message = message.into(); + info!("{}", message); + self.send(Response { + status: ResponseStatus::Ok.into(), + message, + content: Some(content), + }) + } + + fn finish_failure>(&mut self, message: T) { + let message = message.into(); + error!("{}", message); + self.send(Response { + status: ResponseStatus::Failure.into(), + message, + content: None, + }) + } + + fn return_processing>(&mut self, message: S) { + let message = message.into(); + info!("{}", message); + self.send(Response { + status: ResponseStatus::Processing.into(), + message, + content: None, + }); + } + + fn return_processing_with_content>( + &mut self, + message: S, + content: ResponseContent, + ) { + let message = message.into(); + info!("{}", message); + self.send(Response { + status: ResponseStatus::Processing.into(), + message, + content: Some(content), + }); + } +} + +pub type OptionalClient<'a> = Option<&'a mut ClientSession>; + +impl MessageClient for OptionalClient<'_> { + fn finish_ok>(&mut self, message: T) { + match self { + None => info!("{}", message.into()), + Some(client) => client.finish_ok(message), + } + } + + fn finish_ok_with_content>(&mut self, content: ResponseContent, message: T) { + match self { + None => info!("{}", message.into()), + Some(client) => client.finish_ok_with_content(content, message), + } + } + + fn finish_failure>(&mut self, message: T) { + match self { + None => error!("{}", message.into()), + Some(client) => client.finish_failure(message), + } + } + + fn return_processing>(&mut self, message: T) { + match self { + None => info!("{}", message.into()), + Some(client) => client.return_processing(message), + } + } + + fn return_processing_with_content>( + &mut 
self,
+        message: S,
+        content: ResponseContent,
+    ) {
+        match self {
+            None => info!("{}", message.into()),
+            Some(client) => client.return_processing_with_content(message, content),
+        }
+    }
+}
+
+/// Follow a worker throughout its lifetime (launching, communication, softstop/hardstop)
+#[derive(Debug)]
+pub struct WorkerSession {
+    pub channel: Channel<WorkerRequest, WorkerResponse>,
+    pub id: WorkerId,
+    pub pid: pid_t,
+    pub run_state: RunState,
+    /// meant to send listeners to the worker upon start
+    pub scm_socket: ScmSocket,
+    pub token: Token,
+}
+
+/// The return type of the ready method
+#[derive(Debug)]
+pub enum WorkerResult {
+    NothingToDo,
+    NewResponses(Vec<WorkerResponse>),
+    CloseSession,
+}
+
+impl WorkerSession {
+    pub fn new(
+        mut channel: Channel<WorkerRequest, WorkerResponse>,
+        id: WorkerId,
+        pid: pid_t,
+        token: Token,
+        scm_socket: ScmSocket,
+    ) -> Self {
+        channel.interest = Ready::READABLE | Ready::ERROR | Ready::HUP;
+        Self {
+            channel,
+            id,
+            pid,
+            run_state: RunState::Running,
+            scm_socket,
+            token,
+        }
+    }
+
+    /// queue a request for the worker (the event loop does the send)
+    pub fn send(&mut self, request: &WorkerRequest) {
+        trace!("Sending to worker: {:?}", request);
+        if let Err(e) = self.channel.write_message(request) {
+            error!("Could not send request to worker: {}", e);
+            self.channel.readiness = Ready::ERROR;
+            return;
+        }
+        self.channel.interest.insert(Ready::WRITABLE);
+    }
+
+    pub fn update_readiness(&mut self, events: Ready) {
+        self.channel.handle_events(events);
+    }
+
+    /// drive the channel read and write
+    pub fn ready(&mut self) -> WorkerResult {
+        let status = self.channel.writable();
+        trace!("Worker writable: {:?}", status);
+        let responses = extract_messages(&mut self.channel);
+        if !responses.is_empty() {
+            return WorkerResult::NewResponses(responses);
+        }
+
+        if self.channel.readiness.is_error() || self.channel.readiness.is_hup() {
+            debug!("worker {} is unresponsive, closing the session", self.id);
+            return WorkerResult::CloseSession;
+        }
+
+        WorkerResult::NothingToDo
+    }
+
+    /// get the run state of the worker (defaults to NotAnswering)
+    pub fn querying_info(&self) -> WorkerInfo {
+        let run_state = match self.run_state {
+            RunState::Stopping => RunState::Stopping,
+            RunState::Stopped => RunState::Stopped,
+            RunState::Running | RunState::NotAnswering => RunState::NotAnswering,
+        };
+        WorkerInfo {
+            id: self.id,
+            pid: self.pid,
+            run_state: run_state as i32,
+        }
+    }
+
+    pub fn is_active(&self) -> bool {
+        self.run_state != RunState::Stopping && self.run_state != RunState::Stopped
+    }
+}
+
+/// read and parse messages (Requests or Responses) from the channel
+pub fn extract_messages<Tx, Rx>(channel: &mut Channel<Tx, Rx>) -> Vec<Rx>
+where
+    Tx: Debug + Serialize,
+    Rx: Debug + DeserializeOwned,
+{
+    let mut messages = Vec::new();
+    loop {
+        let status = channel.readable();
+        trace!("Channel readable: {:?}", status);
+        let old_capacity = channel.front_buf.capacity();
+        let message = channel.read_message();
+        match message {
+            Ok(message) => messages.push(message),
+            Err(_) => {
+                if old_capacity == channel.front_buf.capacity() {
+                    return messages;
+                }
+            }
+        }
+    }
+}
+
+/// used by the event loop to know whether to call ready on a session,
+/// given the state of its channel
+pub fn wants_to_tick<Tx, Rx>(channel: &Channel<Tx, Rx>) -> bool {
+    (channel.readiness.is_writable() && channel.back_buf.available_data() > 0)
+        || (channel.readiness.is_hup() || channel.readiness.is_error())
+}
diff --git a/bin/src/command/upgrade.rs b/bin/src/command/upgrade.rs
new file mode 100644
index 000000000..d3ca66cd7
--- /dev/null
+++ b/bin/src/command/upgrade.rs
@@ -0,0 +1,350 @@ +use std::os::fd::AsRawFd; + +use libc::pid_t; +use mio::Token; +use serde::{Deserialize, Serialize}; + +use sozu_command_lib::{ + config::Config, + proto::command::{ + request::RequestType, ResponseStatus, ReturnListenSockets, RunState, SoftStop, + }, + response::WorkerResponse, + state::ConfigState, +}; + +use crate::{ + command::{ + server::{ + ClientId, Gatherer, GatheringTask, MessageClient, Server, ServerState, SessionId, + TaskId, Timeout, WorkerId, + }, + sessions::{ClientSession, OptionalClient}, + }, + upgrade::{fork_main_into_new_main, UpgradeError}, + util::disable_close_on_exec, +}; + +use super::sessions::WorkerSession; + +#[derive(Debug)] +enum UpgradeWorkerProgress { + /// 1. request listeners from the old worker + /// 2. store listeners to pass them to new worker, + RequestingListenSockets { + old_worker_token: Token, + old_worker_id: WorkerId, + }, + /// 3. soft stop the old worker + /// 4. activate the listeners of the new worker + StopOldActivateNew { + old_worker_id: WorkerId, + new_worker_id: WorkerId, + }, +} + +#[derive(Debug)] +struct UpgradeWorkerTask { + pub client_token: Token, + progress: UpgradeWorkerProgress, + + ok: usize, + errors: usize, + responses: Vec<(WorkerId, WorkerResponse)>, + expected_responses: usize, +} + +pub fn upgrade_worker(server: &mut Server, client: &mut ClientSession, old_worker_id: WorkerId) { + info!( + "client[{:?}] msg wants to upgrade worker {}", + client.token, old_worker_id + ); + + let old_worker_token = match server.get_active_worker_by_id(old_worker_id) { + Some(session) => session.token, + None => { + client.finish_failure(format!( + "Worker {} does not exist, or is stopping / stopped", + old_worker_id + )); + return; + } + }; + + client.return_processing(format!( + "Requesting listen sockets from worker {old_worker_id}" + )); + server.scatter( + RequestType::ReturnListenSockets(ReturnListenSockets {}).into(), + Box::new(UpgradeWorkerTask { + client_token: client.token, + progress: UpgradeWorkerProgress::RequestingListenSockets { + old_worker_token, + old_worker_id, + }, + ok: 0, + errors: 0, + responses: Vec::new(), + expected_responses: 0, + }), + Timeout::Default, + Some(old_worker_id), + ); +} + +impl UpgradeWorkerTask { + fn receive_listen_sockets( + self, + server: &mut Server, + client: &mut OptionalClient, + old_worker_token: Token, + old_worker_id: WorkerId, + ) { + let old_worker = match server.workers.get_mut(&old_worker_token) { + Some(old_worker) => old_worker, + None => { + client.finish_failure(format!("Worker {old_worker_id} died while upgrading, it should be restarted automatically")); + return; + } + }; + let old_worker_id = old_worker.id; + + match old_worker.scm_socket.set_blocking(true) { + Ok(_) => {} + Err(error) => { + client.finish_failure(format!("Could not set SCM sockets to blocking: {error:?}")); + return; + } + } + + let listeners = match old_worker.scm_socket.receive_listeners() { + Ok(listeners) => listeners, + Err(_) => { + client.finish_failure( + "Could not upgrade worker: did not get back listeners from the old worker", + ); + return; + } + }; + + old_worker.run_state = RunState::Stopping; + + // lauch new worker + let new_worker = match server.launch_new_worker(Some(listeners)) { + Ok(worker) => worker, + Err(worker_err) => { + return client.finish_failure(format!("could not launch new worker: {worker_err}")) + } + }; + client.return_processing(format!("Launched a new worker with id {}", new_worker.id)); + let new_worker_id = new_worker.id; + + let finish_task = 
server.new_task( + Box::new(UpgradeWorkerTask { + client_token: self.client_token, + progress: UpgradeWorkerProgress::StopOldActivateNew { + old_worker_id, + new_worker_id, + }, + + ok: 0, + errors: 0, + responses: Vec::new(), + expected_responses: 0, + }), + Timeout::None, + ); + + // Stop the old worker + client.return_processing(format!("Soft stopping worker with id {}", old_worker_id)); + server.scatter_on( + RequestType::SoftStop(SoftStop {}).into(), + finish_task, + 0, + Some(old_worker_id), + ); + + // activate new worker + for (count, request) in server + .state + .generate_activate_requests() + .into_iter() + .enumerate() + { + server.scatter_on(request, finish_task, count + 1, Some(new_worker_id)); + } + } +} + +impl GatheringTask for UpgradeWorkerTask { + fn client_token(&self) -> Option { + Some(self.client_token) + } + + fn get_gatherer(&mut self) -> &mut dyn super::server::Gatherer { + self + } + + fn on_finish( + self: Box, + server: &mut Server, + client: &mut OptionalClient, + _timed_out: bool, + ) { + match self.progress { + UpgradeWorkerProgress::RequestingListenSockets { + old_worker_token, + old_worker_id, + } => { + if self.ok == 1 { + self.receive_listen_sockets(server, client, old_worker_token, old_worker_id); + } else { + client.finish_failure(format!( + "Could not get listen sockets from old worker:{:?}", + self.responses + )); + } + } + UpgradeWorkerProgress::StopOldActivateNew { + old_worker_id, + new_worker_id, + } => { + client.finish_ok( + format!( + "Upgrade successful:\n- finished soft stop of worker {:?}\n- finished activation of new worker {:?}", + old_worker_id, new_worker_id + ) + ); + } + } + } +} + +impl Gatherer for UpgradeWorkerTask { + fn inc_expected_responses(&mut self, count: usize) { + self.expected_responses += count; + } + + fn has_finished(&self) -> bool { + self.ok + self.errors >= self.expected_responses + } + + fn on_message( + &mut self, + _server: &mut Server, + client: &mut OptionalClient, + worker_id: WorkerId, + message: WorkerResponse, + ) { + match message.status { + ResponseStatus::Ok => { + self.ok += 1; + match self.progress { + UpgradeWorkerProgress::RequestingListenSockets { .. } => {} + UpgradeWorkerProgress::StopOldActivateNew { .. } => { + client.return_processing(format!( + "Worker {} answered OK to {}. {}", + worker_id, message.id, message.message + )) + } + } + } + ResponseStatus::Failure => self.errors += 1, + ResponseStatus::Processing => client.return_processing(format!( + "Worker {} is processing {}. 
+impl Gatherer for UpgradeWorkerTask {
+    fn inc_expected_responses(&mut self, count: usize) {
+        self.expected_responses += count;
+    }
+
+    fn has_finished(&self) -> bool {
+        self.ok + self.errors >= self.expected_responses
+    }
+
+    fn on_message(
+        &mut self,
+        _server: &mut Server,
+        client: &mut OptionalClient,
+        worker_id: WorkerId,
+        message: WorkerResponse,
+    ) {
+        match message.status {
+            ResponseStatus::Ok => {
+                self.ok += 1;
+                match self.progress {
+                    UpgradeWorkerProgress::RequestingListenSockets { .. } => {}
+                    UpgradeWorkerProgress::StopOldActivateNew { .. } => {
+                        client.return_processing(format!(
+                            "Worker {} answered OK to {}. {}",
+                            worker_id, message.id, message.message
+                        ))
+                    }
+                }
+            }
+            ResponseStatus::Failure => self.errors += 1,
+            ResponseStatus::Processing => client.return_processing(format!(
+                "Worker {} is processing {}. {}",
+                worker_id, message.id, message.message
+            )),
+        }
+        self.responses.push((worker_id, message));
+    }
+}
+
+//===============================================
+// Upgrade the main process
+
+/// Summary of a worker session, meant to be passed to a new main process
+/// during an upgrade, in order to recreate the worker
+#[derive(Deserialize, Serialize, Debug)]
+pub struct SerializedWorkerSession {
+    /// file descriptor of the UNIX channel
+    pub channel_fd: i32,
+    pub pid: pid_t,
+    pub id: WorkerId,
+    pub run_state: RunState,
+    /// file descriptor of the SCM socket
+    pub scm_fd: i32,
+}
+
+impl TryFrom<&WorkerSession> for SerializedWorkerSession {
+    type Error = UpgradeError;
+
+    fn try_from(worker: &WorkerSession) -> Result<Self, Self::Error> {
+        disable_close_on_exec(worker.channel.fd()).map_err(|util_err| {
+            UpgradeError::DisableCloexec {
+                fd_name: format!("main-to-worker-{}-channel", worker.id),
+                util_err,
+            }
+        })?;
+
+        Ok(Self {
+            channel_fd: worker.channel.sock.as_raw_fd(),
+            pid: worker.pid,
+            id: worker.id,
+            run_state: worker.run_state,
+            scm_fd: worker.scm_socket.raw_fd(),
+        })
+    }
+}
+
+#[derive(Deserialize, Serialize, Debug)]
+pub struct UpgradeData {
+    /// file descriptor of the unix command socket
+    pub command_socket_fd: i32,
+    pub config: Config,
+    pub next_client_id: ClientId,
+    pub next_session_id: SessionId,
+    pub next_task_id: TaskId,
+    pub next_worker_id: WorkerId,
+    /// JSON serialized workers
+    pub workers: Vec<SerializedWorkerSession>,
+    pub state: ConfigState,
+}
+
+pub fn upgrade_main(server: &mut Server, client: &mut ClientSession) {
+    if let Err(err) = server.disable_cloexec_before_upgrade() {
+        client.finish_failure(err.to_string());
+    }
+
+    client.return_processing("Upgrading the main process...");
+
+    let upgrade_data = server.generate_upgrade_data();
+
+    let (new_main_pid, mut fork_confirmation_channel) =
+        match fork_main_into_new_main(server.executable_path.clone(), upgrade_data) {
+            Ok(tuple) => tuple,
+            Err(fork_error) => {
+                client.finish_failure(format!(
+                    "Could not start a new main process by forking: {}",
+                    fork_error
+                ));
+                return;
+            }
+        };
+
+    let received_ok_from_new_process = fork_confirmation_channel.read_message().unwrap_or(false);
+
+    debug!(
+        "new main process sent a fork confirmation: {:?}",
+        received_ok_from_new_process
+    );
+
+    if !received_ok_from_new_process {
+        client.finish_failure("Upgrade of main process failed: no feedback from the new main");
+    } else {
+        client.finish_ok(format!(
+            "Upgrade successful, closing main process. 
New main process has pid {}", + new_main_pid + )); + server.run_state = ServerState::Stopping; + } +} diff --git a/bin/src/ctl/command.rs b/bin/src/ctl/command.rs index 518eea513..59e0b07a0 100644 --- a/bin/src/ctl/command.rs +++ b/bin/src/ctl/command.rs @@ -1,62 +1,45 @@ -use anyhow::{self, bail, Context}; - -use sozu_command_lib::proto::command::{ - request::RequestType, response_content::ContentType, ListWorkers, QueryMetricsOptions, Request, - Response, ResponseContent, ResponseStatus, RunState, UpgradeMain, +use std::time::Duration; + +use sozu_command_lib::{ + logging::setup_logging_with_config, + proto::command::{ + request::RequestType, response_content::ContentType, ListWorkers, QueryMetricsOptions, + Request, Response, ResponseContent, ResponseStatus, UpgradeMain, + }, }; -use crate::ctl::{create_channel, CommandManager}; +use crate::ctl::{create_channel, CommandManager, CtlError}; impl CommandManager { - fn write_request_on_channel(&mut self, request: Request) -> anyhow::Result<()> { + fn write_request_on_channel(&mut self, request: Request) -> Result<(), CtlError> { self.channel .write_message(&request) - .with_context(|| "Could not write the request") + .map_err(CtlError::WriteRequest) } - fn read_channel_message_with_timeout(&mut self) -> anyhow::Result { + fn read_channel_message_with_timeout(&mut self) -> Result { self.channel .read_message_blocking_timeout(Some(self.timeout)) - .with_context(|| "Command timeout. The proxy didn't send an answer") + .map_err(CtlError::ReadBlocking) } - pub fn send_request(&mut self, request: Request) -> Result<(), anyhow::Error> { + fn send_request_get_response( + &mut self, + request: Request, + timeout: bool, + ) -> Result { self.channel .write_message(&request) - .with_context(|| "Could not write the request")?; - - loop { - let response = self.read_channel_message_with_timeout()?; - - match response.status() { - ResponseStatus::Processing => { - if !self.json { - debug!("Proxy is processing: {}", response.message); - } - } - ResponseStatus::Failure => bail!("Request failed: {}", response.message), - ResponseStatus::Ok => { - if !self.json { - info!("{}", response.message); - } - response.display(self.json)?; - break; - } - } - } - Ok(()) - } - - // 1. Request a list of workers - // 2. Send an UpgradeMain - // 3. Send an UpgradeWorker to each worker - pub fn upgrade_main(&mut self) -> Result<(), anyhow::Error> { - info!("Preparing to upgrade proxy..."); - - self.write_request_on_channel(RequestType::ListWorkers(ListWorkers {}).into())?; + .map_err(CtlError::WriteRequest)?; loop { - let response = self.read_channel_message_with_timeout()?; + let response = if timeout { + self.read_channel_message_with_timeout()? + } else { + self.channel + .read_message_blocking_timeout(None) + .map_err(CtlError::ReadBlocking)? 
+ }; match response.status() { ResponseStatus::Processing => { @@ -64,109 +47,28 @@ impl CommandManager { debug!("Processing: {}", response.message); } } - ResponseStatus::Failure => { - bail!( - "Error: failed to get the list of worker: {}", - response.message - ); - } - ResponseStatus::Ok => { - if let Some(ResponseContent { - content_type: Some(ContentType::Workers(ref worker_infos)), - }) = response.content - { - // display worker status - response.display(false)?; - - self.write_request_on_channel( - RequestType::UpgradeMain(UpgradeMain {}).into(), - )?; - - info!("Upgrading main process"); - - loop { - let response = self.read_channel_message_with_timeout()?; - - match response.status() { - ResponseStatus::Processing => { - debug!("Main process is upgrading"); - } - ResponseStatus::Failure => { - bail!( - "Error: failed to upgrade the main: {}", - response.message - ); - } - ResponseStatus::Ok => { - info!("Main process upgrade succeeded: {}", response.message); - break; - } - } - } - - // Reconnect to the new main - info!("Reconnecting to new main process..."); - self.channel = create_channel(&self.config) - .with_context(|| "could not reconnect to the command unix socket")?; - - // Do a rolling restart of the workers - let running_workers = worker_infos - .vec - .iter() - .filter(|worker| worker.run_state == RunState::Running as i32) - .collect::>(); - let running_count = running_workers.len(); - for (i, worker) in running_workers.iter().enumerate() { - info!( - "Upgrading worker {} (#{} out of {})", - worker.id, - i + 1, - running_count - ); - - self.upgrade_worker(worker.id) - .with_context(|| "Upgrading the worker failed")?; - //thread::sleep(Duration::from_millis(1000)); - } - - info!("Proxy successfully upgraded!"); - } else { - info!("Received a response of the wrong kind: {:?}", response); - } - break; - } + ResponseStatus::Failure => return Err(CtlError::Failure(response.message)), + ResponseStatus::Ok => return Ok(response), } } - Ok(()) } - pub fn upgrade_worker(&mut self, worker_id: u32) -> Result<(), anyhow::Error> { - trace!("upgrading worker {}", worker_id); - - //FIXME: we should be able to soft stop one specific worker - self.write_request_on_channel(RequestType::UpgradeWorker(worker_id).into())?; + fn send_request_display_response( + &mut self, + request: Request, + timeout: bool, + ) -> Result<(), CtlError> { + self.send_request_get_response(request, timeout)? 
+ .display(self.json) + .map_err(CtlError::Display) + } - loop { - let response = self.read_channel_message_with_timeout()?; + pub fn send_request(&mut self, request: Request) -> Result<(), CtlError> { + self.send_request_display_response(request, true) + } - match response.status() { - ResponseStatus::Processing => { - if !self.json { - info!("Proxy is processing: {}", response.message); - } - } - ResponseStatus::Failure => bail!( - "could not stop the worker {}: {}", - worker_id, - response.message - ), - ResponseStatus::Ok => { - info!("Success: {}", response.message); - break; - } - } - } - Ok(()) + pub fn send_request_no_timeout(&mut self, request: Request) -> Result<(), CtlError> { + self.send_request_display_response(request, false) } pub fn get_metrics( @@ -177,7 +79,7 @@ impl CommandManager { cluster_ids: Vec, backend_ids: Vec, no_clusters: bool, - ) -> Result<(), anyhow::Error> { + ) -> Result<(), CtlError> { let request: Request = RequestType::QueryMetrics(QueryMetricsOptions { list, cluster_ids, @@ -200,11 +102,11 @@ impl CommandManager { match response.status() { ResponseStatus::Processing => { if !self.json { - debug!("Proxy is processing: {}", response.message); + debug!("Processing: {}", response.message); } } ResponseStatus::Failure | ResponseStatus::Ok => { - response.display(self.json)?; + response.display(self.json).map_err(CtlError::Display)?; break; } } @@ -224,4 +126,72 @@ impl CommandManager { Ok(()) } + + pub fn upgrade_main(&mut self) -> Result<(), CtlError> { + debug!("updating main process"); + self.send_request(RequestType::UpgradeMain(UpgradeMain {}).into())?; + + info!("recreating a channel to reconnect with the new main process..."); + self.channel = create_channel(&self.config)?; + + info!("requesting the list of workers from the new main"); + let response = + self.send_request_get_response(RequestType::ListWorkers(ListWorkers {}).into(), true)?; + + let workers = match response.content { + Some(ResponseContent { + content_type: Some(ContentType::Workers(worker_infos)), + }) => worker_infos, + _ => return Err(CtlError::WrongResponse(response)), + }; + + info!("About to upgrade these workers: {:?}", workers); + + let mut upgrade_jobs = Vec::new(); + + for worker in workers.vec { + info!("trying to upgrade worker {}", worker.id); + let config = self.config.clone(); + + upgrade_jobs.push(std::thread::spawn(move || { + setup_logging_with_config(&config, &format!("UPGRADE-WRK-{}", worker.id)); + + info!("creating channel to upgrade worker {}", worker.id); + let channel = match create_channel(&config) { + Ok(channel) => channel, + Err(e) => { + error!( + "could not create channel to worker {}, this is critical: {}", + worker.id, e + ); + return; + } + }; + + info!("created channel to upgrade worker {}", worker.id); + + let mut command_manager = CommandManager { + channel, + timeout: Duration::from_secs(60), // overriden by upgrade_timeout anyway + config, + json: false, + }; + + match command_manager.upgrade_worker(worker.id) { + Ok(()) => info!("successfully upgraded worker {}", worker.id), + Err(e) => error!("error upgrading worker {}: {}", worker.id, e), + } + })); + } + + for job in upgrade_jobs { + if let Err(e) = job.join() { + error!("an upgrading job panicked: {:?}", e) + } + } + + info!("Finished upgrading"); + + Ok(()) + } } diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index ce360fa4e..22a6c9ff6 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -1,20 +1,61 @@ +mod command; +mod request_builder; + use std::time::Duration; -use 
anyhow::Context; use sozu_command_lib::{ - channel::Channel, - config::Config, + certificate::CertificateError, + channel::{Channel, ChannelError}, + config::{Config, ConfigError}, logging::setup_logging_with_config, - proto::command::{Request, Response}, + proto::{ + command::{Request, Response}, + DisplayError, + }, }; use crate::{ cli::{self, *}, - get_config_file_path, load_configuration, + util::{get_config_file_path, UtilError}, }; -mod command; -mod request_builder; +#[derive(thiserror::Error, Debug)] +pub enum CtlError { + #[error("failed to get config: {0}")] + GetConfig(UtilError), + #[error("failed to load config: {0}")] + LoadConfig(ConfigError), + #[error("could not create channel to Sōzu. Are you sure the proxy is up?: {0}")] + CreateChannel(ChannelError), + #[error("failed to find the path of the command socket: {0}")] + GetCommandSocketPath(ConfigError), + #[error("failed to block channel to Sōzu: {0}")] + BlockChannel(ChannelError), + #[error("could not display response: {0}")] + Display(DisplayError), + #[error("could not read message on a blocking channel: {0}")] + ReadBlocking(ChannelError), + #[error("Request failed: {0}")] + Failure(String), + #[error("could not write request on channel: {0}")] + WriteRequest(ChannelError), + #[error("could not get certificate fingerprint")] + GetFingerprint(CertificateError), + #[error("could not decode fingerprint")] + DecodeFingerprint(CertificateError), + #[error("Please provide either one, {0} OR {1}")] + ArgsNeeded(String, String), + #[error("could not load certificate")] + LoadCertificate(CertificateError), + #[error("wrong address {0}: {1}")] + WrongAddress(String, UtilError), + #[error("wrong input to create listener")] + CreateListener(ConfigError), + #[error("domain can not be empty")] + NeedClusterDomain, + #[error("wrong response from Sōzu: {0:?}")] + WrongResponse(Response), +} pub struct CommandManager { channel: Channel, @@ -24,11 +65,15 @@ pub struct CommandManager { json: bool, } -pub fn ctl(args: cli::Args) -> anyhow::Result<()> { - let config_file_path = get_config_file_path(&args)?; - let config = load_configuration(config_file_path)?; +pub fn ctl(args: cli::Args) -> Result<(), CtlError> { + let config_path = get_config_file_path(&args).map_err(CtlError::GetConfig)?; - setup_logging_with_config(&config, "CTL"); + let config = Config::load_from_path(config_path).map_err(CtlError::LoadConfig)?; + + // prevent logging for json responses for a clean output + if !args.json { + setup_logging_with_config(&config, "CTL"); + } // If the command is `config check` then exit because if we are here, the configuration is valid if let SubCmd::Config { @@ -39,9 +84,7 @@ pub fn ctl(args: cli::Args) -> anyhow::Result<()> { std::process::exit(0); } - let channel = create_channel(&config).with_context(|| { - "could not connect to the command unix socket. Are you sure the proxy is up?" 
- })?; + let channel = create_channel(&config)?; let timeout = Duration::from_millis(args.timeout.unwrap_or(config.ctl_command_timeout)); if !args.json { @@ -59,7 +102,7 @@ pub fn ctl(args: cli::Args) -> anyhow::Result<()> { } impl CommandManager { - fn handle_command(&mut self, command: SubCmd) -> anyhow::Result<()> { + fn handle_command(&mut self, command: SubCmd) -> Result<(), CtlError> { debug!("Executing command {:?}", command); match command { SubCmd::Shutdown { hard } => { @@ -167,16 +210,18 @@ impl CommandManager { } /// creates a blocking channel -pub fn create_channel(config: &Config) -> anyhow::Result> { +pub fn create_channel(config: &Config) -> Result, CtlError> { + let command_socket_path = &config + .command_socket_path() + .map_err(CtlError::GetCommandSocketPath)?; + let mut channel = Channel::from_path( - &config.command_socket_path()?, + command_socket_path, config.command_buffer_size, config.max_command_buffer_size, ) - .with_context(|| "Could not create Channel from the given path")?; + .map_err(CtlError::CreateChannel)?; - channel - .blocking() - .with_context(|| "Could not block the channel used to communicate with Sōzu")?; + channel.blocking().map_err(CtlError::BlockChannel)?; Ok(channel) } diff --git a/bin/src/ctl/request_builder.rs b/bin/src/ctl/request_builder.rs index ce0ff3ec1..6389ab625 100644 --- a/bin/src/ctl/request_builder.rs +++ b/bin/src/ctl/request_builder.rs @@ -1,7 +1,5 @@ use std::collections::BTreeMap; -use anyhow::{bail, Context}; - use sozu_command_lib::{ certificate::{ decode_fingerprint, get_fingerprint_from_certificate_path, load_full_certificate, @@ -23,57 +21,60 @@ use crate::{ MetricsCmd, TcpFrontendCmd, TcpListenerCmd, }, ctl::CommandManager, + util::parse_socket_address, }; +use super::CtlError; + impl CommandManager { - pub fn save_state(&mut self, path: String) -> anyhow::Result<()> { + pub fn save_state(&mut self, path: String) -> Result<(), CtlError> { debug!("Saving the state to file {}", path); self.send_request(RequestType::SaveState(path).into()) } - pub fn load_state(&mut self, path: String) -> anyhow::Result<()> { + pub fn load_state(&mut self, path: String) -> Result<(), CtlError> { debug!("Loading the state on path {}", path); self.send_request(RequestType::LoadState(path).into()) } - pub fn count_requests(&mut self) -> anyhow::Result<()> { + pub fn count_requests(&mut self) -> Result<(), CtlError> { self.send_request(RequestType::CountRequests(CountRequests {}).into()) } - pub fn soft_stop(&mut self) -> anyhow::Result<()> { + pub fn soft_stop(&mut self) -> Result<(), CtlError> { debug!("shutting down proxy softly"); self.send_request(RequestType::SoftStop(SoftStop {}).into()) } - pub fn hard_stop(&mut self) -> anyhow::Result<()> { + pub fn hard_stop(&mut self) -> Result<(), CtlError> { debug!("shutting down proxy the hard way"); self.send_request(RequestType::HardStop(HardStop {}).into()) } - pub fn status(&mut self) -> anyhow::Result<()> { + pub fn status(&mut self) -> Result<(), CtlError> { debug!("Requesting status…"); self.send_request(RequestType::Status(Status {}).into()) } - pub fn configure_metrics(&mut self, cmd: MetricsCmd) -> anyhow::Result<()> { + pub fn configure_metrics(&mut self, cmd: MetricsCmd) -> Result<(), CtlError> { debug!("Configuring metrics: {:?}", cmd); let configuration = match cmd { MetricsCmd::Enable => MetricsConfiguration::Enabled, MetricsCmd::Disable => MetricsConfiguration::Disabled, MetricsCmd::Clear => MetricsConfiguration::Clear, - _ => bail!("The command passed to the configure_metrics 
function is wrong."), + _ => return Ok(()), // completely unlikely }; self.send_request(RequestType::ConfigureMetrics(configuration as i32).into()) } - pub fn reload_configuration(&mut self, path: Option) -> anyhow::Result<()> { + pub fn reload_configuration(&mut self, path: Option) -> Result<(), CtlError> { debug!("Reloading configuration…"); let path = match path { Some(p) => p, @@ -88,7 +89,7 @@ impl CommandManager { https: bool, tcp: bool, domain: Option, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { debug!("Listing frontends"); self.send_request( @@ -102,11 +103,11 @@ impl CommandManager { ) } - pub fn events(&mut self) -> anyhow::Result<()> { - self.send_request(RequestType::SubscribeEvents(SubscribeEvents {}).into()) + pub fn events(&mut self) -> Result<(), CtlError> { + self.send_request_no_timeout(RequestType::SubscribeEvents(SubscribeEvents {}).into()) } - pub fn backend_command(&mut self, cmd: BackendCmd) -> anyhow::Result<()> { + pub fn backend_command(&mut self, cmd: BackendCmd) -> Result<(), CtlError> { match cmd { BackendCmd::Add { id, @@ -140,7 +141,7 @@ impl CommandManager { } } - pub fn cluster_command(&mut self, cmd: ClusterCmd) -> anyhow::Result<()> { + pub fn cluster_command(&mut self, cmd: ClusterCmd) -> Result<(), CtlError> { match cmd { ClusterCmd::Add { id, @@ -174,7 +175,10 @@ impl CommandManager { domain, } => { if cluster_id.is_some() && domain.is_some() { - bail!("Error: Either request an cluster ID or a domain name"); + return Err(CtlError::ArgsNeeded( + "a cluster id".to_string(), + "a domain name".to_string(), + )); } let request = if let Some(ref cluster_id) = cluster_id { @@ -184,14 +188,11 @@ impl CommandManager { domain.splitn(2, '/').map(|elem| elem.to_string()).collect(); if splitted.is_empty() { - bail!("Domain can't be empty"); + return Err(CtlError::NeedClusterDomain)?; } let query_domain = QueryClusterByDomain { - hostname: splitted - .get(0) - .with_context(|| "Domain can't be empty")? 
- .clone(), + hostname: splitted.get(0).ok_or(CtlError::NeedClusterDomain)?.clone(), path: splitted.get(1).cloned().map(|path| format!("/{path}")), // We add the / again because of the splitn removing it }; @@ -205,7 +206,7 @@ impl CommandManager { } } - pub fn tcp_frontend_command(&mut self, cmd: TcpFrontendCmd) -> anyhow::Result<()> { + pub fn tcp_frontend_command(&mut self, cmd: TcpFrontendCmd) -> Result<(), CtlError> { match cmd { TcpFrontendCmd::Add { id, address, tags } => self.send_request( RequestType::AddTcpFrontend(RequestTcpFrontend { @@ -226,7 +227,7 @@ impl CommandManager { } } - pub fn http_frontend_command(&mut self, cmd: HttpFrontendCmd) -> anyhow::Result<()> { + pub fn http_frontend_command(&mut self, cmd: HttpFrontendCmd) -> Result<(), CtlError> { match cmd { HttpFrontendCmd::Add { hostname, @@ -274,7 +275,7 @@ impl CommandManager { } } - pub fn https_frontend_command(&mut self, cmd: HttpFrontendCmd) -> anyhow::Result<()> { + pub fn https_frontend_command(&mut self, cmd: HttpFrontendCmd) -> Result<(), CtlError> { match cmd { HttpFrontendCmd::Add { hostname, @@ -322,7 +323,7 @@ impl CommandManager { } } - pub fn https_listener_command(&mut self, cmd: HttpsListenerCmd) -> anyhow::Result<()> { + pub fn https_listener_command(&mut self, cmd: HttpsListenerCmd) -> Result<(), CtlError> { match cmd { HttpsListenerCmd::Add { address, @@ -351,7 +352,7 @@ impl CommandManager { .with_request_timeout(request_timeout) .with_connect_timeout(connect_timeout) .to_tls(Some(&self.config)) - .with_context(|| "Error creating HTTPS listener")?; + .map_err(CtlError::CreateListener)?; self.send_request(RequestType::AddHttpsListener(https_listener).into()) } @@ -367,7 +368,7 @@ impl CommandManager { } } - pub fn http_listener_command(&mut self, cmd: HttpListenerCmd) -> anyhow::Result<()> { + pub fn http_listener_command(&mut self, cmd: HttpListenerCmd) -> Result<(), CtlError> { match cmd { HttpListenerCmd::Add { address, @@ -392,7 +393,8 @@ impl CommandManager { .with_back_timeout(back_timeout) .with_connect_timeout(connect_timeout) .to_http(Some(&self.config)) - .with_context(|| "Error creating HTTP listener")?; + .map_err(CtlError::CreateListener)?; + self.send_request(RequestType::AddHttpListener(http_listener).into()) } HttpListenerCmd::Remove { address } => { @@ -407,7 +409,7 @@ impl CommandManager { } } - pub fn tcp_listener_command(&mut self, cmd: TcpListenerCmd) -> anyhow::Result<()> { + pub fn tcp_listener_command(&mut self, cmd: TcpListenerCmd) -> Result<(), CtlError> { match cmd { TcpListenerCmd::Add { address, @@ -418,7 +420,7 @@ impl CommandManager { .with_public_address(public_address) .with_expect_proxy(expect_proxy) .to_tcp(Some(&self.config)) - .with_context(|| "Could not create TCP listener")?; + .map_err(CtlError::CreateListener)?; self.send_request(RequestType::AddTcpListener(listener).into()) } @@ -434,7 +436,7 @@ impl CommandManager { } } - pub fn list_listeners(&mut self) -> anyhow::Result<()> { + pub fn list_listeners(&mut self) -> Result<(), CtlError> { self.send_request(RequestType::ListListeners(ListListeners {}).into()) } @@ -442,10 +444,13 @@ impl CommandManager { &mut self, address: String, listener_type: ListenerType, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { + let address = parse_socket_address(&address) + .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; + self.send_request( RequestType::RemoveListener(RemoveListener { - address: address.parse().with_context(|| "wrong socket address")?, + address: address.to_string(), proxy: 
listener_type.into(), }) .into(), @@ -456,10 +461,13 @@ impl CommandManager { &mut self, address: String, listener_type: ListenerType, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { + let address = parse_socket_address(&address) + .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; + self.send_request( RequestType::ActivateListener(ActivateListener { - address: address.parse().with_context(|| "wrong socket address")?, + address: address.to_string(), proxy: listener_type.into(), from_scm: false, }) @@ -471,10 +479,13 @@ impl CommandManager { &mut self, address: String, listener_type: ListenerType, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { + let address = parse_socket_address(&address) + .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; + self.send_request( RequestType::DeactivateListener(DeactivateListener { - address: address.parse().with_context(|| "wrong socket address")?, + address: address.to_string(), proxy: listener_type.into(), to_scm: false, }) @@ -482,7 +493,7 @@ impl CommandManager { ) } - pub fn logging_filter(&mut self, filter: &LoggingLevel) -> anyhow::Result<()> { + pub fn logging_filter(&mut self, filter: &LoggingLevel) -> Result<(), CtlError> { self.send_request(RequestType::Logging(filter.to_string().to_lowercase()).into()) } @@ -493,7 +504,7 @@ impl CommandManager { certificate_chain_path: &str, key_path: &str, versions: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { let new_certificate = load_full_certificate( certificate_path, certificate_chain_path, @@ -501,7 +512,7 @@ impl CommandManager { versions, vec![], ) - .with_context(|| "Could not load the full certificate")?; + .map_err(CtlError::LoadCertificate)?; self.send_request( RequestType::AddCertificate(AddCertificate { @@ -523,18 +534,21 @@ impl CommandManager { old_certificate_path: Option<&str>, old_fingerprint: Option<&str>, versions: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { let old_fingerprint = match (old_certificate_path, old_fingerprint) { (None, None) | (Some(_), Some(_)) => { - bail!("Error: Please provide either one, the old certificate's path OR its fingerprint") + return Err(CtlError::ArgsNeeded( + "the path to the old certificate".to_string(), + "the path to the old fingerprint".to_string(), + )) } (Some(old_certificate_path), None) => { - get_fingerprint_from_certificate_path(old_certificate_path).with_context(|| { - "Could not retrieve the fingerprint from the given certificate path" - })? + get_fingerprint_from_certificate_path(old_certificate_path) + .map_err(CtlError::GetFingerprint)? + } + (None, Some(fingerprint)) => { + decode_fingerprint(fingerprint).map_err(CtlError::DecodeFingerprint)? 
} - (None, Some(fingerprint)) => decode_fingerprint(fingerprint) - .with_context(|| "Error decoding the given fingerprint")?, }; let new_certificate = load_full_certificate( @@ -544,7 +558,7 @@ impl CommandManager { versions, vec![], ) - .with_context(|| "Could not load the full certificate")?; + .map_err(CtlError::LoadCertificate)?; self.send_request( RequestType::ReplaceCertificate(ReplaceCertificate { @@ -564,18 +578,21 @@ impl CommandManager { address: String, certificate_path: Option<&str>, fingerprint: Option<&str>, - ) -> anyhow::Result<()> { + ) -> Result<(), CtlError> { let fingerprint = match (certificate_path, fingerprint) { (None, None) | (Some(_), Some(_)) => { - bail!("Error: Please provide either one, the path OR the fingerprint of the certificate") + return Err(CtlError::ArgsNeeded( + "the path to the certificate".to_string(), + "the fingerprint of the certificate".to_string(), + )) } (Some(certificate_path), None) => { - get_fingerprint_from_certificate_path(certificate_path).with_context(|| { - "Could not retrieve the finger print from the given certificate path" - })? + get_fingerprint_from_certificate_path(certificate_path) + .map_err(CtlError::GetFingerprint)? + } + (None, Some(fingerprint)) => { + decode_fingerprint(fingerprint).map_err(CtlError::DecodeFingerprint)? } - (None, Some(fingerprint)) => decode_fingerprint(fingerprint) - .with_context(|| "Error decoding the given fingerprint")?, }; self.send_request( @@ -592,7 +609,7 @@ impl CommandManager { fingerprint: Option, domain: Option, query_workers: bool, - ) -> Result<(), anyhow::Error> { + ) -> Result<(), CtlError> { let filters = QueryCertificatesFilters { domain, fingerprint, @@ -604,4 +621,9 @@ impl CommandManager { self.send_request(RequestType::QueryCertificatesFromTheState(filters).into()) } } + + pub fn upgrade_worker(&mut self, worker_id: u32) -> Result<(), CtlError> { + debug!("upgrading worker {}", worker_id); + self.send_request(RequestType::UpgradeWorker(worker_id).into()) + } } diff --git a/bin/src/main.rs b/bin/src/main.rs index 77969aa0b..cbbce85ad 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -37,40 +37,48 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; /// the arguments to the sozu command line mod cli; /// Receives orders from the CLI, transmits to workers +// mod command; mod command; /// The command line logic mod ctl; /// Forking & restarting the main process using a more recent executable of Sōzu mod upgrade; /// Some unix helper functions -mod util; +pub mod util; /// Start and restart the worker UNIX processes mod worker; -#[cfg(target_os = "linux")] -use anyhow::bail; -use anyhow::Context; -use cli::Args; +use std::panic; + #[cfg(target_os = "linux")] use libc::{cpu_set_t, pid_t}; -#[cfg(target_os = "linux")] -use regex::Regex; + use sozu::metrics::METRICS; -use sozu_command_lib::{config::Config, logging::setup_logging_with_config}; -use std::panic; -use crate::worker::{get_executable_path, start_workers, Worker}; +use cli::Args; +use command::{begin_main_process, sessions::WorkerSession, StartError}; +use ctl::CtlError; +use upgrade::UpgradeError; +use worker::WorkerError; + +#[derive(thiserror::Error, Debug)] +enum MainError { + #[error("failed to start Sōzu: {0}")] + StartMain(StartError), + #[error("failed to start new worker: {0}")] + BeginWorker(WorkerError), + #[error("failed to start new main process: {0}")] + BeginNewMain(UpgradeError), + #[error("{0}")] + Cli(CtlError), +} #[paw::main] -fn main(args: Args) -> anyhow::Result<()> { +fn main(args: Args) { 
register_panic_hook(); - match args.cmd { - cli::SubCmd::Start => { - start(&args)?; - info!("main process stopped"); - Ok(()) - } + let result = match args.cmd { + cli::SubCmd::Start => begin_main_process(&args).map_err(MainError::StartMain), // this is used only by the CLI when upgrading cli::SubCmd::Worker { fd: worker_to_main_channel_fd, @@ -90,6 +98,7 @@ fn main(args: Args) -> anyhow::Result<()> { command_buffer_size, max_command_buffer_size, ) + .map_err(MainError::BeginWorker) } // this is used only by the CLI when upgrading cli::SubCmd::Main { @@ -106,61 +115,22 @@ fn main(args: Args) -> anyhow::Result<()> { command_buffer_size, max_command_buffer_size, ) + .map_err(MainError::BeginNewMain) } - _ => ctl::ctl(args), - } -} - -fn start(args: &cli::Args) -> Result<(), anyhow::Error> { - let config_file_path = get_config_file_path(args)?; - let config = load_configuration(config_file_path)?; - - setup_logging_with_config(&config, "MAIN"); - info!("Starting up"); - util::setup_metrics(&config).with_context(|| "Could not setup metrics")?; - util::write_pid_file(&config).with_context(|| "PID file is not writeable")?; - - update_process_limits(&config)?; - - let executable_path = - unsafe { get_executable_path().with_context(|| "Could not get executable path")? }; - let workers = - start_workers(executable_path, &config).with_context(|| "Failed at spawning workers")?; - - if config.handle_process_affinity { - set_workers_affinity(&workers); - } - - let command_socket_path = config.command_socket_path()?; - - command::start_server(config, command_socket_path, workers) - .with_context(|| "could not start Sozu")?; - - Ok(()) -} - -pub fn get_config_file_path(args: &cli::Args) -> Result<&str, anyhow::Error> { - match args.config.as_ref() { - Some(config_file) => Ok(config_file.as_str()), - None => option_env!("SOZU_CONFIG").ok_or_else(|| { - anyhow::Error::msg( - "Configuration file hasn't been specified. Either use -c with the start command \ - or use the SOZU_CONFIG environment variable when building sozu.", - ) - }), + _ => ctl::ctl(args).map_err(MainError::Cli), + }; + match result { + Ok(_) => {} + Err(main_error) => println!("{}", main_error), } } -pub fn load_configuration(config_file: &str) -> Result { - Config::load_from_path(config_file).with_context(|| "Invalid configuration file.") -} - /// Set workers process affinity, see man sched_setaffinity /// Bind each worker (including the main) process to a CPU core. /// Can bind multiple processes to a CPU core if there are more processes /// than CPU cores. Only works on Linux. #[cfg(target_os = "linux")] -fn set_workers_affinity(workers: &Vec) { +fn set_workers_affinity(workers: &Vec) { let mut cpu_count = 0; let max_cpu = num_cpus::get(); @@ -210,80 +180,6 @@ fn set_process_affinity(pid: pid_t, cpu: usize) { }; } -#[cfg(target_os = "linux")] -// We check the hard_limit. The soft_limit can be changed at runtime -// by the process or any user. 
hard_limit can only be changed by root -fn update_process_limits(config: &Config) -> Result<(), anyhow::Error> { - let wanted_opened_files = (config.max_connections as u64) * 2; - - // Ensure we don't exceed the system maximum capacity - let f = Config::load_file("/proc/sys/fs/file-max") - .with_context(|| "Couldn't read /proc/sys/fs/file-max")?; - let re_max = Regex::new(r"(\d*)")?; - let system_max_fd = re_max - .captures(&f) - .and_then(|c| c.get(1)) - .and_then(|m| m.as_str().parse::().ok()) - .with_context(|| "Couldn't parse /proc/sys/fs/file-max")?; - if config.max_connections > system_max_fd { - error!( - "Proxies total max_connections can't be higher than system's file-max limit. \ - Current limit: {}, current value: {}", - system_max_fd, config.max_connections - ); - bail!("Too many allowed connections"); - } - - // Get the soft and hard limits for the current process - let mut limits = libc::rlimit { - rlim_cur: 0, - rlim_max: 0, - }; - unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) }; - - // Ensure we don't exceed the hard limit - if limits.rlim_max < wanted_opened_files { - error!( - "at least one worker can't have that many connections. \ - current max file descriptor hard limit is: {}, \ - configured max_connections is {} (the worker needs two file descriptors \ - per client connection)", - limits.rlim_max, config.max_connections - ); - bail!("Too many allowed connection for a worker"); - } - - if limits.rlim_cur < wanted_opened_files && limits.rlim_cur != limits.rlim_max { - // Try to get twice what we need to be safe, or rlim_max if we exceed that - limits.rlim_cur = limits.rlim_max.min(wanted_opened_files * 2); - unsafe { - libc::setrlimit(libc::RLIMIT_NOFILE, &limits); - - // Refresh the data we have - libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits); - } - } - - // Ensure we don't exceed the new soft limit - if limits.rlim_cur < wanted_opened_files { - error!( - "at least one worker can't have that many connections. 
\ - current max file descriptor soft limit is: {}, \ - configured max_connections is {} (the worker needs two file descriptors \ - per client connection)", - limits.rlim_cur, config.max_connections - ); - bail!("Too many allowed connection for a worker"); - } - - Ok(()) -} - -#[cfg(not(target_os = "linux"))] -fn update_process_limits(_: &Config) -> Result<(), anyhow::Error> { - Ok(()) -} - fn register_panic_hook() { // We save the original panic hook so we can call it later // to have the original behavior diff --git a/bin/src/upgrade.rs b/bin/src/upgrade.rs index 1b05a6639..e1ccc1d59 100644 --- a/bin/src/upgrade.rs +++ b/bin/src/upgrade.rs @@ -1,60 +1,74 @@ use std::{ fs::File, - io::Seek, + io::{Error as IoError, Write}, + io::{Read, Seek}, os::unix::io::{AsRawFd, FromRawFd}, os::unix::process::CommandExt, process::Command, }; -use anyhow::{bail, Context}; -use futures_lite::future; use libc::{self, pid_t}; use mio::net::UnixStream; -use nix::unistd::{fork, ForkResult}; -use serde::{Deserialize, Serialize}; - +use nix::{ + errno::Errno, + unistd::{fork, ForkResult}, +}; +use serde_json::Error as SerdeError; use tempfile::tempfile; use sozu_command_lib::{ - channel::Channel, config::Config, logging::setup_logging_with_config, proto::command::RunState, - request::WorkerRequest, state::ConfigState, + channel::{Channel, ChannelError}, + logging::setup_logging_with_config, }; -use crate::{command::CommandServer, util, worker::Worker}; - -#[derive(Deserialize, Serialize, Debug)] -pub struct SerializedWorker { - pub fd: i32, - pub pid: i32, - pub id: u32, - pub run_state: RunState, - pub queue: Vec, - pub scm: i32, -} - -impl SerializedWorker { - pub fn from_worker(worker: &Worker) -> SerializedWorker { - SerializedWorker { - fd: worker.worker_channel_fd, - pid: worker.pid, - id: worker.id, - run_state: worker.run_state, - queue: worker.queue.clone().into(), - scm: worker.scm_socket.raw_fd(), - } - } -} +use crate::{ + command::{ + server::{CommandHub, HubError, ServerError}, + upgrade::UpgradeData, + }, + util::{self, UtilError}, +}; -/// the data needed to start a new main process -#[derive(Deserialize, Serialize, Debug)] -pub struct UpgradeData { - /// file descriptor of the unix command socket - pub command_socket_fd: i32, - pub config: Config, - /// JSON serialized workers - pub workers: Vec, - pub state: ConfigState, - pub next_id: u32, +#[derive(thiserror::Error, Debug)] +pub enum UpgradeError { + #[error("could not create temporary state file for the upgrade: {0}")] + CreateUpgradeFile(IoError), + #[error("could not disable cloexec on {fd_name}'s file descriptor: {util_err}")] + DisableCloexec { + fd_name: String, + util_err: UtilError, + }, + #[error("could not create MIO pair of unix stream: {0}")] + CreateUnixStream(IoError), + #[error("could not rewind the temporary upgrade file: {0}")] + Rewind(IoError), + #[error("could not write upgrade data to temporary file: {0}")] + SerdeWriteError(SerdeError), + #[error("could not write upgrade data to temporary file: {0}")] + WriteFile(IoError), + #[error("could not read upgrade data from file: {0}")] + ReadFile(IoError), + #[error("could not read upgrade data to temporary file: {0}")] + SerdeReadError(SerdeError), + #[error("unix fork failed: {0}")] + Fork(Errno), + #[error("failed to set metrics on the new main process: {0}")] + SetupMetrics(UtilError), + #[error("could not write PID file of new main process: {0}")] + WritePidFile(UtilError), + #[error( + "the channel failed to send confirmation of upgrade {result} to the old main process: 
{channel_err}" + )] + SendConfirmation { + result: String, + channel_err: ChannelError, + }, + #[error("Could not block the fork confirmation channel: {0}. This is not normal, you may need to restart sozu")] + BlockChannel(ChannelError), + #[error("could not create a command hub from the upgrade data: {0}")] + CreateHub(HubError), + #[error("could not enable cloexec after upgrade: {0}")] + EnableCloexec(ServerError), } /// unix-forks the main process @@ -66,23 +80,34 @@ pub struct UpgradeData { pub fn fork_main_into_new_main( executable_path: String, upgrade_data: UpgradeData, -) -> Result<(pid_t, Channel<(), bool>), anyhow::Error> { +) -> Result<(pid_t, Channel<(), bool>), UpgradeError> { trace!("parent({})", unsafe { libc::getpid() }); - let mut upgrade_file = - tempfile().with_context(|| "could not create temporary file for upgrade")?; + let mut upgrade_file = tempfile().map_err(UpgradeError::CreateUpgradeFile)?; - util::disable_close_on_exec(upgrade_file.as_raw_fd())?; + util::disable_close_on_exec(upgrade_file.as_raw_fd()).map_err(|util_err| { + UpgradeError::DisableCloexec { + fd_name: "upgrade-file".to_string(), + util_err, + } + })?; - serde_json::to_writer(&mut upgrade_file, &upgrade_data) - .with_context(|| "could not write upgrade data to temporary file")?; + info!("Writing upgrade data to file"); + let upgrade_data_string = + serde_json::to_string(&upgrade_data).map_err(UpgradeError::SerdeWriteError)?; upgrade_file - .rewind() - .with_context(|| "could not seek to beginning of file")?; + .write_all(upgrade_data_string.as_bytes()) + .map_err(UpgradeError::WriteFile)?; + upgrade_file.rewind().map_err(UpgradeError::Rewind)?; - let (old_to_new, new_to_old) = UnixStream::pair()?; + let (old_to_new, new_to_old) = UnixStream::pair().map_err(UpgradeError::CreateUnixStream)?; - util::disable_close_on_exec(new_to_old.as_raw_fd())?; + util::disable_close_on_exec(new_to_old.as_raw_fd()).map_err(|util_err| { + UpgradeError::DisableCloexec { + fd_name: "new-main-to-old-main-channel".to_string(), + util_err, + } + })?; let mut fork_confirmation_channel: Channel<(), bool> = Channel::new( old_to_new, @@ -90,24 +115,14 @@ pub fn fork_main_into_new_main( upgrade_data.config.max_command_buffer_size, ); - if let Err(e) = fork_confirmation_channel.blocking() { - error!( - "Could not block the fork confirmation channel: {}. This is not normal, you may need to restart sozu", - e - ); - } + fork_confirmation_channel + .blocking() + .map_err(UpgradeError::BlockChannel)?; info!("launching new main"); - match unsafe { fork().with_context(|| "fork failed")? } { + match unsafe { fork().map_err(UpgradeError::Fork)? } { ForkResult::Parent { child } => { - info!("main launched: {}", child); - - if let Err(e) = fork_confirmation_channel.nonblocking() { - error!( - "Could not unblock the fork confirmation channel: {}. 
This is not normal, you may need to restart sozu", - e - ); - } + info!("new main launched, with pid {}", child); Ok((child.into(), fork_confirmation_channel)) } @@ -138,7 +153,7 @@ pub fn begin_new_main_process( upgrade_file_fd: i32, command_buffer_size: usize, max_command_buffer_size: usize, -) -> anyhow::Result<()> { +) -> Result<(), UpgradeError> { let mut fork_confirmation_channel: Channel = Channel::new( unsafe { UnixStream::from_raw_fd(new_to_old_channel_fd) }, command_buffer_size, @@ -150,36 +165,43 @@ pub fn begin_new_main_process( error!("Could not block the fork confirmation channel: {}", e); } - let upgrade_file = unsafe { File::from_raw_fd(upgrade_file_fd) }; + println!("reading upgrade data from file"); + + let mut upgrade_file = unsafe { File::from_raw_fd(upgrade_file_fd) }; + let mut content = String::new(); + let _ = upgrade_file + .read_to_string(&mut content) + .map_err(UpgradeError::ReadFile)?; + let upgrade_data: UpgradeData = - serde_json::from_reader(upgrade_file).with_context(|| "could not parse upgrade data")?; + serde_json::from_str(&content).map_err(UpgradeError::SerdeReadError)?; + let config = upgrade_data.config.clone(); + println!("Setting up logging"); + setup_logging_with_config(&config, "MAIN"); - util::setup_metrics(&config).with_context(|| "Could not setup metrics")?; - //info!("new main got upgrade data: {:?}", upgrade_data); + util::setup_metrics(&config).map_err(UpgradeError::SetupMetrics)?; + + let mut command_hub = + CommandHub::from_upgrade_data(upgrade_data).map_err(UpgradeError::CreateHub)?; + + command_hub + .enable_cloexec_after_upgrade() + .map_err(UpgradeError::EnableCloexec)?; + + util::write_pid_file(&config).map_err(UpgradeError::WritePidFile)?; + + fork_confirmation_channel + .write_message(&true) + .map_err(|channel_err| UpgradeError::SendConfirmation { + result: "success".to_string(), + channel_err, + })?; - let mut server = CommandServer::from_upgrade_data(upgrade_data)?; - server.enable_cloexec_after_upgrade()?; info!("starting new main loop"); - match util::write_pid_file(&config) { - Ok(()) => { - fork_confirmation_channel - .write_message(&true) - .with_context(|| "Could not send confirmation of fork using the channel")?; - future::block_on(async { - server.run().await; - }); - info!("main process stopped"); - Ok(()) - } - Err(e) => { - fork_confirmation_channel - .write_message(&false) - .with_context(|| "Could not send fork failure message using the channel")?; - error!("Couldn't write PID file. 
Error: {:?}", e); - error!("Couldn't upgrade main process"); - bail!("begin_new_main_process() failed"); - } - } + command_hub.run(); + + info!("main process stopped"); + Ok(()) } diff --git a/bin/src/util.rs b/bin/src/util.rs index 1ddde32a7..5f1c09ab3 100644 --- a/bin/src/util.rs +++ b/bin/src/util.rs @@ -1,64 +1,185 @@ -use std::{fs::File, io::Write, os::unix::io::RawFd}; +use std::{ + ffi::OsString, + fs::{read_link, File}, + io::{Error as IoError, Write}, + net::{AddrParseError, SocketAddr}, + os::unix::io::RawFd, + path::PathBuf, +}; + +use nix::{ + errno::Errno, + fcntl::{fcntl, FcntlArg, FdFlag}, +}; +use thiserror; -use anyhow::Context; - -use nix::fcntl::{fcntl, FcntlArg, FdFlag}; - -use sozu::metrics; use sozu_command_lib::config::Config; +use sozu_lib::metrics::{self, MetricError}; + +use crate::cli; + +#[derive(thiserror::Error, Debug)] +pub enum UtilError { + #[error("could not get flags (F_GETFD) on file descriptor {0}: {1}")] + GetFlags(RawFd, Errno), + #[error("could not convert flags for file descriptor {0}")] + ConvertFlags(RawFd), + #[error("could not set flags for file descriptor {0}: {1}")] + SetFlags(RawFd, Errno), + #[error("could not create pid file {0}: {1}")] + CreatePidFile(String, IoError), + #[error("could not write pid file {0}: {1}")] + WritePidFile(String, IoError), + #[error("could not sync pid file {0}: {1}")] + SyncPidFile(String, IoError), + #[error("Failed to convert PathBuf {0} to String: {1:?}")] + OsString(PathBuf, OsString), + #[error("could not read file {0}: {1}")] + Read(String, IoError), + #[error("failed to retrieve current executable path: {0}")] + CurrentExe(IoError), + #[error("could not setup metrics: {0}")] + SetupMetrics(MetricError), + #[error( + "Configuration file hasn't been specified. Either use -c with the start command, + or use the SOZU_CONFIG environment variable when building sozu." 
+ )] + GetConfigFilePath, + #[error("could not parse socket address: {0}")] + ParseSocketAddress(AddrParseError), +} -pub fn enable_close_on_exec(fd: RawFd) -> Result { +/// FD_CLOEXEC is set by default on every fd in Rust standard lib, +/// so we need to remove the flag on the client, otherwise +/// it won't be accessible +pub fn enable_close_on_exec(fd: RawFd) -> Result { let file_descriptor = - fcntl(fd, FcntlArg::F_GETFD).with_context(|| "could not get file descriptor flags")?; + fcntl(fd, FcntlArg::F_GETFD).map_err(|err_no| UtilError::GetFlags(fd, err_no))?; - let mut new_flags = FdFlag::from_bits(file_descriptor) - .ok_or_else(|| anyhow::format_err!("could not convert flags for file descriptor"))?; + let mut new_flags = FdFlag::from_bits(file_descriptor).ok_or(UtilError::ConvertFlags(fd))?; new_flags.insert(FdFlag::FD_CLOEXEC); - fcntl(fd, FcntlArg::F_SETFD(new_flags)).with_context(|| "could not set file descriptor flags") + fcntl(fd, FcntlArg::F_SETFD(new_flags)).map_err(|err_no| UtilError::SetFlags(fd, err_no)) } /// FD_CLOEXEC is set by default on every fd in Rust standard lib, /// so we need to remove the flag on the client, otherwise /// it won't be accessible -pub fn disable_close_on_exec(fd: RawFd) -> Result { +pub fn disable_close_on_exec(fd: RawFd) -> Result { let old_flags = - fcntl(fd, FcntlArg::F_GETFD).with_context(|| "could not get file descriptor flags")?; + fcntl(fd, FcntlArg::F_GETFD).map_err(|err_no| UtilError::GetFlags(fd, err_no))?; - let mut new_flags = FdFlag::from_bits(old_flags) - .ok_or_else(|| anyhow::format_err!("could not convert flags for file descriptor"))?; + let mut new_flags = FdFlag::from_bits(old_flags).ok_or(UtilError::ConvertFlags(fd))?; new_flags.remove(FdFlag::FD_CLOEXEC); - fcntl(fd, FcntlArg::F_SETFD(new_flags)).with_context(|| "could not set file descriptor flags") + fcntl(fd, FcntlArg::F_SETFD(new_flags)).map_err(|err_no| UtilError::SetFlags(fd, err_no)) } -pub fn setup_metrics(config: &Config) -> anyhow::Result<()> { +pub fn setup_metrics(config: &Config) -> Result<(), UtilError> { if let Some(metrics) = config.metrics.as_ref() { - return Ok(metrics::setup( + return metrics::setup( &metrics.address, "MAIN", metrics.tagged_metrics, metrics.prefix.clone(), - )?); + ) + .map_err(UtilError::SetupMetrics); } Ok(()) } -pub fn write_pid_file(config: &Config) -> Result<(), anyhow::Error> { +pub fn write_pid_file(config: &Config) -> Result<(), UtilError> { let pid_file_path: Option<&str> = config .pid_file_path .as_ref() .map(|pid_file_path| pid_file_path.as_ref()); - if let Some(pid_file_path) = pid_file_path { - let mut file = File::create(pid_file_path)?; + if let Some(path) = pid_file_path { + let mut file = File::create(path) + .map_err(|io_err| UtilError::CreatePidFile(path.to_owned(), io_err))?; let pid = unsafe { libc::getpid() }; - file.write_all(format!("{pid}").as_bytes())?; - file.sync_all()?; + file.write_all(format!("{pid}").as_bytes()) + .map_err(|write_err| UtilError::WritePidFile(path.to_owned(), write_err))?; + file.sync_all() + .map_err(|sync_err| UtilError::SyncPidFile(path.to_owned(), sync_err))?; } Ok(()) } + +pub fn get_config_file_path(args: &cli::Args) -> Result<&str, UtilError> { + match args.config.as_ref() { + Some(config_file) => Ok(config_file.as_str()), + None => option_env!("SOZU_CONFIG").ok_or(UtilError::GetConfigFilePath), + } +} + +pub fn parse_socket_address(address: &str) -> Result { + address + .parse::() + .map_err(UtilError::ParseSocketAddress) +} + +#[cfg(target_os = "freebsd")] +pub unsafe fn 
get_executable_path() -> Result { + let mut capacity = PATH_MAX as usize; + let mut path: Vec = Vec::with_capacity(capacity); + path.extend(repeat(0).take(capacity)); + + let mib: Vec = vec![CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1]; + let len = mib.len() * size_of::(); + let element_size = size_of::(); + + let res = sysctl( + mib.as_ptr(), + (len / element_size) as u32, + path.as_mut_ptr() as *mut c_void, + &mut capacity, + std::ptr::null() as *const c_void, + 0, + ); + if res != 0 { + panic!("Could not retrieve the path of the executable"); + } + + Ok(String::from_raw_parts( + path.as_mut_ptr(), + capacity - 1, + path.len(), + )) +} + +#[cfg(target_os = "linux")] +pub unsafe fn get_executable_path() -> Result { + let path = read_link("/proc/self/exe") + .map_err(|io_err| UtilError::Read("/proc/self/exe".to_string(), io_err))?; + + let mut path_str = path + .clone() + .into_os_string() + .into_string() + .map_err(|string_err| UtilError::OsString(path, string_err))?; + + if path_str.ends_with(" (deleted)") { + // The kernel appends " (deleted)" to the symlink when the original executable has been replaced + let len = path_str.len(); + path_str.truncate(len - 10) + } + + Ok(path_str) +} + +#[cfg(target_os = "macos")] +extern "C" { + pub fn _NSGetExecutablePath(buf: *mut c_char, size: *mut u32) -> i32; +} + +#[cfg(target_os = "macos")] +pub unsafe fn get_executable_path() -> Result { + let path = std::env::current_exe().map_err(|io_err| UtilError::CurrentExe(io_err))?; + + Ok(path.to_string_lossy().to_string()) +} diff --git a/bin/src/worker.rs b/bin/src/worker.rs index 2c3a57c27..a403c9897 100644 --- a/bin/src/worker.rs +++ b/bin/src/worker.rs @@ -1,207 +1,81 @@ +#[cfg(target_os = "freebsd")] +use std::{ffi::c_void, iter::repeat, mem::size_of}; use std::{ - collections::VecDeque, - fmt, fs::File, + io::Error as IoError, io::Seek, os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}, os::unix::process::CommandExt, process::Command, }; -#[cfg(target_os = "freebsd")] -use std::{ffi::c_void, iter::repeat, mem::size_of}; -#[cfg(target_os = "linux")] -use anyhow::bail; -use anyhow::Context; -use futures::SinkExt; #[cfg(target_os = "macos")] use libc::c_char; use libc::{self, pid_t}; #[cfg(target_os = "freebsd")] use libc::{sysctl, CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, PATH_MAX}; use mio::net::UnixStream; -use nix::{self, unistd::*}; -use nix::{sys::signal::kill, unistd::Pid}; - +use nix::{ + self, + errno::Errno, + unistd::{fork, ForkResult}, +}; use tempfile::tempfile; -use sozu::{metrics, server::Server}; use sozu_command_lib::{ - channel::Channel, + channel::{Channel, ChannelError}, config::Config, logging::setup_logging_with_config, - proto::command::{request::RequestType, Request, RunState, Status, WorkerInfo}, ready::Ready, - request::{read_requests_from_file, WorkerRequest}, + request::{read_requests_from_file, RequestError, WorkerRequest}, response::WorkerResponse, - scm_socket::{Listeners, ScmSocket}, - state::ConfigState, + scm_socket::{Listeners, ScmSocket, ScmSocketError}, + state::{ConfigState, StateError}, +}; +use sozu_lib::{ + metrics::{self, MetricError}, + server::{Server, ServerError as LibServerError}, }; -use crate::util; - -/// An instance of Sōzu, as seen from the main process -pub struct Worker { - pub id: u32, - /// for the worker to receive requests and respond to the main process - pub worker_channel: Option>, - /// file descriptor of the command channel - pub worker_channel_fd: i32, - pub pid: pid_t, - pub run_state: RunState, - pub queue: VecDeque, - /// Used to send 
and receive listeners (socket addresses and file descriptors) - pub scm_socket: ScmSocket, - /// Used to send proxyrequests to the worker loop - pub sender: Option>, -} - -impl Worker { - pub fn new( - id: u32, - pid: pid_t, - command_channel: Channel, - scm_socket: ScmSocket, - _: &Config, - ) -> Worker { - Worker { - id, - worker_channel_fd: command_channel.sock.as_raw_fd(), - worker_channel: Some(command_channel), - sender: None, - pid, - run_state: RunState::Running, - queue: VecDeque::new(), - scm_socket, - } - } - - /// send proxy request to the worker, via the mpsc sender - pub async fn send(&mut self, order_id: String, content: Request) { - if let Some(worker_tx) = self.sender.as_mut() { - if let Err(e) = worker_tx - .send(WorkerRequest { - id: order_id.clone(), - content, - }) - .await - { - error!( - "error sending message {} to worker {:?}: {:?}", - order_id, self.id, e - ); - } - } - } - - /// send a kill -0 to check on the pid, if it's dead it should be an error - pub fn the_pid_is_alive(&self) -> bool { - kill(Pid::from_raw(self.pid), None).is_ok() - } - - /// get info about a worker, with a NotAnswering run state by default, - /// to be updated when the worker responds - pub fn querying_info(&self) -> WorkerInfo { - let run_state = match self.run_state { - RunState::Stopping => RunState::Stopping, - RunState::Stopped => RunState::Stopped, - RunState::Running | RunState::NotAnswering => RunState::NotAnswering, - }; - WorkerInfo { - id: self.id, - pid: self.pid, - run_state: run_state as i32, - } - } - - pub fn is_active(&self) -> bool { - self.run_state != RunState::Stopping && self.run_state != RunState::Stopped - } - - /* - pub fn push_message(&mut self, message: ProxyRequest) { - self.queue.push_back(message); - self.channel.interest.insert(Ready::WRITABLE); - } - - pub fn can_handle_events(&self) -> bool { - self.channel.readiness().is_readable() || (!self.queue.is_empty() && self.channel.readiness().is_writable()) - }*/ -} - -impl fmt::Debug for Worker { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "Worker {{ id: {}, run_state: {:?} }}", - self.id, self.run_state - ) - } -} - -/// Called once at the beginning of the main process, this forks main into as many workers -pub fn start_workers(executable_path: String, config: &Config) -> anyhow::Result> { - let state = ConfigState::new(); - let mut workers = Vec::new(); - for index in 0..config.worker_count { - let listeners = Some(Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), - }); - - let (pid, command_channel, scm_socket) = fork_main_into_worker( - &index.to_string(), - config, - executable_path.clone(), - &state, - listeners, - )?; - let mut worker = Worker::new(index as u32, pid, command_channel, scm_socket, config); - - // the new worker expects a status message at startup - if let Some(worker_channel) = worker.worker_channel.as_mut() { - if let Err(e) = worker_channel.blocking() { - error!("Could not block the worker channel: {}", e); - } - - worker_channel - .write_message(&WorkerRequest { - id: format!("start-status-{index}"), - content: RequestType::Status(Status {}).into(), - }) - .with_context(|| "Could not send status request to the worker")?; - - if let Err(e) = worker_channel.nonblocking() { - error!("Could not unblock the worker channel: {}", e); - } - } - - workers.push(worker); - } - info!("Created workers"); - Ok(workers) -} - -/// called by the CommandServer to start an individual worker -/// returns a handle of the worker, with channels to write to it 
-pub fn start_worker( - id: u32, - config: &Config, - executable_path: String, - state: &ConfigState, - listeners: Option, -) -> anyhow::Result { - let (worker_pid, main_to_worker_channel, main_to_worker_scm) = - fork_main_into_worker(&id.to_string(), config, executable_path, state, listeners)?; - - Ok(Worker::new( - id, - worker_pid, - main_to_worker_channel, - main_to_worker_scm, - config, - )) +use crate::util::{self, UtilError}; + +#[derive(thiserror::Error, Debug)] +pub enum WorkerError { + #[error("could not read on the channel")] + ReadChannel(ChannelError), + #[error("could not parse configuration from temporary file: {0}")] + ReadRequestsFromFile(RequestError), + #[error("could not setup metrics on new worker: {0}")] + SetupMetrics(MetricError), + #[error("could not create new worker from config: {0}")] + NewServerFromConfig(LibServerError), + #[error("could not create {kind} scm socket: {scm_err}")] + CreateScmSocket { + kind: String, + scm_err: ScmSocketError, + }, + #[error("could not create temporary file to pass the state to the new worker: {0}")] + CreateStateFile(IoError), + #[error("could not disable cloexec on {fd_name}'s file descriptor: {util_err}")] + DisableCloexec { + fd_name: String, + util_err: UtilError, + }, + #[error("could not write state to temporary file: {0}")] + WriteStateFile(StateError), + #[error("could not rewind the temporary state file: {0}")] + Rewind(IoError), + #[error("could not create MIO pair of unix stream: {0}")] + CreateUnixStream(IoError), + #[error("could not send config to the new worker: {0}")] + SendConfig(ChannelError), + #[error("unix fork failed: {0}")] + Fork(Errno), + #[error("Could not set the worker-to-main channel to {state}: {channel_err}")] + SetChannel { + state: String, + channel_err: ChannelError, + }, } /// called within a worker process, this starts the actual proxy @@ -212,22 +86,25 @@ pub fn begin_worker_process( id: i32, command_buffer_size: usize, max_command_buffer_size: usize, -) -> Result<(), anyhow::Error> { +) -> Result<(), WorkerError> { let mut worker_to_main_channel: Channel = Channel::new( unsafe { UnixStream::from_raw_fd(worker_to_main_channel_fd) }, command_buffer_size, max_command_buffer_size, ); - if let Err(e) = worker_to_main_channel.blocking() { - error!("Could not block the worker-to-main channel: {}", e); - } + worker_to_main_channel + .blocking() + .map_err(|channel_err| WorkerError::SetChannel { + state: "blocking".to_string(), + channel_err, + })?; let mut configuration_state_file = unsafe { File::from_raw_fd(configuration_state_fd) }; let worker_config = worker_to_main_channel .read_message() - .with_context(|| "worker could not read configuration from socket")?; + .map_err(WorkerError::ReadChannel)?; let worker_id = format!("{}-{:02}", "WRK", id); @@ -241,11 +118,14 @@ pub fn begin_worker_process( ); info!("worker {} starting...", id); let initial_state = read_requests_from_file(&mut configuration_state_file) - .with_context(|| "could not parse configuration state data")?; + .map_err(WorkerError::ReadRequestsFromFile)?; - if let Err(e) = worker_to_main_channel.nonblocking() { - error!("Could not unblock the worker-to-main channel: {}", e); - } + worker_to_main_channel + .nonblocking() + .map_err(|channel_err| WorkerError::SetChannel { + state: "nonblocking".to_string(), + channel_err, + })?; let mut worker_to_main_channel: Channel = worker_to_main_channel.into(); @@ -258,10 +138,14 @@ pub fn begin_worker_process( metrics.tagged_metrics, metrics.prefix.clone(), ) - .with_context(|| "Could not 
setup metrics")?; + .map_err(WorkerError::SetupMetrics)?; } - let worker_to_main_scm_socket = ScmSocket::new(worker_to_main_scm_fd) - .with_context(|| "could not create worker-to-main scm socket")?; + + let worker_to_main_scm_socket = + ScmSocket::new(worker_to_main_scm_fd).map_err(|scm_err| WorkerError::CreateScmSocket { + kind: "worker-to-main".to_string(), + scm_err, + })?; let mut server = Server::try_new_from_config( worker_to_main_channel, @@ -270,7 +154,7 @@ pub fn begin_worker_process( initial_state, true, ) - .with_context(|| "Could not create server from config")?; + .map_err(WorkerError::NewServerFromConfig)?; info!("starting event loop"); server.run(); @@ -280,7 +164,7 @@ pub fn begin_worker_process( /// unix-forks the main process /// -/// - Parent: sends config and listeners to the new worker +/// - Parent: sends config, state and listeners to the new worker /// - Child: calls the sozu executable path like so: `sozu worker --id [...]` /// /// returns the child process pid, and channels to talk to it. @@ -290,29 +174,48 @@ pub fn fork_main_into_worker( executable_path: String, state: &ConfigState, listeners: Option, -) -> anyhow::Result<(pid_t, Channel, ScmSocket)> { +) -> Result<(pid_t, Channel, ScmSocket), WorkerError> { trace!("parent({})", unsafe { libc::getpid() }); - let mut state_file = - tempfile().with_context(|| "could not create temporary file for configuration state")?; - util::disable_close_on_exec(state_file.as_raw_fd())?; + let mut state_file = tempfile().map_err(WorkerError::CreateStateFile)?; + util::disable_close_on_exec(state_file.as_raw_fd()).map_err(|util_err| { + WorkerError::DisableCloexec { + fd_name: "state_file".to_string(), + util_err, + } + })?; state .write_requests_to_file(&mut state_file) - .with_context(|| "Could not write state to file")?; + .map_err(WorkerError::WriteStateFile)?; - state_file - .rewind() - .with_context(|| "could not seek to beginning of file")?; + state_file.rewind().map_err(WorkerError::Rewind)?; - let (main_to_worker, worker_to_main) = UnixStream::pair()?; - let (main_to_worker_scm, worker_to_main_scm) = UnixStream::pair()?; + let (main_to_worker, worker_to_main) = + UnixStream::pair().map_err(WorkerError::CreateUnixStream)?; + let (main_to_worker_scm, worker_to_main_scm) = + UnixStream::pair().map_err(WorkerError::CreateUnixStream)?; - let main_to_worker_scm = ScmSocket::new(main_to_worker_scm.into_raw_fd()) - .with_context(|| "Could not create main-to-worker scm socket")?; + let main_to_worker_scm = + ScmSocket::new(main_to_worker_scm.into_raw_fd()).map_err(|scm_err| { + WorkerError::CreateScmSocket { + kind: "main-to-worker".to_string(), + scm_err, + } + })?; - util::disable_close_on_exec(worker_to_main.as_raw_fd())?; - util::disable_close_on_exec(worker_to_main_scm.as_raw_fd())?; + util::disable_close_on_exec(worker_to_main.as_raw_fd()).map_err(|util_err| { + WorkerError::DisableCloexec { + fd_name: "worker-to-main".to_string(), + util_err, + } + })?; + util::disable_close_on_exec(worker_to_main_scm.as_raw_fd()).map_err(|util_err| { + WorkerError::DisableCloexec { + fd_name: "worker-to-main-scm".to_string(), + util_err, + } + })?; let mut main_to_worker_channel: Channel = Channel::new( main_to_worker, @@ -325,18 +228,22 @@ pub fn fork_main_into_worker( error!("Could not block the main-to-worker channel: {}", e); } - info!("{} launching worker", worker_id); + info!("launching worker {}", worker_id); debug!("executable path is {}", executable_path); - match unsafe { fork() } { - Ok(ForkResult::Parent { child: worker_pid 
}) => { - info!("{} worker launched: {}", worker_id, worker_pid); + + match unsafe { fork().map_err(WorkerError::Fork)? } { + ForkResult::Parent { child: worker_pid } => { + info!("launching worker {} with pid {}", worker_id, worker_pid); main_to_worker_channel .write_message(config) - .with_context(|| "Could not send config to the new worker using the channel")?; + .map_err(WorkerError::SendConfig)?; - if let Err(e) = main_to_worker_channel.nonblocking() { - error!("Could not unblock the main-to-worker channel: {}", e); - } + main_to_worker_channel + .nonblocking() + .map_err(|channel_err| WorkerError::SetChannel { + state: "nonblocking".to_string(), + channel_err, + })?; if let Some(listeners) = listeners { info!("sending listeners to new worker: {:?}", listeners); @@ -344,7 +251,13 @@ pub fn fork_main_into_worker( info!("sent listeners from main: {:?}", result); listeners.close(); }; - util::disable_close_on_exec(main_to_worker_scm.fd)?; + + util::disable_close_on_exec(main_to_worker_scm.fd).map_err(|util_err| { + WorkerError::DisableCloexec { + fd_name: "main-to-worker-main-scm".to_string(), + util_err, + } + })?; Ok(( worker_pid.into(), @@ -352,7 +265,7 @@ pub fn fork_main_into_worker( main_to_worker_scm, )) } - Ok(ForkResult::Child) => { + ForkResult::Child => { trace!("child({}):\twill spawn a child", unsafe { libc::getpid() }); Command::new(executable_path) .arg("worker") @@ -372,70 +285,5 @@ pub fn fork_main_into_worker( unreachable!(); } - Err(e) => { - error!("Error during fork(): {}", e); - Err(anyhow::Error::from(e)) - } } } - -#[cfg(target_os = "linux")] -pub unsafe fn get_executable_path() -> anyhow::Result { - use std::fs; - - let path = fs::read_link("/proc/self/exe").with_context(|| "/proc/self/exe doesn't exist")?; - - let mut path_str = match path.into_os_string().into_string() { - Ok(s) => s, - Err(_) => bail!("Failed to convert PathBuf to String"), - }; - - if path_str.ends_with(" (deleted)") { - // The kernel appends " (deleted)" to the symlink when the original executable has been replaced - let len = path_str.len(); - path_str.truncate(len - 10) - } - - Ok(path_str) -} - -#[cfg(target_os = "macos")] -extern "C" { - pub fn _NSGetExecutablePath(buf: *mut c_char, size: *mut u32) -> i32; -} - -#[cfg(target_os = "macos")] -pub unsafe fn get_executable_path() -> anyhow::Result { - let path = - std::env::current_exe().with_context(|| "failed to retrieve current executable path")?; - Ok(path.to_string_lossy().to_string()) -} - -#[cfg(target_os = "freebsd")] -pub unsafe fn get_executable_path() -> anyhow::Result { - let mut capacity = PATH_MAX as usize; - let mut path: Vec = Vec::with_capacity(capacity); - path.extend(repeat(0).take(capacity)); - - let mib: Vec = vec![CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1]; - let len = mib.len() * size_of::(); - let element_size = size_of::(); - - let res = sysctl( - mib.as_ptr(), - (len / element_size) as u32, - path.as_mut_ptr() as *mut c_void, - &mut capacity, - std::ptr::null() as *const c_void, - 0, - ); - if res != 0 { - panic!("Could not retrieve the path of the executable"); - } - - Ok(String::from_raw_parts( - path.as_mut_ptr(), - capacity - 1, - path.len(), - )) -} diff --git a/command/src/certificate.rs b/command/src/certificate.rs index cc27f7808..c7697a5b4 100644 --- a/command/src/certificate.rs +++ b/command/src/certificate.rs @@ -214,8 +214,7 @@ pub fn get_fingerprint_from_certificate_path( } pub fn decode_fingerprint(fingerprint: &str) -> Result { - let bytes = - hex::decode(fingerprint).map_err(|hex_error| 
CertificateError::DecodeError(hex_error))?; + let bytes = hex::decode(fingerprint).map_err(CertificateError::DecodeError)?; Ok(Fingerprint(bytes)) } diff --git a/command/src/channel.rs b/command/src/channel.rs index b85788041..d33c58b73 100644 --- a/command/src/channel.rs +++ b/command/src/channel.rs @@ -52,7 +52,7 @@ pub enum ChannelError { /// Used in pairs to communicate, in a blocking or non-blocking way. pub struct Channel { pub sock: MioUnixStream, - front_buf: Buffer, + pub front_buf: Buffer, pub back_buf: Buffer, max_buffer_size: usize, pub readiness: Ready, @@ -62,6 +62,24 @@ pub struct Channel { phantom_rx: PhantomData, } +impl std::fmt::Debug for Channel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct(&format!( + "Channel<{}, {}>", + std::any::type_name::(), + std::any::type_name::() + )) + .field("sock", &self.sock.as_raw_fd()) + // .field("front_buf", &self.front_buf) + // .field("back_buf", &self.back_buf) + // .field("max_buffer_size", &self.max_buffer_size) + .field("readiness", &self.readiness) + .field("interest", &self.interest) + .field("blocking", &self.blocking) + .finish() + } +} + impl Channel { /// Creates a nonblocking channel on a given socket path pub fn from_path( @@ -187,6 +205,7 @@ impl Channel { let mut count = 0usize; loop { let size = self.front_buf.available_space(); + trace!("channel available space: {}", size); if size == 0 { self.interest.remove(Ready::READABLE); break; @@ -284,6 +303,7 @@ impl Channel { if self.front_buf.capacity() == self.max_buffer_size { error!("command buffer full, cannot grow more, ignoring"); } else { + println!("growing channel"); let new_size = min(self.front_buf.capacity() + 5000, self.max_buffer_size); self.front_buf.grow(new_size); } diff --git a/command/src/command.proto b/command/src/command.proto index dcdc048a7..391f67449 100644 --- a/command/src/command.proto +++ b/command/src/command.proto @@ -19,7 +19,10 @@ message Request { FrontendFilters list_frontends = 5; // list all listeners ListListeners list_listeners = 6; - // launch a new worker, giving its tag + // launch a new worker + // never implemented, the tag is unused and probably not needed + // we may still implement it later with no paramater + // the main process will automatically assign a new id to a new worker string launch_worker = 7; // upgrade the main process UpgradeMain upgrade_main = 8; diff --git a/command/src/logging.rs b/command/src/logging.rs index f6fbd5de2..0d6c3d5aa 100644 --- a/command/src/logging.rs +++ b/command/src/logging.rs @@ -21,6 +21,9 @@ thread_local! 
{ pub static TAG: String = LOGGER.with(|logger| {logger.borrow().tag.clone()}); } +// TODO: check if this error is critical: +// could not register compat logger: SetLoggerError(()) +// The CompatLogger may need a variable that tells wether it has been initiated already pub static COMPAT_LOGGER: CompatLogger = CompatLogger; pub struct Logger { diff --git a/command/src/proto/display.rs b/command/src/proto/display.rs index 6220f3ff1..4bc8df774 100644 --- a/command/src/proto/display.rs +++ b/command/src/proto/display.rs @@ -58,50 +58,50 @@ pub fn concatenate_vector(vec: &[String]) -> String { vec.join(", ") } -pub fn format_request_type(request_type: &RequestType) -> String { +pub fn format_request_type(request_type: &RequestType) -> &str { match request_type { - RequestType::SaveState(_) => "SaveState".to_owned(), - RequestType::LoadState(_) => "LoadState".to_owned(), - RequestType::CountRequests(_) => "CountRequests".to_owned(), - RequestType::ListWorkers(_) => "ListWorkers".to_owned(), - RequestType::ListFrontends(_) => "ListFrontends".to_owned(), - RequestType::ListListeners(_) => "ListListeners".to_owned(), - RequestType::LaunchWorker(_) => "LaunchWorker".to_owned(), - RequestType::UpgradeMain(_) => "UpgradeMain".to_owned(), - RequestType::UpgradeWorker(_) => "UpgradeWorker".to_owned(), - RequestType::SubscribeEvents(_) => "SubscribeEvents".to_owned(), - RequestType::ReloadConfiguration(_) => "ReloadConfiguration".to_owned(), - RequestType::Status(_) => "Status".to_owned(), - RequestType::AddCluster(_) => "AddCluster".to_owned(), - RequestType::RemoveCluster(_) => "RemoveCluster".to_owned(), - RequestType::AddHttpFrontend(_) => "AddHttpFrontend".to_owned(), - RequestType::RemoveHttpFrontend(_) => "RemoveHttpFrontend".to_owned(), - RequestType::AddHttpsFrontend(_) => "AddHttpsFrontend".to_owned(), - RequestType::RemoveHttpsFrontend(_) => "RemoveHttpsFrontend".to_owned(), - RequestType::AddCertificate(_) => "AddCertificate".to_owned(), - RequestType::ReplaceCertificate(_) => "ReplaceCertificate".to_owned(), - RequestType::RemoveCertificate(_) => "RemoveCertificate".to_owned(), - RequestType::AddTcpFrontend(_) => "AddTcpFrontend".to_owned(), - RequestType::RemoveTcpFrontend(_) => "RemoveTcpFrontend".to_owned(), - RequestType::AddBackend(_) => "AddBackend".to_owned(), - RequestType::RemoveBackend(_) => "RemoveBackend".to_owned(), - RequestType::AddHttpListener(_) => "AddHttpListener".to_owned(), - RequestType::AddHttpsListener(_) => "AddHttpsListener".to_owned(), - RequestType::AddTcpListener(_) => "AddTcpListener".to_owned(), - RequestType::RemoveListener(_) => "RemoveListener".to_owned(), - RequestType::ActivateListener(_) => "ActivateListener".to_owned(), - RequestType::DeactivateListener(_) => "DeactivateListener".to_owned(), - RequestType::QueryClusterById(_) => "QueryClusterById".to_owned(), - RequestType::QueryClustersByDomain(_) => "QueryClustersByDomain".to_owned(), - RequestType::QueryClustersHashes(_) => "QueryClustersHashes".to_owned(), - RequestType::QueryMetrics(_) => "QueryMetrics".to_owned(), - RequestType::SoftStop(_) => "SoftStop".to_owned(), - RequestType::HardStop(_) => "HardStop".to_owned(), - RequestType::ConfigureMetrics(_) => "ConfigureMetrics".to_owned(), - RequestType::Logging(_) => "Logging".to_owned(), - RequestType::ReturnListenSockets(_) => "ReturnListenSockets".to_owned(), - RequestType::QueryCertificatesFromTheState(_) => "QueryCertificatesFromTheState".to_owned(), - RequestType::QueryCertificatesFromWorkers(_) => "QueryCertificatesFromWorkers".to_owned(), + 
RequestType::SaveState(_) => "SaveState", + RequestType::LoadState(_) => "LoadState", + RequestType::CountRequests(_) => "CountRequests", + RequestType::ListWorkers(_) => "ListWorkers", + RequestType::ListFrontends(_) => "ListFrontends", + RequestType::ListListeners(_) => "ListListeners", + RequestType::LaunchWorker(_) => "LaunchWorker", + RequestType::UpgradeMain(_) => "UpgradeMain", + RequestType::UpgradeWorker(_) => "UpgradeWorker", + RequestType::SubscribeEvents(_) => "SubscribeEvents", + RequestType::ReloadConfiguration(_) => "ReloadConfiguration", + RequestType::Status(_) => "Status", + RequestType::AddCluster(_) => "AddCluster", + RequestType::RemoveCluster(_) => "RemoveCluster", + RequestType::AddHttpFrontend(_) => "AddHttpFrontend", + RequestType::RemoveHttpFrontend(_) => "RemoveHttpFrontend", + RequestType::AddHttpsFrontend(_) => "AddHttpsFrontend", + RequestType::RemoveHttpsFrontend(_) => "RemoveHttpsFrontend", + RequestType::AddCertificate(_) => "AddCertificate", + RequestType::ReplaceCertificate(_) => "ReplaceCertificate", + RequestType::RemoveCertificate(_) => "RemoveCertificate", + RequestType::AddTcpFrontend(_) => "AddTcpFrontend", + RequestType::RemoveTcpFrontend(_) => "RemoveTcpFrontend", + RequestType::AddBackend(_) => "AddBackend", + RequestType::RemoveBackend(_) => "RemoveBackend", + RequestType::AddHttpListener(_) => "AddHttpListener", + RequestType::AddHttpsListener(_) => "AddHttpsListener", + RequestType::AddTcpListener(_) => "AddTcpListener", + RequestType::RemoveListener(_) => "RemoveListener", + RequestType::ActivateListener(_) => "ActivateListener", + RequestType::DeactivateListener(_) => "DeactivateListener", + RequestType::QueryClusterById(_) => "QueryClusterById", + RequestType::QueryClustersByDomain(_) => "QueryClustersByDomain", + RequestType::QueryClustersHashes(_) => "QueryClustersHashes", + RequestType::QueryMetrics(_) => "QueryMetrics", + RequestType::SoftStop(_) => "SoftStop", + RequestType::HardStop(_) => "HardStop", + RequestType::ConfigureMetrics(_) => "ConfigureMetrics", + RequestType::Logging(_) => "Logging", + RequestType::ReturnListenSockets(_) => "ReturnListenSockets", + RequestType::QueryCertificatesFromTheState(_) => "QueryCertificatesFromTheState", + RequestType::QueryCertificatesFromWorkers(_) => "QueryCertificatesFromWorkers", } } @@ -128,12 +128,17 @@ impl Response { } } - let content = match &self.content { - Some(content) => content, - None => return Ok(println!("No content")), - }; - - content.display(json) + match &self.content { + Some(content) => content.display(json), + None => { + if json { + println!("{{}}"); + } else { + println!("No content"); + } + Ok(()) + } + } } } @@ -153,9 +158,9 @@ impl ResponseContent { ContentType::Metrics(aggr_metrics) => print_metrics(aggr_metrics), ContentType::FrontendList(frontends) => print_frontends(frontends), ContentType::ListenersList(listeners) => print_listeners(listeners), - ContentType::WorkerMetrics(worker_metrics) => print_worker_metrics(&worker_metrics), - ContentType::AvailableMetrics(list) => print_available_metrics(&list), - ContentType::RequestCounts(request_counts) => print_request_counts(&request_counts), + ContentType::WorkerMetrics(worker_metrics) => print_worker_metrics(worker_metrics), + ContentType::AvailableMetrics(list) => print_available_metrics(list), + ContentType::RequestCounts(request_counts) => print_request_counts(request_counts), ContentType::CertificatesWithFingerprints(certs) => { print_certificates_with_validity(certs) } @@ -205,12 +210,15 @@ pub fn 
print_status(worker_infos: &WorkerInfos) -> Result<(), DisplayError> { table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); table.add_row(row!["worker id", "pid", "run state"]); - for worker_info in &worker_infos.vec { + let mut sorted_infos = worker_infos.vec.clone(); + sorted_infos.sort_by_key(|worker| worker.id); + + for worker_info in &sorted_infos { let row = row!( worker_info.id, worker_info.pid, RunState::try_from(worker_info.run_state) - .map_err(|e| DisplayError::DecodeError(e))? + .map_err(DisplayError::DecodeError)? .as_str_name() ); table.add_row(row); diff --git a/command/src/request.rs b/command/src/request.rs index c8350b23b..459cc1abc 100644 --- a/command/src/request.rs +++ b/command/src/request.rs @@ -12,9 +12,12 @@ use nom::{HexDisplay, Offset}; use crate::{ buffer::fixed::Buffer, parser::parse_several_requests, - proto::command::{ - request::RequestType, LoadBalancingAlgorithms, PathRuleKind, Request, RequestHttpFrontend, - RulePosition, + proto::{ + command::{ + request::RequestType, LoadBalancingAlgorithms, PathRuleKind, Request, + RequestHttpFrontend, RulePosition, + }, + display::format_request_type, }, response::{HttpFrontend, MessageId}, }; @@ -112,6 +115,13 @@ impl Request { Some(RequestType::SoftStop(_)) | Some(RequestType::HardStop(_)) ) } + + pub fn short_name(&self) -> &str { + match &self.request_type { + Some(request_type) => format_request_type(request_type), + None => "Unallowed", + } + } } /// This is sent only from Sōzu to Sōzu @@ -139,14 +149,12 @@ pub fn read_requests_from_file(file: &mut File) -> Result, Re loop { let previous = buffer.available_data(); - let bytes_read = file - .read(buffer.space()) - .map_err(|e| RequestError::FileError(e))?; + let bytes_read = file.read(buffer.space()).map_err(RequestError::FileError)?; buffer.fill(bytes_read); if buffer.available_data() == 0 { - debug!("Empty buffer"); + trace!("read_requests_from_file: empty buffer"); break; } @@ -154,7 +162,7 @@ pub fn read_requests_from_file(file: &mut File) -> Result, Re match parse_several_requests::(buffer.data()) { Ok((i, requests)) => { if !i.is_empty() { - debug!("could not parse {} bytes", i.len()); + trace!("read_requests_from_file: could not parse {} bytes", i.len()); if previous == buffer.available_data() { break; } @@ -166,7 +174,7 @@ pub fn read_requests_from_file(file: &mut File) -> Result, Re Err(nom::Err::Incomplete(_)) => { if buffer.available_data() == buffer.capacity() { error!( - "message too big, stopping parsing:\n{}", + "read_requests_from_file: message too big, stopping parsing:\n{}", buffer.data().to_hex(16) ); break; diff --git a/command/src/scm_socket.rs b/command/src/scm_socket.rs index ec91bce8c..1ea74135c 100644 --- a/command/src/scm_socket.rs +++ b/command/src/scm_socket.rs @@ -209,7 +209,7 @@ impl ScmSocket { } /// Socket addresses and file descriptors needed by a Proxy to start listening -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq)] pub struct Listeners { pub http: Vec<(SocketAddr, RawFd)>, pub tls: Vec<(SocketAddr, RawFd)>, @@ -305,11 +305,7 @@ mod tests { let receiving_scm_socket = ScmSocket::new(stream_2.as_raw_fd()).expect("Could not create scm socket"); - let listeners = Listeners { - http: vec![], - tcp: vec![], - tls: vec![], - }; + let listeners = Listeners::default(); sending_scm_socket .send_listeners(&listeners) diff --git a/command/src/state.rs b/command/src/state.rs index 2df12a2e5..9532f34b2 100644 --- a/command/src/state.rs +++ 
b/command/src/state.rs @@ -155,7 +155,7 @@ impl ConfigState { if let Some(request_type) = &request.request_type { let count = self .request_counts - .entry(format_request_type(request_type)) + .entry(format_request_type(request_type).to_owned()) .or_insert(1); *count += 1; } @@ -1175,9 +1175,7 @@ impl ConfigState { /// Types like `HttpFrontend` are converted into protobuf ones, like `RequestHttpFrontend` pub fn cluster_state(&self, cluster_id: &str) -> Option { let configuration = self.clusters.get(cluster_id).cloned(); - if configuration.is_none() { - return None; - } + configuration.as_ref()?; let http_frontends: Vec = self .http_fronts @@ -1402,7 +1400,7 @@ impl ConfigState { .map_err(StateError::FileError)?; if counter % 1000 == 0 { - info!("writing command {}", counter); + info!("writing {} commands to file", counter); file.sync_all().map_err(StateError::FileError)?; } counter += 1; diff --git a/doc/managing_workers.md b/doc/managing_workers.md deleted file mode 100644 index 680403036..000000000 --- a/doc/managing_workers.md +++ /dev/null @@ -1,159 +0,0 @@ -# How are Sōzu's workers managed? - -Sōzu's main process starts and manages _workers_, which are subinstances of itself. -This core feature makes Sōzu pretty efficient, but raises the question of managing state across a whole cluster of processes. - -How do we solve this challenge? Unix sockets and channels. - -## Architecture - -`sozu` command line sends commands on a unix socket. -In the `command::start_server()` function of the main process, -there is a thread running in the background where a unix listener accepts new -connection and spawns client loops. - -The client loops parse client requests and forward them to the Command Server -through mpsc channels. **mpsc** = Multiple Producer, Single Consumer. -The sending end of the channel, called `command_tx`, is cloned and used many times over, -but the messages are all received by a single `command_rx` in the Command Server main loop. - -``` - UNIX UNIX - SOCKET SOCKET - | ^ | ^ - | | | | - +----v-+-----+ +----v-+-----+ - | client | | client | as many more - | loop | | loop | clients as we - +-+-------^--+ +--+-----^---+ want - | | | | - | | mpsc channels | | - | | | | -+----v-------+---------------v-----+------+ -| | -| Command | -| Server | -| | -+----+-------^---------------+-----^------+ - | | | | - | | mpsc channels | | - | | | | - +-v-------+--+ +--v-----+---+ - | worker | | worker | as many more - | loop | | loop | workers as we - +----+-^-----+ +----+-^-----+ want - | | | | - v | v | - UNIX UNIX - SOCKET SOCKET -``` - -As you can guess from the drawing, the exact same logic applies when workers send messages -to the CommandServer. - -The Command Server is able to send messages to clients and to workers by -keeping track of the sending ends of their mpsc channels, `client_tx` and `worker_tx`. - -In turn, clients and workers listen on their own receivers, `client_rx` and `worker_rx`, and -write everything onto their respective unix streams, to notify clients and workers. - -# Asynchronous handling of commands - -It is impossible to manage commands synchronously. -Some tasks are fast enough (for example, dumping the state), some are way too long. -For instance, loading a new state implies to: - -- parse a state file to derive instructions from it -- send ALL instructions to ALL workers -- wait for ALL workers to reply - -Blocking the main thread is unthinkable. 
Therefore, Sōzu detaches threads by doing, for instance: - -```rust -smol::spawn( - client_loop(id, unix_stream, command_tx, client_rx) -).detach(); -``` - -This make the client loop run in the background. -Using similar syntax, we can wait for worker responses in the background. -But how can we bring data back from those threads? => **more channels**. - -# The flow of requests, responses, and detached threads. - -What the Command Server does to perform a task: - -``` - +-------------+ - | | - | client | - | loop | - | | - +----+--------+ - | MAIN THREAD DETACHED THREAD - +----------+-------------------------------------------+ +------------------+ - | | | | | - | | create mpsc channel | | | - +--------------+ | | +------------------+ | | | - | | | v v v | | Listen on | -+--+ worker loop |<--+--+------ REQUEST SENDER RECEIVER | | the receiver | -| | | | | | | | | | | -| +--------------+ | | | | | | | | -| | | v | | | | Wait for all | -| +--------------+ | | id | | | | responses | -| | | | | | | +-----------+--->| | -+--+ worker loop |<--+ | | | | | | -| | | | | | | | | apply logic | -| +--------------+ | | v v | | | -| | | in_flight hash map use RESPONSES | | | -| +--------------+ | | +-----------------+ sender +-------------+--->| | -| | | | | | +------------>| | | | -+--+ worker loop |<--+ | | -request ids | | | | | -| | | | | | retrieve | | | Send final | -| +--------------+ | | -senders | sender | | | result to the | -| | | |<-----+ | | | main thread | -| | +-----------------+ | | | | (not shown) | -| | id | | | | -| | ^ | | | | -| | | | | | | -+------------------------+->RESPONSES----------------------+------+ | | | - | | | | - | | | | - +------------------------------------------------------+ +------------------+ -``` - -## What the main thread does to client requests - -- **Receive a client request** through the client loop, and if this request necessitates to talk to the workers, -- **send requests to the workers** through the worker loop. This goes fast. -- **create an mpsc** task channel with two ends, the _sender_ and the _receiver_. -- in a hash map called `in_flight`, keep track of: - - the `request_id` - - the _sender_ -- Give the _receiver_ to a **detached thread** - -## What the main thread does to worker responses - -- **Receives worker responses** through the worker loop -- **Looks at the `response_id`**, which is the same as the `request_id` seen above -- searches the `in_flight` hash map to **retrieve the associated _sender_** -- uses the _sender_ to **send the response into the detached thread** - -## What the detached thread does - -- **waits for worker responses** on the _receiver_ -- Completes the logic -- sends the final response **back to the command server** using `command_tx`, - _just like client loops and worker loops do_, because they are detached threads too. - -The Command Server just puts this final response into the client loop, and _voilà_! - -## To sum up - -Here is what is delegated into the background (all those boxes around the main thread): - -1. reading and writing from/onto the unix sockets -2. waiting for and processing worker responses - -The Command Server can be described as event-based because everything is returned -to the main loop using channels, in no precise order, asynchronously. 
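
For reference, the `in_flight` routing described in the removed document can be sketched with nothing but the standard library: a map from request id to the sending half of a channel, with a detached task holding the receiving half. This is a minimal illustration only, not Sōzu's actual implementation (which spawns futures with `smol` and uses its own `WorkerResponse` type); the names and types below are simplified stand-ins.

```rust
use std::collections::HashMap;
use std::sync::mpsc;
use std::thread;

/// Simplified stand-in for Sōzu's worker response type.
#[derive(Debug)]
struct WorkerResponse {
    id: String,
    message: String,
}

fn main() {
    // in_flight: request id -> sender towards the detached task waiting on it
    let mut in_flight: HashMap<String, mpsc::Sender<WorkerResponse>> = HashMap::new();

    // One detached task per long-running request, holding the receiving end.
    let (tx, rx) = mpsc::channel::<WorkerResponse>();
    in_flight.insert("load-state-42".to_string(), tx);
    let waiter = thread::spawn(move || {
        // Wait for every response tied to this request id, then apply logic.
        for response in rx {
            println!("detached task got: {:?}", response);
        }
    });

    // Main loop: a worker response arrives; its id keys the lookup.
    let response = WorkerResponse {
        id: "load-state-42".to_string(),
        message: "Ok".to_string(),
    };
    if let Some(sender) = in_flight.get(&response.id) {
        let _ = sender.send(response);
    }

    // Dropping the senders closes the channel, which ends the detached task.
    in_flight.clear();
    let _ = waiter.join();
}
```

In Sōzu itself the receiving end lives in a future detached with `smol::spawn(...).detach()` rather than an OS thread, but the routing principle (look up the response id in `in_flight`, forward on the stored sender) is the same.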
diff --git a/e2e/src/sozu/worker.rs b/e2e/src/sozu/worker.rs index 9dd30c6ff..1527a5115 100644 --- a/e2e/src/sozu/worker.rs +++ b/e2e/src/sozu/worker.rs @@ -51,18 +51,6 @@ pub fn set_no_close_exec(fd: i32) { } impl Worker { - pub fn empty_file_config() -> FileConfig { - FileConfig::default() - } - - pub fn empty_listeners() -> Listeners { - Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), - } - } - pub fn into_config(file_config: FileConfig) -> Config { ConfigBuilder::new(file_config, "") .into_config() @@ -70,8 +58,8 @@ impl Worker { } pub fn empty_config() -> (Config, Listeners, ConfigState) { - let listeners = Worker::empty_listeners(); - let config = Worker::empty_file_config(); + let listeners = Listeners::default(); + let config = FileConfig::default(); let config = Worker::into_config(config); let state = ConfigState::new(); (config, listeners, state) diff --git a/e2e/src/tests/tests.rs b/e2e/src/tests/tests.rs index 65ae1cbfa..27e35737b 100644 --- a/e2e/src/tests/tests.rs +++ b/e2e/src/tests/tests.rs @@ -12,6 +12,7 @@ use sozu_command_lib::{ request::RequestType, ActivateListener, AddCertificate, CertificateAndKey, ListenerType, RemoveBackend, RequestHttpFrontend, }, + scm_socket::Listeners, state::ConfigState, }; @@ -171,9 +172,9 @@ pub fn try_backend_stop(nb_requests: usize, zombie: Option) -> State { let config = Worker::into_config(FileConfig { zombie_check_interval: zombie, - ..Worker::empty_file_config() + ..FileConfig::default() }); - let listeners = Worker::empty_listeners(); + let listeners = Listeners::default(); let state = ConfigState::new(); let (mut worker, mut backends) = setup_async_test( "BACKSTOP", diff --git a/lib/src/http.rs b/lib/src/http.rs index 3e52ad579..71504367a 100644 --- a/lib/src/http.rs +++ b/lib/src/http.rs @@ -1065,11 +1065,7 @@ pub fn start_http_worker( let server_scm_socket = ScmSocket::new(scm_server.as_raw_fd()).with_context(|| "Could not create scm socket")?; - if let Err(e) = client_scm_socket.send_listeners(&Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), - }) { + if let Err(e) = client_scm_socket.send_listeners(&Listeners::default()) { error!("error sending empty listeners: {:?}", e); } diff --git a/lib/src/metrics/local_drain.rs b/lib/src/metrics/local_drain.rs index ea29ef009..3ee77ed40 100644 --- a/lib/src/metrics/local_drain.rs +++ b/lib/src/metrics/local_drain.rs @@ -269,7 +269,8 @@ impl LocalDrain { return Ok(ContentType::WorkerMetrics(WorkerMetrics { proxy: proxy_metrics, clusters: BTreeMap::new(), - }).into()); + }) + .into()); } let worker_metrics = match (cluster_ids.is_empty(), backend_ids.is_empty()) { diff --git a/lib/src/server.rs b/lib/src/server.rs index 5431d3c78..454bca8e5 100644 --- a/lib/src/server.rs +++ b/lib/src/server.rs @@ -3,11 +3,11 @@ use std::{ cell::RefCell, collections::{HashSet, VecDeque}, convert::TryFrom, + io::Error as IoError, os::unix::io::{AsRawFd, FromRawFd}, rc::Rc, }; -use anyhow::Context; use mio::{ net::{TcpListener as MioTcpListener, TcpStream}, Events, Interest, Poll, Token, @@ -22,13 +22,13 @@ use sozu_command::{ request::RequestType, response_content::ContentType, ActivateListener, AddBackend, CertificatesWithFingerprints, Cluster, ClusterHashes, ClusterInformations, DeactivateListener, Event, HttpListenerConfig, HttpsListenerConfig, ListenerType, - LoadBalancingAlgorithms, LoadMetric, MetricsConfiguration, RemoveBackend, ResponseStatus, - TcpListenerConfig as CommandTcpListener, + LoadBalancingAlgorithms, LoadMetric, MetricsConfiguration, 
RemoveBackend, Request, + ResponseStatus, TcpListenerConfig as CommandTcpListener, }, ready::Ready, request::WorkerRequest, response::{MessageId, WorkerResponse}, - scm_socket::{Listeners, ScmSocket}, + scm_socket::{Listeners, ScmSocket, ScmSocketError}, state::ConfigState, }; @@ -220,6 +220,21 @@ impl SessionManager { } } +#[derive(thiserror::Error, Debug)] +pub enum ServerError { + #[error("could not create event loop with MIO poll: {0}")] + CreatePoll(IoError), + #[error("could not clone the MIO registry: {0}")] + CloneRegistry(IoError), + #[error("could not register the channel: {0}")] + RegisterChannel(IoError), + #[error("{msg}:{scm_err}")] + ScmSocket { + msg: String, + scm_err: ScmSocketError, + }, +} + /// `Server` handles the event loop, the listeners, the sessions and /// communication with the configuration channel. /// @@ -271,8 +286,8 @@ impl Server { config: Config, initial_state: Vec, expects_initial_status: bool, - ) -> anyhow::Result { - let event_loop = Poll::new().with_context(|| "could not create event loop")?; + ) -> Result { + let event_loop = Poll::new().map_err(ServerError::CreatePoll)?; let pool = Rc::new(RefCell::new(Pool::with_capacity( config.min_buffers, config.max_buffers, @@ -315,7 +330,7 @@ impl Server { let registry = event_loop .registry() .try_clone() - .with_context(|| "could not clone the mio Registry")?; + .map_err(ServerError::CloneRegistry)?; let https = https::HttpsProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()); @@ -350,7 +365,7 @@ impl Server { server_config: ServerConfig, initial_state: Option>, expects_initial_status: bool, - ) -> anyhow::Result { + ) -> Result { FEATURES.with(|_features| { // initializing feature flags }); @@ -361,7 +376,7 @@ impl Server { Token(0), Interest::READABLE | Interest::WRITABLE, ) - .with_context(|| "should register the channel")?; + .map_err(ServerError::RegisterChannel)?; METRICS.with(|metrics| { if let Some(sock) = (*metrics.borrow_mut()).socket_mut() { @@ -379,7 +394,8 @@ impl Server { let registry = poll .registry() .try_clone() - .with_context(|| "could not clone the mio Registry")?; + .map_err(ServerError::CloneRegistry)?; + http::HttpProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()) } })); @@ -390,7 +406,7 @@ impl Server { let registry = poll .registry() .try_clone() - .with_context(|| "could not clone the mio Registry")?; + .map_err(ServerError::CloneRegistry)?; https::HttpsProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()) } @@ -402,7 +418,8 @@ impl Server { let registry = poll .registry() .try_clone() - .with_context(|| "could not clone the mio Registry")?; + .map_err(ServerError::CloneRegistry)?; + tcp::TcpProxy::new(registry, sessions.clone(), backends.clone()) } })); @@ -457,10 +474,19 @@ impl Server { let msg = server.channel.read_message(); debug!("got message: {:?}", msg); - if let Ok(msg) = msg { - if let Err(e) = server.channel.write_message(&WorkerResponse::ok(msg.id)) { + if let Ok(WorkerRequest { + id, + content: + Request { + request_type: Some(RequestType::Status(_)), + }, + }) = msg + { + if let Err(e) = server.channel.write_message(&WorkerResponse::ok(id)) { error!("Could not send an ok to the main process: {}", e); } + } else { + panic!("plz give me a status request first when I start, you sent me this instead: {:?}", msg); } server.unblock_channel(); } @@ -469,15 +495,25 @@ impl Server { server .scm .set_blocking(true) - .with_context(|| "Could not set the scm socket to blocking")?; - let listeners = server - .scm - 
.receive_listeners() - .with_context(|| "could not receive listeners from the scm socket")?; + .map_err(|scm_err| ServerError::ScmSocket { + msg: "Could not set the scm socket to blocking".to_string(), + scm_err, + })?; + let listeners = + server + .scm + .receive_listeners() + .map_err(|scm_err| ServerError::ScmSocket { + msg: "could not receive listeners from the scm socket".to_string(), + scm_err, + })?; server .scm .set_blocking(false) - .with_context(|| "Could not set the scm socket to unblocking")?; + .map_err(|scm_err| ServerError::ScmSocket { + msg: "Could not set the scm socket to unblocking".to_string(), + scm_err, + })?; info!("received listeners: {:?}", listeners); server.scm_listeners = Some(listeners); @@ -525,7 +561,7 @@ impl Server { } if event.is_read_closed() || event.is_write_closed() { error!("command channel was closed"); - continue; + return; } let ready = Ready::from(event); self.channel.handle_events(ready); @@ -686,7 +722,13 @@ impl Server { } Some(RequestType::ReturnListenSockets(_)) => { info!("received ReturnListenSockets order"); - self.return_listen_sockets(); + match self.return_listen_sockets() { + Ok(_) => push_queue(WorkerResponse::ok(request.id.clone())), + Err(error) => push_queue(WorkerResponse::error( + request.id.clone(), + format!("Could not send listeners on scm socket: {error:?}"), + )), + } } _ => self.notify(request), }, @@ -812,15 +854,21 @@ impl Server { if let Err(e) = self.channel.run() { error!("Error while running the server channel: {}", e); } - self.block_channel(); + // self.block_channel(); let id = self .shutting_down .take() .expect("should have shut down correctly"); // panicking here makes sense actually + + debug!("Responding OK to main process for request {}", id); + let proxy_response = WorkerResponse::ok(id); if let Err(e) = self.channel.write_message(&proxy_response) { error!("Could not write response to the main process: {}", e); } + if let Err(e) = self.channel.run() { + error!("Error while running the server channel: {}", e); + } return true; } @@ -940,6 +988,7 @@ impl Server { ) }; push_queue(response); + return; } // if all certificates are queried, or filtered by domain name, // the request will be handled by the https proxy @@ -1397,7 +1446,7 @@ impl Server { } /// Send all socket addresses and file descriptors of all proxies, via the scm socket - pub fn return_listen_sockets(&mut self) { + pub fn return_listen_sockets(&mut self) -> Result<(), ScmSocketError> { self.unblock_scm_socket(); let mut http_listeners = self.http.borrow_mut().give_back_listeners(); @@ -1448,6 +1497,7 @@ impl Server { self.block_scm_socket(); info!("sent default listeners: {:?}", res); + res } fn block_scm_socket(&mut self) { diff --git a/lib/src/tcp.rs b/lib/src/tcp.rs index f33040883..bdf70a66e 100644 --- a/lib/src/tcp.rs +++ b/lib/src/tcp.rs @@ -7,7 +7,7 @@ use std::{ rc::Rc, }; -use anyhow::{bail, Context}; +use anyhow::Context; use mio::{ net::TcpListener as MioTcpListener, net::{TcpStream as MioTcpStream, UnixStream}, @@ -1233,11 +1233,15 @@ impl TcpProxy { Ok(()) } - pub fn remove_tcp_front(&mut self, front: RequestTcpFrontend) -> anyhow::Result<()> { - let address = front - .address - .parse() - .with_context(|| "wrong socket address")?; + pub fn remove_tcp_front(&mut self, front: RequestTcpFrontend) -> Result<(), ProxyError> { + let address = + front + .address + .parse::() + .map_err(|parse_error| ProxyError::SocketParse { + address: front.address.clone(), + error: parse_error.to_string(), + })?; let mut listener = match self .listeners 
@@ -1245,7 +1249,7 @@ impl TcpProxy { .find(|l| l.borrow().address == address) { Some(l) => l.borrow_mut(), - None => bail!(format!("no such listener for '{}'", front.address)), + None => return Err(ProxyError::NoListenerFound(address)), }; listener.set_tags(front.address, None); @@ -1761,11 +1765,7 @@ mod tests { let server_scm_socket = ScmSocket::new(scm_server.as_raw_fd()).expect("Could not create scm socket"); client_scm_socket - .send_listeners(&Listeners { - http: Vec::new(), - tls: Vec::new(), - tcp: Vec::new(), - }) + .send_listeners(&Listeners::default()) .unwrap(); let server_config = server::ServerConfig { From 72d43858dc1b61cd433bde98491bf4e3fc614e65 Mon Sep 17 00:00:00 2001 From: Eloi DEMOLIS Date: Fri, 2 Feb 2024 10:25:56 +0100 Subject: [PATCH 10/11] Refactor HTTP, HTTPS and TCP example code Signed-off-by: Eloi DEMOLIS Co-Authored-By: Mbechezi Mlanawo --- lib/Cargo.toml | 3 +- lib/README.md | 2 +- lib/examples/{minimal.rs => http.rs} | 9 +- lib/examples/{main.rs => https.rs} | 14 +- lib/examples/tcp.rs | 2 +- lib/src/http.rs | 176 +++++++--------- lib/src/https.rs | 138 +++++-------- lib/src/lib.rs | 113 ++++++++++- lib/src/tcp.rs | 290 +++++++++------------------ 9 files changed, 346 insertions(+), 401 deletions(-) rename lib/examples/{minimal.rs => http.rs} (93%) rename lib/examples/{main.rs => https.rs} (94%) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index ffd3f5555..de43bdc15 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -21,8 +21,7 @@ include = [ "./README.md", "Cargo.toml", "src/**/*", - "/examples/main.rs", - "/examples/minimal.rs", + "/examples/*", "assets/certificate.pem", "assets/certificate_chain.pem", "assets/key.pem", diff --git a/lib/README.md b/lib/README.md index b7b86ab9e..b57ff0719 100644 --- a/lib/README.md +++ b/lib/README.md @@ -1,7 +1,7 @@ # sozu_lib, a proxy development library `sozu_lib` provides tools to write a proxy that can be reconfigured -without any downtime. See `examples/minimal.rs` for a small example +without any downtime. See `examples/http.rs` for a small example of starting a HTTP proxy with one cluster. 
A proxy starts as an event loop with which you communicate through diff --git a/lib/examples/minimal.rs b/lib/examples/http.rs similarity index 93% rename from lib/examples/minimal.rs rename to lib/examples/http.rs index 96949c7d6..7c38f85a2 100644 --- a/lib/examples/minimal.rs +++ b/lib/examples/http.rs @@ -37,8 +37,13 @@ fn main() -> anyhow::Result<()> { let worker_thread_join_handle = thread::spawn(move || { let max_buffers = 500; let buffer_size = 16384; - sozu_lib::http::start_http_worker(http_listener, proxy_channel, max_buffers, buffer_size) - .expect("The worker could not be started, or shut down"); + sozu_lib::http::testing::start_http_worker( + http_listener, + proxy_channel, + max_buffers, + buffer_size, + ) + .expect("The worker could not be started, or shut down"); }); let cluster = Cluster { diff --git a/lib/examples/main.rs b/lib/examples/https.rs similarity index 94% rename from lib/examples/main.rs rename to lib/examples/https.rs index d8d0705aa..036ade116 100644 --- a/lib/examples/main.rs +++ b/lib/examples/https.rs @@ -42,7 +42,12 @@ fn main() -> anyhow::Result<()> { let jg = thread::spawn(move || { let max_buffers = 500; let buffer_size = 16384; - sozu_lib::http::start_http_worker(http_listener, channel, max_buffers, buffer_size); + sozu_lib::http::testing::start_http_worker( + http_listener, + channel, + max_buffers, + buffer_size, + ); }); let http_front = RequestHttpFrontend { @@ -82,7 +87,12 @@ fn main() -> anyhow::Result<()> { let jg2 = thread::spawn(move || { let max_buffers = 500; let buffer_size = 16384; - sozu_lib::https::start_https_worker(https_listener, channel2, max_buffers, buffer_size) + sozu_lib::https::testing::start_https_worker( + https_listener, + channel2, + max_buffers, + buffer_size, + ) }); let cert1 = include_str!("../assets/certificate.pem"); diff --git a/lib/examples/tcp.rs b/lib/examples/tcp.rs index 248f32840..4ba4576ae 100644 --- a/lib/examples/tcp.rs +++ b/lib/examples/tcp.rs @@ -34,7 +34,7 @@ fn main() -> anyhow::Result<()> { ..Default::default() }; setup_logging("stdout", None, "debug", "TCP"); - sozu_lib::tcp::start_tcp_worker(listener, max_buffers, buffer_size, channel); + sozu_lib::tcp::testing::start_tcp_worker(listener, max_buffers, buffer_size, channel); }); let tcp_front = RequestTcpFrontend { diff --git a/lib/src/http.rs b/lib/src/http.rs index 71504367a..2c97976e1 100644 --- a/lib/src/http.rs +++ b/lib/src/http.rs @@ -3,19 +3,17 @@ use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, io::ErrorKind, net::{Shutdown, SocketAddr}, - os::unix::io::{AsRawFd, IntoRawFd}, + os::unix::io::AsRawFd, rc::{Rc, Weak}, str::from_utf8_unchecked, }; -use anyhow::Context; use mio::{ - net::{TcpListener, TcpStream, UnixStream}, + net::{TcpListener, TcpStream}, unix::SourceFd, - Interest, Poll, Registry, Token, + Interest, Registry, Token, }; use rusty_ulid::Ulid; -use slab::Slab; use time::{Duration, Instant}; use sozu_command::{ @@ -27,7 +25,6 @@ use sozu_command::{ ready::Ready, request::WorkerRequest, response::{HttpFrontend, WorkerResponse}, - scm_socket::{Listeners, ScmSocket}, state::ClusterId, }; @@ -43,7 +40,7 @@ use crate::{ Http, Pipe, SessionState, }, router::{Route, Router}, - server::{ListenSession, ListenToken, ProxyChannel, Server, SessionManager}, + server::{ListenToken, SessionManager}, socket::server_bind, timer::TimeoutContainer, AcceptError, CachedTags, FrontendFromRequestError, L7ListenerHandler, L7Proxy, ListenerError, @@ -994,113 +991,80 @@ impl L7Proxy for HttpProxy { } } -/// This is starts an HTTP worker with an 
HTTP listener config. -/// It activates the Listener automatically. -pub fn start_http_worker( - config: HttpListenerConfig, - channel: ProxyChannel, - max_buffers: usize, - buffer_size: usize, -) -> anyhow::Result<()> { - use crate::server; - - let event_loop = Poll::new().with_context(|| "could not create event loop")?; - - let pool = Rc::new(RefCell::new(Pool::with_capacity( - 1, - max_buffers, - buffer_size, - ))); - let backends = Rc::new(RefCell::new(BackendMap::new())); - let mut sessions: Slab>> = Slab::with_capacity(max_buffers); - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for channel", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for timer", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for metrics", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - - let token = { - let entry = sessions.vacant_entry(); - let key = entry.key(); - let _e = entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - Token(key) - }; +pub mod testing { + use crate::testing::*; - let address = config.address.clone(); - let sessions = SessionManager::new(sessions, max_buffers); - let registry = event_loop - .registry() - .try_clone() - .with_context(|| "Failed at creating a registry")?; - let mut proxy = HttpProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()); - let _ = proxy.add_listener(config, token); - let _ = proxy.activate_listener( - &address + /// this function is not used, but is available for example and testing purposes + pub fn start_http_worker( + config: HttpListenerConfig, + channel: ProxyChannel, + max_buffers: usize, + buffer_size: usize, + ) -> anyhow::Result<()> { + let address = config + .address .parse() - .with_context(|| "Could not parse socket address")?, - None, - ); - let (scm_server, scm_client) = - UnixStream::pair().with_context(|| "Failed at creating scm stream sockets")?; - let client_scm_socket = - ScmSocket::new(scm_client.into_raw_fd()).with_context(|| "Could not create scm socket")?; - let server_scm_socket = - ScmSocket::new(scm_server.as_raw_fd()).with_context(|| "Could not create scm socket")?; - - if let Err(e) = client_scm_socket.send_listeners(&Listeners::default()) { - error!("error sending empty listeners: {:?}", e); - } + .with_context(|| "Could not parse socket address")?; - let server_config = server::ServerConfig { - max_connections: max_buffers, - ..Default::default() - }; + let ServerParts { + event_loop, + registry, + sessions, + pool, + backends, + client_scm_socket: _, + server_scm_socket, + server_config, + } = prebuild_server(max_buffers, buffer_size, true)?; + + let token = { + let mut sessions = sessions.borrow_mut(); + let entry = sessions.slab.vacant_entry(); + let key = entry.key(); + let _ = entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::HTTPListen, + }))); + Token(key) + }; - let mut server = Server::new( - event_loop, - channel, - server_scm_socket, - sessions, - pool, - backends, - Some(proxy), - None, - None, - server_config, - None, - false, - ) - .with_context(|| "Failed at creating server")?; - - debug!("starting event loop"); - server.run(); - debug!("ending event loop"); - Ok(()) + let mut proxy = 
HttpProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()); + proxy + .add_listener(config, token) + .with_context(|| "Failed at creating adding the listener")?; + proxy + .activate_listener(&address, None) + .with_context(|| "Failed at creating activating the listener")?; + + let mut server = Server::new( + event_loop, + channel, + server_scm_socket, + sessions, + pool, + backends, + Some(proxy), + None, + None, + server_config, + None, + false, + ) + .with_context(|| "Failed at creating server")?; + + debug!("starting event loop"); + server.run(); + debug!("ending event loop"); + Ok(()) + } } #[cfg(test)] mod tests { extern crate tiny_http; + use super::testing::start_http_worker; use super::*; + use crate::sozu_command::{ channel::Channel, config::ListenerBuilder, @@ -1181,7 +1145,7 @@ mod tests { println!("test received: {:?}", command.read_message()); println!("test received: {:?}", command.read_message()); - let mut client = TcpStream::connect(("127.0.0.1", 1024)).expect("could not parse address"); + let mut client = TcpStream::connect(("127.0.0.1", 1024)).expect("could not connect"); // 5 seconds of timeout client.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); @@ -1264,7 +1228,7 @@ mod tests { println!("test received: {:?}", command.read_message()); println!("test received: {:?}", command.read_message()); - let mut client = TcpStream::connect(("127.0.0.1", 1031)).expect("could not parse address"); + let mut client = TcpStream::connect(("127.0.0.1", 1031)).expect("could not connect"); // 5 seconds of timeout client.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); @@ -1387,7 +1351,7 @@ mod tests { println!("test received: {:?}", command.read_message()); println!("test received: {:?}", command.read_message()); - let mut client = TcpStream::connect(("127.0.0.1", 1041)).expect("could not parse address"); + let mut client = TcpStream::connect(("127.0.0.1", 1041)).expect("could not connect"); // 5 seconds of timeout client.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); diff --git a/lib/src/https.rs b/lib/src/https.rs index 57ab9d312..c1927a2c3 100644 --- a/lib/src/https.rs +++ b/lib/src/https.rs @@ -3,17 +3,16 @@ use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, io::ErrorKind, net::{Shutdown, SocketAddr as StdSocketAddr}, - os::unix::{io::AsRawFd, net::UnixStream}, + os::unix::io::AsRawFd, rc::{Rc, Weak}, str::{from_utf8, from_utf8_unchecked}, sync::Arc, }; -use anyhow::Context; use mio::{ net::{TcpListener as MioTcpListener, TcpStream as MioTcpStream}, unix::SourceFd, - Interest, Poll, Registry, Token, + Interest, Registry, Token, }; use rustls::{ crypto::{ @@ -32,7 +31,6 @@ use rustls::{ CipherSuite, ProtocolVersion, ServerConfig, ServerConnection, SupportedCipherSuite, }; use rusty_ulid::Ulid; -use slab::Slab; use time::{Duration, Instant}; use sozu_command::{ @@ -48,7 +46,6 @@ use sozu_command::{ ready::Ready, request::WorkerRequest, response::{HttpFrontend, WorkerResponse}, - scm_socket::ScmSocket, state::ClusterId, }; @@ -66,7 +63,7 @@ use crate::{ Http, Pipe, SessionState, }, router::{Route, Router}, - server::{ListenSession, ListenToken, ProxyChannel, Server, SessionManager, SessionToken}, + server::{ListenToken, SessionManager}, socket::{server_bind, FrontRustls}, timer::TimeoutContainer, tls::{CertifiedKeyWrapper, MutexWrappedCertificateResolver, ResolveCertificate}, @@ -1498,83 +1495,54 @@ fn rustls_ciphersuite_str(cipher: SupportedCipherSuite) -> &'static str { } } -/// this function is not used, but is available for example and 
testing purposes -pub fn start_https_worker( - config: HttpsListenerConfig, - channel: ProxyChannel, - max_buffers: usize, - buffer_size: usize, -) -> anyhow::Result<()> { - use crate::server; - - let event_loop = Poll::new().with_context(|| "could not create event loop")?; - - let pool = Rc::new(RefCell::new(Pool::with_capacity( - 1, - max_buffers, - buffer_size, - ))); - let backends = Rc::new(RefCell::new(BackendMap::new())); - - let mut sessions: Slab>> = Slab::with_capacity(max_buffers); - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for channel", SessionToken(entry.key())); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for timer", SessionToken(entry.key())); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for metrics", SessionToken(entry.key())); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - - let token = { - let entry = sessions.vacant_entry(); - let key = entry.key(); - let _e = entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - Token(key) - }; - - let sessions = SessionManager::new(sessions, max_buffers); - let registry = event_loop - .registry() - .try_clone() - .with_context(|| "Failed at creating a registry")?; - let mut proxy = HttpsProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()); - let address = config.address.clone(); - if proxy.add_listener(config, token).is_some() - && proxy - .activate_listener( - &address - .parse() - .with_context(|| "Could not parse socket address")?, - None, - ) - .is_ok() - { - let (scm_server, _scm_client) = - UnixStream::pair().with_context(|| "Failed at creating scm stream sockets")?; - let server_config = server::ServerConfig { - max_connections: max_buffers, - ..Default::default() +pub mod testing { + use crate::testing::*; + + /// this function is not used, but is available for example and testing purposes + pub fn start_https_worker( + config: HttpsListenerConfig, + channel: ProxyChannel, + max_buffers: usize, + buffer_size: usize, + ) -> anyhow::Result<()> { + let address = config + .address + .parse() + .with_context(|| "Could not parse socket address")?; + + let ServerParts { + event_loop, + registry, + sessions, + pool, + backends, + client_scm_socket: _, + server_scm_socket, + server_config, + } = prebuild_server(max_buffers, buffer_size, true)?; + + let token = { + let mut sessions = sessions.borrow_mut(); + let entry = sessions.slab.vacant_entry(); + let key = entry.key(); + let _ = entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::HTTPSListen, + }))); + Token(key) }; + + let mut proxy = HttpsProxy::new(registry, sessions.clone(), pool.clone(), backends.clone()); + proxy + .add_listener(config, token) + .with_context(|| "Failed at creating adding the listener")?; + proxy + .activate_listener(&address, None) + .with_context(|| "Failed at creating activating the listener")?; + let mut server = Server::new( event_loop, channel, - ScmSocket::new(scm_server.as_raw_fd()).unwrap(), + server_scm_socket, sessions, pool, backends, @@ -1585,25 +1553,25 @@ pub fn start_https_worker( None, false, ) - .with_context(|| "Failed to create server")?; + .with_context(|| "Failed at creating server")?; - info!("starting event loop"); + debug!("starting event 
loop"); server.run(); - info!("ending event loop"); + debug!("ending event loop"); + Ok(()) } - Ok(()) } #[cfg(test)] mod tests { + use super::*; + use std::{str::FromStr, sync::Arc}; use sozu_command::config::ListenerBuilder; use crate::router::{trie::TrieNode, MethodRule, PathRule, Route, Router}; - use super::*; - /* #[test] #[cfg(target_pointer_width = "64")] diff --git a/lib/src/lib.rs b/lib/src/lib.rs index c6b2ea4a1..12c8d7f9b 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -76,7 +76,7 @@ //! let worker_thread_join_handle = thread::spawn(move || { //! let max_buffers = 500; //! let buffer_size = 16384; -//! sozu_lib::http::start_http_worker(http_listener, proxy_channel, max_buffers, buffer_size); +//! sozu_lib::http::testing::start_http_worker(http_listener, proxy_channel, max_buffers, buffer_size); //! }); //! ``` //! @@ -207,7 +207,7 @@ //! let _ = worker_thread_join_handle.join(); //! ``` //! -//! Here is the complete example for reference, it matches the `minimal.rs` example: +//! Here is the complete example for reference, it matches the `examples/http.rs` example: //! //! ``` //! extern crate time; @@ -246,7 +246,7 @@ //! let worker_thread_join_handle = thread::spawn(move || { //! let max_buffers = 500; //! let buffer_size = 16384; -//! sozu_lib::http::start_http_worker(http_listener, proxy_channel, max_buffers, buffer_size) +//! sozu_lib::http::testing::start_http_worker(http_listener, proxy_channel, max_buffers, buffer_size) //! .expect("The worker could not be started, or shut down"); //! }); //! @@ -1116,3 +1116,110 @@ impl PeakEWMA { (active_requests + 1) as f64 * self.rtt } } + +pub mod testing { + pub use std::{cell::RefCell, os::fd::IntoRawFd, rc::Rc}; + + pub use anyhow::Context; + pub use mio::{net::UnixStream, Poll, Registry, Token}; + pub use slab::Slab; + pub use sozu_command::{ + proto::command::{HttpListenerConfig, HttpsListenerConfig, TcpListenerConfig}, + scm_socket::{Listeners, ScmSocket}, + }; + + pub use crate::{ + backends::BackendMap, + http::HttpProxy, + https::HttpsProxy, + pool::Pool, + server::Server, + server::{ListenSession, ProxyChannel, ServerConfig, SessionManager}, + tcp::TcpProxy, + Protocol, ProxySession, + }; + + /// Everything needed to create a Server + pub struct ServerParts { + pub event_loop: Poll, + pub registry: Registry, + pub sessions: Rc>, + pub pool: Rc>, + pub backends: Rc>, + pub client_scm_socket: ScmSocket, + pub server_scm_socket: ScmSocket, + pub server_config: ServerConfig, + } + + /// Setup a standalone server, for testing purposes + pub fn prebuild_server( + max_buffers: usize, + buffer_size: usize, + send_scm: bool, + ) -> anyhow::Result { + let event_loop = Poll::new().with_context(|| "Failed at creating event loop")?; + let backends = Rc::new(RefCell::new(BackendMap::new())); + let server_config = ServerConfig { + max_connections: max_buffers, + ..Default::default() + }; + + let pool = Rc::new(RefCell::new(Pool::with_capacity( + 1, + max_buffers, + buffer_size, + ))); + + let mut sessions: Slab>> = Slab::with_capacity(max_buffers); + { + let entry = sessions.vacant_entry(); + info!("taking token {:?} for channel", entry.key()); + entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::Channel, + }))); + } + { + let entry = sessions.vacant_entry(); + info!("taking token {:?} for timer", entry.key()); + entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::Timer, + }))); + } + { + let entry = sessions.vacant_entry(); + info!("taking token {:?} for metrics", entry.key()); + 
entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::Metrics, + }))); + } + let sessions = SessionManager::new(sessions, max_buffers); + + let registry = event_loop + .registry() + .try_clone() + .with_context(|| "Failed at creating a registry")?; + + let (scm_server, scm_client) = + UnixStream::pair().with_context(|| "Failed at creating scm unix stream")?; + let client_scm_socket = ScmSocket::new(scm_client.into_raw_fd()) + .with_context(|| "Failed at creating the scm client socket")?; + let server_scm_socket = ScmSocket::new(scm_server.into_raw_fd()) + .with_context(|| "Failed at creating the scm server socket")?; + if send_scm { + client_scm_socket + .send_listeners(&Listeners::default()) + .with_context(|| "Failed at sending empty listeners")?; + } + + Ok(ServerParts { + event_loop, + registry, + sessions, + pool, + backends, + client_scm_socket, + server_scm_socket, + server_config, + }) + } +} diff --git a/lib/src/tcp.rs b/lib/src/tcp.rs index bdf70a66e..ff22be9dd 100644 --- a/lib/src/tcp.rs +++ b/lib/src/tcp.rs @@ -7,15 +7,11 @@ use std::{ rc::Rc, }; -use anyhow::Context; use mio::{ - net::TcpListener as MioTcpListener, - net::{TcpStream as MioTcpStream, UnixStream}, - unix::SourceFd, - Interest, Poll, Registry, Token, + net::TcpListener as MioTcpListener, net::TcpStream as MioTcpStream, unix::SourceFd, Interest, + Registry, Token, }; use rusty_ulid::Ulid; -use slab::Slab; use time::{Duration, Instant}; use sozu_command::{config::MAX_LOOP_ITERATIONS, proto::command::request::RequestType, ObjectKind}; @@ -31,10 +27,7 @@ use crate::{ Pipe, }, retry::RetryPolicy, - server::{ - push_event, ListenSession, ListenToken, ProxyChannel, Server, SessionManager, CONN_RETRIES, - TIMER, - }, + server::{push_event, ListenToken, SessionManager, CONN_RETRIES, TIMER}, socket::{server_bind, stats::socket_rtt}, sozu_command::{ logging, @@ -44,7 +37,6 @@ use crate::{ ready::Ready, request::WorkerRequest, response::WorkerResponse, - scm_socket::ScmSocket, state::ClusterId, }, timer::TimeoutContainer, @@ -1473,108 +1465,95 @@ impl ProxyConfiguration for TcpProxy { } } -/// This is not directly used by Sōzu but is available for example and testing purposes -pub fn start_tcp_worker( - config: TcpListenerConfig, - max_buffers: usize, - buffer_size: usize, - channel: ProxyChannel, -) -> anyhow::Result<()> { - use crate::server; - - let poll = Poll::new().with_context(|| "could not create event loop")?; - let pool = Rc::new(RefCell::new(Pool::with_capacity( - 1, - max_buffers, - buffer_size, - ))); - let backends = Rc::new(RefCell::new(BackendMap::new())); - - let mut sessions: Slab>> = Slab::with_capacity(max_buffers); - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for channel", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::TCPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for timer", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::TCPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for metrics", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::TCPListen, - }))); - } +pub mod testing { + use crate::testing::*; - let token = { - let entry = sessions.vacant_entry(); - let key = entry.key(); - let _e = entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::TCPListen, - }))); - Token(key) - }; + /// This is not directly used by Sōzu but is available for 
example and testing purposes + pub fn start_tcp_worker( + config: TcpListenerConfig, + max_buffers: usize, + buffer_size: usize, + channel: ProxyChannel, + ) -> anyhow::Result<()> { + let address = config + .address + .parse() + .with_context(|| "Could not parse socket address")?; - let sessions = SessionManager::new(sessions, max_buffers); - let address = config.address.clone(); - let registry = poll - .registry() - .try_clone() - .with_context(|| "Failed at creating a registry")?; - let mut configuration = TcpProxy::new(registry, sessions.clone(), backends.clone()); - let _ = configuration.add_listener(config, pool.clone(), token); - let _ = configuration.activate_listener(&address.parse().unwrap(), None); - let (scm_server, _scm_client) = - UnixStream::pair().with_context(|| "Failed at creating scm stream sockets")?; - let scm_socket = - ScmSocket::new(scm_server.as_raw_fd()).with_context(|| "Could not create scm socket")?; - let server_config = server::ServerConfig { - max_connections: max_buffers, - ..Default::default() - }; + let ServerParts { + event_loop, + registry, + sessions, + pool, + backends, + client_scm_socket: _, + server_scm_socket, + server_config, + } = prebuild_server(max_buffers, buffer_size, true)?; + + let token = { + let mut sessions = sessions.borrow_mut(); + let entry = sessions.slab.vacant_entry(); + let key = entry.key(); + let _ = entry.insert(Rc::new(RefCell::new(ListenSession { + protocol: Protocol::TCPListen, + }))); + Token(key) + }; - let mut server = Server::new( - poll, - channel, - scm_socket, - sessions, - pool, - backends, - None, - None, - Some(configuration), - server_config, - None, - false, - ) - .with_context(|| "Could not create tcp server")?; - - info!("starting event loop"); - server.run(); - info!("ending event loop"); - Ok(()) + let mut proxy = TcpProxy::new(registry, sessions.clone(), backends.clone()); + proxy + .add_listener(config, pool.clone(), token) + .with_context(|| "Failed at creating adding the listener")?; + proxy + .activate_listener(&address, None) + .with_context(|| "Failed at creating activating the listener")?; + + let mut server = Server::new( + event_loop, + channel, + server_scm_socket, + sessions, + pool, + backends, + None, + None, + Some(proxy), + server_config, + None, + false, + ) + .with_context(|| "Failed at creating server")?; + + debug!("starting event loop"); + server.run(); + debug!("ending event loop"); + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use crate::sozu_command::{ - channel::Channel, proto::command::LoadBalancingParams, scm_socket::Listeners, - }; + use super::testing::start_tcp_worker; + use crate::testing::*; + use std::{ io::{Read, Write}, net::{Shutdown, TcpListener, TcpStream}, - os::unix::io::IntoRawFd, - sync::atomic::{AtomicBool, Ordering}, - sync::{Arc, Barrier}, - {str, thread}, + str, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Barrier, + }, + thread, + }; + + use sozu_command::{ + channel::Channel, + config::ListenerBuilder, + proto::command::{request::RequestType, LoadBalancingParams, RequestTcpFrontend}, + request::WorkerRequest, + response::WorkerResponse, }; static TEST_FINISHED: AtomicBool = AtomicBool::new(false); @@ -1599,9 +1578,9 @@ mod tests { let _tx = start_proxy().expect("Could not start proxy"); barrier.wait(); - let mut s1 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address"); - let s3 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address"); - let mut s2 = TcpStream::connect("127.0.0.1:1234").expect("could not parse 
address"); + let mut s1 = TcpStream::connect("127.0.0.1:1234").expect("could not connect"); + let s3 = TcpStream::connect("127.0.0.1:1234").expect("could not connect"); + let mut s2 = TcpStream::connect("127.0.0.1:1234").expect("could not connect"); s1.write(&b"hello "[..]) .map_err(|e| { @@ -1656,7 +1635,7 @@ mod tests { } fn start_server(barrier: Arc) { - let listener = TcpListener::bind("127.0.0.1:5678").expect("could not parse address"); + let listener = TcpListener::bind("127.0.0.1:5678").expect("could not bind"); fn handle_client(stream: &mut TcpStream, id: u8) { let mut buf = [0; 128]; let _response = b" END"; @@ -1694,102 +1673,15 @@ mod tests { /// used in tests only pub fn start_proxy() -> anyhow::Result> { - use crate::server; + let config = ListenerBuilder::new_tcp("127.0.0.1:1234") + .to_tcp(None) + .expect("could not create listener config"); - info!("listen for connections"); let (mut command, channel) = Channel::generate(1000, 10000).with_context(|| "should create a channel")?; - - // this thread should call a start() function that performs the same logic and returns Result<()> - // any error coming from this start() would be mapped and logged within the thread - thread::spawn(move || { + let _jg = thread::spawn(move || { setup_test_logger!(); - info!("starting event loop"); - let poll = Poll::new().expect("could not create event loop"); - let max_connections = 100; - let buffer_size = 16384; - let pool = Rc::new(RefCell::new(Pool::with_capacity( - 1, - 2 * max_connections, - buffer_size, - ))); - let backends = Rc::new(RefCell::new(BackendMap::new())); - - let mut sessions: Slab>> = - Slab::with_capacity(max_connections); - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for channel", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for timer", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - { - let entry = sessions.vacant_entry(); - info!("taking token {:?} for metrics", entry.key()); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::HTTPListen, - }))); - } - - let sessions = SessionManager::new(sessions, max_connections); - let registry = poll.registry().try_clone().unwrap(); - let mut configuration = TcpProxy::new(registry, sessions.clone(), backends.clone()); - let listener_config = TcpListenerConfig { - address: "127.0.0.1:1234".to_string(), - ..Default::default() - }; - - { - let address = listener_config.address.clone(); - let mut s = sessions.borrow_mut(); - let entry = s.slab.vacant_entry(); - let _ = - configuration.add_listener(listener_config, pool.clone(), Token(entry.key())); - let _ = configuration.activate_listener(&address.parse().unwrap(), None); - entry.insert(Rc::new(RefCell::new(ListenSession { - protocol: Protocol::TCPListen, - }))); - } - - let (scm_server, scm_client) = UnixStream::pair().unwrap(); - let client_scm_socket = - ScmSocket::new(scm_client.into_raw_fd()).expect("Could not create scm socket"); - let server_scm_socket = - ScmSocket::new(scm_server.as_raw_fd()).expect("Could not create scm socket"); - client_scm_socket - .send_listeners(&Listeners::default()) - .unwrap(); - - let server_config = server::ServerConfig { - max_connections, - ..Default::default() - }; - let mut server = Server::new( - poll, - channel, - server_scm_socket, - sessions, - pool, - backends, - None, - None, - 
Some(configuration), - server_config, - None, - false, - ) - .expect("Failed at creating the server"); - info!("will run"); - server.run(); - info!("ending event loop"); + start_tcp_worker(config, 100, 16384, channel).expect("could not start the tcp server"); }); command.blocking().unwrap(); From 01a84dcdde72b1ed93a8c6c1ab68207ee94a7fc4 Mon Sep 17 00:00:00 2001 From: Emmanuel Bosquet Date: Tue, 30 Jan 2024 16:45:27 +0100 Subject: [PATCH 11/11] ci: Add benchmarking --- .github/workflows/bench.py | 43 +++++ .github/workflows/bench.toml | 50 ++++++ .github/workflows/benchmark.yml | 161 ++++++++++++++++++ .github/workflows/ecdsa.cnf | 15 ++ .github/workflows/rsa-2048.cnf | 16 ++ .github/workflows/rsa-4096.cnf | 16 ++ bin/src/ctl/mod.rs | 14 +- bin/src/ctl/request_builder.rs | 71 ++++---- bin/src/util.rs | 9 - command/src/command.proto | 60 +++++-- command/src/config.rs | 171 ++++++++----------- command/src/proto/display.rs | 9 + command/src/request.rs | 69 ++++++-- command/src/response.rs | 8 +- command/src/state.rs | 283 ++++++++++++++++---------------- e2e/src/sozu/worker.rs | 10 +- e2e/src/tests/mod.rs | 22 +-- e2e/src/tests/tests.rs | 58 ++++--- lib/examples/http.rs | 8 +- lib/examples/https.rs | 32 ++-- lib/examples/tcp.rs | 8 +- lib/src/backends.rs | 2 +- lib/src/http.rs | 70 ++++---- lib/src/https.rs | 84 +++------- lib/src/lib.rs | 29 ++-- lib/src/protocol/kawa_h1/mod.rs | 4 +- lib/src/server.rs | 14 +- lib/src/socket.rs | 22 +-- lib/src/tcp.rs | 69 ++------ lib/src/tls.rs | 12 +- 30 files changed, 821 insertions(+), 618 deletions(-) create mode 100644 .github/workflows/bench.py create mode 100644 .github/workflows/bench.toml create mode 100644 .github/workflows/benchmark.yml create mode 100644 .github/workflows/ecdsa.cnf create mode 100644 .github/workflows/rsa-2048.cnf create mode 100644 .github/workflows/rsa-4096.cnf diff --git a/.github/workflows/bench.py b/.github/workflows/bench.py new file mode 100644 index 000000000..c957954a6 --- /dev/null +++ b/.github/workflows/bench.py @@ -0,0 +1,43 @@ +import subprocess +import logging +import time + +def run(url: str, bombardierduration: str): + logging.info("🎯 Initalize environnment") + try: + lagging_server = subprocess.Popen( + ["./lagging_server", "-w", "4", "-p", "4444"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + sozu = subprocess.Popen( + ["./sozu", "start", "-c", "bench.toml"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + time.sleep(3) + except subprocess.CalledProcessError as e: + logging.error(f"🚨 Command failed with return code {e.returncode}") + + try: + subprocess.run(["./bombardier", "-c", "400", "-p", "intro,result", "--fasthttp", "-l", "-t", "10s", "-d", bombardierduration, url]) + + except subprocess.CalledProcessError as e: + logging.error(f"🚨 Failed to run benchmark {e.returncode}") + + logging.info("🪓 Destroy environment") + try: + subprocess.run(["kill", str(lagging_server.pid)]) + subprocess.run(["kill", str(sozu.pid)]) + except subprocess.CalledProcessError as e: + logging.error(f"🚨 Failed to destroy environnement {e.returncode}") + +logging.basicConfig(encoding='utf-8', level=logging.INFO) +logging.info("💣 Launching benchmark") + +run("http://sozu.io:8080/api", "1m") +run("https://rsa-2048.sozu.io:8443/api", "1m") +run("https://rsa-4096.sozu.io:8443/api", "1m") +run("https://ecdsa.sozu.io:8443/api", "1m") diff --git a/.github/workflows/bench.toml b/.github/workflows/bench.toml new file mode 100644 index 000000000..086d2d546 --- /dev/null +++ b/.github/workflows/bench.toml 
@@ -0,0 +1,50 @@ + +log_level = "error" +log_target = "stdout" +command_socket = "./sozu.sock" +command_buffer_size = 16384 +max_command_buffer_size = 163840 +worker_count = 1 +worker_automatic_restart = true +handle_process_affinity = false +max_connections = 500 +buffer_size = 16393 +activate_listeners = true + +[[listeners]] +protocol = "http" +# listening address +address = "0.0.0.0:8080" + +[[listeners]] +protocol = "https" +address = "0.0.0.0:8443" +tls_versions = ["TLS_V12", "TLS_V13"] +cipher_list = [ + # TLS 1.3 cipher suites + "TLS13_AES_256_GCM_SHA384", + "TLS13_AES_128_GCM_SHA256", + "TLS13_CHACHA20_POLY1305_SHA256", + # TLS 1.2 cipher suites + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", +] + +[clusters] +[clusters.BenchCluster] + +protocol = "http" +load_balancing = "ROUND_ROBIN" +frontends = [ + { address = "0.0.0.0:8080", hostname = "sozu.io"}, + { address = "0.0.0.0:8443", hostname = "rsa-2048.sozu.io", certificate = "rsa-2048.pem", certificate_chain = "rsa-2048.pem", key = "rsa-2048.key"}, + { address = "0.0.0.0:8443", hostname = "rsa-4096.sozu.io", certificate = "rsa-4096.pem", certificate_chain = "rsa-4096.pem", key = "rsa-4096.key"}, + { address = "0.0.0.0:8443", hostname = "ecdsa.sozu.io", certificate = "ecdsa.pem", certificate_chain = "ecdsa.pem", key = "ecdsa.key"}, +] +backends = [ + { address = "0.0.0.0:4444"} +] diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 000000000..e3a376fa1 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,161 @@ +--- +name: Benchmark + +on: [ push, pull_request ] + +env: + CARGO_TERM_COLOR: always + +jobs: + build-bombardier: + name: Build Bombardier 💣️ + runs-on: ubuntu-latest + steps: + - name: Clone bombardier + uses: actions/checkout@v4 + with: + repository: codesenberg/bombardier + path: . + + - uses: actions/setup-go@v5 + with: + go-version: '1.18' + check-latest: false + + - name: Build bombardier + run: go build -o bombardier + + - name: 📤 Upload bombardier + uses: actions/upload-artifact@v4 + with: + name: bombardier + path: bombardier + + build-lagging_server: + name: Build Lagging_Server ⚡️ + runs-on: ubuntu-latest + steps: + - name: Install rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + rustflags: "" + + - name: Clone lagging_server + uses: actions/checkout@v4 + with: + repository: CleverCloud/lagging_server + path: . 
+ + - uses: Swatinem/rust-cache@v2 + with: + cache-all-crates: true + prefix-key: "lagging_server" + + - name: Build lagging_server + run: cargo build --release + + - name: 📤 Upload lagging_server + uses: actions/upload-artifact@v4 + with: + name: lagging_server + path: target/release/lagging_server + + build-sozu: + name: Build Sozu 🦀 + runs-on: ubuntu-latest + steps: + - name: Install protobuf compiler + run: sudo apt-get install -y protobuf-compiler + + - name: Install rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + rustflags: "" + + - uses: actions/checkout@v4 + + - uses: Swatinem/rust-cache@v2 + with: + cache-all-crates: true + prefix-key: "sozu" + + - name: Build Sozu + run: cargo build --release + + - name: 📤 Upload sozu + uses: actions/upload-artifact@v4 + with: + name: sozu + path: target/release/sozu + + bench: + name: Benchmark 🎯 + runs-on: ubuntu-latest + needs: [build-bombardier, build-lagging_server, build-sozu] + steps: + - uses: actions/checkout@v4 + + - name: 📥 Download bombardier + uses: actions/download-artifact@v4 + with: + name: bombardier + path: .github/workflows + - name: 📥 Download lagging_server + uses: actions/download-artifact@v4 + with: + name: lagging_server + path: .github/workflows + - name: 📥 Download sozu + uses: actions/download-artifact@v4 + with: + name: sozu + path: .github/workflows + + - name: Host mapping sozu.io domains + run: | + sudo echo "0.0.0.0 sozu.io" | sudo tee -a /etc/hosts + sudo echo "0.0.0.0 rsa-2048.sozu.io" | sudo tee -a /etc/hosts + sudo echo "0.0.0.0 rsa-4096.sozu.io" | sudo tee -a /etc/hosts + sudo echo "0.0.0.0 ecdsa.sozu.io" | sudo tee -a /etc/hosts + + - name: Generate TLS key rsa 2048 + working-directory: .github/workflows + run: | + openssl req -newkey rsa:2048 -nodes -keyout rsa-2048.key -out rsa-2048.csr -config rsa-2048.cnf + openssl x509 -req -days 365 -in rsa-2048.csr -signkey rsa-2048.key -out rsa-2048.pem -extensions req_ext -extfile rsa-2048.cnf + sudo cp rsa-2048.pem /usr/local/share/ca-certificates/rsa-2048.crt + sudo update-ca-certificates + + - name: Generate TLS key rsa 4096 + working-directory: .github/workflows + run: | + openssl req -newkey rsa:4096 -nodes -keyout rsa-4096.key -out rsa-4096.csr -config rsa-4096.cnf + openssl x509 -req -days 365 -in rsa-4096.csr -signkey rsa-4096.key -out rsa-4096.pem -extensions req_ext -extfile rsa-4096.cnf + sudo cp rsa-4096.pem /usr/local/share/ca-certificates/rsa-4096.crt + sudo update-ca-certificates + + - name: Generate TLS key ecdsa + working-directory: .github/workflows + run: | + openssl ecparam -name prime256v1 -genkey -out ecdsa.key + openssl req -new -key ecdsa.key -out ecdsa.csr -config ecdsa.cnf + openssl x509 -req -days 365 -in ecdsa.csr -signkey ecdsa.key -out ecdsa.pem -extensions req_ext -extfile ecdsa.cnf + sudo cp ecdsa.pem /usr/local/share/ca-certificates/ecdsa.crt + sudo update-ca-certificates + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Fix rights + working-directory: .github/workflows + run: | + chmod +x bombardier + chmod +x lagging_server + chmod +x sozu + + - name: ⚡ Launch bench + working-directory: .github/workflows + run: + python bench.py \ No newline at end of file diff --git a/.github/workflows/ecdsa.cnf b/.github/workflows/ecdsa.cnf new file mode 100644 index 000000000..b09c62155 --- /dev/null +++ b/.github/workflows/ecdsa.cnf @@ -0,0 +1,15 @@ +[ req ] +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +CN = sozu.io + 
+[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = sozu.io +DNS.2 = ecdsa.sozu.io \ No newline at end of file diff --git a/.github/workflows/rsa-2048.cnf b/.github/workflows/rsa-2048.cnf new file mode 100644 index 000000000..07769a9c6 --- /dev/null +++ b/.github/workflows/rsa-2048.cnf @@ -0,0 +1,16 @@ +[ req ] +default_bits = 2048 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +CN = sozu.io + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = sozu.io +DNS.2 = rsa-2048.sozu.io \ No newline at end of file diff --git a/.github/workflows/rsa-4096.cnf b/.github/workflows/rsa-4096.cnf new file mode 100644 index 000000000..f7a1f84f6 --- /dev/null +++ b/.github/workflows/rsa-4096.cnf @@ -0,0 +1,16 @@ +[ req ] +default_bits = 4096 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn + +[ dn ] +CN = sozu.io + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = sozu.io +DNS.2 = rsa-4096.sozu.io \ No newline at end of file diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index 22a6c9ff6..148f9081f 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -47,8 +47,6 @@ pub enum CtlError { ArgsNeeded(String, String), #[error("could not load certificate")] LoadCertificate(CertificateError), - #[error("wrong address {0}: {1}")] - WrongAddress(String, UtilError), #[error("wrong input to create listener")] CreateListener(ConfigError), #[error("domain can not be empty")] @@ -161,19 +159,13 @@ impl CommandManager { key, address, tls_versions, - } => self.add_certificate( - address.to_string(), - &certificate, - &chain, - &key, - tls_versions, - ), + } => self.add_certificate(address.into(), &certificate, &chain, &key, tls_versions), CertificateCmd::Remove { certificate, address, fingerprint, } => self.remove_certificate( - address.to_string(), + address.into(), certificate.as_deref(), fingerprint.as_deref(), ), @@ -186,7 +178,7 @@ impl CommandManager { old_fingerprint, tls_versions, } => self.replace_certificate( - address.to_string(), + address.into(), &certificate, &chain, &key, diff --git a/bin/src/ctl/request_builder.rs b/bin/src/ctl/request_builder.rs index 6389ab625..1affeb0aa 100644 --- a/bin/src/ctl/request_builder.rs +++ b/bin/src/ctl/request_builder.rs @@ -11,7 +11,8 @@ use sozu_command_lib::{ LoadBalancingParams, MetricsConfiguration, PathRule, ProxyProtocolConfig, QueryCertificatesFilters, QueryClusterByDomain, QueryClustersHashes, RemoveBackend, RemoveCertificate, RemoveListener, ReplaceCertificate, RequestHttpFrontend, - RequestTcpFrontend, RulePosition, SoftStop, Status, SubscribeEvents, TlsVersion, + RequestTcpFrontend, RulePosition, SocketAddress, SoftStop, Status, SubscribeEvents, + TlsVersion, }, }; @@ -21,7 +22,6 @@ use crate::{ MetricsCmd, TcpFrontendCmd, TcpListenerCmd, }, ctl::CommandManager, - util::parse_socket_address, }; use super::CtlError; @@ -118,7 +118,7 @@ impl CommandManager { } => self.send_request( RequestType::AddBackend(AddBackend { cluster_id: id, - address: address.to_string(), + address: address.into(), backend_id, load_balancing_parameters: Some(LoadBalancingParams::default()), sticky_id, @@ -133,7 +133,7 @@ impl CommandManager { } => self.send_request( RequestType::RemoveBackend(RemoveBackend { cluster_id: id, - address: address.to_string(), + address: address.into(), backend_id, }) .into(), @@ -211,7 +211,7 @@ impl CommandManager { TcpFrontendCmd::Add { id, address, tags } => self.send_request( 
RequestType::AddTcpFrontend(RequestTcpFrontend { cluster_id: id, - address: address.to_string(), + address: address.into(), tags: tags.unwrap_or(BTreeMap::new()), }) .into(), @@ -219,7 +219,7 @@ impl CommandManager { TcpFrontendCmd::Remove { id, address } => self.send_request( RequestType::RemoveTcpFrontend(RequestTcpFrontend { cluster_id: id, - address: address.to_string(), + address: address.into(), ..Default::default() }) .into(), @@ -241,7 +241,7 @@ impl CommandManager { } => self.send_request( RequestType::AddHttpFrontend(RequestHttpFrontend { cluster_id: route.into(), - address: address.to_string(), + address: address.into(), hostname, path: PathRule::from_cli_options(path_prefix, path_regex, path_equals), method: method.map(String::from), @@ -264,7 +264,7 @@ impl CommandManager { } => self.send_request( RequestType::RemoveHttpFrontend(RequestHttpFrontend { cluster_id: route.into(), - address: address.to_string(), + address: address.into(), hostname, path: PathRule::from_cli_options(path_prefix, path_regex, path_equals), method: method.map(String::from), @@ -289,7 +289,7 @@ impl CommandManager { } => self.send_request( RequestType::AddHttpsFrontend(RequestHttpFrontend { cluster_id: route.into(), - address: address.to_string(), + address: address.into(), hostname, path: PathRule::from_cli_options(path_prefix, path_regex, path_equals), method: method.map(String::from), @@ -312,7 +312,7 @@ impl CommandManager { } => self.send_request( RequestType::RemoveHttpsFrontend(RequestHttpFrontend { cluster_id: route.into(), - address: address.to_string(), + address: address.into(), hostname, path: PathRule::from_cli_options(path_prefix, path_regex, path_equals), method: method.map(String::from), @@ -339,7 +339,7 @@ impl CommandManager { request_timeout, connect_timeout, } => { - let https_listener = ListenerBuilder::new_https(address) + let https_listener = ListenerBuilder::new_https(address.into()) .with_public_address(public_address) .with_answer_404_path(answer_404) .with_answer_503_path(answer_503) @@ -357,13 +357,13 @@ impl CommandManager { self.send_request(RequestType::AddHttpsListener(https_listener).into()) } HttpsListenerCmd::Remove { address } => { - self.remove_listener(address.to_string(), ListenerType::Https) + self.remove_listener(address.into(), ListenerType::Https) } HttpsListenerCmd::Activate { address } => { - self.activate_listener(address.to_string(), ListenerType::Https) + self.activate_listener(address.into(), ListenerType::Https) } HttpsListenerCmd::Deactivate { address } => { - self.deactivate_listener(address.to_string(), ListenerType::Https) + self.deactivate_listener(address.into(), ListenerType::Https) } } } @@ -382,7 +382,7 @@ impl CommandManager { request_timeout, connect_timeout, } => { - let http_listener = ListenerBuilder::new_http(address) + let http_listener = ListenerBuilder::new_http(address.into()) .with_public_address(public_address) .with_answer_404_path(answer_404) .with_answer_503_path(answer_503) @@ -398,13 +398,13 @@ impl CommandManager { self.send_request(RequestType::AddHttpListener(http_listener).into()) } HttpListenerCmd::Remove { address } => { - self.remove_listener(address.to_string(), ListenerType::Http) + self.remove_listener(address.into(), ListenerType::Http) } HttpListenerCmd::Activate { address } => { - self.activate_listener(address.to_string(), ListenerType::Http) + self.activate_listener(address.into(), ListenerType::Http) } HttpListenerCmd::Deactivate { address } => { - self.deactivate_listener(address.to_string(), ListenerType::Http) 
+ self.deactivate_listener(address.into(), ListenerType::Http) } } } @@ -416,7 +416,7 @@ impl CommandManager { public_address, expect_proxy, } => { - let listener = ListenerBuilder::new_tcp(address) + let listener = ListenerBuilder::new_tcp(address.into()) .with_public_address(public_address) .with_expect_proxy(expect_proxy) .to_tcp(Some(&self.config)) @@ -425,13 +425,13 @@ impl CommandManager { self.send_request(RequestType::AddTcpListener(listener).into()) } TcpListenerCmd::Remove { address } => { - self.remove_listener(address.to_string(), ListenerType::Tcp) + self.remove_listener(address.into(), ListenerType::Tcp) } TcpListenerCmd::Activate { address } => { - self.activate_listener(address.to_string(), ListenerType::Tcp) + self.activate_listener(address.into(), ListenerType::Tcp) } TcpListenerCmd::Deactivate { address } => { - self.deactivate_listener(address.to_string(), ListenerType::Tcp) + self.deactivate_listener(address.into(), ListenerType::Tcp) } } } @@ -442,15 +442,12 @@ impl CommandManager { pub fn remove_listener( &mut self, - address: String, + address: SocketAddress, listener_type: ListenerType, ) -> Result<(), CtlError> { - let address = parse_socket_address(&address) - .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; - self.send_request( RequestType::RemoveListener(RemoveListener { - address: address.to_string(), + address, proxy: listener_type.into(), }) .into(), @@ -459,15 +456,12 @@ impl CommandManager { pub fn activate_listener( &mut self, - address: String, + address: SocketAddress, listener_type: ListenerType, ) -> Result<(), CtlError> { - let address = parse_socket_address(&address) - .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; - self.send_request( RequestType::ActivateListener(ActivateListener { - address: address.to_string(), + address, proxy: listener_type.into(), from_scm: false, }) @@ -477,15 +471,12 @@ impl CommandManager { pub fn deactivate_listener( &mut self, - address: String, + address: SocketAddress, listener_type: ListenerType, ) -> Result<(), CtlError> { - let address = parse_socket_address(&address) - .map_err(|util_err| CtlError::WrongAddress(address, util_err))?; - self.send_request( RequestType::DeactivateListener(DeactivateListener { - address: address.to_string(), + address, proxy: listener_type.into(), to_scm: false, }) @@ -499,7 +490,7 @@ impl CommandManager { pub fn add_certificate( &mut self, - address: String, + address: SocketAddress, certificate_path: &str, certificate_chain_path: &str, key_path: &str, @@ -527,7 +518,7 @@ impl CommandManager { #[allow(clippy::too_many_arguments)] pub fn replace_certificate( &mut self, - address: String, + address: SocketAddress, new_certificate_path: &str, new_certificate_chain_path: &str, new_key_path: &str, @@ -575,7 +566,7 @@ impl CommandManager { pub fn remove_certificate( &mut self, - address: String, + address: SocketAddress, certificate_path: Option<&str>, fingerprint: Option<&str>, ) -> Result<(), CtlError> { diff --git a/bin/src/util.rs b/bin/src/util.rs index 5f1c09ab3..b888bd970 100644 --- a/bin/src/util.rs +++ b/bin/src/util.rs @@ -2,7 +2,6 @@ use std::{ ffi::OsString, fs::{read_link, File}, io::{Error as IoError, Write}, - net::{AddrParseError, SocketAddr}, os::unix::io::RawFd, path::PathBuf, }; @@ -45,8 +44,6 @@ pub enum UtilError { or use the SOZU_CONFIG environment variable when building sozu." 
)] GetConfigFilePath, - #[error("could not parse socket address: {0}")] - ParseSocketAddress(AddrParseError), } /// FD_CLOEXEC is set by default on every fd in Rust standard lib, @@ -117,12 +114,6 @@ pub fn get_config_file_path(args: &cli::Args) -> Result<&str, UtilError> { } } -pub fn parse_socket_address(address: &str) -> Result { - address - .parse::() - .map_err(UtilError::ParseSocketAddress) -} - #[cfg(target_os = "freebsd")] pub unsafe fn get_executable_path() -> Result { let mut capacity = PATH_MAX as usize; diff --git a/command/src/command.proto b/command/src/command.proto index 391f67449..e7c92547a 100644 --- a/command/src/command.proto +++ b/command/src/command.proto @@ -115,8 +115,8 @@ message CountRequests {} // details of an HTTP listener message HttpListenerConfig { - required string address = 1; - optional string public_address = 2; + required SocketAddress address = 1; + optional SocketAddress public_address = 2; required string answer_404 = 3; required string answer_503 = 4; required bool expect_proxy = 5 [default = false]; @@ -135,8 +135,8 @@ message HttpListenerConfig { // details of an HTTPS listener message HttpsListenerConfig { - required string address = 1; - optional string public_address = 2; + required SocketAddress address = 1; + optional SocketAddress public_address = 2; required string answer_404 = 3; required string answer_503 = 4; required bool expect_proxy = 5 [default = false]; @@ -168,8 +168,8 @@ message HttpsListenerConfig { // details of an TCP listener message TcpListenerConfig { - required string address = 1; - optional string public_address = 2; + required SocketAddress address = 1; + optional SocketAddress public_address = 2; required bool expect_proxy = 3 [default = false]; // client inactive time, in seconds required uint32 front_timeout = 4 [default = 60]; @@ -182,19 +182,19 @@ message TcpListenerConfig { } message ActivateListener { - required string address = 1; + required SocketAddress address = 1; required ListenerType proxy = 2; required bool from_scm = 3; } message DeactivateListener { - required string address = 1; + required SocketAddress address = 1; required ListenerType proxy = 2; required bool to_scm = 3; } message RemoveListener { - required string address = 1; + required SocketAddress address = 1; required ListenerType proxy = 2; } @@ -217,7 +217,7 @@ message ListenersList { // An HTTP or HTTPS frontend, as order to, or received from, Sōzu message RequestHttpFrontend { optional string cluster_id = 1; - required string address = 2; + required SocketAddress address = 2; required string hostname = 3; required PathRule path = 4; optional string method = 5; @@ -229,7 +229,7 @@ message RequestHttpFrontend { message RequestTcpFrontend { required string cluster_id = 1; // the socket address on which to listen for incoming traffic - required string address = 2; + required SocketAddress address = 2; // custom tags to identify the frontend in the access logs map tags = 3; } @@ -269,20 +269,20 @@ enum RulePosition { // Add a new TLS certificate to an HTTPs listener message AddCertificate { - required string address = 1; + required SocketAddress address = 1; required CertificateAndKey certificate = 2; // A unix timestamp. Overrides certificate expiration. 
optional int64 expired_at = 3; } message RemoveCertificate { - required string address = 1; + required SocketAddress address = 1; // a hex-encoded TLS fingerprint to identify the certificate to remove required string fingerprint = 2; } message ReplaceCertificate { - required string address = 1; + required SocketAddress address = 1; required CertificateAndKey new_certificate = 2; // a hex-encoded TLS fingerprint to identify the old certificate required string old_fingerprint = 3; @@ -323,7 +323,7 @@ message ListOfCertificatesByAddress { // Summaries of certificates for a given address message CertificatesByAddress { - required string address = 1; + required SocketAddress address = 1; repeated CertificateSummary certificate_summaries = 2; } @@ -382,7 +382,7 @@ message AddBackend { required string cluster_id = 1; required string backend_id = 2; // the socket address of the backend - required string address = 3; + required SocketAddress address = 3; optional string sticky_id = 4; optional LoadBalancingParams load_balancing_parameters = 5; optional bool backup = 6; @@ -393,7 +393,7 @@ message RemoveBackend { required string cluster_id = 1; required string backend_id = 2; // the socket address of the backend - required string address = 3 ; + required SocketAddress address = 3 ; } message LoadBalancingParams { @@ -502,7 +502,7 @@ message Event { required EventKind kind = 1; optional string cluster_id = 2; optional string backend_id = 3; - optional string address = 4; + optional SocketAddress address = 4; } enum EventKind { @@ -607,4 +607,28 @@ message Percentiles { message RequestCounts { map map = 1; +} + +// matches std::net::SocketAddr in the Rust library +// beware that the ports are expressed with uint32 here, +// but they should NOT exceed uint16 value +message SocketAddress { + required IpAddress ip = 1; + required uint32 port = 2; +} + +message IpAddress { + oneof inner { + uint32 v4 = 1; + Uint128 v6 = 2; + } +} + +// used to represent the 128 bits of an IPv6 address +message Uint128 { + // higher value, first 8 bytes of the ip + required uint64 low = 1; + // lower value, last 8 bytes of the ip + required uint64 high = 2; + } \ No newline at end of file diff --git a/command/src/config.rs b/command/src/config.rs index efe1cdc26..594634021 100644 --- a/command/src/config.rs +++ b/command/src/config.rs @@ -65,8 +65,8 @@ use crate::{ request::RequestType, ActivateListener, AddBackend, AddCertificate, CertificateAndKey, Cluster, HttpListenerConfig, HttpsListenerConfig, ListenerType, LoadBalancingAlgorithms, LoadBalancingParams, LoadMetric, MetricsConfiguration, PathRule, ProxyProtocolConfig, - Request, RequestHttpFrontend, RequestTcpFrontend, RulePosition, TcpListenerConfig, - TlsVersion, + Request, RequestHttpFrontend, RequestTcpFrontend, RulePosition, SocketAddress, + TcpListenerConfig, TlsVersion, }, request::WorkerRequest, ObjectKind, @@ -185,8 +185,6 @@ pub enum MissingKind { #[derive(thiserror::Error, Debug)] pub enum ConfigError { - #[error("Could not parse socket address {address}: {error}")] - ParseSocketAddress { address: String, error: String }, #[error("env path not found: {0}")] Env(String), #[error("Could not open file {path_to_open}: {io_error}")] @@ -209,8 +207,8 @@ pub enum ConfigError { InvalidFrontendConfig(String), #[error("invalid path {0:?}")] InvalidPath(PathBuf), - #[error("listening address {0} is already used in the configuration")] - ListenerAddressAlreadyInUse(String), + #[error("listening address {0:?} is already used in the configuration")] + 
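The SocketAddress, IpAddress and Uint128 messages above replace the string addresses previously used in listener and frontend requests. As a rough illustration of what this changes on the Rust side (a sketch only, assuming the prost-generated types and the SocketAddress::new_v4 helper introduced further down in this patch; the function name here is hypothetical):

use sozu_command_lib::proto::command::{SocketAddress, TcpListenerConfig};

/// builds a TCP listener config with a typed address instead of the old "127.0.0.1:1234" string
fn example_tcp_listener() -> TcpListenerConfig {
    TcpListenerConfig {
        address: SocketAddress::new_v4(127, 0, 0, 1, 1234),
        // the remaining prost-generated fields keep their defaults
        ..Default::default()
    }
}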
ListenerAddressAlreadyInUse(SocketAddr), #[error("missing {0:?}")] Missing(MissingKind), #[error("could not get parent directory for file {0}")] @@ -231,12 +229,12 @@ pub enum ConfigError { } /// An HTTP, HTTPS or TCP listener as parsed from the `Listeners` section in the toml -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Default, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ListenerBuilder { - pub address: String, + pub address: SocketAddr, pub protocol: Option, - pub public_address: Option, + pub public_address: Option, /// path to the 404 html file pub answer_404: Option, /// path to the 503 html file @@ -273,50 +271,50 @@ pub fn default_sticky_name() -> String { impl ListenerBuilder { /// starts building an HTTP Listener with config values for timeouts, /// or defaults if no config is provided - pub fn new_http(address: S) -> ListenerBuilder - where - S: ToString, - { + pub fn new_http(address: SocketAddress) -> ListenerBuilder { Self::new(address, ListenerProtocol::Http) } /// starts building an HTTPS Listener with config values for timeouts, /// or defaults if no config is provided - pub fn new_tcp(address: S) -> ListenerBuilder - where - S: ToString, - { + pub fn new_tcp(address: SocketAddress) -> ListenerBuilder { Self::new(address, ListenerProtocol::Tcp) } /// starts building a TCP Listener with config values for timeouts, /// or defaults if no config is provided - pub fn new_https(address: S) -> ListenerBuilder - where - S: ToString, - { + pub fn new_https(address: SocketAddress) -> ListenerBuilder { Self::new(address, ListenerProtocol::Https) } /// starts building a Listener - fn new(address: S, protocol: ListenerProtocol) -> ListenerBuilder - where - S: ToString, - { + fn new(address: SocketAddress, protocol: ListenerProtocol) -> ListenerBuilder { ListenerBuilder { - address: address.to_string(), + address: address.into(), protocol: Some(protocol), sticky_name: DEFAULT_STICKY_NAME.to_string(), - ..Default::default() + public_address: None, + answer_404: None, + answer_503: None, + tls_versions: None, + cipher_list: None, + cipher_suites: None, + expect_proxy: None, + certificate: None, + certificate_chain: None, + key: None, + front_timeout: None, + back_timeout: None, + connect_timeout: None, + request_timeout: None, + config: None, + send_tls13_tickets: None, } } - pub fn with_public_address(&mut self, public_address: Option) -> &mut Self - where - S: ToString, - { + pub fn with_public_address(&mut self, public_address: Option) -> &mut Self { if let Some(address) = public_address { - self.public_address = Some(address.to_string()); + self.public_address = Some(address); } self } @@ -412,17 +410,6 @@ impl ListenerBuilder { self } - pub fn parse_address(&self) -> Result { - parse_socket_address(&self.address) - } - - pub fn parse_public_address(&self) -> Result, ConfigError> { - match &self.public_address { - Some(a) => Ok(Some(parse_socket_address(a)?)), - None => Ok(None), - } - } - /// Assign the timeouts of the config to this listener, only if timeouts did not exist fn assign_config_timeouts(&mut self, config: &Config) { self.front_timeout = Some(self.front_timeout.unwrap_or(config.front_timeout)); @@ -446,13 +433,9 @@ impl ListenerBuilder { let (answer_404, answer_503) = self.get_404_503_answers()?; - let _address = self.parse_address()?; - - let _public_address = self.parse_public_address()?; - let configuration = HttpListenerConfig { - address: self.address.clone(), - public_address: 
self.public_address.clone(), + address: self.address.into(), + public_address: self.public_address.map(|a| a.into()), expect_proxy: self.expect_proxy.unwrap_or(false), sticky_name: self.sticky_name.clone(), front_timeout: self.front_timeout.unwrap_or(DEFAULT_FRONT_TIMEOUT), @@ -532,23 +515,16 @@ impl ListenerBuilder { .map(split_certificate_chain) .unwrap_or_else(Vec::new); - let (answer_404, answer_503) = self - .get_404_503_answers() - //.with_context(|| "Could not get 404 and 503 answers from file system") - ?; - - let _address = self.parse_address()?; - - let _public_address = self.parse_public_address()?; + let (answer_404, answer_503) = self.get_404_503_answers()?; if let Some(config) = config { self.assign_config_timeouts(config); } let https_listener_config = HttpsListenerConfig { - address: self.address.clone(), + address: self.address.into(), sticky_name: self.sticky_name.clone(), - public_address: self.public_address.clone(), + public_address: self.public_address.map(|a| a.into()), cipher_list, versions, expect_proxy: self.expect_proxy.unwrap_or(false), @@ -582,17 +558,13 @@ impl ListenerBuilder { }); } - let _address = self.parse_address()?; - - let _public_address = self.parse_public_address()?; - if let Some(config) = config { self.assign_config_timeouts(config); } Ok(TcpListenerConfig { - address: self.address.clone(), - public_address: self.public_address.clone(), + address: self.address.into(), + public_address: self.public_address.map(|a| a.into()), expect_proxy: self.expect_proxy.unwrap_or(false), front_timeout: self.front_timeout.unwrap_or(DEFAULT_FRONT_TIMEOUT), back_timeout: self.back_timeout.unwrap_or(DEFAULT_BACK_TIMEOUT), @@ -617,15 +589,6 @@ impl ListenerBuilder { } } -fn parse_socket_address(address: &str) -> Result { - address - .parse::() - .map_err(|parse_error| ConfigError::ParseSocketAddress { - error: parse_error.to_string(), - address: address.to_owned(), - }) -} - fn open_and_read_file(path: &str) -> Result { let mut content = String::new(); let mut file = File::open(path).map_err(|io_error| ConfigError::FileOpen { @@ -928,7 +891,7 @@ impl HttpFrontendConfig { if self.key.is_some() && self.certificate.is_some() { v.push( RequestType::AddCertificate(AddCertificate { - address: self.address.to_string(), + address: self.address.into(), certificate: CertificateAndKey { key: self.key.clone().unwrap(), certificate: self.certificate.clone().unwrap(), @@ -944,7 +907,7 @@ impl HttpFrontendConfig { v.push( RequestType::AddHttpsFrontend(RequestHttpFrontend { cluster_id: Some(cluster_id.to_string()), - address: self.address.to_string(), + address: self.address.into(), hostname: self.hostname.clone(), path: self.path.clone(), method: self.method.clone(), @@ -958,7 +921,7 @@ impl HttpFrontendConfig { v.push( RequestType::AddHttpFrontend(RequestHttpFrontend { cluster_id: Some(cluster_id.to_string()), - address: self.address.to_string(), + address: self.address.into(), hostname: self.hostname.clone(), path: self.path.clone(), method: self.method.clone(), @@ -1015,7 +978,7 @@ impl HttpClusterConfig { backend_id: backend.backend_id.clone().unwrap_or_else(|| { format!("{}-{}-{}", self.cluster_id, backend_count, backend.address) }), - address: backend.address.to_string(), + address: backend.address.into(), load_balancing_parameters, sticky_id: backend.sticky_id.clone(), backup: backend.backup, @@ -1062,7 +1025,7 @@ impl TcpClusterConfig { v.push( RequestType::AddTcpFrontend(RequestTcpFrontend { cluster_id: self.cluster_id.clone(), - address: frontend.address.to_string(), + 
address: frontend.address.into(), tags: frontend.tags.clone().unwrap_or(BTreeMap::new()), }) .into(), @@ -1080,7 +1043,7 @@ impl TcpClusterConfig { backend_id: backend.backend_id.clone().unwrap_or_else(|| { format!("{}-{}-{}", self.cluster_id, backend_count, backend.address) }), - address: backend.address.to_string(), + address: backend.address.into(), load_balancing_parameters, sticky_id: backend.sticky_id.clone(), backup: backend.backup, @@ -1167,12 +1130,10 @@ impl FileConfig { if let Some(listeners) = config.listeners.as_ref() { for listener in listeners.iter() { - if reserved_address.contains(&listener.parse_address()?) { - return Err(ConfigError::ListenerAddressAlreadyInUse( - listener.address.to_string(), - )); + if reserved_address.contains(&listener.address) { + return Err(ConfigError::ListenerAddressAlreadyInUse(listener.address)); } - reserved_address.insert(listener.parse_address()?); + reserved_address.insert(listener.address); } } @@ -1305,26 +1266,23 @@ impl ConfigBuilder { fn populate_listeners(&mut self, listeners: Vec) -> Result<(), ConfigError> { for listener in listeners.iter() { - let address = listener.parse_address()?; - if self.known_addresses.contains_key(&address) { - return Err(ConfigError::ListenerAddressAlreadyInUse( - listener.address.to_string(), - )); + if self.known_addresses.contains_key(&listener.address) { + return Err(ConfigError::ListenerAddressAlreadyInUse(listener.address)); } let protocol = listener .protocol .ok_or(ConfigError::Missing(MissingKind::Protocol))?; - self.known_addresses.insert(address, protocol); + self.known_addresses.insert(listener.address, protocol); if listener.expect_proxy == Some(true) { - self.expect_proxy_addresses.insert(address); + self.expect_proxy_addresses.insert(listener.address); } if listener.public_address.is_some() && listener.expect_proxy == Some(true) { return Err(ConfigError::Incompatible { object: ObjectKind::Listener, - id: listener.address.to_owned(), + id: listener.address.to_string(), kind: IncompatibilityKind::PublicAddress, }); } @@ -1345,7 +1303,6 @@ impl ConfigBuilder { for (id, file_cluster_config) in file_cluster_configs.drain() { let mut cluster_config = file_cluster_config.to_cluster_config(id.as_str(), &self.expect_proxy_addresses)?; - // .with_context(|| format!("error parsing cluster configuration for cluster {id}"))?; match cluster_config { ClusterConfig::Http(ref mut http) => { @@ -1367,7 +1324,7 @@ impl ConfigBuilder { if frontend.certificate.is_none() { if let Some(https_listener) = self.built.https_listeners.iter().find(|listener| { - listener.address == frontend.address.to_string() + listener.address == frontend.address.into() && listener.certificate.is_some() }) { @@ -1390,14 +1347,14 @@ impl ConfigBuilder { // create a default listener for that front let file_listener_protocol = if frontend.certificate.is_some() { self.push_tls_listener(ListenerBuilder::new( - frontend.address.to_string(), + frontend.address.into(), ListenerProtocol::Https, ))?; ListenerProtocol::Https } else { self.push_http_listener(ListenerBuilder::new( - frontend.address.to_string(), + frontend.address.into(), ListenerProtocol::Http, ))?; @@ -1422,7 +1379,7 @@ impl ConfigBuilder { None => { // create a default listener for that front self.push_tcp_listener(ListenerBuilder::new( - frontend.address.to_string(), + frontend.address.into(), ListenerProtocol::Tcp, ))?; self.known_addresses @@ -1794,14 +1751,20 @@ mod tests { #[test] fn serialize() { - let http = ListenerBuilder::new("127.0.0.1:8080", ListenerProtocol::Http) 
- .with_answer_404_path(Some("404.html")) - .to_owned(); + let http = ListenerBuilder::new( + SocketAddress::new_v4(127, 0, 0, 1, 8080), + ListenerProtocol::Http, + ) + .with_answer_404_path(Some("404.html")) + .to_owned(); println!("http: {:?}", to_string(&http)); - let https = ListenerBuilder::new("127.0.0.1:8443", ListenerProtocol::Https) - .with_answer_404_path(Some("404.html")) - .to_owned(); + let https = ListenerBuilder::new( + SocketAddress::new_v4(127, 0, 0, 1, 8443), + ListenerProtocol::Https, + ) + .with_answer_404_path(Some("404.html")) + .to_owned(); println!("https: {:?}", to_string(&https)); let listeners = vec![http, https]; diff --git a/command/src/proto/display.rs b/command/src/proto/display.rs index 4bc8df774..b123a0437 100644 --- a/command/src/proto/display.rs +++ b/command/src/proto/display.rs @@ -1,6 +1,7 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::{Display, Formatter}, + net::SocketAddr, }; use prettytable::{cell, row, Row, Table}; @@ -18,6 +19,8 @@ use crate::proto::{ DisplayError, }; +use super::command::SocketAddress; + impl Display for CertificateAndKey { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let versions = self.versions.iter().fold(String::new(), |acc, tls_v| { @@ -940,3 +943,9 @@ fn create_cluster_table(headers: Vec<&str>, data: &BTreeMap) -> std::fmt::Result { + write!(f, "{}", SocketAddr::from(self.clone())) + } +} diff --git a/command/src/request.rs b/command/src/request.rs index 459cc1abc..234d41f50 100644 --- a/command/src/request.rs +++ b/command/src/request.rs @@ -3,7 +3,7 @@ use std::{ fmt::{self, Display}, fs::File, io::Read, - net::SocketAddr, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, str::FromStr, }; @@ -14,8 +14,8 @@ use crate::{ parser::parse_several_requests, proto::{ command::{ - request::RequestType, LoadBalancingAlgorithms, PathRuleKind, Request, - RequestHttpFrontend, RulePosition, + ip_address, request::RequestType, IpAddress, LoadBalancingAlgorithms, PathRuleKind, + Request, RequestHttpFrontend, RulePosition, SocketAddress, Uint128, }, display::format_request_type, }, @@ -24,8 +24,6 @@ use crate::{ #[derive(thiserror::Error, Debug)] pub enum RequestError { - #[error("Invalid address {address}: {error}")] - InvalidSocketAddress { address: String, error: String }, #[error("invalid value {value} for field '{name}'")] InvalidValue { name: String, value: i32 }, #[error("Could not read requests from file: {0}")] @@ -201,12 +199,7 @@ impl RequestHttpFrontend { /// convert a requested frontend to a usable one by parsing its address pub fn to_frontend(self) -> Result { Ok(HttpFrontend { - address: self.address.parse::().map_err(|parse_error| { - RequestError::InvalidSocketAddress { - address: self.address.clone(), - error: parse_error.to_string(), - } - })?, + address: self.address.into(), cluster_id: self.cluster_id, hostname: self.hostname, path: self.path, @@ -277,3 +270,57 @@ impl FromStr for LoadBalancingAlgorithms { } } } + +impl SocketAddress { + pub fn new_v4(a: u8, b: u8, c: u8, d: u8, port: u16) -> Self { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(a, b, c, d)), port).into() + } +} + +impl From for SocketAddress { + fn from(socket_addr: SocketAddr) -> SocketAddress { + let ip_inner = match socket_addr { + SocketAddr::V4(ip_v4_addr) => ip_address::Inner::V4(u32::from(*ip_v4_addr.ip())), + SocketAddr::V6(ip_v6_addr) => { + ip_address::Inner::V6(Uint128::from(u128::from(*ip_v6_addr.ip()))) + } + }; + + SocketAddress { + port: socket_addr.port() as u32, + ip: IpAddress { + inner: Some(ip_inner), + }, 
+ } + } +} + +impl From for SocketAddr { + fn from(socket_address: SocketAddress) -> Self { + let port = socket_address.port as u16; + + let ip = match socket_address.ip.inner { + Some(inner) => match inner { + ip_address::Inner::V4(v4_value) => IpAddr::V4(Ipv4Addr::from(v4_value)), + ip_address::Inner::V6(v6_value) => IpAddr::V6(Ipv6Addr::from(u128::from(v6_value))), + }, + None => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), // should never happen + }; + + SocketAddr::new(ip, port) + } +} + +impl From for u128 { + fn from(value: Uint128) -> Self { + value.low as u128 | ((value.high as u128) << 64) + } +} + +impl From for Uint128 { + fn from(value: u128) -> Self { + let low = value as u64; + let high = (value >> 64) as u64; + Uint128 { low, high } + } +} diff --git a/command/src/response.rs b/command/src/response.rs index c60d3e0f7..99b534157 100644 --- a/command/src/response.rs +++ b/command/src/response.rs @@ -49,7 +49,7 @@ impl From for RequestHttpFrontend { }; RequestHttpFrontend { cluster_id: val.cluster_id, - address: val.address.to_string(), + address: val.address.into(), hostname: val.hostname, path: val.path, method: val.method, @@ -64,7 +64,7 @@ impl From for AddBackend { AddBackend { cluster_id: val.cluster_id, backend_id: val.backend_id, - address: val.address.to_string(), + address: val.address.into(), sticky_id: val.sticky_id, load_balancing_parameters: val.load_balancing_parameters, backup: val.backup, @@ -154,7 +154,7 @@ impl From for RequestTcpFrontend { fn from(val: TcpFrontend) -> Self { RequestTcpFrontend { cluster_id: val.cluster_id, - address: val.address.to_string(), + address: val.address.into(), tags: val.tags, } } @@ -202,7 +202,7 @@ impl Backend { pub fn to_add_backend(self) -> AddBackend { AddBackend { cluster_id: self.cluster_id, - address: self.address.to_string(), + address: self.address.into(), sticky_id: self.sticky_id, backend_id: self.backend_id, load_balancing_parameters: self.load_balancing_parameters, diff --git a/command/src/state.rs b/command/src/state.rs index 9532f34b2..9883b25d0 100644 --- a/command/src/state.rs +++ b/command/src/state.rs @@ -21,7 +21,7 @@ use crate::{ HttpsListenerConfig, ListedFrontends, ListenerType, ListenersList, PathRule, QueryCertificatesFilters, RemoveBackend, RemoveCertificate, RemoveListener, ReplaceCertificate, Request, RequestCounts, RequestHttpFrontend, RequestTcpFrontend, - TcpListenerConfig, + SocketAddress, TcpListenerConfig, }, display::format_request_type, }, @@ -53,8 +53,6 @@ pub enum StateError { RemoveCertificate(String), #[error("Could not replace certificate: {0}")] ReplaceCertificate(String), - #[error("The provided socket address '{address}' is wrong: {error}")] - WrongSocketAddress { address: String, error: String }, #[error( "Could not convert the frontend to an insertable one. Frontend: {frontend} error: {error}" )] @@ -82,11 +80,11 @@ pub struct ConfigState { pub clusters: BTreeMap, pub backends: BTreeMap>, /// socket address -> HTTP listener - pub http_listeners: BTreeMap, + pub http_listeners: BTreeMap, /// socket address -> HTTPS listener - pub https_listeners: BTreeMap, + pub https_listeners: BTreeMap, /// socket address -> TCP listener - pub tcp_listeners: BTreeMap, + pub tcp_listeners: BTreeMap, /// HTTP frontends, indexed by a summary of each front's address;hostname;path, for uniqueness. 
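To make the intended bit layout of these conversions explicit, here is a hypothetical round-trip check (not part of the patch) that could sit next to the From impls in command/src/request.rs: an IPv4 address travels as a big-endian u32, an IPv6 address as a u128 split into the low/high halves of Uint128.

#[cfg(test)]
mod socket_address_round_trip {
    use std::net::SocketAddr;

    use crate::proto::command::{ip_address, SocketAddress, Uint128};

    #[test]
    fn round_trip() {
        // IPv4: 127.0.0.1 is carried as the big-endian u32 0x7F00_0001
        let v4 = SocketAddress::new_v4(127, 0, 0, 1, 8080);
        assert_eq!(v4.ip.inner, Some(ip_address::Inner::V4(0x7F00_0001)));
        assert_eq!(SocketAddr::from(v4).to_string(), "127.0.0.1:8080");

        // IPv6: the address survives the trip through the protobuf representation
        let original: SocketAddr = "[2001:db8::1]:8443".parse().unwrap();
        assert_eq!(SocketAddr::from(SocketAddress::from(original)), original);

        // the 128 bits of an IPv6 address are split into two u64 halves and reassembled
        let value: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
        let halves = Uint128::from(value);
        assert_eq!(halves.low, 0x0011_2233_4455_6677);
        assert_eq!(halves.high, 0x0123_4567_89ab_cdef);
        assert_eq!(u128::from(halves), value);
    }
}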
/// For example: `"0.0.0.0:8080;lolcatho.st;P/api"` pub http_fronts: BTreeMap, @@ -184,13 +182,13 @@ impl ConfigState { } fn add_http_listener(&mut self, listener: &HttpListenerConfig) -> Result<(), StateError> { - let address = listener.address.to_string(); - match self.http_listeners.entry(address.clone()) { + let address: SocketAddr = listener.address.clone().into(); + match self.http_listeners.entry(address) { BTreeMapEntry::Vacant(vacant_entry) => vacant_entry.insert(listener.clone()), BTreeMapEntry::Occupied(_) => { return Err(StateError::Exists { kind: ObjectKind::HttpListener, - id: address, + id: address.to_string(), }) } }; @@ -198,13 +196,13 @@ impl ConfigState { } fn add_https_listener(&mut self, listener: &HttpsListenerConfig) -> Result<(), StateError> { - let address = listener.address.to_string(); - match self.https_listeners.entry(address.clone()) { + let address: SocketAddr = listener.address.clone().into(); + match self.https_listeners.entry(address) { BTreeMapEntry::Vacant(vacant_entry) => vacant_entry.insert(listener.clone()), BTreeMapEntry::Occupied(_) => { return Err(StateError::Exists { kind: ObjectKind::HttpsListener, - id: address, + id: address.to_string(), }) } }; @@ -212,13 +210,13 @@ impl ConfigState { } fn add_tcp_listener(&mut self, listener: &TcpListenerConfig) -> Result<(), StateError> { - let address = listener.address.to_string(); - match self.tcp_listeners.entry(address.clone()) { + let address: SocketAddr = listener.address.clone().into(); + match self.tcp_listeners.entry(address) { BTreeMapEntry::Vacant(vacant_entry) => vacant_entry.insert(listener.clone()), BTreeMapEntry::Occupied(_) => { return Err(StateError::Exists { kind: ObjectKind::TcpListener, - id: address, + id: address.to_string(), }) } }; @@ -227,27 +225,27 @@ impl ConfigState { fn remove_listener(&mut self, remove: &RemoveListener) -> Result<(), StateError> { match ListenerType::try_from(remove.proxy)? { - ListenerType::Http => self.remove_http_listener(&remove.address), - ListenerType::Https => self.remove_https_listener(&remove.address), - ListenerType::Tcp => self.remove_tcp_listener(&remove.address), + ListenerType::Http => self.remove_http_listener(&remove.address.clone().into()), + ListenerType::Https => self.remove_https_listener(&remove.address.clone().into()), + ListenerType::Tcp => self.remove_tcp_listener(&remove.address.clone().into()), } } - fn remove_http_listener(&mut self, address: &str) -> Result<(), StateError> { + fn remove_http_listener(&mut self, address: &SocketAddr) -> Result<(), StateError> { if self.http_listeners.remove(address).is_none() { return Err(StateError::NoChange); } Ok(()) } - fn remove_https_listener(&mut self, address: &str) -> Result<(), StateError> { + fn remove_https_listener(&mut self, address: &SocketAddr) -> Result<(), StateError> { if self.https_listeners.remove(address).is_none() { return Err(StateError::NoChange); } Ok(()) } - fn remove_tcp_listener(&mut self, address: &str) -> Result<(), StateError> { + fn remove_tcp_listener(&mut self, address: &SocketAddr) -> Result<(), StateError> { if self.tcp_listeners.remove(address).is_none() { return Err(StateError::NoChange); } @@ -258,27 +256,27 @@ impl ConfigState { match ListenerType::try_from(activate.proxy)? 
{ ListenerType::Http => self .http_listeners - .get_mut(&activate.address) + .get_mut(&activate.address.clone().into()) .map(|listener| listener.active = true) .ok_or(StateError::NotFound { kind: ObjectKind::HttpListener, - id: activate.address.to_owned(), + id: activate.address.to_string(), }), ListenerType::Https => self .https_listeners - .get_mut(&activate.address) + .get_mut(&activate.address.clone().into()) .map(|listener| listener.active = true) .ok_or(StateError::NotFound { kind: ObjectKind::HttpsListener, - id: activate.address.to_owned(), + id: activate.address.to_string(), }), ListenerType::Tcp => self .tcp_listeners - .get_mut(&activate.address) + .get_mut(&activate.address.clone().into()) .map(|listener| listener.active = true) .ok_or(StateError::NotFound { kind: ObjectKind::TcpListener, - id: activate.address.to_owned(), + id: activate.address.to_string(), }), } } @@ -287,27 +285,27 @@ impl ConfigState { match ListenerType::try_from(deactivate.proxy)? { ListenerType::Http => self .http_listeners - .get_mut(&deactivate.address) + .get_mut(&deactivate.address.clone().into()) .map(|listener| listener.active = false) .ok_or(StateError::NotFound { kind: ObjectKind::HttpListener, - id: deactivate.address.to_owned(), + id: deactivate.address.to_string(), }), ListenerType::Https => self .https_listeners - .get_mut(&deactivate.address) + .get_mut(&deactivate.address.clone().into()) .map(|listener| listener.active = false) .ok_or(StateError::NotFound { kind: ObjectKind::HttpsListener, - id: deactivate.address.to_owned(), + id: deactivate.address.to_string(), }), ListenerType::Tcp => self .tcp_listeners - .get_mut(&deactivate.address) + .get_mut(&deactivate.address.clone().into()) .map(|listener| listener.active = false) .ok_or(StateError::NotFound { kind: ObjectKind::TcpListener, - id: deactivate.address.to_owned(), + id: deactivate.address.to_string(), }), } } @@ -383,11 +381,9 @@ impl ConfigState { )?, ); - let address = parse_socket_address(&add.address)?; - let entry = self .certificates - .entry(address) + .entry(add.address.clone().into()) .or_insert_with(HashMap::new); if entry.contains_key(&fingerprint) { @@ -408,9 +404,7 @@ impl ConfigState { .map_err(|decode_error| StateError::RemoveCertificate(decode_error.to_string()))?, ); - let address = parse_socket_address(&remove.address)?; - - if let Some(index) = self.certificates.get_mut(&address) { + if let Some(index) = self.certificates.get_mut(&remove.address.clone().into()) { index.remove(&fingerprint); } @@ -422,18 +416,17 @@ impl ConfigState { /// - insert the new certificate with the new fingerprint as key /// - check that the new entry is present in the certificates hashmap fn replace_certificate(&mut self, replace: &ReplaceCertificate) -> Result<(), StateError> { - let address = parse_socket_address(&replace.address)?; - + let replace_address = replace.address.clone().into(); let old_fingerprint = Fingerprint( hex::decode(&replace.old_fingerprint) .map_err(|decode_error| StateError::RemoveCertificate(decode_error.to_string()))?, ); self.certificates - .get_mut(&address) + .get_mut(&replace_address) .ok_or(StateError::NotFound { kind: ObjectKind::Certificate, - id: address.to_string(), + id: replace.address.to_string(), })? 
.remove(&old_fingerprint); @@ -444,12 +437,12 @@ impl ConfigState { ); self.certificates - .get_mut(&address) + .get_mut(&replace_address) .map(|certs| certs.insert(new_fingerprint.clone(), replace.new_certificate.clone())); if !self .certificates - .get(&address) + .get(&replace_address) .ok_or(StateError::ReplaceCertificate( "Unlikely error. This entry in the certificate hashmap should be present" .to_string(), @@ -472,7 +465,7 @@ impl ConfigState { let tcp_frontend = TcpFrontend { cluster_id: front.cluster_id.clone(), - address: parse_socket_address(&front.address)?, + address: front.address.clone().into(), tags: front.tags.clone(), }; if tcp_frontends.contains(&tcp_frontend) { @@ -490,8 +483,6 @@ impl ConfigState { &mut self, front_to_remove: &RequestTcpFrontend, ) -> Result<(), StateError> { - let address = parse_socket_address(&front_to_remove.address)?; - let tcp_frontends = self.tcp_fronts .get_mut(&front_to_remove.cluster_id) @@ -501,7 +492,7 @@ impl ConfigState { })?; let len = tcp_frontends.len(); - tcp_frontends.retain(|front| front.address != address); + tcp_frontends.retain(|front| front.address != front_to_remove.address.clone().into()); if tcp_frontends.len() == len { return Err(StateError::NoChange); } @@ -510,7 +501,7 @@ impl ConfigState { fn add_backend(&mut self, add_backend: &AddBackend) -> Result<(), StateError> { let backend = Backend { - address: parse_socket_address(&add_backend.address)?, + address: add_backend.address.clone().into(), cluster_id: add_backend.cluster_id.clone(), backend_id: add_backend.backend_id.clone(), sticky_id: add_backend.sticky_id.clone(), @@ -540,9 +531,8 @@ impl ConfigState { })?; let len = backend_list.len(); - backend_list.retain(|b| { - b.backend_id != backend.backend_id || b.address.to_string() != backend.address - }); + let remove_address = backend.address.clone().into(); + backend_list.retain(|b| b.backend_id != backend.backend_id || b.address != remove_address); backend_list.sort(); if backend_list.len() == len { return Err(StateError::NoChange); @@ -607,7 +597,7 @@ impl ConfigState { for certificate_and_key in certs.values() { v.push( RequestType::AddCertificate(AddCertificate { - address: front.to_string(), + address: SocketAddress::from(*front), certificate: certificate_and_key.clone(), expired_at: None, }) @@ -645,7 +635,7 @@ impl ConfigState { { v.push( RequestType::ActivateListener(ActivateListener { - address: front.to_string(), + address: SocketAddress::from(*front), proxy: ListenerType::Http.into(), from_scm: false, }) @@ -661,7 +651,7 @@ impl ConfigState { { v.push( RequestType::ActivateListener(ActivateListener { - address: front.to_string(), + address: SocketAddress::from(*front), proxy: ListenerType::Https.into(), from_scm: false, }) @@ -676,7 +666,7 @@ impl ConfigState { { v.push( RequestType::ActivateListener(ActivateListener { - address: front.to_string(), + address: SocketAddress::from(*front), proxy: ListenerType::Tcp.into(), from_scm: false, }) @@ -689,18 +679,18 @@ impl ConfigState { pub fn diff(&self, other: &ConfigState) -> Vec { //pub tcp_listeners: HashMap, - let my_tcp_listeners: HashSet<&String> = self.tcp_listeners.keys().collect(); - let their_tcp_listeners: HashSet<&String> = other.tcp_listeners.keys().collect(); + let my_tcp_listeners: HashSet<&SocketAddr> = self.tcp_listeners.keys().collect(); + let their_tcp_listeners: HashSet<&SocketAddr> = other.tcp_listeners.keys().collect(); let removed_tcp_listeners = my_tcp_listeners.difference(&their_tcp_listeners); let added_tcp_listeners = 
their_tcp_listeners.difference(&my_tcp_listeners); - let my_http_listeners: HashSet<&String> = self.http_listeners.keys().collect(); - let their_http_listeners: HashSet<&String> = other.http_listeners.keys().collect(); + let my_http_listeners: HashSet<&SocketAddr> = self.http_listeners.keys().collect(); + let their_http_listeners: HashSet<&SocketAddr> = other.http_listeners.keys().collect(); let removed_http_listeners = my_http_listeners.difference(&their_http_listeners); let added_http_listeners = their_http_listeners.difference(&my_http_listeners); - let my_https_listeners: HashSet<&String> = self.https_listeners.keys().collect(); - let their_https_listeners: HashSet<&String> = other.https_listeners.keys().collect(); + let my_https_listeners: HashSet<&SocketAddr> = self.https_listeners.keys().collect(); + let their_https_listeners: HashSet<&SocketAddr> = other.https_listeners.keys().collect(); let removed_https_listeners = my_https_listeners.difference(&their_https_listeners); let added_https_listeners = their_https_listeners.difference(&my_https_listeners); @@ -710,7 +700,7 @@ impl ConfigState { if self.tcp_listeners[*address].active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Tcp.into(), to_scm: false, }) @@ -720,7 +710,7 @@ impl ConfigState { v.push( RequestType::RemoveListener(RemoveListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Tcp.into(), }) .into(), @@ -733,7 +723,7 @@ impl ConfigState { if other.tcp_listeners[*address].active { v.push( RequestType::ActivateListener(ActivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Tcp.into(), from_scm: false, }) @@ -746,7 +736,7 @@ impl ConfigState { if self.http_listeners[*address].active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Http.into(), to_scm: false, }) @@ -756,7 +746,7 @@ impl ConfigState { v.push( RequestType::RemoveListener(RemoveListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Http.into(), }) .into(), @@ -769,7 +759,7 @@ impl ConfigState { if other.http_listeners[*address].active { v.push( RequestType::ActivateListener(ActivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Http.into(), from_scm: false, }) @@ -782,7 +772,7 @@ impl ConfigState { if self.https_listeners[*address].active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Https.into(), to_scm: false, }) @@ -792,7 +782,7 @@ impl ConfigState { v.push( RequestType::RemoveListener(RemoveListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Https.into(), }) .into(), @@ -805,7 +795,7 @@ impl ConfigState { if other.https_listeners[*address].active { v.push( RequestType::ActivateListener(ActivateListener { - address: address.to_string(), + address: SocketAddress::from(**address), proxy: ListenerType::Https.into(), from_scm: false, }) @@ -821,7 +811,7 @@ impl ConfigState { if my_listener != their_listener { v.push( RequestType::RemoveListener(RemoveListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: 
ListenerType::Tcp.into(), }) .into(), @@ -835,7 +825,7 @@ impl ConfigState { if my_listener.active && !their_listener.active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Tcp.into(), to_scm: false, }) @@ -846,7 +836,7 @@ impl ConfigState { if !my_listener.active && their_listener.active { v.push( RequestType::ActivateListener(ActivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Tcp.into(), from_scm: false, }) @@ -862,7 +852,7 @@ impl ConfigState { if my_listener != their_listener { v.push( RequestType::RemoveListener(RemoveListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Http.into(), }) .into(), @@ -876,7 +866,7 @@ impl ConfigState { if my_listener.active && !their_listener.active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Http.into(), to_scm: false, }) @@ -887,7 +877,7 @@ impl ConfigState { if !my_listener.active && their_listener.active { v.push( RequestType::ActivateListener(ActivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Http.into(), from_scm: false, }) @@ -903,7 +893,7 @@ impl ConfigState { if my_listener != their_listener { v.push( RequestType::RemoveListener(RemoveListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Https.into(), }) .into(), @@ -917,7 +907,7 @@ impl ConfigState { if my_listener.active && !their_listener.active { v.push( RequestType::DeactivateListener(DeactivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Https.into(), to_scm: false, }) @@ -928,7 +918,7 @@ impl ConfigState { if !my_listener.active && their_listener.active { v.push( RequestType::ActivateListener(ActivateListener { - address: addr.to_string(), + address: SocketAddress::from(**addr), proxy: ListenerType::Https.into(), from_scm: false, }) @@ -978,7 +968,7 @@ impl ConfigState { RequestType::RemoveBackend(RemoveBackend { cluster_id: backend.cluster_id.clone(), backend_id: backend.backend_id.clone(), - address: backend.address.to_string(), + address: SocketAddress::from(backend.address), }) .into(), ); @@ -994,7 +984,7 @@ impl ConfigState { RequestType::RemoveBackend(RemoveBackend { cluster_id: backend.cluster_id.clone(), backend_id: backend.backend_id.clone(), - address: backend.address.to_string(), + address: SocketAddress::from(backend.address), }) .into(), ); @@ -1091,7 +1081,7 @@ impl ConfigState { for &(address, fingerprint) in removed_certificates { v.push( RequestType::RemoveCertificate(RemoveCertificate { - address: address.to_string(), + address: SocketAddress::from(address), fingerprint: fingerprint.to_string(), }) .into(), @@ -1106,7 +1096,7 @@ impl ConfigState { { v.push( RequestType::AddCertificate(AddCertificate { - address: address.to_string(), + address: SocketAddress::from(address), certificate: certificate_and_key.clone(), expired_at: None, }) @@ -1373,9 +1363,21 @@ impl ConfigState { pub fn list_listeners(&self) -> ListenersList { ListenersList { - http_listeners: self.http_listeners.clone(), - https_listeners: self.https_listeners.clone(), - tcp_listeners: self.tcp_listeners.clone(), + http_listeners: self + .http_listeners + .iter() + .map(|(addr, listener)| (addr.to_string(), listener.clone())) + 
.collect(), + https_listeners: self + .https_listeners + .iter() + .map(|(addr, listener)| (addr.to_string(), listener.clone())) + .collect(), + tcp_listeners: self + .tcp_listeners + .iter() + .map(|(addr, listener)| (addr.to_string(), listener.clone())) + .collect(), } } @@ -1411,15 +1413,6 @@ impl ConfigState { } } -fn parse_socket_address(address: &str) -> Result { - address - .parse::() - .map_err(|parse_error| StateError::WrongSocketAddress { - address: address.to_owned(), - error: parse_error.to_string(), - }) -} - fn domain_check( front_hostname: &str, front_path_rule: &PathRule, @@ -1535,7 +1528,7 @@ mod tests { cluster_id: Some(String::from("cluster_1")), hostname: String::from("lolcatho.st:8080"), path: PathRule::prefix(String::from("/")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), position: RulePosition::Tree.into(), ..Default::default() }) @@ -1548,7 +1541,7 @@ mod tests { cluster_id: Some(String::from("cluster_2")), hostname: String::from("test.local"), path: PathRule::prefix(String::from("/abc")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), position: RulePosition::Pre.into(), ..Default::default() }) @@ -1560,7 +1553,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1026".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), ..Default::default() }) .into(), @@ -1571,7 +1564,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-1"), - address: "127.0.0.2:1027".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1027), ..Default::default() }) .into(), @@ -1582,7 +1575,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_2"), backend_id: String::from("cluster_2-0"), - address: "192.167.1.2:1026".parse().unwrap(), + address: SocketAddress::new_v4(192, 167, 1, 2, 1026), ..Default::default() }) .into(), @@ -1593,7 +1586,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-3"), - address: "192.168.1.3:1027".parse().unwrap(), + address: SocketAddress::new_v4(192, 168, 1, 3, 1027), ..Default::default() }) .into(), @@ -1604,7 +1597,7 @@ mod tests { &RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-3"), - address: "192.168.1.3:1027".parse().unwrap(), + address: SocketAddress::new_v4(192, 168, 1, 3, 1027), }) .into(), ) @@ -1630,7 +1623,7 @@ mod tests { cluster_id: Some(String::from("cluster_1")), hostname: String::from("lolcatho.st:8080"), path: PathRule::prefix(String::from("/")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), position: RulePosition::Post.into(), ..Default::default() }) @@ -1643,7 +1636,7 @@ mod tests { cluster_id: Some(String::from("cluster_2")), hostname: String::from("test.local"), path: PathRule::prefix(String::from("/abc")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), ..Default::default() }) .into(), @@ -1654,7 +1647,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1026".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: 
Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1666,7 +1659,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-1"), - address: "127.0.0.2:1027".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 2, 1027), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1678,7 +1671,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_2"), backend_id: String::from("cluster_2-0"), - address: "192.167.1.2:1026".parse().unwrap(), + address: SocketAddress::new_v4(192, 167, 1, 2, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1704,7 +1697,7 @@ mod tests { cluster_id: Some(String::from("cluster_1")), hostname: String::from("lolcatho.st:8080"), path: PathRule::prefix(String::from("/")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), position: RulePosition::Post.into(), ..Default::default() }) @@ -1716,7 +1709,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1026".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1728,7 +1721,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-1"), - address: "127.0.0.2:1027".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 2, 1027), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1740,7 +1733,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-2"), - address: "127.0.0.2:1028".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 2, 1028), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1764,20 +1757,20 @@ mod tests { cluster_id: Some(String::from("cluster_2")), hostname: String::from("test.local"), path: PathRule::prefix(String::from("/abc")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), ..Default::default() }) .into(), RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_2"), backend_id: String::from("cluster_2-0"), - address: "192.167.1.2:1026".to_string(), + address: SocketAddress::new_v4(192, 167, 1, 2, 1026), }) .into(), RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-2"), - address: "127.0.0.2:1028".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 2, 1028), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1806,7 +1799,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-2"), - address: "127.0.0.2:1028".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 2, 1028), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1828,7 +1821,7 @@ mod tests { cluster_id: Some(String::from("MyCluster_1")), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), ..Default::default() }; @@ -1836,7 +1829,7 @@ mod 
tests { cluster_id: Some(String::from("MyCluster_1")), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("")), - address: "0.0.0.0:8443".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), ..Default::default() }; @@ -1844,7 +1837,7 @@ mod tests { cluster_id: Some(String::from("MyCluster_2")), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("/api")), - address: "0.0.0.0:8080".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), ..Default::default() }; @@ -1852,7 +1845,7 @@ mod tests { cluster_id: Some(String::from("MyCluster_2")), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("/api")), - address: "0.0.0.0:8443".to_string(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), ..Default::default() }; @@ -1905,7 +1898,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }) @@ -1948,7 +1941,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: format!("cluster_1-{i}"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), ..Default::default() }) .into(), @@ -1961,7 +1954,7 @@ mod tests { let remove_backend_2 = RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), }) .into(); @@ -1995,7 +1988,7 @@ mod tests { &RequestType::AddBackend(AddBackend { cluster_id: String::from("cluster_1"), backend_id: format!("cluster_1-{i}"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), ..Default::default() }) .into(), @@ -2014,7 +2007,7 @@ mod tests { &RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_1"), backend_id: format!("cluster_1-{j}"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), }) .into(), ); @@ -2029,7 +2022,7 @@ mod tests { state .dispatch( &RequestType::AddTcpListener(TcpListenerConfig { - address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), ..Default::default() }) .into(), @@ -2038,7 +2031,7 @@ mod tests { state .dispatch( &RequestType::ActivateListener(ActivateListener { - address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), proxy: ListenerType::Tcp.into(), from_scm: false, }) @@ -2048,7 +2041,7 @@ mod tests { state .dispatch( &RequestType::AddHttpListener(HttpListenerConfig { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), ..Default::default() }) .into(), @@ -2057,7 +2050,7 @@ mod tests { state .dispatch( &RequestType::AddHttpsListener(HttpsListenerConfig { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), ..Default::default() }) .into(), @@ -2066,7 +2059,7 @@ mod tests { state .dispatch( &RequestType::ActivateListener(ActivateListener { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), proxy: ListenerType::Https.into(), from_scm: false, }) @@ -2078,7 +2071,7 @@ mod tests { state2 .dispatch( &RequestType::AddTcpListener(TcpListenerConfig { - 
address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), expect_proxy: true, ..Default::default() }) @@ -2088,7 +2081,7 @@ mod tests { state2 .dispatch( &RequestType::AddHttpListener(HttpListenerConfig { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), answer_404: "test".to_string(), ..Default::default() }) @@ -2098,7 +2091,7 @@ mod tests { state2 .dispatch( &RequestType::ActivateListener(ActivateListener { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), proxy: ListenerType::Http.into(), from_scm: false, }) @@ -2108,7 +2101,7 @@ mod tests { state2 .dispatch( &RequestType::AddHttpsListener(HttpsListenerConfig { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), answer_404: String::from("test"), ..Default::default() }) @@ -2118,7 +2111,7 @@ mod tests { state2 .dispatch( &RequestType::ActivateListener(ActivateListener { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), proxy: ListenerType::Https.into(), from_scm: false, }) @@ -2128,46 +2121,46 @@ mod tests { let e: Vec = vec![ RequestType::RemoveListener(RemoveListener { - address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), proxy: ListenerType::Tcp.into(), }) .into(), RequestType::AddTcpListener(TcpListenerConfig { - address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), expect_proxy: true, ..Default::default() }) .into(), RequestType::DeactivateListener(DeactivateListener { - address: "0.0.0.0:1234".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 1234), proxy: ListenerType::Tcp.into(), to_scm: false, }) .into(), RequestType::RemoveListener(RemoveListener { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), proxy: ListenerType::Http.into(), }) .into(), RequestType::AddHttpListener(HttpListenerConfig { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), answer_404: String::from("test"), ..Default::default() }) .into(), RequestType::ActivateListener(ActivateListener { - address: "0.0.0.0:8080".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8080), proxy: ListenerType::Http.into(), from_scm: false, }) .into(), RequestType::RemoveListener(RemoveListener { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), proxy: ListenerType::Https.into(), }) .into(), RequestType::AddHttpsListener(HttpsListenerConfig { - address: "0.0.0.0:8443".parse().unwrap(), + address: SocketAddress::new_v4(0, 0, 0, 0, 8443), answer_404: String::from("test"), ..Default::default() }) @@ -2196,7 +2189,7 @@ mod tests { names: vec!["lolcatho.st".to_string()], }; let add_certificate = AddCertificate { - address: "127.0.0.1:8080".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), certificate: certificate_and_key, expired_at: None, }; diff --git a/e2e/src/sozu/worker.rs b/e2e/src/sozu/worker.rs index 1527a5115..cf16eea5a 100644 --- a/e2e/src/sozu/worker.rs +++ b/e2e/src/sozu/worker.rs @@ -282,11 +282,11 @@ impl Worker { pub fn default_tcp_frontend>( cluster_id: S, - address: String, + address: SocketAddr, ) -> RequestTcpFrontend { RequestTcpFrontend { cluster_id: cluster_id.into(), - address, + address: address.into(), ..Default::default() } } @@ -297,7 +297,7 @@ impl Worker { ) -> RequestHttpFrontend { 
RequestHttpFrontend { cluster_id: Some(cluster_id.into()), - address: address.to_string(), + address: address.into(), hostname: String::from("localhost"), path: PathRule::prefix(String::from("/")), position: RulePosition::Tree.into(), @@ -308,13 +308,13 @@ impl Worker { pub fn default_backend, S2: Into>( cluster_id: S1, backend_id: S2, - address: String, + address: SocketAddr, sticky_id: Option, ) -> AddBackend { AddBackend { cluster_id: cluster_id.into(), backend_id: backend_id.into(), - address, + address: address.into(), load_balancing_parameters: Some(LoadBalancingParams::default()), sticky_id, backup: None, diff --git a/e2e/src/tests/mod.rs b/e2e/src/tests/mod.rs index f7588f87b..1210e171e 100644 --- a/e2e/src/tests/mod.rs +++ b/e2e/src/tests/mod.rs @@ -22,6 +22,8 @@ use crate::{ sozu::worker::Worker, }; +use self::tests::create_local_address; + #[derive(PartialEq, Eq, Debug)] pub enum State { Success, @@ -55,14 +57,14 @@ pub fn setup_test>( worker.send_proxy_request(Request { request_type: Some(RequestType::AddHttpListener( - ListenerBuilder::new_http(front_address) + ListenerBuilder::new_http(front_address.into()) .to_http(None) .unwrap(), )), }); worker.send_proxy_request(Request { request_type: Some(RequestType::ActivateListener(ActivateListener { - address: front_address.to_string(), + address: front_address.into(), proxy: ListenerType::Http.into(), from_scm: false, })), @@ -82,21 +84,21 @@ pub fn setup_test>( let mut backends = Vec::new(); for i in 0..nb_backends { - let back_address: SocketAddr = format!("127.0.0.1:{}", provide_port()) - .parse() - .expect("could not parse back address"); - worker.send_proxy_request(Request { - request_type: Some(RequestType::AddBackend(Worker::default_backend( + let back_address = create_local_address(); + + worker.send_proxy_request( + RequestType::AddBackend(Worker::default_backend( "cluster_0", format!("cluster_0-{i}"), - back_address.to_string(), + back_address.into(), if should_stick { Some(format!("sticky_cluster_0-{i}")) } else { None }, - ))), - }); + )) + .into(), + ); backends.push(back_address); } diff --git a/e2e/src/tests/tests.rs b/e2e/src/tests/tests.rs index 27e35737b..36f2a171f 100644 --- a/e2e/src/tests/tests.rs +++ b/e2e/src/tests/tests.rs @@ -10,7 +10,7 @@ use sozu_command_lib::{ logging::setup_logging, proto::command::{ request::RequestType, ActivateListener, AddCertificate, CertificateAndKey, ListenerType, - RemoveBackend, RequestHttpFrontend, + RemoveBackend, RequestHttpFrontend, SocketAddress, }, scm_socket::Listeners, state::ConfigState, @@ -29,7 +29,7 @@ use crate::{ tests::{provide_port, repeat_until_error_or, setup_async_test, setup_sync_test, State}, }; -fn create_local_address() -> SocketAddr { +pub fn create_local_address() -> SocketAddr { let address: SocketAddr = format!("127.0.0.1:{}", provide_port()) .parse() .expect("could not parse front address"); @@ -295,12 +295,12 @@ pub fn try_issue_810_panic(part2: bool) -> State { let mut worker = Worker::start_new_worker("810-PANIC", config, &listeners, state); worker.send_proxy_request_type(RequestType::AddTcpListener( - ListenerBuilder::new_tcp(front_address) + ListenerBuilder::new_tcp(front_address.into()) .to_tcp(None) .unwrap(), )); worker.send_proxy_request_type(RequestType::ActivateListener(ActivateListener { - address: front_address.to_string(), + address: front_address.into(), proxy: ListenerType::Tcp.into(), from_scm: false, })); @@ -310,13 +310,13 @@ pub fn try_issue_810_panic(part2: bool) -> State { ))); 
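The pattern running through these end-to-end tests is to keep a `std::net::SocketAddr` inside the test logic (see `create_local_address`) and convert only at the request boundary. A minimal sketch of that boundary conversion, assuming the `SocketAddress` type and the `From` conversions introduced by this patch:

    use std::net::SocketAddr;
    use sozu_command_lib::proto::command::SocketAddress;

    fn main() {
        // parse the address once, as a std SocketAddr
        let front: SocketAddr = "127.0.0.1:2001".parse().unwrap();

        // convert at the protobuf boundary...
        let wire: SocketAddress = front.into();
        // ...and back whenever a std address is needed again
        let back: SocketAddr = wire.into();

        assert_eq!(front, back); // IPv4 addresses round-trip exactly
    }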
worker.send_proxy_request_type(RequestType::AddTcpFrontend(Worker::default_tcp_frontend( "cluster_0", - front_address.to_string(), + front_address, ))); worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0", - back_address.to_string(), + back_address, None, ))); worker.read_to_last(); @@ -356,22 +356,20 @@ pub fn try_issue_810_panic(part2: bool) -> State { pub fn try_tls_endpoint() -> State { let front_port = provide_port(); - let front_address: SocketAddr = format!("127.0.0.1:{}", front_port) - .parse() - .expect("could not parse front address"); + let front_address = SocketAddress::new_v4(127, 0, 0, 1, front_port); let back_address = create_local_address(); let (config, listeners, state) = Worker::empty_config(); let mut worker = Worker::start_new_worker("TLS-ENDPOINT", config, &listeners, state); worker.send_proxy_request_type(RequestType::AddHttpsListener( - ListenerBuilder::new_https(front_address) + ListenerBuilder::new_https(front_address.clone().into()) .to_tls(None) .unwrap(), )); worker.send_proxy_request_type(RequestType::ActivateListener(ActivateListener { - address: front_address.to_string(), + address: front_address.clone().into(), proxy: ListenerType::Https.into(), from_scm: false, })); @@ -384,7 +382,7 @@ pub fn try_tls_endpoint() -> State { let hostname = "localhost".to_string(); worker.send_proxy_request_type(RequestType::AddHttpsFrontend(RequestHttpFrontend { hostname: hostname.to_owned(), - ..Worker::default_http_frontend("cluster_0", front_address) + ..Worker::default_http_frontend("cluster_0", front_address.clone().into()) })); let certificate_and_key = CertificateAndKey { @@ -395,7 +393,7 @@ pub fn try_tls_endpoint() -> State { names: vec![], }; let add_certificate = AddCertificate { - address: front_address.to_string(), + address: front_address.into(), certificate: certificate_and_key, expired_at: None, }; @@ -404,7 +402,7 @@ pub fn try_tls_endpoint() -> State { worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0", - back_address.to_string(), + back_address, None, ))); worker.read_to_last(); @@ -643,12 +641,12 @@ fn try_http_behaviors() -> State { let mut worker = Worker::start_new_worker("BEHAVE-WORKER", config, &listeners, state); worker.send_proxy_request_type(RequestType::AddHttpListener( - ListenerBuilder::new_http(front_address) + ListenerBuilder::new_http(front_address.into()) .to_http(None) .unwrap(), )); worker.send_proxy_request_type(RequestType::ActivateListener(ActivateListener { - address: front_address.to_string(), + address: front_address.into(), proxy: ListenerType::Http.into(), from_scm: false, })); @@ -689,7 +687,7 @@ fn try_http_behaviors() -> State { worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0".to_string(), - back_address.to_string(), + back_address, None, ))); worker.read_to_last(); @@ -731,12 +729,12 @@ fn try_http_behaviors() -> State { worker.send_proxy_request_type(RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_0"), backend_id: String::from("cluster_0-0"), - address: back_address.to_string(), + address: back_address.into(), })); worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0".to_string(), - back_address.to_string(), + back_address, None, ))); backend.disconnect(); @@ -793,12 +791,12 @@ fn try_http_behaviors() -> State { 
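Removing and re-adding a backend reuses the same `SocketAddr`, converted independently into each request. A sketch of building such a matched pair, assuming the request types and fields as they appear in this patch:

    use std::net::SocketAddr;
    use sozu_command_lib::proto::command::{AddBackend, LoadBalancingParams, RemoveBackend};

    fn backend_pair(cluster: &str, id: &str, addr: SocketAddr) -> (AddBackend, RemoveBackend) {
        let add = AddBackend {
            cluster_id: cluster.to_string(),
            backend_id: id.to_string(),
            address: addr.into(),
            load_balancing_parameters: Some(LoadBalancingParams::default()),
            ..Default::default()
        };
        let remove = RemoveBackend {
            cluster_id: cluster.to_string(),
            backend_id: id.to_string(),
            address: addr.into(),
        };
        (add, remove)
    }

    fn main() {
        let (add, remove) = backend_pair("cluster_0", "cluster_0-0", "127.0.0.1:1026".parse().unwrap());
        println!("{add:?}\n{remove:?}");
    }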
worker.send_proxy_request_type(RequestType::RemoveBackend(RemoveBackend { cluster_id: String::from("cluster_0"), backend_id: String::from("cluster_0-0"), - address: back_address.to_string(), + address: back_address.into(), })); worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0".to_string(), - back_address.to_string(), + back_address, None, ))); backend.disconnect(); @@ -1012,7 +1010,7 @@ pub fn try_blue_geen() -> State { worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0", - primary_address.to_string(), + primary_address.into(), None, ))); worker.read_to_last(); @@ -1031,7 +1029,7 @@ pub fn try_blue_geen() -> State { worker.send_proxy_request_type(RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-1", - secondary_address.to_string(), + secondary_address.into(), None, ))); worker.read_to_last(); @@ -1043,7 +1041,7 @@ pub fn try_blue_geen() -> State { worker.send_proxy_request_type(RequestType::RemoveBackend(RemoveBackend { cluster_id: "cluster_0".to_string(), backend_id: "cluster_0-0".to_string(), - address: primary_address.to_string(), + address: primary_address.into(), })); worker.read_to_last(); @@ -1464,7 +1462,7 @@ fn try_wildcard() -> State { let mut worker = Worker::start_new_worker("WLD_CRD", config, &listeners, state); worker.send_proxy_request( RequestType::AddHttpListener( - ListenerBuilder::new_http(front_address) + ListenerBuilder::new_http(front_address.into()) .to_http(None) .unwrap(), ) @@ -1472,7 +1470,7 @@ fn try_wildcard() -> State { ); worker.send_proxy_request( RequestType::ActivateListener(ActivateListener { - address: front_address.to_string(), + address: front_address.into(), proxy: ListenerType::Http.into(), from_scm: false, }) @@ -1489,7 +1487,7 @@ fn try_wildcard() -> State { worker.send_proxy_request( RequestType::AddHttpFrontend(RequestHttpFrontend { cluster_id: Some("cluster_0".to_string()), - address: front_address.to_string(), + address: front_address.into(), hostname: String::from("*.sozu.io"), path: PathRule::prefix(String::from("")), position: RulePosition::Tree.into(), @@ -1503,7 +1501,7 @@ fn try_wildcard() -> State { RequestType::AddBackend(Worker::default_backend( "cluster_0", "cluster_0-0", - back_address.to_string(), + back_address, None, )) .into(), @@ -1536,7 +1534,7 @@ fn try_wildcard() -> State { worker.send_proxy_request( RequestType::AddHttpFrontend(RequestHttpFrontend { cluster_id: Some("cluster_1".to_string()), - address: front_address.to_string(), + address: front_address.into(), hostname: String::from("*.sozu.io"), path: PathRule::prefix(String::from("/api")), position: RulePosition::Tree.into(), @@ -1549,7 +1547,7 @@ fn try_wildcard() -> State { RequestType::AddBackend(Worker::default_backend( "cluster_1", "cluster_1-0", - back_address.to_string(), + back_address, None, )) .into(), diff --git a/lib/examples/http.rs b/lib/examples/http.rs index 7c38f85a2..81b2e77b1 100644 --- a/lib/examples/http.rs +++ b/lib/examples/http.rs @@ -14,7 +14,7 @@ use sozu_command_lib::{ logging::setup_logging, proto::command::{ request::RequestType, AddBackend, Cluster, LoadBalancingAlgorithms, LoadBalancingParams, - PathRule, RequestHttpFrontend, RulePosition, + PathRule, RequestHttpFrontend, RulePosition, SocketAddress, }, request::WorkerRequest, response::WorkerResponse, @@ -25,7 +25,7 @@ fn main() -> anyhow::Result<()> { info!("starting up"); - let http_listener = ListenerBuilder::new_http("127.0.0.1:8080") + let http_listener 
= ListenerBuilder::new_http(SocketAddress::new_v4(127, 0, 0, 1, 8080)) .to_http(None) .expect("Could not create HTTP listener"); @@ -57,7 +57,7 @@ fn main() -> anyhow::Result<()> { let http_front = RequestHttpFrontend { cluster_id: Some("my-cluster".to_string()), - address: "127.0.0.1:8080".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), hostname: "example.com".to_string(), path: PathRule::prefix(String::from("/")), position: RulePosition::Pre.into(), @@ -70,7 +70,7 @@ fn main() -> anyhow::Result<()> { let http_backend = AddBackend { cluster_id: "my-cluster".to_string(), backend_id: "test-backend".to_string(), - address: "127.0.0.1:8000".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), load_balancing_parameters: Some(LoadBalancingParams::default()), ..Default::default() }; diff --git a/lib/examples/https.rs b/lib/examples/https.rs index 036ade116..e0939e210 100644 --- a/lib/examples/https.rs +++ b/lib/examples/https.rs @@ -15,7 +15,7 @@ use sozu_command_lib::{ logging::setup_logging, proto::command::{ request::RequestType, AddBackend, AddCertificate, CertificateAndKey, LoadBalancingParams, - PathRule, RequestHttpFrontend, + PathRule, RequestHttpFrontend, SocketAddress, }, request::WorkerRequest, }; @@ -26,16 +26,15 @@ fn main() -> anyhow::Result<()> { info!("MAIN\tstarting up"); sozu_lib::metrics::setup( - &"127.0.0.1:8125" - .parse() - .with_context(|| "Could not parse address for metrics setup")?, + &SocketAddress::new_v4(127, 0, 0, 1, 8125).into(), "main", false, None, ); gauge!("sozu.TEST", 42); - let http_listener = ListenerBuilder::new_http("127.0.0.1:8080").to_http(None)?; + let http_listener = + ListenerBuilder::new_http(SocketAddress::new_v4(127, 0, 0, 1, 8080)).to_http(None)?; let (mut command, channel) = Channel::generate(1000, 10000).with_context(|| "should create a channel")?; @@ -52,7 +51,7 @@ fn main() -> anyhow::Result<()> { let http_front = RequestHttpFrontend { cluster_id: Some(String::from("cluster_1")), - address: "127.0.0.1:8080".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("/")), ..Default::default() @@ -62,7 +61,7 @@ fn main() -> anyhow::Result<()> { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), sticky_id: None, - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), backup: None, }; @@ -80,7 +79,8 @@ fn main() -> anyhow::Result<()> { info!("MAIN\tHTTP -> {:?}", command.read_message()); info!("MAIN\tHTTP -> {:?}", command.read_message()); - let https_listener = ListenerBuilder::new_https("127.0.0.1:8443").to_tls(None)?; + let https_listener = + ListenerBuilder::new_https(SocketAddress::new_v4(127, 0, 0, 1, 8443)).to_tls(None)?; let (mut command2, channel2) = Channel::generate(1000, 10000).with_context(|| "should create a channel")?; @@ -108,9 +108,7 @@ fn main() -> anyhow::Result<()> { command2.write_message(&WorkerRequest { id: String::from("ID_IJKL1"), content: RequestType::AddCertificate(AddCertificate { - address: "127.0.0.1:8443" - .parse() - .with_context(|| "Could not parse certificate address")?, + address: SocketAddress::new_v4(127, 0, 0, 1, 8443), certificate: certificate_and_key, expired_at: None, }) @@ -119,7 +117,7 @@ fn main() -> anyhow::Result<()> { let tls_front = RequestHttpFrontend { cluster_id: Some(String::from("cluster_1")), - address: "127.0.0.1:8443".to_string(), + address: 
SocketAddress::new_v4(127, 0, 0, 1, 8443), hostname: String::from("lolcatho.st"), path: PathRule::prefix(String::from("/")), ..Default::default() @@ -133,7 +131,7 @@ fn main() -> anyhow::Result<()> { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), sticky_id: None, - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), backup: None, }; @@ -157,9 +155,7 @@ fn main() -> anyhow::Result<()> { command2.write_message(&WorkerRequest { id: String::from("ID_QRST1"), content: RequestType::AddCertificate(AddCertificate { - address: "127.0.0.1:8443" - .parse() - .with_context(|| "Could not parse certificate address")?, + address: SocketAddress::new_v4(127, 0, 0, 1, 8443), certificate: certificate_and_key2, expired_at: None, }) @@ -168,7 +164,7 @@ fn main() -> anyhow::Result<()> { let tls_front2 = RequestHttpFrontend { cluster_id: Some(String::from("cluster_2")), - address: "127.0.0.1:8443".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8443), hostname: String::from("test.local"), path: PathRule::prefix(String::from("/")), ..Default::default() @@ -183,7 +179,7 @@ fn main() -> anyhow::Result<()> { cluster_id: String::from("cluster_2"), backend_id: String::from("cluster_2-0"), sticky_id: None, - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), backup: None, }; diff --git a/lib/examples/tcp.rs b/lib/examples/tcp.rs index 4ba4576ae..fc255bd8c 100644 --- a/lib/examples/tcp.rs +++ b/lib/examples/tcp.rs @@ -11,7 +11,7 @@ use sozu_command_lib::{ channel::Channel, logging::setup_logging, proto::command::{ - request::RequestType, AddBackend, LoadBalancingParams, RequestTcpFrontend, + request::RequestType, AddBackend, LoadBalancingParams, RequestTcpFrontend, SocketAddress, TcpListenerConfig, }, request::WorkerRequest, @@ -30,7 +30,7 @@ fn main() -> anyhow::Result<()> { let max_buffers = 500; let buffer_size = 16384; let listener = TcpListenerConfig { - address: "127.0.0.1:8080".parse().expect("could not parse address"), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), ..Default::default() }; setup_logging("stdout", None, "debug", "TCP"); @@ -39,13 +39,13 @@ fn main() -> anyhow::Result<()> { let tcp_front = RequestTcpFrontend { cluster_id: String::from("test"), - address: "127.0.0.1:8080".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 8080), ..Default::default() }; let tcp_backend = AddBackend { cluster_id: String::from("test"), backend_id: String::from("test-0"), - address: "127.0.0.1:1026".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1026), load_balancing_parameters: Some(LoadBalancingParams::default()), sticky_id: None, backup: None, diff --git a/lib/src/backends.rs b/lib/src/backends.rs index 141bbd108..1d054b162 100644 --- a/lib/src/backends.rs +++ b/lib/src/backends.rs @@ -169,7 +169,7 @@ impl std::ops::Drop for Backend { server::push_event(Event { kind: EventKind::RemovedBackendHasNoConnections as i32, backend_id: Some(self.backend_id.clone()), - address: Some(self.address.to_string()), + address: Some(self.address.into()), cluster_id: None, }); } diff --git a/lib/src/http.rs b/lib/src/http.rs index 2c97976e1..0b64695ce 100644 --- a/lib/src/http.rs +++ b/lib/src/http.rs @@ -523,11 +523,12 @@ impl HttpProxy { pub fn remove_listener(&mut self, remove: RemoveListener) -> Result<(), ProxyError> { let len = 
self.listeners.len(); + let remove_address = remove.address.clone().into(); self.listeners - .retain(|_, l| l.borrow().address.to_string() != remove.address); + .retain(|_, l| l.borrow().address != remove_address); if !self.listeners.len() < len { - info!("no HTTP listener to remove at address {:?}", remove.address); + info!("no HTTP listener to remove at address {:?}", remove_address); } Ok(()) } @@ -714,16 +715,9 @@ impl HttpProxy { impl HttpListener { pub fn new(config: HttpListenerConfig, token: Token) -> Result { - let address = config - .address - .parse::() - .map_err(|parse_error| ListenerError::SocketParse { - address: config.address.clone(), - error: parse_error.to_string(), - })?; Ok(HttpListener { active: false, - address, + address: config.address.clone().into(), answers: Rc::new(RefCell::new(HttpAnswers::new( &config.answer_404, &config.answer_503, @@ -744,15 +738,16 @@ impl HttpListener { if self.active { return Ok(self.token); } + let address: SocketAddr = self.config.address.clone().into(); let mut listener = match tcp_listener { Some(tcp_listener) => tcp_listener, - None => server_bind(self.config.address.clone()).map_err(|server_bind_error| { - ListenerError::Activation { - address: self.config.address.clone(), + None => { + server_bind(address).map_err(|server_bind_error| ListenerError::Activation { + address, error: server_bind_error.to_string(), - } - })?, + })? + } }; registry @@ -916,13 +911,10 @@ impl ProxyConfiguration for HttpProxy { return Err(AcceptError::RegisterError); } - let public_address: SocketAddr = owned - .config - .public_address - .clone() - .unwrap_or(owned.config.address.clone()) - .parse() - .map_err(|_| AcceptError::WrongSocketAddress)?; + let public_address: SocketAddr = match owned.config.public_address.clone() { + Some(pub_addr) => pub_addr.into(), + None => owned.config.address.clone().into(), + }; let session = HttpSession::new( owned.answers.clone(), @@ -1001,10 +993,7 @@ pub mod testing { max_buffers: usize, buffer_size: usize, ) -> anyhow::Result<()> { - let address = config - .address - .parse() - .with_context(|| "Could not parse socket address")?; + let address = config.address.clone().into(); let ServerParts { event_loop, @@ -1064,6 +1053,7 @@ mod tests { use super::testing::start_http_worker; use super::*; + use sozu_command::proto::command::SocketAddress; use crate::sozu_command::{ channel::Channel, @@ -1075,9 +1065,8 @@ mod tests { use std::{ io::{Read, Write}, - net::{SocketAddr, TcpStream}, + net::TcpStream, str, - str::FromStr, sync::{Arc, Barrier}, thread, time::Duration, @@ -1103,7 +1092,7 @@ mod tests { start_server(1025, barrier.clone()); barrier.wait(); - let config = ListenerBuilder::new_http("127.0.0.1:1024") + let config = ListenerBuilder::new_http(SocketAddress::new_v4(127, 0, 0, 1, 1024)) .to_http(None) .expect("could not create listener config"); @@ -1116,7 +1105,7 @@ mod tests { let front = RequestHttpFrontend { cluster_id: Some(String::from("cluster_1")), - address: "127.0.0.1:1024".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1024), hostname: String::from("localhost"), path: PathRule::prefix(String::from("/")), ..Default::default() @@ -1130,7 +1119,7 @@ mod tests { let backend = Backend { cluster_id: String::from("cluster_1"), backend_id: String::from("cluster_1-0"), - address: "127.0.0.1:1025".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1025).into(), load_balancing_parameters: Some(LoadBalancingParams::default()), sticky_id: None, backup: None, @@ -1185,7 +1174,7 @@ mod tests { 
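With listeners keyed by `SocketAddr`, `remove_listener` can compare addresses directly instead of re-stringifying them; note that the surrounding `if !self.listeners.len() < len` check applies a bitwise NOT to a `usize` before comparing, so the "no listener to remove" log is effectively unreachable. A self-contained sketch of the intended check, with a plain `HashMap` standing in for the proxy's listener map:

    use std::{collections::HashMap, net::SocketAddr};

    /// Remove every listener bound to `remove_address`; return true if any was removed.
    fn remove_listener(listeners: &mut HashMap<usize, SocketAddr>, remove_address: SocketAddr) -> bool {
        let len = listeners.len();
        listeners.retain(|_token, addr| *addr != remove_address);
        listeners.len() < len
    }

    fn main() {
        let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
        let mut listeners = HashMap::from([(1usize, addr)]);
        assert!(remove_listener(&mut listeners, addr));
        assert!(!remove_listener(&mut listeners, addr)); // nothing left to remove
    }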
start_server(1028, barrier.clone()); barrier.wait(); - let config = ListenerBuilder::new_http("127.0.0.1:1031") + let config = ListenerBuilder::new_http(SocketAddress::new_v4(127, 0, 0, 1, 1031)) .to_http(None) .expect("could not create listener config"); @@ -1198,7 +1187,7 @@ mod tests { }); let front = RequestHttpFrontend { - address: "127.0.0.1:1031".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1031), hostname: String::from("localhost"), path: PathRule::prefix(String::from("/")), cluster_id: Some(String::from("cluster_1")), @@ -1211,7 +1200,7 @@ mod tests { }) .unwrap(); let backend = Backend { - address: "127.0.0.1:1028".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1028).into(), backend_id: String::from("cluster_1-0"), backup: None, cluster_id: String::from("cluster_1"), @@ -1295,7 +1284,7 @@ mod tests { fn https_redirect() { setup_test_logger!(); - let config = ListenerBuilder::new_http("127.0.0.1:1041") + let config = ListenerBuilder::new_http(SocketAddress::new_v4(127, 0, 0, 1, 1041)) .to_http(None) .expect("could not create listener config"); @@ -1320,7 +1309,7 @@ mod tests { }) .unwrap(); let front = RequestHttpFrontend { - address: "127.0.0.1:1041".to_string(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1041), hostname: String::from("localhost"), path: PathRule::prefix(String::from("/")), cluster_id: Some(String::from("cluster_1")), @@ -1333,7 +1322,7 @@ mod tests { }) .unwrap(); let backend = Backend { - address: "127.0.0.1:1040".parse().unwrap(), + address: SocketAddress::new_v4(127, 0, 0, 1, 1040).into(), backend_id: String::from("cluster_1-0"), backup: None, cluster_id: String::from("cluster_1"), @@ -1468,16 +1457,15 @@ mod tests { }) .expect("Could not add http frontend"); - let address: SocketAddr = - FromStr::from_str("127.0.0.1:1030").expect("could not parse address"); + let address = SocketAddress::new_v4(127, 0, 0, 1, 1030); - let default_config = ListenerBuilder::new_http(address) + let default_config = ListenerBuilder::new_http(address.clone()) .to_http(None) .expect("Could not create default HTTP listener config"); let listener = HttpListener { listener: None, - address, + address: address.into(), fronts, answers: Rc::new(RefCell::new(HttpAnswers::new( "HTTP/1.1 404 Not Found\r\n\r\n", diff --git a/lib/src/https.rs b/lib/src/https.rs index c1927a2c3..e50d0c385 100644 --- a/lib/src/https.rs +++ b/lib/src/https.rs @@ -657,17 +657,9 @@ impl HttpsListener { let server_config = Arc::new(Self::create_rustls_context(&config, resolver.to_owned())?); - let address = config - .address - .parse::() - .map_err(|parse_error| ListenerError::SocketParse { - address: config.address.clone(), - error: parse_error.to_string(), - })?; - Ok(HttpsListener { listener: None, - address, + address: config.address.clone().into(), resolver, rustls_details: server_config, active: false, @@ -690,15 +682,16 @@ impl HttpsListener { if self.active { return Ok(self.token); } + let address: StdSocketAddr = self.config.address.clone().into(); let mut listener = match tcp_listener { Some(tcp_listener) => tcp_listener, - None => server_bind(self.config.address.clone()).map_err(|server_bind_error| { - ListenerError::Activation { - address: self.config.address.clone(), + None => { + server_bind(address).map_err(|server_bind_error| ListenerError::Activation { + address, error: server_bind_error.to_string(), - } - })?, + })? 
+ } }; registry @@ -857,14 +850,12 @@ impl HttpsProxy { ) -> Result, ProxyError> { let len = self.listeners.len(); + let remove_address = remove.address.clone().into(); self.listeners - .retain(|_, listener| listener.borrow().address.to_string() != remove.address); + .retain(|_, listener| listener.borrow().address != remove_address); if !self.listeners.len() < len { - info!( - "no HTTPS listener to remove at address {:?}", - remove.address - ) + info!("no HTTPS listener to remove at address {}", remove_address) } Ok(None) } @@ -944,7 +935,7 @@ impl HttpsProxy { .collect(); CertificatesByAddress { - address: owned.address.to_string(), + address: owned.address.into(), certificate_summaries, } }) @@ -979,7 +970,7 @@ impl HttpsProxy { }); } CertificatesByAddress { - address: owned.address.to_string(), + address: owned.address.into(), certificate_summaries, } }) @@ -1134,13 +1125,7 @@ impl HttpsProxy { &mut self, add_certificate: AddCertificate, ) -> Result, ProxyError> { - let address = add_certificate - .address - .parse::() - .map_err(|parse_error| ProxyError::SocketParse { - address: add_certificate.address.clone(), - error: parse_error.to_string(), - })?; + let address = add_certificate.address.clone().into(); let listener = self .listeners @@ -1161,13 +1146,7 @@ impl HttpsProxy { &mut self, remove_certificate: RemoveCertificate, ) -> Result, ProxyError> { - let address = remove_certificate - .address - .parse::() - .map_err(|parse_error| ProxyError::SocketParse { - address: remove_certificate.address, - error: parse_error.to_string(), - })?; + let address = remove_certificate.address.into(); let fingerprint = Fingerprint( hex::decode(&remove_certificate.fingerprint) @@ -1193,13 +1172,7 @@ impl HttpsProxy { &mut self, replace_certificate: ReplaceCertificate, ) -> Result, ProxyError> { - let address = replace_certificate - .address - .parse::() - .map_err(|parse_error| ProxyError::SocketParse { - address: replace_certificate.address.clone(), - error: parse_error.to_string(), - })?; + let address = replace_certificate.address.clone().into(); let listener = self .listeners @@ -1266,13 +1239,10 @@ impl ProxyConfiguration for HttpsProxy { AcceptError::RegisterError })?; - let public_address: StdSocketAddr = owned - .config - .public_address - .clone() - .unwrap_or(owned.config.address.clone()) - .parse() - .map_err(|_| AcceptError::WrongSocketAddress)?; + let public_address: StdSocketAddr = match owned.config.public_address.clone() { + Some(pub_addr) => pub_addr.into(), + None => owned.config.address.clone().into(), + }; let session = Rc::new(RefCell::new(HttpsSession::new( owned.answers.clone(), @@ -1505,10 +1475,7 @@ pub mod testing { max_buffers: usize, buffer_size: usize, ) -> anyhow::Result<()> { - let address = config - .address - .parse() - .with_context(|| "Could not parse socket address")?; + let address = config.address.clone().into(); let ServerParts { event_loop, @@ -1566,9 +1533,9 @@ pub mod testing { mod tests { use super::*; - use std::{str::FromStr, sync::Arc}; + use std::sync::Arc; - use sozu_command::config::ListenerBuilder; + use sozu_command::{config::ListenerBuilder, proto::command::SocketAddress}; use crate::router::{trie::TrieNode, MethodRule, PathRule, Route, Router}; @@ -1624,8 +1591,7 @@ mod tests { &Route::ClusterId(cluster_id1) )); - let address: StdSocketAddr = FromStr::from_str("127.0.0.1:1032") - .expect("test address 127.0.0.1:1032 should be parsed"); + let address = SocketAddress::new_v4(127, 0, 0, 1, 1032); let resolver = 
Arc::new(MutexWrappedCertificateResolver::default()); let server_config = ServerConfig::builder_with_protocol_versions(&[ @@ -1637,13 +1603,13 @@ mod tests { let rustls_details = Arc::new(server_config); - let default_config = ListenerBuilder::new_https(address) + let default_config = ListenerBuilder::new_https(address.clone()) .to_tls(None) .expect("Could not create default HTTPS listener config"); let listener = HttpsListener { listener: None, - address, + address: address.into(), fronts, rustls_details, resolver, diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 12c8d7f9b..7d3768411 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -36,9 +36,10 @@ //! We need the `sozu_command_lib` to build a listener. //! //! ``` -//! use sozu_command_lib::config::ListenerBuilder; +//! use sozu_command_lib::{config::ListenerBuilder, proto::command::SocketAddress}; //! -//! let http_listener = ListenerBuilder::new_http("127.0.0.1:8080") +//! let address = SocketAddress::new_v4(127,0,0,1,8080); +//! let http_listener = ListenerBuilder::new_http(address) //! .to_http(None) //! .expect("Could not create HTTP listener"); //! ``` @@ -120,11 +121,11 @@ //! ``` //! use std::collections::BTreeMap; //! -//! use sozu_command_lib::proto::command::{PathRule, RequestHttpFrontend, RulePosition}; +//! use sozu_command_lib::proto::command::{PathRule, RequestHttpFrontend, RulePosition, SocketAddress}; //! //! let http_front = RequestHttpFrontend { //! cluster_id: Some("my-cluster".to_string()), -//! address: "127.0.0.1:8080".to_string(), +//! address: SocketAddress::new_v4(127,0,0,1,8080), //! hostname: "example.com".to_string(), //! path: PathRule::prefix(String::from("/")), //! position: RulePosition::Pre.into(), @@ -144,12 +145,12 @@ //! The `address` field must match the IP and port of the backend server. //! //! ``` -//! use sozu_command_lib::proto::command::{AddBackend, LoadBalancingParams}; +//! use sozu_command_lib::proto::command::{AddBackend, LoadBalancingParams, SocketAddress}; //! //! let http_backend = AddBackend { //! cluster_id: "my-cluster".to_string(), //! backend_id: "test-backend".to_string(), -//! address: "127.0.0.1:8000".to_string(), +//! address: SocketAddress::new_v4(127,0,0,1,8000), //! load_balancing_parameters: Some(LoadBalancingParams::default()), //! ..Default::default() //! }; @@ -226,7 +227,7 @@ //! logging::setup_logging, //! proto::command::{ //! request::RequestType, AddBackend, Cluster, LoadBalancingAlgorithms, LoadBalancingParams, -//! PathRule, Request, RequestHttpFrontend, RulePosition, +//! PathRule, Request, RequestHttpFrontend, RulePosition, SocketAddress, //! }, //! request::WorkerRequest, //! }; @@ -236,7 +237,7 @@ //! //! info!("starting up"); //! -//! let http_listener = ListenerBuilder::new_http("127.0.0.1:8080") +//! let http_listener = ListenerBuilder::new_http(SocketAddress::new_v4(127,0,0,1,8080)) //! .to_http(None) //! .expect("Could not create HTTP listener"); //! @@ -261,7 +262,7 @@ //! //! let http_front = RequestHttpFrontend { //! cluster_id: Some("my-cluster".to_string()), -//! address: "127.0.0.1:8080".to_string(), +//! address: SocketAddress::new_v4(127,0,0,1,8080), //! hostname: "example.com".to_string(), //! path: PathRule::prefix(String::from("/")), //! position: RulePosition::Pre.into(), @@ -274,7 +275,7 @@ //! let http_backend = AddBackend { //! cluster_id: "my-cluster".to_string(), //! backend_id: "test-backend".to_string(), -//! address: "127.0.0.1:8000".to_string(), +//! address: SocketAddress::new_v4(127,0,0,1,8000), //! 
@@ -274,7 +275,7 @@
 //!     let http_backend = AddBackend {
 //!         cluster_id: "my-cluster".to_string(),
 //!         backend_id: "test-backend".to_string(),
-//!         address: "127.0.0.1:8000".to_string(),
+//!         address: SocketAddress::new_v4(127,0,0,1,8000),
 //!         load_balancing_parameters: Some(LoadBalancingParams::default()),
 //!         ..Default::default()
 //!     };
@@ -649,10 +650,8 @@ pub enum ListenerError {
     PemParse(String),
     #[error("failed to build rustls context, {0}")]
     BuildRustls(String),
-    #[error("Wrong socket address")]
-    SocketParse { address: String, error: String },
-    #[error("could not activate listener with address {address}: {error}")]
-    Activation { address: String, error: String },
+    #[error("could not activate listener with address {address:?}: {error}")]
+    Activation { address: SocketAddr, error: String },
     #[error("Could not register listener socket: {0}")]
     SocketRegistration(std::io::Error),
     #[error("could not add frontend: {0}")]
@@ -700,8 +699,6 @@ pub enum ProxyError {
     RemoveCertificate(ListenerError),
     #[error("could not replace certificate: {0}")]
     ReplaceCertificate(ListenerError),
-    #[error("Wrong address {address}: {error}")]
-    SocketParse { address: String, error: String },
     #[error("wrong certificate fingerprint: {0}")]
     WrongCertificateFingerprint(String),
     #[error("this request is not supported by the proxy")]
diff --git a/lib/src/protocol/kawa_h1/mod.rs b/lib/src/protocol/kawa_h1/mod.rs
index 3d2387573..50eac5b2c 100644
--- a/lib/src/protocol/kawa_h1/mod.rs
+++ b/lib/src/protocol/kawa_h1/mod.rs
@@ -1409,7 +1409,7 @@ impl Http Http WorkerResponse {
         let new_backend = Backend::new(
             &add_backend.backend_id,
-            add_backend.address.parse().unwrap(),
+            add_backend.address.clone().into(),
             add_backend.sticky_id.clone(),
             add_backend.load_balancing_parameters.clone(),
             add_backend.backup,
@@ -1111,7 +1111,7 @@ impl Server {
     }
 
     fn remove_backend(&mut self, req_id: &str, backend: &RemoveBackend) -> WorkerResponse {
-        let address = backend.address.parse().unwrap();
+        let address = backend.address.clone().into();
         self.backends
             .borrow_mut()
             .remove_backend(&backend.cluster_id, &address);
@@ -1232,10 +1232,7 @@ impl Server {
             req_id, activate.proxy, activate
         );
 
-        let address: std::net::SocketAddr = match activate.address.parse() {
-            Ok(a) => a,
-            Err(e) => return WorkerResponse::error(req_id, format!("Wrong socket address: {e}")),
-        };
+        let address: std::net::SocketAddr = activate.address.clone().into();
 
         match ListenerType::try_from(activate.proxy) {
             Ok(ListenerType::Http) => {
@@ -1312,10 +1309,7 @@ impl Server {
             req_id, deactivate.proxy, deactivate
         );
 
-        let address: std::net::SocketAddr = match deactivate.address.parse() {
-            Ok(a) => a,
-            Err(e) => return WorkerResponse::error(req_id, format!("Wrong socket address: {e}")),
-        };
+        let address: std::net::SocketAddr = deactivate.address.clone().into();
 
         match ListenerType::try_from(deactivate.proxy) {
             Ok(ListenerType::Http) => {
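In `server.rs`, activating or deactivating a listener no longer has a failure path for the address: the protobuf message already carries a structured address, so the old `parse()`-and-return-error branch disappears. A rough sketch of the resulting handler shape, using hypothetical stand-in types rather than the generated ones:

```
use std::net::SocketAddr;

// Hypothetical stand-ins for the generated request types.
#[derive(Clone)]
struct ProtoAddress(SocketAddr);

impl From<ProtoAddress> for SocketAddr {
    fn from(addr: ProtoAddress) -> Self {
        addr.0
    }
}

struct ActivateListener {
    address: ProtoAddress,
}

// Before: `activate.address.parse()` could fail, forcing an early error response.
// After: the conversion is infallible, so the handler keeps a single happy path.
fn listener_address(activate: &ActivateListener) -> SocketAddr {
    activate.address.clone().into()
}

fn main() {
    let activate = ActivateListener {
        address: ProtoAddress("127.0.0.1:8080".parse().expect("valid literal")),
    };
    assert_eq!(listener_address(&activate).port(), 8080);
}
```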
diff --git a/lib/src/socket.rs b/lib/src/socket.rs
index 9440fb054..25e278211 100644
--- a/lib/src/socket.rs
+++ b/lib/src/socket.rs
@@ -465,20 +465,9 @@ impl SocketHandler for FrontRustls {
     }
 }
 
-pub fn server_bind(addr: String) -> Result {
-    let address = addr.parse::().map_err(|parse_error| {
-        ServerBindError::InvalidSocketAddress {
-            address: addr.clone(),
-            error: parse_error.to_string(),
-        }
-    })?;
-
-    let sock = Socket::new(
-        Domain::for_address(address),
-        Type::STREAM,
-        Some(Protocol::TCP),
-    )
-    .map_err(ServerBindError::SocketCreationError)?;
+pub fn server_bind(addr: SocketAddr) -> Result {
+    let sock = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))
+        .map_err(ServerBindError::SocketCreationError)?;
 
     // set so_reuseaddr, but only on unix (mirrors what libstd does)
     if cfg!(unix) {
@@ -489,9 +478,8 @@ pub fn server_bind(addr: String) -> Result {
     sock.set_reuse_port(true)
         .map_err(ServerBindError::SetReusePort)?;
 
-    // bind the socket
-    let addr = address.into();
-    sock.bind(&addr).map_err(ServerBindError::BindError)?;
+    sock.bind(&addr.into())
+        .map_err(ServerBindError::BindError)?;
 
     sock.set_nonblocking(true)
         .map_err(ServerBindError::SetNonBlocking)?;
diff --git a/lib/src/tcp.rs b/lib/src/tcp.rs
index ff22be9dd..1323c412c 100644
--- a/lib/src/tcp.rs
+++ b/lib/src/tcp.rs
@@ -449,7 +449,7 @@ impl TcpSession {
             push_event(Event {
                 kind: EventKind::BackendUp as i32,
                 backend_id: Some(backend.backend_id.to_owned()),
-                address: Some(backend.address.to_string()),
+                address: Some(backend.address.into()),
                 cluster_id: None,
             });
         }
@@ -499,7 +499,7 @@ impl TcpSession {
             push_event(Event {
                 kind: EventKind::BackendDown as i32,
                 backend_id: Some(backend.backend_id.to_owned()),
-                address: Some(backend.address.to_string()),
+                address: Some(backend.address.into()),
                 cluster_id: None,
             });
         }
@@ -1032,19 +1032,11 @@ impl TcpListener {
         pool: Rc>,
         token: Token,
     ) -> Result {
-        let address = config
-            .address
-            .parse::()
-            .map_err(|parse_error| ListenerError::SocketParse {
-                address: config.address.clone(),
-                error: parse_error.to_string(),
-            })?;
-
         Ok(TcpListener {
             cluster_id: None,
             listener: None,
             token,
-            address,
+            address: config.address.clone().into(),
             pool,
             config,
             active: false,
@@ -1063,7 +1055,7 @@ impl TcpListener {
         }
 
         let mut listener = tcp_listener.or_else(|| {
-            server_bind(self.config.address.clone())
+            server_bind(self.config.address.clone().into())
                 .map_err(|e| {
                     error!("could not create listener {:?}: {}", self.config.address, e);
                 })
@@ -1202,14 +1194,7 @@ impl TcpProxy {
     }
 
     pub fn add_tcp_front(&mut self, front: RequestTcpFrontend) -> Result<(), ProxyError> {
-        let address = front
-            .address
-            .parse::()
-            .map_err(|parse_error| ProxyError::SocketParse {
-                address: front.address.clone(),
-                error: parse_error.to_string(),
-            })?;
+        let address = front.address.into();
 
         let mut listener = self
             .listeners
@@ -1220,20 +1205,13 @@ impl TcpProxy {
         self.fronts
             .insert(front.cluster_id.to_string(), listener.token);
 
-        listener.set_tags(front.address.to_string(), Some(front.tags));
+        listener.set_tags(address.to_string(), Some(front.tags));
         listener.cluster_id = Some(front.cluster_id);
         Ok(())
     }
 
     pub fn remove_tcp_front(&mut self, front: RequestTcpFrontend) -> Result<(), ProxyError> {
-        let address = front
-            .address
-            .parse::()
-            .map_err(|parse_error| ProxyError::SocketParse {
-                address: front.address.clone(),
-                error: parse_error.to_string(),
-            })?;
+        let address = front.address.into();
 
         let mut listener = match self
             .listeners
@@ -1244,7 +1222,7 @@ impl TcpProxy {
             None => return Err(ProxyError::NoListenerFound(address)),
         };
 
-        listener.set_tags(front.address, None);
+        listener.set_tags(address.to_string(), None);
 
         if let Some(cluster_id) = listener.cluster_id.take() {
             self.fronts.remove(&cluster_id);
         }
@@ -1325,16 +1303,7 @@ impl ProxyConfiguration for TcpProxy {
                 WorkerResponse::ok(message.id)
             }
             RequestType::RemoveListener(remove) => {
-                let address = match remove.address.parse() {
-                    Ok(a) => a,
-                    Err(e) => {
-                        return WorkerResponse::error(
-                            message.id,
-                            format!("Wrong socket address: {e}"),
-                        )
-                    }
-                };
-                if !self.remove_listener(address) {
+                if !self.remove_listener(remove.address.clone().into()) {
                     WorkerResponse::error(
                         message.id,
                         format!("no TCP listener to remove at address {:?}", remove.address),
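With `server_bind` now taking a `SocketAddr`, the parse-and-report-error preamble is gone and only the socket setup remains. A standalone sketch of that bind sequence follows; error handling is collapsed into `io::Result`, whereas sozu's version maps each step to a dedicated `ServerBindError` variant and only sets `SO_REUSEPORT` on Unix:

```
use std::net::{SocketAddr, TcpListener};

use socket2::{Domain, Protocol, Socket, Type};

fn bind_listener(addr: SocketAddr) -> std::io::Result<TcpListener> {
    // The caller hands over an already-typed address, so no string parsing happens here.
    let sock = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?;
    sock.set_reuse_address(true)?;
    sock.bind(&addr.into())?;
    sock.set_nonblocking(true)?;
    sock.listen(1024)?;
    Ok(sock.into())
}

fn main() -> std::io::Result<()> {
    let addr: SocketAddr = "127.0.0.1:0".parse().expect("valid literal");
    let listener = bind_listener(addr)?;
    println!("bound to {}", listener.local_addr()?);
    Ok(())
}
```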
@@ -1475,10 +1444,7 @@ pub mod testing {
         buffer_size: usize,
         channel: ProxyChannel,
     ) -> anyhow::Result<()> {
-        let address = config
-            .address
-            .parse()
-            .with_context(|| "Could not parse socket address")?;
+        let address = config.address.clone().into();
 
         let ServerParts {
             event_loop,
@@ -1537,6 +1503,7 @@ mod tests {
     use super::testing::start_tcp_worker;
     use crate::testing::*;
 
+    use sozu_command::proto::command::SocketAddress;
     use std::{
         io::{Read, Write},
         net::{Shutdown, TcpListener, TcpStream},
@@ -1673,7 +1640,7 @@ mod tests {
 
     /// used in tests only
     pub fn start_proxy() -> anyhow::Result> {
-        let config = ListenerBuilder::new_tcp("127.0.0.1:1234")
+        let config = ListenerBuilder::new_tcp(SocketAddress::new_v4(127, 0, 0, 1, 1234))
             .to_tcp(None)
             .expect("could not create listener config");
 
@@ -1688,15 +1655,13 @@ mod tests {
         {
             let front = RequestTcpFrontend {
                 cluster_id: String::from("yolo"),
-                address: "127.0.0.1:1234".to_string(),
+                address: SocketAddress::new_v4(127, 0, 0, 1, 1234),
                 ..Default::default()
             };
             let backend = sozu_command_lib::response::Backend {
                 cluster_id: String::from("yolo"),
                 backend_id: String::from("yolo-0"),
-                address: "127.0.0.1:5678"
-                    .parse()
-                    .with_context(|| "Could not parse address")?,
+                address: SocketAddress::new_v4(127, 0, 0, 1, 5678).into(),
                 load_balancing_parameters: Some(LoadBalancingParams::default()),
                 sticky_id: None,
                 backup: None,
@@ -1718,15 +1683,13 @@ mod tests {
         {
             let front = RequestTcpFrontend {
                 cluster_id: String::from("yolo"),
-                address: "127.0.0.1:1235".to_string(),
+                address: SocketAddress::new_v4(127, 0, 0, 1, 1235),
                 ..Default::default()
             };
             let backend = sozu_command::response::Backend {
                 cluster_id: String::from("yolo"),
                 backend_id: String::from("yolo-0"),
-                address: "127.0.0.1:5678"
-                    .parse()
-                    .with_context(|| "Could not parse address")?,
+                address: SocketAddress::new_v4(127, 0, 0, 1, 5678).into(),
                 load_balancing_parameters: Some(LoadBalancingParams::default()),
                 sticky_id: None,
                 backup: None,
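The TCP test changes above follow the same pattern: frontends keep the protobuf `SocketAddress`, while `Backend` still stores a `std::net::SocketAddr` and therefore needs the `.into()`. A trimmed sketch of that setup, with hypothetical cluster and backend names, assuming the `Backend` field set shown in the diff:

```
use sozu_command_lib::{
    proto::command::{LoadBalancingParams, RequestTcpFrontend, SocketAddress},
    response::Backend,
};

fn main() {
    // The frontend keeps the protobuf address as-is...
    let _front = RequestTcpFrontend {
        cluster_id: String::from("example-cluster"),
        address: SocketAddress::new_v4(127, 0, 0, 1, 1234),
        ..Default::default()
    };

    // ...while Backend stores a std::net::SocketAddr, hence the `.into()`.
    let backend = Backend {
        cluster_id: String::from("example-cluster"),
        backend_id: String::from("example-cluster-0"),
        address: SocketAddress::new_v4(127, 0, 0, 1, 5678).into(),
        load_balancing_parameters: Some(LoadBalancingParams::default()),
        sticky_id: None,
        backup: None,
    };

    assert_eq!(backend.address.port(), 5678);
}
```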
diff --git a/lib/src/tls.rs b/lib/src/tls.rs
index e6055d01f..e061c3d05 100644
--- a/lib/src/tls.rs
+++ b/lib/src/tls.rs
@@ -490,12 +490,12 @@ mod tests {
     use rand::{seq::SliceRandom, thread_rng};
     use sozu_command::{
         certificate::parse_pem,
-        proto::command::{AddCertificate, CertificateAndKey},
+        proto::command::{AddCertificate, CertificateAndKey, SocketAddress},
     };
 
     #[test]
     fn lifecycle() -> Result<(), Box> {
-        let address = "127.0.0.1:8080".to_string();
+        let address = SocketAddress::new_v4(127, 0, 0, 1, 8080);
         let mut resolver = CertificateResolver::default();
         let certificate_and_key = CertificateAndKey {
             certificate: String::from(include_str!("../assets/certificate.pem")),
@@ -531,7 +531,7 @@ mod tests {
     #[test]
     fn name_override() -> Result<(), Box> {
-        let address = "127.0.0.1:8080".to_string();
+        let address = SocketAddress::new_v4(127, 0, 0, 1, 8080);
         let mut resolver = CertificateResolver::default();
         let certificate_and_key = CertificateAndKey {
             certificate: String::from(include_str!("../assets/certificate.pem")),
@@ -576,7 +576,7 @@ mod tests {
     #[test]
     fn replacement() -> Result<(), Box> {
-        let address = "127.0.0.1:8080".to_string();
+        let address = SocketAddress::new_v4(127, 0, 0, 1, 8080);
         let mut resolver = CertificateResolver::default();
 
         // ---------------------------------------------------------------------
@@ -641,7 +641,7 @@ mod tests {
     #[test]
     fn expiration_override() -> Result<(), Box> {
-        let address = "127.0.0.1:8080".to_string();
+        let address = SocketAddress::new_v4(127, 0, 0, 1, 8080);
         let mut resolver = CertificateResolver::default();
 
         // ---------------------------------------------------------------------
@@ -758,7 +758,7 @@ mod tests {
         // ---------------------------------------------------------------------
         // load certificates in resolver
-        let address = "127.0.0.1:8080".to_string();
+        let address = SocketAddress::new_v4(127, 0, 0, 1, 8080);
         let mut resolver = CertificateResolver::default();
 
         for certificate in &certificates {