Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Allow put_metric_data to be sent gzipped #58

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion .github/workflows/quickstart.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,16 @@ jobs:
build:
runs-on: ubuntu-latest

env:
# All features except integration-test as we haven't set up an AWS account on GitHub CI for those to work
FEATURES: "gzip"

steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo fmt -- --check && ./clippy.sh && cargo test && cargo test --benches
- name: Lint
run: cargo fmt -- --check && ./clippy.sh
- name: Test
run: cargo test --all-targets --features "${FEATURES}"
- name: Check no-default-features
run: cargo check --all-targets --no-default-features
8 changes: 8 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,19 @@ metrics-util = { version = "0.19", features = ["registry"] }
ordered-float = "4"
rand = { version = "0.8", default-features = false, features = [ "small_rng", "std", "std_rng", ] }
tokio = { version = "1", features = ["sync", "time"] }
flate2 = { version = "1", optional = true }


[dev-dependencies]
anyhow = "1"
aws-config = "1"
criterion = { version = "0.5", features = ["async_tokio"] }
proptest = "1"
tokio = { version = "1", features = ["test-util", "macros", "rt-multi-thread"] }

[features]
gzip = ["flate2"]
integration-test = ["gzip"]

[[bench]]
name = "bench"
Expand Down
27 changes: 24 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,39 @@ cargo add -s metrics metrics_cloudwatch
```

```rust

use futures_util::FutureExt;

#[tokio::main]
async fn main() {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let sdk_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
.load()
.await;
let client = aws_sdk_cloudwatch::Client::new(&sdk_config);


let (shutdown_sender, receiver) = tokio::sync::oneshot::channel::<()>();

// Initialize the backend
tokio::spawn(metrics_cloudwatch::Builder::new()
let driver = metrics_cloudwatch::Builder::new()
.shutdown_signal(receiver.map(|_| ()).boxed())
.cloudwatch_namespace("my-namespace")
.init_future(client, metrics::set_global_recorder));
.init_async(client, metrics::set_global_recorder)
.await?;

// `metrics_cloudwatch` uses a background task to buffer and send metrics to CloudWatch
let metrics_task = tokio::spawn(driver);

// Run your application, emitting metrics as needed
metrics::counter!("requests").increment(1);

// Signal `metrics_task` to shutdown
drop(shutdown_sender);

// Wait for `metrics_task` to flush all buffered metrics before shutting down
metrics_task.await?;

Ok(())
}
```

Expand Down
28 changes: 13 additions & 15 deletions benches/bench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,20 +22,16 @@ fn simple(c: &mut Criterion) {
let cloudwatch_client = common::MockCloudWatchClient::default();

let (_shutdown_sender, receiver) = tokio::sync::oneshot::channel::<()>();
let (recorder, task) = collector::new(
cloudwatch_client,
collector::Config {
cloudwatch_namespace: "".into(),
default_dimensions: Default::default(),
storage_resolution: collector::Resolution::Second,
send_interval_secs: 200,
send_timeout_secs: 10,
shutdown_signal: receiver.map(|_| ()).boxed().shared(),
metric_buffer_size: 1024,
force_flush_stream: Some(Box::pin(futures_util::stream::empty())),
},
);
metrics::set_global_recorder(recorder).unwrap();
let task = metrics_cloudwatch::Builder::new()
.cloudwatch_namespace("metrics-cloudwatch-bench")
.storage_resolution(collector::Resolution::Second)
.send_interval_secs(200)
.send_timeout_secs(10)
.shutdown_signal(receiver.map(|_| ()).boxed())
.metric_buffer_size(1024)
.init_future_mock(cloudwatch_client, metrics::set_global_recorder)
.await
.unwrap();
tokio::spawn(task);
});

Expand Down Expand Up @@ -71,8 +67,10 @@ fn simple(c: &mut Criterion) {
send_timeout_secs: 10,
shutdown_signal: receiver.map(|_| ()).boxed().shared(),
metric_buffer_size: 1024,
force_flush_stream: Some(Box::pin(futures_util::stream::empty())),
#[cfg(feature = "gzip")]
gzip: false,
},
Some(Box::pin(futures_util::stream::empty())),
);

let task = tokio::spawn(task);
Expand Down
100 changes: 75 additions & 25 deletions src/builder.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::{collections::BTreeMap, fmt, pin::Pin};
use std::{collections::BTreeMap, fmt, future::Future, pin::Pin};

use futures_util::{future, FutureExt, Stream};

Expand All @@ -17,6 +17,9 @@ pub struct Builder {
shutdown_signal: Option<BoxFuture<'static, ()>>,
metric_buffer_size: usize,
force_flush_stream: Option<Pin<Box<dyn Stream<Item = ()> + Send>>>,

#[cfg(feature = "gzip")]
gzip: bool,
}

fn extract_namespace(cloudwatch_namespace: Option<String>) -> Result<String, Error> {
Expand All @@ -39,6 +42,9 @@ impl Builder {
shutdown_signal: Default::default(),
metric_buffer_size: 2048,
force_flush_stream: Default::default(),

#[cfg(feature = "gzip")]
gzip: true,
}
}

Expand Down Expand Up @@ -114,6 +120,14 @@ impl Builder {
}
}

/// Controls whether the payload is gzip-compressed before being sent to
/// CloudWatch.
///
/// Default: true
#[cfg(feature = "gzip")]
pub fn gzip(mut self, gzip: bool) -> Self {
    self.gzip = gzip;
    self
}

/// Initializes the CloudWatch metrics backend and runs it in a new thread.
///
/// Expects the [metrics::set_global_recorder] function as an argument as a safeguard against
Expand All @@ -125,22 +139,44 @@ impl Builder {
RecorderHandle,
) -> Result<(), metrics::SetRecorderError<RecorderHandle>>,
) -> Result<(), Error> {
collector::init(set_global_recorder, client, self.build_config()?);
let (config, force_flush_stream) = self.build_config()?;
collector::init(set_global_recorder, client, config, force_flush_stream);
Ok(())
}

/// Initializes the CloudWatch metrics backend and returns a Future that must be polled
///
/// Takes the [metrics::set_global_recorder] function as an argument as a safeguard
/// against accidentally using a different `metrics` version than is used in this crate.
#[deprecated = "Use init_async instead which allows for `metrics` to be fully initialized first before starting the driver task"]
pub async fn init_future(
    self,
    client: aws_sdk_cloudwatch::Client,
    set_global_recorder: fn(
        RecorderHandle,
    ) -> Result<(), metrics::SetRecorderError<RecorderHandle>>,
) -> Result<(), Error> {
    // Unlike `init_async`, this drives the background task to completion in
    // place instead of handing the driver future back to the caller.
    let (config, flush_stream) = self.build_config()?;
    collector::init_future(set_global_recorder, client, config, flush_stream)
        .await?
        .await;
    Ok(())
}

/// Initializes the CloudWatch metrics backend, returning the driver Future;
/// the caller must poll it (e.g. by spawning it) for metrics to be sent to
/// CloudWatch.
///
/// Takes the [metrics::set_global_recorder] function as an argument as a safeguard
/// against accidentally using a different `metrics` version than is used in this crate.
pub async fn init_async(
    self,
    client: aws_sdk_cloudwatch::Client,
    set_global_recorder: fn(
        RecorderHandle,
    ) -> Result<(), metrics::SetRecorderError<RecorderHandle>>,
) -> Result<impl Future<Output = ()>, Error> {
    let (config, flush_stream) = self.build_config()?;
    collector::init_future(set_global_recorder, client, config, flush_stream).await
}

#[doc(hidden)]
Expand All @@ -150,24 +186,32 @@ impl Builder {
set_global_recorder: fn(
RecorderHandle,
) -> Result<(), metrics::SetRecorderError<RecorderHandle>>,
) -> Result<(), Error> {
collector::init_future(set_global_recorder, client, self.build_config()?).await
}

fn build_config(self) -> Result<Config, Error> {
Ok(Config {
cloudwatch_namespace: extract_namespace(self.cloudwatch_namespace)?,
default_dimensions: self.default_dimensions,
storage_resolution: self.storage_resolution.unwrap_or(Resolution::Minute),
send_interval_secs: self.send_interval_secs.unwrap_or(10),
send_timeout_secs: self.send_timeout_secs.unwrap_or(4),
shutdown_signal: self
.shutdown_signal
.unwrap_or_else(|| Box::pin(future::pending()))
.shared(),
metric_buffer_size: self.metric_buffer_size,
force_flush_stream: self.force_flush_stream,
})
) -> Result<impl Future<Output = ()>, Error> {
let (config, force_flush_stream) = self.build_config()?;
collector::init_future(set_global_recorder, client, config, force_flush_stream).await
}

// Consumes the builder, applying defaults for unset options, and splits it into
// the collector `Config` and the optional force-flush stream (returned
// separately so the stream can be handed to the driver on its own).
#[allow(clippy::type_complexity)]
fn build_config(
    self,
) -> Result<(Config, Option<Pin<Box<dyn Stream<Item = ()> + Send>>>), Error> {
    // No shutdown signal configured means "run until the process exits".
    let shutdown_signal = self
        .shutdown_signal
        .unwrap_or_else(|| Box::pin(future::pending()))
        .shared();
    let config = Config {
        cloudwatch_namespace: extract_namespace(self.cloudwatch_namespace)?,
        default_dimensions: self.default_dimensions,
        storage_resolution: self.storage_resolution.unwrap_or(Resolution::Minute),
        send_interval_secs: self.send_interval_secs.unwrap_or(10),
        send_timeout_secs: self.send_timeout_secs.unwrap_or(4),
        shutdown_signal,
        metric_buffer_size: self.metric_buffer_size,
        #[cfg(feature = "gzip")]
        gzip: self.gzip,
    };
    Ok((config, self.force_flush_stream))
}
}

Expand All @@ -182,16 +226,22 @@ impl fmt::Debug for Builder {
shutdown_signal: _,
metric_buffer_size,
force_flush_stream: _,
#[cfg(feature = "gzip")]
gzip,
} = self;
f.debug_struct("Builder")
.field("cloudwatch_namespace", cloudwatch_namespace)
let mut f = f.debug_struct("Builder");
f.field("cloudwatch_namespace", cloudwatch_namespace)
.field("default_dimensions", default_dimensions)
.field("storage_resolution", storage_resolution)
.field("send_interval_secs", send_interval_secs)
.field("send_timeout_secs", send_timeout_secs)
.field("shutdown_signal", &"BoxFuture")
.field("metric_buffer_size", metric_buffer_size)
.field("force_flush_stream", &"dyn Stream")
.finish()
.field("force_flush_stream", &"dyn Stream");

#[cfg(feature = "gzip")]
f.field("gzip", gzip);

f.finish()
}
}
Loading
Loading