From b8ed37e3a1d6caa84f8d3e6e176d9bcb3e52a382 Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Sun, 1 Sep 2024 18:02:34 +0200 Subject: [PATCH 01/11] feat(all): initial commit --- .dockerignore | 9 ++ .github/dependabot.yaml | 10 ++ .gitignore | 48 +++++--- Cargo.toml | 25 ++++ Dockerfile | 7 ++ LICENSE => LICENSE.txt | 2 +- README.md | 25 +++- crates/cli/Cargo.toml | 41 +++++++ crates/cli/README.md | 15 +++ crates/cli/build.rs | 20 ++++ crates/cli/handler/instance.rs | 38 ++++++ crates/cli/handler/mod.rs | 8 ++ crates/cli/handler/registry.rs | 38 ++++++ crates/cli/main.rs | 51 ++++++++ crates/cli/middleware/mod.rs | 2 + crates/cli/protobuf/instance.proto | 11 ++ crates/cli/protobuf/registry.proto | 11 ++ crates/cli/service/app_config.rs | 56 +++++++++ crates/cli/service/mod.rs | 27 +++++ crates/cli/service/serv_config.rs | 0 crates/core/Cargo.toml | 21 ++++ crates/core/README.md | 9 ++ crates/core/lib.rs | 31 +++++ crates/task/Cargo.toml | 38 ++++++ crates/task/README.md | 9 ++ crates/task/context/failure.rs | 33 ++++++ crates/task/context/mod.rs | 20 ++++ crates/task/context/request.rs | 106 +++++++++++++++++ crates/task/context/response.rs | 91 +++++++++++++++ crates/task/context/state.rs | 1 + crates/task/handler/future.rs | 62 ++++++++++ crates/task/handler/layer.rs | 73 ++++++++++++ crates/task/handler/metric.rs | 32 ++++++ crates/task/handler/mod.rs | 165 ++++++++++++++++++++++++++ crates/task/handler/native.rs | 70 +++++++++++ crates/task/lib.rs | 49 ++++++++ crates/task/routing/builder.rs | 121 +++++++++++++++++++ crates/task/routing/index.rs | 143 +++++++++++++++++++++++ crates/task/routing/layers.rs | 28 +++++ crates/task/routing/mod.rs | 179 +++++++++++++++++++++++++++++ crates/type/Cargo.toml | 20 ++++ crates/type/README.md | 9 ++ crates/type/datatype/condition.rs | 12 ++ crates/type/datatype/mod.rs | 66 +++++++++++ crates/type/datatype/operation.rs | 10 ++ crates/type/lib.rs | 7 ++ crates/type/manifest/condition.rs | 19 +++ 
crates/type/manifest/mod.rs | 31 +++++ crates/type/manifest/operation.rs | 19 +++ rustfmt.toml | 3 + 50 files changed, 1904 insertions(+), 17 deletions(-) create mode 100644 .dockerignore create mode 100644 .github/dependabot.yaml create mode 100644 Cargo.toml create mode 100644 Dockerfile rename LICENSE => LICENSE.txt (97%) create mode 100644 crates/cli/Cargo.toml create mode 100644 crates/cli/README.md create mode 100644 crates/cli/build.rs create mode 100644 crates/cli/handler/instance.rs create mode 100644 crates/cli/handler/mod.rs create mode 100644 crates/cli/handler/registry.rs create mode 100644 crates/cli/main.rs create mode 100644 crates/cli/middleware/mod.rs create mode 100644 crates/cli/protobuf/instance.proto create mode 100644 crates/cli/protobuf/registry.proto create mode 100644 crates/cli/service/app_config.rs create mode 100644 crates/cli/service/mod.rs create mode 100644 crates/cli/service/serv_config.rs create mode 100644 crates/core/Cargo.toml create mode 100644 crates/core/README.md create mode 100644 crates/core/lib.rs create mode 100644 crates/task/Cargo.toml create mode 100644 crates/task/README.md create mode 100644 crates/task/context/failure.rs create mode 100644 crates/task/context/mod.rs create mode 100644 crates/task/context/request.rs create mode 100644 crates/task/context/response.rs create mode 100644 crates/task/context/state.rs create mode 100644 crates/task/handler/future.rs create mode 100644 crates/task/handler/layer.rs create mode 100644 crates/task/handler/metric.rs create mode 100644 crates/task/handler/mod.rs create mode 100644 crates/task/handler/native.rs create mode 100644 crates/task/lib.rs create mode 100644 crates/task/routing/builder.rs create mode 100644 crates/task/routing/index.rs create mode 100644 crates/task/routing/layers.rs create mode 100644 crates/task/routing/mod.rs create mode 100644 crates/type/Cargo.toml create mode 100644 crates/type/README.md create mode 100644 crates/type/datatype/condition.rs create 
mode 100644 crates/type/datatype/mod.rs create mode 100644 crates/type/datatype/operation.rs create mode 100644 crates/type/lib.rs create mode 100644 crates/type/manifest/condition.rs create mode 100644 crates/type/manifest/mod.rs create mode 100644 crates/type/manifest/operation.rs create mode 100644 rustfmt.toml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..e425057 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +# OS +Thumbs.db +.DS_Store + +# Editors +.vs/ +.vscode/ +.idea/ +.fleet/ diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 0000000..b1e5606 --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,10 @@ +version: 2 +updates: + + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + timezone: "Europe/Warsaw" + day: "friday" + time: "18:00" diff --git a/.gitignore b/.gitignore index d01bd1a..706bbd8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,21 +1,39 @@ -# Generated by Cargo -# will have compiled files and executables +# OS +Thumbs.db +.DS_Store +.ignore*/ + +# Editors +.vs/ +.vscode/ +.idea/ +.fleet/ + +# Lang: Rust debug/ target/ - -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock - -# These are backup files generated by rustfmt **/*.rs.bk - -# MSVC Windows builds of rustc generate these, which store debugging information *.pdb -# RustRover -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ \ No newline at end of file +# Output +dist/ +output/ +build/ + +# Binaries +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Environment +env/ +.env +.env* + +# Logs +logs/ +*.log +*.log* diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..cf0a9e0 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,25 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[workspace] +resolver = "2" +members = [ + "./crates/cli", + "./crates/core", + "./crates/task", +] + +[workspace.package] +version = "0.1.0" +edition = "2021" +license = "Axiston License 1.0" +publish = false + +authors = ["Axiston "] +repository = "https://github.com/axiston/runtime" +homepage = "https://github.com/axiston/runtime" +documentation = "https://docs.rs/axiston" + +[workspace.dependencies] +runtime-core = { path = "./crates/core", version = "0.1.0" } +runtime-task = { path = "./crates/task", version = "0.1.0" } +runtime-type = { path = "./crates/type", version = "0.1.0" } diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..417d8ce --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +# Compile & build the application. +FROM rust:latest AS build +WORKDIR /usr/src/runtime/ + +# Configurate & run the application. 
+FROM debian:buster-slim AS run +WORKDIR /usr/bin/runtime/ diff --git a/LICENSE b/LICENSE.txt similarity index 97% rename from LICENSE rename to LICENSE.txt index c0cdc68..568f639 100644 --- a/LICENSE +++ b/LICENSE.txt @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024 axiston +Copyright (c) 2024 Axiston Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 6a54eba..f520b18 100644 --- a/README.md +++ b/README.md @@ -1 +1,24 @@ -# runtime \ No newline at end of file +### axiston/runtime + +[![Build Status][action-badge]][action-url] +[![Crate Coverage][coverage-badge]][coverage-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[coverage-badge]: https://img.shields.io/codecov/c/github/axiston/runtime +[coverage-url]: https://app.codecov.io/gh/axiston/runtime + +A server capable of running native `Rust` or `TypeScript` tasks (via Deno +runtime). + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. 
+ +#### Usage + +```cmd +runtime --port 8080 +``` diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml new file mode 100644 index 0000000..802c8cc --- /dev/null +++ b/crates/cli/Cargo.toml @@ -0,0 +1,41 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "runtime-cli" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[[bin]] +name = "axiston" +path = "main.rs" + +[dependencies] +runtime-core = { workspace = true } + +clap = { version = "4.5", features = ["derive"] } +tokio = { version = "1.40", features = ["macros", "rt-multi-thread"] } +futures = { version = "0.3", features = [] } +anyhow = { version = "1.0", features = ["backtrace"] } + +tracing = { version = "0.1", features = [] } +tracing-subscriber = { version = "0.3", features = ["env-filter", "time"] } +tracing-opentelemetry = { version = "0.25", features = [] } +opentelemetry = { version = "0.24", features = [] } + +tonic = { version = "0.12", features = [] } +prost = { version = "0.13", features = [] } +tonic-types = { version = "0.12", features = [] } +prost-types = { version = "0.13", features = [] } + +[build-dependencies] +tonic-build = { version = "0.12", features = [] } +prost-build = { version = "0.13", features = [] } +anyhow = { version = "1.0", features = [] } diff --git a/crates/cli/README.md b/crates/cli/README.md new file mode 100644 index 0000000..a42a04f --- /dev/null +++ b/crates/cli/README.md @@ -0,0 +1,15 @@ +### axiston/cli + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. 
+ +#### User's Code + +- Run From Command Line +- Run In Container +- Run with Temporal.io diff --git a/crates/cli/build.rs b/crates/cli/build.rs new file mode 100644 index 0000000..23f4d08 --- /dev/null +++ b/crates/cli/build.rs @@ -0,0 +1,20 @@ +#![forbid(unsafe_code)] + +use std::path::PathBuf; + +fn main() -> anyhow::Result<()> { + let builder = tonic_build::configure() + .build_client(false) + .build_server(true) + .build_transport(true); + + let dir = PathBuf::from("./protobuf/"); + let instance = dir.join("./instance.proto"); + let registry = dir.join("./registry.proto"); + + let protos = [instance.as_path(), registry.as_path()]; + let includes = [dir.as_path()]; + builder.compile(&protos, &includes)?; + + Ok(()) +} diff --git a/crates/cli/handler/instance.rs b/crates/cli/handler/instance.rs new file mode 100644 index 0000000..c7c0475 --- /dev/null +++ b/crates/cli/handler/instance.rs @@ -0,0 +1,38 @@ +use tonic::{Request, Response, Status}; + +use crate::handler::instance::instance_proto::instance_server::{Instance, InstanceServer}; +use crate::handler::instance::instance_proto::{HelloRequest, HelloResponse}; +use crate::service::AppState; + +pub mod instance_proto { + tonic::include_proto!("instance"); +} + +/// TODO. +pub struct InstanceService { + state: AppState, +} + +impl InstanceService { + /// Returns a new [`InstanceService`]. + #[inline] + pub fn new(state: AppState) -> Self { + Self { state } + } + + /// Returns a `GRPC` service. + #[inline] + pub fn into_server(self) -> InstanceServer { + InstanceServer::new(self) + } +} + +#[tonic::async_trait] +impl Instance for InstanceService { + async fn hello( + &self, + request: Request, + ) -> Result, Status> { + todo!() + } +} diff --git a/crates/cli/handler/mod.rs b/crates/cli/handler/mod.rs new file mode 100644 index 0000000..59b404e --- /dev/null +++ b/crates/cli/handler/mod.rs @@ -0,0 +1,8 @@ +//! TODO. +//! 
+ +pub use crate::handler::instance::InstanceService; +pub use crate::handler::registry::RegistryService; + +mod instance; +mod registry; diff --git a/crates/cli/handler/registry.rs b/crates/cli/handler/registry.rs new file mode 100644 index 0000000..939fd40 --- /dev/null +++ b/crates/cli/handler/registry.rs @@ -0,0 +1,38 @@ +use tonic::{Request, Response, Status}; + +use crate::handler::registry::registry_proto::registry_server::{Registry, RegistryServer}; +use crate::handler::registry::registry_proto::{HelloRequest, HelloResponse}; +use crate::service::AppState; + +pub mod registry_proto { + tonic::include_proto!("registry"); +} + +/// TODO. +pub struct RegistryService { + state: AppState, +} + +impl RegistryService { + /// Returns a new [`RegistryService`]. + #[inline] + pub fn new(state: AppState) -> Self { + Self { state } + } + + /// Returns a `GRPC` service. + #[inline] + pub fn into_server(self) -> RegistryServer { + RegistryServer::new(self) + } +} + +#[tonic::async_trait] +impl Registry for RegistryService { + async fn hello( + &self, + request: Request, + ) -> Result, Status> { + todo!() + } +} diff --git a/crates/cli/main.rs b/crates/cli/main.rs new file mode 100644 index 0000000..43ee0bc --- /dev/null +++ b/crates/cli/main.rs @@ -0,0 +1,51 @@ +#![forbid(unsafe_code)] + +use std::net::{Ipv4Addr, SocketAddr}; + +use clap::Parser; +use tonic::transport::Server; + +use crate::handler::{InstanceService, RegistryService}; +use crate::service::{AppConfig, AppState}; + +mod handler; +mod middleware; +mod service; + +/// Command-line arguments. +#[derive(Debug, Parser)] +pub struct Args { + /// Bound server port. + #[arg(short, long, default_value_t = 3000)] + pub port: u16, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let args = Args::parse(); + + // Service. 
+ let config = AppConfig::builder().build(); + let state = AppState::new(config); + + let instance = InstanceService::new(state.clone()); + let instance = instance.into_server(); + + let registry = RegistryService::new(state); + let registry = registry.into_server(); + + // Listen. + let server_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, args.port)); + tracing::debug!( + target: "server:setup", port = args.port, + "server is listening on {}", server_addr, + ); + + Server::builder() + .add_service(instance) + .add_service(registry) + .serve(server_addr) + .await?; + + Ok(()) +} diff --git a/crates/cli/middleware/mod.rs b/crates/cli/middleware/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/cli/middleware/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/cli/protobuf/instance.proto b/crates/cli/protobuf/instance.proto new file mode 100644 index 0000000..0ea60bf --- /dev/null +++ b/crates/cli/protobuf/instance.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package instance; + +message HelloRequest {} + +message HelloResponse {} + +service Instance { + rpc Hello(HelloRequest) returns (HelloResponse); +} diff --git a/crates/cli/protobuf/registry.proto b/crates/cli/protobuf/registry.proto new file mode 100644 index 0000000..5b57eb0 --- /dev/null +++ b/crates/cli/protobuf/registry.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package registry; + +message HelloRequest {} + +message HelloResponse {} + +service Registry { + rpc Hello(HelloRequest) returns (HelloResponse); +} diff --git a/crates/cli/service/app_config.rs b/crates/cli/service/app_config.rs new file mode 100644 index 0000000..971bbaa --- /dev/null +++ b/crates/cli/service/app_config.rs @@ -0,0 +1,56 @@ +/// App [`state`] configuration. +/// +/// [`state`]: crate::service::AppState +#[derive(Debug, Clone)] +#[must_use = "configs do nothing unless you use them"] +pub struct AppConfig {} + +impl AppConfig { + /// Returns a new [`AppBuilder`]. 
+ #[inline] + pub fn builder() -> AppBuilder { + AppBuilder::new() + } +} + +impl Default for AppConfig { + #[inline] + fn default() -> Self { + Self::builder().build() + } +} + +/// [`AppConfig`] builder. +#[derive(Debug, Default, Clone)] +#[must_use = "configs do nothing unless you use them"] +pub struct AppBuilder {} + +impl AppBuilder { + /// Returns a new [`AppBuilder`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`AppConfig`]. + pub fn build(self) -> AppConfig { + AppConfig {} + } +} + +#[cfg(test)] +mod test { + use crate::service::{AppBuilder, AppConfig}; + + #[test] + fn config_from_default() -> anyhow::Result<()> { + let _ = AppConfig::default(); + Ok(()) + } + + #[test] + fn config_from_builder() -> anyhow::Result<()> { + let _ = AppBuilder::new().build(); + Ok(()) + } +} diff --git a/crates/cli/service/mod.rs b/crates/cli/service/mod.rs new file mode 100644 index 0000000..960260f --- /dev/null +++ b/crates/cli/service/mod.rs @@ -0,0 +1,27 @@ +//! TODO. +//! + +pub use crate::service::app_config::{AppBuilder, AppConfig}; +pub use crate::service::serv_config::Args; + +mod app_config; +mod serv_config; + +/// Application state. +/// +/// Used by [`handlers`]. +/// +/// [`handlers`]: crate::handler +#[derive(Debug, Clone)] +#[must_use = "state does nothing unless you use it"] +pub struct AppState { + // runtime: Rc, +} + +impl AppState { + /// Creates a new [`AppState`]. 
+ #[inline] + pub fn new(config: AppConfig) -> Self { + Self {} + } +} diff --git a/crates/cli/service/serv_config.rs b/crates/cli/service/serv_config.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml new file mode 100644 index 0000000..7f3fc2c --- /dev/null +++ b/crates/core/Cargo.toml @@ -0,0 +1,21 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "runtime-core" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[lib] +path = "lib.rs" + +[dependencies] +runtime-task = { workspace = true } +runtime-type = { workspace = true } diff --git a/crates/core/README.md b/crates/core/README.md new file mode 100644 index 0000000..de96a7c --- /dev/null +++ b/crates/core/README.md @@ -0,0 +1,9 @@ +### axiston/core + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/crates/core/lib.rs b/crates/core/lib.rs new file mode 100644 index 0000000..eb6ed5e --- /dev/null +++ b/crates/core/lib.rs @@ -0,0 +1,31 @@ +#![forbid(unsafe_code)] +#![doc = include_str!("./README.md")] + +//! TODO. + +use std::collections::HashMap; + +use runtime_task::routing::{Index, Router}; +use runtime_type::datatype::condition::{ConditionRequestData, ConditionResponseData}; +use runtime_type::datatype::operation::{OperationRequestData, OperationResponseData}; +use runtime_type::manifest::condition::ConditionManifest; +use runtime_type::manifest::operation::OperationManifest; + +#[derive(Debug, Default, Clone)] +pub struct AppRouter { + // groups: HashMap + conditions: Router, + operations: Router, +} + +impl AppRouter { + /// Returns an empty [`Router`]. 
+ #[inline] + pub fn new() -> Self { + Self::default() + } + + pub fn retrieve_manifests() {} + + pub fn try_execute() {} +} diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml new file mode 100644 index 0000000..b3f6843 --- /dev/null +++ b/crates/task/Cargo.toml @@ -0,0 +1,38 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "runtime-task" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = "lib.rs" + +[features] +serde = ["dep:serde", "ecow/serde"] +trace = ["dep:tracing"] + +[dependencies] +tower = { version = "0.5", features = ["load", "util"] } +futures = { version = "0.3", features = [] } +pin-project-lite = { version = "0.2", features = [] } +thiserror = { version = "1.0", features = [] } +anyhow = { version = "1.0", features = ["backtrace"] } + +deunicode = { version = "1.6", features = [] } +ecow = { version = "0.2", features = [] } + +serde = { version = "1.0", optional = true, features = ["derive"] } +tracing = { version = "0.1", optional = true, features = [] } diff --git a/crates/task/README.md b/crates/task/README.md new file mode 100644 index 0000000..c824ab5 --- /dev/null +++ b/crates/task/README.md @@ -0,0 +1,9 @@ +### runtime/task + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/crates/task/context/failure.rs b/crates/task/context/failure.rs new file mode 100644 index 0000000..94d28bc --- /dev/null +++ b/crates/task/context/failure.rs @@ -0,0 +1,33 @@ +use std::error::Error; + +/// Unrecoverable failure duration [`TaskHandler`] execution. 
+/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +#[error("failure during `TaskHandler` execution")] +pub struct TaskError { + // name: String, + // message: String, + // explain: String, +} + +impl TaskError { + /// Returns a new [`TaskError`]. + #[inline] + pub fn new(error: T) -> Self { + Self {} + } +} + +#[cfg(test)] +mod test { + use crate::context::TaskError; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let _ = TaskError::new(()); + Ok(()) + } +} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs new file mode 100644 index 0000000..bf9c828 --- /dev/null +++ b/crates/task/context/mod.rs @@ -0,0 +1,20 @@ +//! [`TaskRequest`] and [`TaskResponse`] types. + +pub use crate::context::failure::TaskError; +pub use crate::context::request::TaskRequest; +pub use crate::context::response::TaskResponse; + +pub mod builder { + //! [`TaskRequest`] and [`TaskResponse`] builders. + //! + //! [`TaskRequest`]: crate::context::TaskRequest + //! [`TaskResponse`]: crate::context::TaskResponse + + pub use super::request::TaskRequestBuilder; + pub use super::response::TaskResponseBuilder; +} + +mod failure; +mod request; +mod response; +mod state; diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs new file mode 100644 index 0000000..406d973 --- /dev/null +++ b/crates/task/context/request.rs @@ -0,0 +1,106 @@ +use std::fmt; +use std::ops::{Deref, DerefMut}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use crate::routing::Index; + +/// Serializable [`TaskHandler`] service request. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Clone)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[must_use = "requests do nothing unless you serialize them"] +pub struct TaskRequest { + index: Index, + inner: T, +} + +impl TaskRequest { + /// Returns a new [`TaskRequest`]. 
+ #[inline] + pub fn new(index: Index, inner: T) -> Self { + Self { index, inner } + } + + /// Returns a new [`TaskRequestBuilder`]. + #[inline] + pub fn builder(index: Index, inner: T) -> TaskRequestBuilder { + TaskRequestBuilder::new(index, inner) + } + + /// Returns the reference to the [`Index`]. + #[inline] + pub fn index(&self) -> &Index { + &self.index + } + + /// Returns the inner data. + #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl Deref for TaskRequest { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TaskRequest { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl fmt::Debug for TaskRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskRequest").finish_non_exhaustive() + } +} + +/// [`TaskHandler`] service request builder. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Clone)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct TaskRequestBuilder { + index: Index, + inner: T, +} + +impl TaskRequestBuilder { + /// Returns a new [`TaskRequestBuilder`]. + #[inline] + pub fn new(index: Index, inner: T) -> Self { + Self { index, inner } + } + + /// Returns a new [`TaskRequest`]. 
+ pub fn build(self) -> TaskRequest { + TaskRequest { + index: self.index, + inner: self.inner, + } + } +} + +#[cfg(test)] +mod test { + use crate::context::TaskRequest; + use crate::routing::Index; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let index = Index::new("request-id"); + let _ = TaskRequest::builder(index, 5).build(); + Ok(()) + } +} diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs new file mode 100644 index 0000000..e5df213 --- /dev/null +++ b/crates/task/context/response.rs @@ -0,0 +1,91 @@ +use std::fmt; +use std::ops::{Deref, DerefMut}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +/// Deserializable [`TaskHandler`] service response. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Clone)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[must_use = "responses do nothing unless you serialize them"] +pub struct TaskResponse { + inner: T, +} + +impl TaskResponse { + /// Returns a new [`TaskResponse`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { inner } + } + + /// Returns a new [`TaskResponseBuilder`]. + #[inline] + pub fn builder(inner: T) -> TaskResponseBuilder { + TaskResponseBuilder::new(inner) + } + + /// Returns the inner data. + #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl Deref for TaskResponse { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TaskResponse { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl fmt::Debug for TaskResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskResponse").finish_non_exhaustive() + } +} + +/// [`TaskHandler`] service response builder. 
+/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Default, Clone)] +#[must_use = "responses do nothing unless you serialize them"] +pub struct TaskResponseBuilder { + inner: T, +} + +impl TaskResponseBuilder { + /// Returns a new [`TaskResponseBuilder`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { inner } + } + + /// Returns a new [`TaskResponse`]. + pub fn build(self) -> TaskResponse { + TaskResponse { inner: self.inner } + } +} + +#[cfg(test)] +mod test { + use crate::context::TaskResponse; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let _ = TaskResponse::builder(()).build(); + Ok(()) + } +} diff --git a/crates/task/context/state.rs b/crates/task/context/state.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/crates/task/context/state.rs @@ -0,0 +1 @@ + diff --git a/crates/task/handler/future.rs b/crates/task/handler/future.rs new file mode 100644 index 0000000..42cb4fe --- /dev/null +++ b/crates/task/handler/future.rs @@ -0,0 +1,62 @@ +//! Futures types for [`TaskHandler`]s. +//! +//! [`TaskHandler`]: crate::handler::TaskHandler + +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use futures::future::BoxFuture; +use futures::FutureExt; +use pin_project_lite::pin_project; + +use crate::context::{TaskError, TaskResponse}; + +pin_project! { + /// Opaque [`Future`] return type for [`Task::call`]. + /// + /// [`Task::call`]: crate::task::Task::call + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct TaskFuture { + #[pin] fut: BoxFuture<'static, Result, TaskError>>, + } +} + +impl TaskFuture { + /// Returns a new [`TaskFuture`]. 
+ #[inline] + pub fn new(fut: F) -> Self + where + F: Future, TaskError>>, + F: Sized + Send + 'static, + { + Self { fut: fut.boxed() } + } +} + +impl From, TaskError>>> for TaskFuture { + #[inline] + fn from(fut: BoxFuture<'static, Result, TaskError>>) -> Self { + Self { fut } + } +} + +impl Future for TaskFuture { + type Output = Result, TaskError>; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.fut.poll(cx) + } +} + +#[cfg(test)] +mod test { + use crate::Result; + + #[test] + fn build() -> Result<()> { + Ok(()) + } +} diff --git a/crates/task/handler/layer.rs b/crates/task/handler/layer.rs new file mode 100644 index 0000000..d87d49b --- /dev/null +++ b/crates/task/handler/layer.rs @@ -0,0 +1,73 @@ +use std::marker::PhantomData; + +use tower::{Layer, Service}; + +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::TaskHandler; + +/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. +pub struct TaskHandlerLayer { + manifest: M, + inner: PhantomData, + request: PhantomData, + response: PhantomData, +} + +impl TaskHandlerLayer { + /// Returns a new [`TaskHandlerLayer`]. 
+ #[inline] + pub fn new(manifest: M) -> Self { + Self { + manifest, + inner: PhantomData, + request: PhantomData, + response: PhantomData, + } + } +} + +impl Default for TaskHandlerLayer +where + M: Default, +{ + #[inline] + fn default() -> Self { + Self { + manifest: M::default(), + inner: PhantomData, + request: PhantomData, + response: PhantomData, + } + } +} + +impl Layer for TaskHandlerLayer +where + M: Clone, + T: 'static, + U: 'static, + S: Service + Clone + Send + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Service = TaskHandler; + + #[inline] + fn layer(&self, inner: S) -> Self::Service { + TaskHandler::new(self.manifest.clone(), inner) + } +} + +#[cfg(test)] +mod test { + use crate::handler::TaskHandlerLayer; + use crate::Result; + + #[test] + fn layer() -> Result<()> { + let _ = TaskHandlerLayer::<(), u32, u32>::new(()); + Ok(()) + } +} diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs new file mode 100644 index 0000000..1d5417b --- /dev/null +++ b/crates/task/handler/metric.rs @@ -0,0 +1,32 @@ +//! [`Load`] metric types for [`TaskHandler`]s. +//! +//! [`Load`]: tower::load::Load +//! [`TaskHandler`]: crate::handler::TaskHandler + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Default, Clone, PartialOrd, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[must_use = "metrics do nothing unless you serialize them"] +pub struct TaskMetric {} + +impl TaskMetric { + /// Returns a new [`TaskMetric`]. 
+ #[inline] + pub fn new() -> Self { + Self::default() + } +} + +#[cfg(test)] +mod test { + use crate::handler::metric::TaskMetric; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let _ = TaskMetric::new(); + Ok(()) + } +} diff --git a/crates/task/handler/mod.rs b/crates/task/handler/mod.rs new file mode 100644 index 0000000..69bbf1e --- /dev/null +++ b/crates/task/handler/mod.rs @@ -0,0 +1,165 @@ +//! [`TaskHandler`] service, its future and metrics. + +use std::fmt; +use std::task::{Context, Poll}; + +use tower::load::Load; +use tower::util::BoxCloneService; +use tower::{Service, ServiceBuilder}; + +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::future::TaskFuture; +pub use crate::handler::layer::TaskHandlerLayer; +use crate::handler::metric::TaskMetric; +use crate::handler::native::NativeTask; + +pub mod future; +mod layer; +pub mod metric; +pub mod native; + +/// Unified `tower::`[`Service`] for executing [`tasks`]. +/// +/// Opaque [`BoxCloneService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. +/// +/// [`tasks`]: crate::context +#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] +pub struct TaskHandler { + manifest: M, + inner: BoxCloneService, TaskResponse, TaskError>, +} + +impl TaskHandler { + /// Returns a new [`TaskHandler`]. + #[inline] + pub fn new(manifest: M, inner: S) -> Self + where + T: 'static, + U: 'static, + S: Service + Clone + Send + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, + { + let inner = ServiceBuilder::new() + .map_request(From::from) + .map_response(Into::into) + .map_err(Into::into) + .service(inner); + + Self { + manifest, + inner: BoxCloneService::new(inner), + } + } + + /// Returns a new [`NativeTask`] wrapped in [`TaskHandler`]. 
+ pub fn native(manifest: M) -> Self + where + T: Send + 'static, + U: Send + 'static, + { + Self::new(manifest, NativeTask::new()) + } + + /// Returns a reference to the manifest data. + #[inline] + pub fn manifest_ref(&self) -> &M { + &self.manifest + } + + /// Returns a mutable reference to the manifest data. + #[inline] + pub fn manifest_mut(&mut self) -> &mut M { + &mut self.manifest + } + + /// Maps an `TaskHandler` to `TaskHandler` by applying a function to a contained service. + pub fn map(self, f: F) -> TaskHandler + where + F: FnOnce( + BoxCloneService, TaskResponse, TaskError>, + ) -> BoxCloneService, TaskResponse, TaskError>, + { + TaskHandler { + manifest: self.manifest, + inner: f(self.inner), + } + } + + /// Estimates the service's current load. + pub fn metrics(&self) -> TaskMetric { + TaskMetric::new() + } +} + +impl Clone for TaskHandler +where + M: Clone, +{ + fn clone(&self) -> Self { + Self { + manifest: self.manifest.clone(), + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for TaskHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskHandler").finish_non_exhaustive() + } +} + +impl Service> for TaskHandler { + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + #[inline] + fn call(&mut self, req: TaskRequest) -> Self::Future { + self.inner.call(req).into() + } +} + +impl Load for TaskHandler { + type Metric = TaskMetric; + + #[inline] + fn load(&self) -> Self::Metric { + self.metrics() + } +} + +#[cfg(test)] +mod test { + use tower::{service_fn, ServiceBuilder}; + + use crate::context::{TaskError, TaskRequest, TaskResponse}; + use crate::handler::{TaskHandler, TaskHandlerLayer}; + use crate::Result; + + async fn handle(request: TaskRequest) -> Result, TaskError> { + Ok(TaskResponse::new(request.into_inner())) + } + + #[test] + fn service() -> Result<()> { 
+ let _ = TaskHandler::new((), service_fn(handle)); + Ok(()) + } + + #[test] + fn layer() -> Result<()> { + let _ = ServiceBuilder::new() + .layer(TaskHandlerLayer::new(())) + .service(service_fn(handle)); + Ok(()) + } +} diff --git a/crates/task/handler/native.rs b/crates/task/handler/native.rs new file mode 100644 index 0000000..e293739 --- /dev/null +++ b/crates/task/handler/native.rs @@ -0,0 +1,70 @@ +//! TODO. +//! + +use std::fmt; +use std::marker::PhantomData; +use std::task::{Context, Poll}; + +use tower::Service; + +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::future::TaskFuture; + +/// TODO. +#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] +pub struct NativeTask { + request: PhantomData, + response: PhantomData, +} + +impl NativeTask { + /// Returns a new [`NativeTask`]. + #[inline] + pub fn new() -> Self { + Self { + request: PhantomData, + response: PhantomData, + } + } +} + +impl Clone for NativeTask { + fn clone(&self) -> Self { + Self { + request: PhantomData, + response: PhantomData, + } + } +} + +impl fmt::Debug for NativeTask { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NativeTask").finish_non_exhaustive() + } +} + +impl Service> for NativeTask { + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + todo!() + } + + #[inline] + fn call(&mut self, req: TaskRequest) -> Self::Future { + todo!() + } +} + +#[cfg(test)] +mod test { + use crate::Result; + + #[test] + fn build() -> Result<()> { + Ok(()) + } +} diff --git a/crates/task/lib.rs b/crates/task/lib.rs new file mode 100644 index 0000000..8a26ecb --- /dev/null +++ b/crates/task/lib.rs @@ -0,0 +1,49 @@ +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! ```rust +//! use tower::{service_fn, Service}; +//! 
use runtime_task::context::{TaskError, TaskRequest, TaskResponse}; +//! use runtime_task::handler::TaskHandler; +//! use runtime_task::routing::{Index, Layers, Router}; +//! +//! async fn handle(request: TaskRequest) -> Result, TaskError> { +//! Ok(TaskResponse::new(request.into_inner())) +//! } +//! +//! let layers = Layers::default(); +//! let mut router = Router::new(layers); +//! +//! let index = Index::new("task01"); +//! let handler = TaskHandler::new((), service_fn(handle)); +//! router.route(index.clone(), handler); +//! +//! let fut = async { +//! let request = TaskRequest::new(index, 0u32); +//! let response = router.call(request).await; +//! }; +//! +//! ``` + +pub mod context; +pub mod handler; +pub mod routing; + +/// Unrecoverable failure of the [`Router`]. +/// +/// Includes all error types that may occur. +/// +/// [`Router`]: routing::Router +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { + #[error("called task failure: {0}")] + Task(#[from] context::TaskError), // Task not found + // Mismatch arguments +} + +/// Specialized [`Result`] alias for the [`Error`] type. +/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/task/routing/builder.rs b/crates/task/routing/builder.rs new file mode 100644 index 0000000..a01e9c8 --- /dev/null +++ b/crates/task/routing/builder.rs @@ -0,0 +1,121 @@ +use std::fmt; +use std::marker::PhantomData; + +use tower::{ServiceBuilder, ServiceExt}; + +use crate::context::{TaskRequest, TaskResponse}; +use crate::handler::TaskHandler; +use crate::routing::Layers; +use crate::Result; + +/// Declarative `tower::`[`Layer`] builder. +/// +/// [`Layer`]: tower::Layer +pub struct LayerBuilder { + layers: Option, + manifest: PhantomData, + request: PhantomData, + response: PhantomData, +} + +impl LayerBuilder { + /// Returns a new [`LayerBuilder`] with default [`Layers`]. 
+ #[inline] + pub fn new(layers: Layers) -> Self { + Self { + layers: Some(layers), + manifest: PhantomData, + request: PhantomData, + response: PhantomData, + } + } + + /// Inserts or replaces default [`Layers`]. + #[inline] + pub fn replace_layers(&mut self, layers: Layers) -> Option { + self.layers.replace(layers) + } + + /// Applies default [`Layers`] to the `handler`. + pub fn apply(&self, handler: TaskHandler) -> TaskHandler + where + T: 'static, + U: 'static, + { + let builder = ServiceBuilder::new(); + handler.map(|svc| builder.service(svc)) + } + + /// Merges default [`Layers`] with provided and applies to the `handler`. + pub fn apply_layers( + &self, + handler: TaskHandler, + layers: Layers, + ) -> TaskHandler + where + T: 'static, + U: 'static, + { + let builder = ServiceBuilder::new(); + handler.map(|svc| builder.service(svc)) + } + + /// Applies specified [`Layer`]s to the [`TaskHandler`]. + pub async fn execute( + &self, + handler: TaskHandler, + request: TaskRequest, + ) -> Result> + where + T: 'static, + U: 'static, + { + let handler = self.apply(handler); + let response = handler.oneshot(request).await; + response.map_err(Into::into) + } + + /// Applies specified [`Layer`]s to the [`TaskHandler`]. 
+ pub async fn execute_with_layers( + &self, + handler: TaskHandler, + request: TaskRequest, + layers: Layers, + ) -> Result> + where + T: 'static, + U: 'static, + { + let handler = self.apply_layers(handler, layers); + let response = handler.oneshot(request).await; + response.map_err(Into::into) + } +} + +impl Default for LayerBuilder { + fn default() -> Self { + Self { + layers: None, + manifest: PhantomData, + request: PhantomData, + response: PhantomData, + } + } +} + +impl Clone for LayerBuilder { + fn clone(&self) -> Self { + Self { + layers: self.layers.clone(), + manifest: PhantomData, + request: PhantomData, + response: PhantomData, + } + } +} + +impl fmt::Debug for LayerBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("LayerBuilder").finish_non_exhaustive() + } +} diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs new file mode 100644 index 0000000..7db80b7 --- /dev/null +++ b/crates/task/routing/index.rs @@ -0,0 +1,143 @@ +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::marker::PhantomData; +use std::ops::Deref; +use std::str::FromStr; + +use deunicode::deunicode_with_tofu; +use ecow::EcoString; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use crate::{Error, Result}; + +/// Opaque and unique [`TaskHandler`] identifier. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[must_use = "ids do nothing unless you use them"] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Index { + inner: EcoString, + marker: PhantomData, +} + +impl Index { + /// Returns a new [`Index`]. + #[inline] + pub fn new(inner: &str) -> Self { + Self { + inner: inner.into(), + marker: PhantomData, + } + } + + /// Parses a new [`Index`]. + #[inline] + pub fn parse(index: impl AsRef) -> Result { + index.as_ref().parse::>() + } + + /// Extracts a string slice containing the entire id. 
+ #[inline] + pub fn as_str(&self) -> &str { + self.inner.as_str() + } +} + +impl FromStr for Index { + type Err = Error; + + fn from_str(inner: &str) -> Result { + let tofu = "\u{FFFD}"; + let deunicoded = deunicode_with_tofu(inner, tofu); + if deunicoded.contains(tofu) { + // return Err(Error::InvalidId(deunicoded)); + panic!("invalid id") + } + + Ok(deunicoded.to_lowercase().replace(' ', "-").into()) + } +} + +impl Clone for Index { + #[inline] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl From for Index { + #[inline] + fn from(value: String) -> Self { + Self::new(value.as_str()) + } +} + +impl From<&str> for Index { + #[inline] + fn from(value: &str) -> Self { + Self::new(value) + } +} + +impl Hash for Index { + #[inline] + fn hash(&self, state: &mut H) { + self.inner.hash(state) + } +} + +impl PartialEq for Index { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.inner.eq(&other.inner) + } +} + +impl Eq for Index {} + +impl Deref for Index { + type Target = str; + + #[inline] + fn deref(&self) -> &Self::Target { + self.as_str() + } +} + +impl fmt::Display for Index { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_str(), f) + } +} + +impl fmt::Debug for Index { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.as_str(), f) + } +} + +#[cfg(test)] +mod test { + use crate::routing::Index; + use crate::Result; + + #[test] + pub fn instance() -> Result<()> { + let id = Index::<()>::new("service-entity"); + assert_eq!(id.as_str(), "service-entity"); + Ok(()) + } + + #[test] + pub fn parse() -> Result<()> { + let id = "Service Entity".parse::>()?; + assert_eq!(id.as_str(), "service-entity"); + Ok(()) + } +} diff --git a/crates/task/routing/layers.rs b/crates/task/routing/layers.rs new file mode 100644 index 0000000..92ca295 --- /dev/null +++ b/crates/task/routing/layers.rs @@ -0,0 +1,28 @@ +use 
std::time::Duration; + +/// Applied `tower::`[`Layer`]s configuration. +/// +/// [`Layer`]: tower::Layer +#[derive(Debug, Default, Clone)] +pub struct Layers { + pub timeout_start_to_close: Duration, + pub timeout_before_start: Duration, + pub timeout_before_close: Duration, + + pub limit_concurrency_task: u32, + pub limit_cpu_consumption: u32, + pub limit_ram_consumption: u32, + + pub retry_initial_interval: Duration, + pub retry_maximum_interval: Duration, + pub retry_backoff_coefficient: u32, + pub retry_maximum_attempts: u32, +} + +impl Layers { + /// Returns a new [`Layers`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs new file mode 100644 index 0000000..24e82ff --- /dev/null +++ b/crates/task/routing/mod.rs @@ -0,0 +1,179 @@ +//! Service [`Router`] and declarative [`Layers`]. +//! + +use std::collections::HashMap; +use std::fmt; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll}; + +use tower::load::Load; +use tower::{Layer, Service, ServiceExt}; + +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::future::TaskFuture; +use crate::handler::metric::TaskMetric; +use crate::handler::TaskHandler; +use crate::routing::builder::LayerBuilder; +pub use crate::routing::index::Index; +pub use crate::routing::layers::Layers; + +mod builder; +mod index; +mod layers; + +/// Collection of all registered [`TaskHandler`]s. +pub struct Router { + inner: Arc>>, +} + +struct RouterInner { + builder: LayerBuilder, + tasks: HashMap>, +} + +impl Router { + /// Returns an empty [`Router`]. + pub fn new(layers: Layers) -> Self { + let inner = Arc::new(Mutex::new(RouterInner { + builder: LayerBuilder::new(layers), + tasks: HashMap::default(), + })); + + Self { inner } + } + + /// Inserts or replaces default [`Layers`]. 
+ #[inline] + pub fn with_layers(&mut self, layers: Layers) { + let mut guard = self.inner.lock().unwrap(); + let _ = guard.builder.replace_layers(layers); + } + + /// Inserts or replaces [`TaskHandler`] at the [`Index`]. + #[inline] + pub fn route(&mut self, index: Index, handler: TaskHandler) { + let mut guard = self.inner.lock().unwrap(); + let _ = guard.tasks.insert(index, handler); + } + + /// Returns a [`TaskHandler`] corresponding to the [`Index`]. + #[inline] + pub fn find(&self, index: &Index) -> Option> + where + M: Clone, + T: 'static, + U: 'static, + { + let guard = self.inner.lock().unwrap(); + let handler = guard.tasks.get(index).cloned(); + handler.map(|handler| guard.builder.apply(handler)) + } + + /// Returns a [`TaskHandler`] with corresponding to the [`Index`]. + #[inline] + pub fn find_layered(&self, index: &Index, layers: Layers) -> Option> + where + M: Clone, + T: 'static, + U: 'static, + { + let guard = self.inner.lock().unwrap(); + let handler = guard.tasks.get(index).cloned(); + handler.map(|handler| guard.builder.apply_layers(handler, layers)) + } + + /// Returns the number of [`TaskHandler`]s in the [`Router`]. + #[inline] + pub fn len(&self) -> usize { + let guard = self.inner.lock().unwrap(); + guard.tasks.len() + } + + /// Returns `true` if the [`Router`] contains no [`TaskHandler`]s. 
+ #[inline] + pub fn is_empty(&self) -> bool { + let guard = self.inner.lock().unwrap(); + guard.tasks.is_empty() + } +} + +impl Default for Router { + fn default() -> Self { + let inner = Arc::new(Mutex::new(RouterInner { + builder: LayerBuilder::default(), + tasks: HashMap::default(), + })); + + Self { inner } + } +} + +impl Clone for Router +where + M: Clone, +{ + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { inner } + } +} + +impl fmt::Debug for Router { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Router").finish_non_exhaustive() + } +} + +impl Service> for Router +where + M: Clone + Send + 'static, + T: Send + 'static, + U: 'static, +{ + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: TaskRequest) -> Self::Future { + match self.find(req.index()) { + Some(handler) => { + let fut = async move { handler.oneshot(req).await }; + TaskFuture::new(fut) + } + None => { + // TODO: Use a properly formatted error. + let fut = async { Err(TaskError::new(())) }; + TaskFuture::new(fut) + } + } + } +} + +impl Load for Router { + type Metric = TaskMetric; + + #[inline] + fn load(&self) -> Self::Metric { + // TODO: Call .load() of the underlying service. 
+ TaskMetric::new() + } +} + +#[cfg(test)] +mod test { + use crate::routing::{Layers, Router}; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let layers = Layers::new(); + let _ = Router::<(), i32, i32>::new(layers); + Ok(()) + } +} diff --git a/crates/type/Cargo.toml b/crates/type/Cargo.toml new file mode 100644 index 0000000..bad7cf2 --- /dev/null +++ b/crates/type/Cargo.toml @@ -0,0 +1,20 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "runtime-type" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[lib] +path = "lib.rs" + +[dependencies] +serde = { version = "1.0", features = ["derive"] } diff --git a/crates/type/README.md b/crates/type/README.md new file mode 100644 index 0000000..de96a7c --- /dev/null +++ b/crates/type/README.md @@ -0,0 +1,9 @@ +### axiston/core + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/crates/type/datatype/condition.rs b/crates/type/datatype/condition.rs new file mode 100644 index 0000000..c36bb1b --- /dev/null +++ b/crates/type/datatype/condition.rs @@ -0,0 +1,12 @@ +//! [`ConditionRequestData`] and [`ConditionResponseData`] types. +//! + +use serde::{Deserialize, Serialize}; + +use crate::datatype::{RequestData, ResponseData}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct ConditionRequestData {} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ConditionResponseData {} diff --git a/crates/type/datatype/mod.rs b/crates/type/datatype/mod.rs new file mode 100644 index 0000000..3700abe --- /dev/null +++ b/crates/type/datatype/mod.rs @@ -0,0 +1,66 @@ +//! [`RequestData`] and [`ResponseData`] types. +//! 
+ +use serde::{Deserialize, Serialize}; + +use crate::datatype::condition::{ConditionRequestData, ConditionResponseData}; +use crate::datatype::operation::{OperationRequestData, OperationResponseData}; + +pub mod condition; +pub mod operation; + +#[derive(Debug, Serialize, Deserialize)] +pub enum RequestData { + Condition(ConditionRequestData), + Operation(OperationRequestData), +} + +impl RequestData { + /// Returns a new [`RequestData`]. + #[inline] + pub fn new(data: impl Into) -> Self { + data.into() + } +} + +impl From for RequestData { + #[inline] + fn from(value: ConditionRequestData) -> Self { + Self::Condition(value) + } +} + +impl From for RequestData { + #[inline] + fn from(value: OperationRequestData) -> Self { + Self::Operation(value) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum ResponseData { + Condition(ConditionResponseData), + Operation(OperationResponseData), +} + +impl ResponseData { + /// Returns a new [`ResponseData`]. + #[inline] + pub fn new(data: impl Into) -> Self { + data.into() + } +} + +impl From for ResponseData { + #[inline] + fn from(value: ConditionResponseData) -> Self { + Self::Condition(value) + } +} + +impl From for ResponseData { + #[inline] + fn from(value: OperationResponseData) -> Self { + Self::Operation(value) + } +} diff --git a/crates/type/datatype/operation.rs b/crates/type/datatype/operation.rs new file mode 100644 index 0000000..3817b40 --- /dev/null +++ b/crates/type/datatype/operation.rs @@ -0,0 +1,10 @@ +//! [`OperationRequestData`] and [`OperationResponseData`] types. +//! + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct OperationRequestData {} + +#[derive(Debug, Serialize, Deserialize)] +pub struct OperationResponseData {} diff --git a/crates/type/lib.rs b/crates/type/lib.rs new file mode 100644 index 0000000..0148b19 --- /dev/null +++ b/crates/type/lib.rs @@ -0,0 +1,7 @@ +#![forbid(unsafe_code)] +#![doc = include_str!("./README.md")] + +//! TODO. 
+ +pub mod datatype; +pub mod manifest; diff --git a/crates/type/manifest/condition.rs b/crates/type/manifest/condition.rs new file mode 100644 index 0000000..ad23753 --- /dev/null +++ b/crates/type/manifest/condition.rs @@ -0,0 +1,19 @@ +//! Associated [`ConditionManifest`] types. +//! + +use serde::{Deserialize, Serialize}; + +/// Associated trigger metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct ConditionManifest {} + +impl ConditionManifest { + /// Returns a new [`ConditionManifest`]. + /// + /// Used for testing. + #[inline] + pub fn new(name: &str) -> Self { + Self {} + } +} diff --git a/crates/type/manifest/mod.rs b/crates/type/manifest/mod.rs new file mode 100644 index 0000000..48a7cf7 --- /dev/null +++ b/crates/type/manifest/mod.rs @@ -0,0 +1,31 @@ +//! Associated [`Manifest`] types. +//! + +use serde::{Deserialize, Serialize}; + +use crate::manifest::condition::ConditionManifest; +use crate::manifest::operation::OperationManifest; + +pub mod condition; +pub mod operation; + +/// Associated metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Manifest { + Condition(ConditionManifest), + Operation(OperationManifest), +} + +impl From for Manifest { + #[inline] + fn from(value: ConditionManifest) -> Self { + Self::Condition(value) + } +} + +impl From for Manifest { + #[inline] + fn from(value: OperationManifest) -> Self { + Self::Operation(value) + } +} diff --git a/crates/type/manifest/operation.rs b/crates/type/manifest/operation.rs new file mode 100644 index 0000000..62eb0f7 --- /dev/null +++ b/crates/type/manifest/operation.rs @@ -0,0 +1,19 @@ +//! Associated [`OperationManifest`] types. +//! + +use serde::{Deserialize, Serialize}; + +/// Associated action metadata. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct OperationManifest {} + +impl OperationManifest { + /// Returns a new [`OperationManifest`]. + /// + /// Used for testing. + #[inline] + pub fn new(name: &str) -> Self { + Self {} + } +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..92d876f --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Module" +# reorder_impl_items = true From c841871dcf68e398d1d27edadc05988d3ebba959 Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Sat, 14 Sep 2024 16:38:22 +0200 Subject: [PATCH 02/11] feat(all): initial commit --- Cargo.toml | 11 +- README.md | 12 +- crates/cli/Cargo.toml | 16 +- crates/cli/README.md | 8 +- crates/cli/build.rs | 4 +- crates/cli/handler/instance.rs | 27 ++- crates/cli/handler/registry.rs | 17 +- crates/cli/main.rs | 1 + crates/cli/middleware/mod.rs | 10 + crates/cli/middleware/observability.rs | 40 ++++ .../serv_config.rs => middleware/utility.rs} | 0 crates/cli/protobuf/instance.proto | 80 +++++++- crates/cli/protobuf/registry.proto | 38 +++- .../cli/service/{app_config.rs => config.rs} | 0 crates/cli/service/instance.rs | 0 crates/cli/service/mod.rs | 8 +- crates/cli/service/registry.rs | 0 crates/core/Cargo.toml | 10 +- crates/core/README.md | 2 +- crates/core/lib.rs | 33 +--- crates/jsvm/Cargo.toml | 52 +++++ crates/{type => jsvm}/README.md | 2 +- .../jsvm/ext_deno/ext_canvas/init_canvas.js | 0 crates/jsvm/ext_deno/ext_canvas/mod.rs | 2 + .../jsvm/ext_deno/ext_console/init_console.js | 0 crates/jsvm/ext_deno/ext_console/mod.rs | 2 + .../jsvm/ext_deno/ext_crypto/init_crypto.js | 0 crates/jsvm/ext_deno/ext_crypto/mod.rs | 2 + crates/jsvm/ext_deno/ext_fetch/init_fetch.js | 0 crates/jsvm/ext_deno/ext_fetch/mod.rs | 2 + crates/jsvm/ext_deno/ext_fs/init_fs.js | 0 crates/jsvm/ext_deno/ext_fs/mod.rs | 2 + crates/jsvm/ext_deno/ext_io/init_io.js | 0 
crates/jsvm/ext_deno/ext_io/mod.rs | 2 + crates/jsvm/ext_deno/ext_net/init_net.js | 0 crates/jsvm/ext_deno/ext_net/mod.rs | 2 + crates/jsvm/ext_deno/ext_url/init_url.js | 0 crates/jsvm/ext_deno/ext_url/mod.rs | 2 + crates/jsvm/ext_deno/ext_web/init_web.js | 0 crates/jsvm/ext_deno/ext_web/mod.rs | 2 + .../jsvm/ext_deno/ext_webgpu/init_webgpu.js | 0 crates/jsvm/ext_deno/ext_webgpu/mod.rs | 2 + .../jsvm/ext_deno/ext_webidl/init_webidl.js | 0 crates/jsvm/ext_deno/ext_webidl/mod.rs | 2 + .../ext_deno/ext_websocket/init_websocket.js | 0 crates/jsvm/ext_deno/ext_websocket/mod.rs | 2 + crates/jsvm/ext_deno/mod.rs | 48 +++++ crates/jsvm/extension/mod.rs | 9 + crates/jsvm/extension/permission.rs | 19 ++ crates/jsvm/extension/route/datatype.rs | 0 crates/jsvm/extension/route/internal.rs | 0 crates/jsvm/extension/route/mod.rs | 32 ++++ crates/jsvm/extension/route/ops.js | 0 crates/jsvm/extension/route/ops.rs | 20 ++ crates/jsvm/extension/trace/datatype.rs | 24 +++ crates/jsvm/extension/trace/internal.rs | 23 +++ crates/jsvm/extension/trace/mod.rs | 39 ++++ crates/jsvm/extension/trace/ops.js | 17 ++ crates/jsvm/extension/trace/ops.rs | 71 +++++++ crates/jsvm/lib.rs | 28 +++ crates/jsvm/runtime/cache/mod.rs | 2 + .../state.rs => jsvm/runtime/machine.rs} | 0 crates/jsvm/runtime/mod.rs | 52 +++++ crates/jsvm/runtime/permission.rs | 0 crates/jsvm/runtime/transpile/cache.rs | 11 ++ crates/jsvm/runtime/transpile/emit.rs | 0 crates/jsvm/runtime/transpile/mod.rs | 4 + crates/jsvm/runtime/util/mod.rs | 2 + crates/task/Cargo.toml | 20 +- crates/task/datatype/action.rs | 34 ++++ crates/task/datatype/mod.rs | 8 + .../condition.rs => task/datatype/service.rs} | 17 +- crates/task/datatype/trigger.rs | 35 ++++ crates/task/handler/layer.rs | 73 ------- crates/task/handler/native.rs | 70 ------- crates/task/lib.rs | 33 +--- crates/task/registry/cache.rs | 6 + crates/task/registry/handler.rs | 39 ++++ crates/task/registry/index.rs | 39 ++++ crates/task/registry/mod.rs | 55 ++++++ 
crates/task/routing/builder.rs | 121 ------------ crates/task/routing/index.rs | 143 -------------- crates/task/routing/layers.rs | 28 --- crates/task/routing/mod.rs | 179 ------------------ crates/type/Cargo.toml | 20 -- crates/type/datatype/condition.rs | 12 -- crates/type/datatype/mod.rs | 66 ------- crates/type/datatype/operation.rs | 10 - crates/type/lib.rs | 7 - crates/type/manifest/mod.rs | 31 --- crates/type/manifest/operation.rs | 19 -- deno.jsonc | 25 +++ examples/direct/deps.ts | 0 examples/direct/main.ts | 0 examples/hello/deps.ts | 0 examples/hello/main.ts | 0 modules/assert/README.md | 14 ++ modules/assert/deno.jsonc | 13 ++ modules/assert/match.ts | 0 modules/assert/match_test.ts | 0 modules/assert/mod.ts | 1 + modules/runtime/README.md | 14 ++ modules/runtime/deno.jsonc | 15 ++ modules/runtime/mod.ts | 2 + modules/runtime/request.ts | 0 modules/runtime/request_test.ts | 0 modules/runtime/response.ts | 0 modules/runtime/response_test.ts | 0 modules/testing/README.md | 14 ++ modules/testing/deno.jsonc | 12 ++ modules/testing/mod.ts | 1 + modules/testing/setup.ts | 0 modules/testing/setup_test.ts | 0 scripts/tower-path/.gitignore | 38 ++++ scripts/tower-path/Cargo.toml | 33 ++++ scripts/tower-path/LICENSE.txt | 21 ++ scripts/tower-path/README.md | 26 +++ scripts/tower-path/handler/layer.rs | 1 + scripts/tower-path/handler/mod.rs | 4 + scripts/tower-path/lib.rs | 18 ++ scripts/tower-path/routing/container.rs | 55 ++++++ scripts/tower-path/routing/index.rs | 31 +++ scripts/tower-path/routing/mod.rs | 102 ++++++++++ scripts/tower-path/service/layer.rs | 41 ++++ scripts/tower-path/service/mod.rs | 78 ++++++++ scripts/tower-task/.gitignore | 38 ++++ scripts/tower-task/Cargo.toml | 34 ++++ scripts/tower-task/LICENSE.txt | 21 ++ scripts/tower-task/README.md | 20 ++ scripts/tower-task/compose/builder.rs | 34 ++++ scripts/tower-task/compose/layers.rs | 25 +++ scripts/tower-task/compose/mod.rs | 13 ++ .../tower-task}/context/failure.rs | 23 +-- 
.../tower-task}/context/mod.rs | 0 .../tower-task}/context/request.rs | 32 +--- .../tower-task}/context/response.rs | 12 -- scripts/tower-task/context/state.rs | 1 + .../tower-task}/handler/future.rs | 0 scripts/tower-task/handler/layer.rs | 55 ++++++ .../tower-task}/handler/metric.rs | 4 + .../tower-task}/handler/mod.rs | 61 ++---- scripts/tower-task/lib.rs | 26 +++ 142 files changed, 1821 insertions(+), 998 deletions(-) create mode 100644 crates/cli/middleware/observability.rs rename crates/cli/{service/serv_config.rs => middleware/utility.rs} (100%) rename crates/cli/service/{app_config.rs => config.rs} (100%) create mode 100644 crates/cli/service/instance.rs create mode 100644 crates/cli/service/registry.rs create mode 100644 crates/jsvm/Cargo.toml rename crates/{type => jsvm}/README.md (85%) create mode 100644 crates/jsvm/ext_deno/ext_canvas/init_canvas.js create mode 100644 crates/jsvm/ext_deno/ext_canvas/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_console/init_console.js create mode 100644 crates/jsvm/ext_deno/ext_console/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_crypto/init_crypto.js create mode 100644 crates/jsvm/ext_deno/ext_crypto/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_fetch/init_fetch.js create mode 100644 crates/jsvm/ext_deno/ext_fetch/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_fs/init_fs.js create mode 100644 crates/jsvm/ext_deno/ext_fs/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_io/init_io.js create mode 100644 crates/jsvm/ext_deno/ext_io/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_net/init_net.js create mode 100644 crates/jsvm/ext_deno/ext_net/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_url/init_url.js create mode 100644 crates/jsvm/ext_deno/ext_url/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_web/init_web.js create mode 100644 crates/jsvm/ext_deno/ext_web/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js create mode 100644 crates/jsvm/ext_deno/ext_webgpu/mod.rs 
create mode 100644 crates/jsvm/ext_deno/ext_webidl/init_webidl.js create mode 100644 crates/jsvm/ext_deno/ext_webidl/mod.rs create mode 100644 crates/jsvm/ext_deno/ext_websocket/init_websocket.js create mode 100644 crates/jsvm/ext_deno/ext_websocket/mod.rs create mode 100644 crates/jsvm/ext_deno/mod.rs create mode 100644 crates/jsvm/extension/mod.rs create mode 100644 crates/jsvm/extension/permission.rs create mode 100644 crates/jsvm/extension/route/datatype.rs create mode 100644 crates/jsvm/extension/route/internal.rs create mode 100644 crates/jsvm/extension/route/mod.rs create mode 100644 crates/jsvm/extension/route/ops.js create mode 100644 crates/jsvm/extension/route/ops.rs create mode 100644 crates/jsvm/extension/trace/datatype.rs create mode 100644 crates/jsvm/extension/trace/internal.rs create mode 100644 crates/jsvm/extension/trace/mod.rs create mode 100644 crates/jsvm/extension/trace/ops.js create mode 100644 crates/jsvm/extension/trace/ops.rs create mode 100644 crates/jsvm/lib.rs create mode 100644 crates/jsvm/runtime/cache/mod.rs rename crates/{task/context/state.rs => jsvm/runtime/machine.rs} (100%) create mode 100644 crates/jsvm/runtime/mod.rs create mode 100644 crates/jsvm/runtime/permission.rs create mode 100644 crates/jsvm/runtime/transpile/cache.rs create mode 100644 crates/jsvm/runtime/transpile/emit.rs create mode 100644 crates/jsvm/runtime/transpile/mod.rs create mode 100644 crates/jsvm/runtime/util/mod.rs create mode 100644 crates/task/datatype/action.rs create mode 100644 crates/task/datatype/mod.rs rename crates/{type/manifest/condition.rs => task/datatype/service.rs} (53%) create mode 100644 crates/task/datatype/trigger.rs delete mode 100644 crates/task/handler/layer.rs delete mode 100644 crates/task/handler/native.rs create mode 100644 crates/task/registry/cache.rs create mode 100644 crates/task/registry/handler.rs create mode 100644 crates/task/registry/index.rs create mode 100644 crates/task/registry/mod.rs delete mode 100644 
crates/task/routing/builder.rs delete mode 100644 crates/task/routing/index.rs delete mode 100644 crates/task/routing/layers.rs delete mode 100644 crates/task/routing/mod.rs delete mode 100644 crates/type/Cargo.toml delete mode 100644 crates/type/datatype/condition.rs delete mode 100644 crates/type/datatype/mod.rs delete mode 100644 crates/type/datatype/operation.rs delete mode 100644 crates/type/lib.rs delete mode 100644 crates/type/manifest/mod.rs delete mode 100644 crates/type/manifest/operation.rs create mode 100644 deno.jsonc create mode 100644 examples/direct/deps.ts create mode 100644 examples/direct/main.ts create mode 100644 examples/hello/deps.ts create mode 100644 examples/hello/main.ts create mode 100644 modules/assert/README.md create mode 100644 modules/assert/deno.jsonc create mode 100644 modules/assert/match.ts create mode 100644 modules/assert/match_test.ts create mode 100644 modules/assert/mod.ts create mode 100644 modules/runtime/README.md create mode 100644 modules/runtime/deno.jsonc create mode 100644 modules/runtime/mod.ts create mode 100644 modules/runtime/request.ts create mode 100644 modules/runtime/request_test.ts create mode 100644 modules/runtime/response.ts create mode 100644 modules/runtime/response_test.ts create mode 100644 modules/testing/README.md create mode 100644 modules/testing/deno.jsonc create mode 100644 modules/testing/mod.ts create mode 100644 modules/testing/setup.ts create mode 100644 modules/testing/setup_test.ts create mode 100644 scripts/tower-path/.gitignore create mode 100644 scripts/tower-path/Cargo.toml create mode 100644 scripts/tower-path/LICENSE.txt create mode 100644 scripts/tower-path/README.md create mode 100644 scripts/tower-path/handler/layer.rs create mode 100644 scripts/tower-path/handler/mod.rs create mode 100644 scripts/tower-path/lib.rs create mode 100644 scripts/tower-path/routing/container.rs create mode 100644 scripts/tower-path/routing/index.rs create mode 100644 scripts/tower-path/routing/mod.rs 
create mode 100644 scripts/tower-path/service/layer.rs create mode 100644 scripts/tower-path/service/mod.rs create mode 100644 scripts/tower-task/.gitignore create mode 100644 scripts/tower-task/Cargo.toml create mode 100644 scripts/tower-task/LICENSE.txt create mode 100644 scripts/tower-task/README.md create mode 100644 scripts/tower-task/compose/builder.rs create mode 100644 scripts/tower-task/compose/layers.rs create mode 100644 scripts/tower-task/compose/mod.rs rename {crates/task => scripts/tower-task}/context/failure.rs (52%) rename {crates/task => scripts/tower-task}/context/mod.rs (100%) rename {crates/task => scripts/tower-task}/context/request.rs (70%) rename {crates/task => scripts/tower-task}/context/response.rs (89%) create mode 100644 scripts/tower-task/context/state.rs rename {crates/task => scripts/tower-task}/handler/future.rs (100%) create mode 100644 scripts/tower-task/handler/layer.rs rename {crates/task => scripts/tower-task}/handler/metric.rs (84%) rename {crates/task => scripts/tower-task}/handler/mod.rs (67%) create mode 100644 scripts/tower-task/lib.rs diff --git a/Cargo.toml b/Cargo.toml index cf0a9e0..fe72885 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,10 @@ resolver = "2" members = [ "./crates/cli", "./crates/core", + "./crates/jsvm", "./crates/task", + "./scripts/tower-path", + "./scripts/tower-task", ] [workspace.package] @@ -20,6 +23,8 @@ homepage = "https://github.com/axiston/runtime" documentation = "https://docs.rs/axiston" [workspace.dependencies] -runtime-core = { path = "./crates/core", version = "0.1.0" } -runtime-task = { path = "./crates/task", version = "0.1.0" } -runtime-type = { path = "./crates/type", version = "0.1.0" } +axiston-runtime-core = { path = "./crates/core", version = "0.1.0" } +axiston-runtime-jsvm = { path = "./crates/jsvm", version = "0.1.0" } +axiston-runtime-task = { path = "./crates/task", version = "0.1.0" } +tower-path = { path = "./scripts/tower-path", version = "0.1.0" } +tower-task = { path 
= "./scripts/tower-task", version = "0.1.0" } diff --git a/README.md b/README.md index f520b18..cedab91 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,8 @@ [coverage-badge]: https://img.shields.io/codecov/c/github/axiston/runtime [coverage-url]: https://app.codecov.io/gh/axiston/runtime -A server capable of running native `Rust` or `TypeScript` tasks (via Deno -runtime). +A server application based on `Deno` runtime, capable of running `JavaScript`, +`TypeScript`, and native `Rust` tasks. #### Notes @@ -22,3 +22,11 @@ runtime). ```cmd runtime --port 8080 ``` + +#### Nodes + +- May be of following types: trigger (normal or reaction), action + +#### Edges + +- May attach transformations diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 802c8cc..8e68b84 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "runtime-cli" +name = "axiston-runtime-cli" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -18,24 +18,28 @@ name = "axiston" path = "main.rs" [dependencies] -runtime-core = { workspace = true } +axiston-runtime-core = { workspace = true } clap = { version = "4.5", features = ["derive"] } -tokio = { version = "1.40", features = ["macros", "rt-multi-thread"] } +tokio = { version = "1.36", features = ["macros", "rt-multi-thread"] } +tokio-stream = { version = "0.1", features = [] } futures = { version = "0.3", features = [] } anyhow = { version = "1.0", features = ["backtrace"] } tracing = { version = "0.1", features = [] } tracing-subscriber = { version = "0.3", features = ["env-filter", "time"] } -tracing-opentelemetry = { version = "0.25", features = [] } -opentelemetry = { version = "0.24", features = [] } +tracing-opentelemetry = { version = "0.26.0", features = [] } +opentelemetry = { version = "0.25.0", features = [] } tonic = { version = "0.12", features = [] } prost = { version = "0.13", 
features = [] } tonic-types = { version = "0.12", features = [] } prost-types = { version = "0.13", features = [] } +tower = { version = "0.4", features = ["full"] } +tower-http = { version = "0.5", features = ["full"] } + [build-dependencies] +anyhow = { version = "1.0", features = ["backtrace"] } tonic-build = { version = "0.12", features = [] } prost-build = { version = "0.13", features = [] } -anyhow = { version = "1.0", features = [] } diff --git a/crates/cli/README.md b/crates/cli/README.md index a42a04f..444657f 100644 --- a/crates/cli/README.md +++ b/crates/cli/README.md @@ -1,4 +1,4 @@ -### axiston/cli +### runtime/cli Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. @@ -7,9 +7,3 @@ Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - Lorem Ipsum. - Lorem Ipsum. - Lorem Ipsum. - -#### User's Code - -- Run From Command Line -- Run In Container -- Run with Temporal.io diff --git a/crates/cli/build.rs b/crates/cli/build.rs index 23f4d08..93d6e34 100644 --- a/crates/cli/build.rs +++ b/crates/cli/build.rs @@ -4,9 +4,9 @@ use std::path::PathBuf; fn main() -> anyhow::Result<()> { let builder = tonic_build::configure() - .build_client(false) + .build_transport(true) .build_server(true) - .build_transport(true); + .build_client(false); let dir = PathBuf::from("./protobuf/"); let instance = dir.join("./instance.proto"); diff --git a/crates/cli/handler/instance.rs b/crates/cli/handler/instance.rs index c7c0475..f5f8578 100644 --- a/crates/cli/handler/instance.rs +++ b/crates/cli/handler/instance.rs @@ -1,7 +1,11 @@ -use tonic::{Request, Response, Status}; +use futures::stream::BoxStream; +use futures::StreamExt; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{Request, Response, Status, Streaming}; use crate::handler::instance::instance_proto::instance_server::{Instance, InstanceServer}; -use crate::handler::instance::instance_proto::{HelloRequest, HelloResponse}; +use crate::handler::instance::instance_proto::{EventRequest, EventResponse}; use 
crate::service::AppState; pub mod instance_proto { @@ -29,10 +33,21 @@ impl InstanceService { #[tonic::async_trait] impl Instance for InstanceService { - async fn hello( + type ConnectStream = BoxStream<'static, Result>; + + async fn connect( &self, - request: Request, - ) -> Result, Status> { - todo!() + request: Request>, + ) -> Result, Status> { + let mut request = request.into_inner(); + + let (tx, rx) = mpsc::channel(128); + tokio::spawn(async move { + while let Some(event) = request.next().await { + }; + }); + + let rx = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(rx))) } } diff --git a/crates/cli/handler/registry.rs b/crates/cli/handler/registry.rs index 939fd40..cc5ec73 100644 --- a/crates/cli/handler/registry.rs +++ b/crates/cli/handler/registry.rs @@ -1,7 +1,9 @@ use tonic::{Request, Response, Status}; use crate::handler::registry::registry_proto::registry_server::{Registry, RegistryServer}; -use crate::handler::registry::registry_proto::{HelloRequest, HelloResponse}; +use crate::handler::registry::registry_proto::{ + CheckRequest, CheckResponse, RegistryRequest, RegistryResponse, +}; use crate::service::AppState; pub mod registry_proto { @@ -29,10 +31,17 @@ impl RegistryService { #[tonic::async_trait] impl Registry for RegistryService { - async fn hello( + async fn registry( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { + todo!() + } + + async fn check( + &self, + request: Request, + ) -> Result, Status> { todo!() } } diff --git a/crates/cli/main.rs b/crates/cli/main.rs index 43ee0bc..7128be0 100644 --- a/crates/cli/main.rs +++ b/crates/cli/main.rs @@ -23,6 +23,7 @@ pub struct Args { #[tokio::main] async fn main() -> anyhow::Result<()> { let args = Args::parse(); + middleware::initialize_tracing().await?; // Service. 
let config = AppConfig::builder().build(); diff --git a/crates/cli/middleware/mod.rs b/crates/cli/middleware/mod.rs index 8e7716c..a05b5b4 100644 --- a/crates/cli/middleware/mod.rs +++ b/crates/cli/middleware/mod.rs @@ -1,2 +1,12 @@ //! TODO. //! + +use tower::ServiceBuilder; +pub use crate::middleware::observability::initialize_tracing; +mod observability; +mod utility; + +/// Extension trait for `tower::`[`ServiceBuilder`] for layering middleware. +pub trait ServiceBuilderExt {} + +impl ServiceBuilderExt for ServiceBuilder {} diff --git a/crates/cli/middleware/observability.rs b/crates/cli/middleware/observability.rs new file mode 100644 index 0000000..84c34a1 --- /dev/null +++ b/crates/cli/middleware/observability.rs @@ -0,0 +1,40 @@ +#[must_use] +fn build_env_filter() -> tracing_subscriber::EnvFilter { + let current = std::env::var("RUST_LOG") + .or_else(|_| std::env::var("OTEL_LOG_LEVEL")) + .unwrap_or_else(|_| "info".to_string()); + + let env = format!("{},server=trace,otel=debug,tower_http=debug", current); + std::env::set_var("RUST_LOG", env); + tracing_subscriber::EnvFilter::from_default_env() +} + +pub async fn initialize_tracing() -> anyhow::Result<()> { + use tracing_subscriber::fmt::layer; + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; + + // Setups a temporary subscriber to log output during setup. + let env_filter = build_env_filter(); + let fmt_layer = layer().pretty(); + let subscriber = tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer); + + let _guard = tracing::subscriber::set_default(subscriber); + tracing::trace!(target: "server:otel", "initialized temporary subscriber"); + + // TODO: Enable OpenTelemetry. + // https://github.com/davidB/tracing-opentelemetry-instrumentation-sdk + + // Setups an actual subscriber. 
+ let env_filter = build_env_filter(); + let fmt_layer = layer().pretty(); + tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer) + .init(); + + tracing::trace!(target: "server:otel", "initialized subscriber"); + Ok(()) +} diff --git a/crates/cli/service/serv_config.rs b/crates/cli/middleware/utility.rs similarity index 100% rename from crates/cli/service/serv_config.rs rename to crates/cli/middleware/utility.rs diff --git a/crates/cli/protobuf/instance.proto b/crates/cli/protobuf/instance.proto index 0ea60bf..eeb0e23 100644 --- a/crates/cli/protobuf/instance.proto +++ b/crates/cli/protobuf/instance.proto @@ -1,11 +1,85 @@ syntax = "proto3"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + package instance; -message HelloRequest {} +// The message format for sending events. +message EventRequest { + // The unique ID of the request message. + string id = 1; + // The unique ID of the message group. + string group = 2; + + // When the event was recv by the gateway. + google.protobuf.Timestamp recv = 3; + // When the event was sent to the runtime. + google.protobuf.Timestamp send = 4; + + // The content of the message. + oneof payload { + // Step 1.1 + OpenRequest Open = 10; + // Step 2.2 + NotifyResponse AckNotify = 11; + // Step 3.2 + CloseResponse AckClose = 12; + } +} + +// The message format for receiving events. +message EventResponse { + // The unique ID of the response message. + uint32 id = 1; + // The unique ID of the message group. + uint32 group = 2; + // The unique ID of the request message. + uint32 reply = 3; + + // When the event was recv by the runtime. + google.protobuf.Timestamp recv = 4; + // When the event was sent to the gateway. + google.protobuf.Timestamp send = 5; + + // The content of the message. + oneof payload { + // Step 1.2 + OpenResponse AckOpen = 10; + // Step 2.1 + NotifyRequest Notify = 11; + // Step 3.1 + CloseRequest Close = 12; + } +} + +// Start execution request message. 
+message OpenRequest { + string task = 1; + + map fields = 2; + map secrets = 3; +} + +// Start execution response message. +message OpenResponse { + google.protobuf.Timestamp started = 1; + optional google.protobuf.Duration estimate = 2; +} + +message NotifyRequest { + string task = 1; +} + +message NotifyResponse {} + +message CloseRequest { + string task = 1; +} -message HelloResponse {} +message CloseResponse {} service Instance { - rpc Hello(HelloRequest) returns (HelloResponse); + // Bidirectional streaming RPC for continuous communication between the gateway and runtime. + rpc Connect(stream EventRequest) returns (stream EventResponse); } diff --git a/crates/cli/protobuf/registry.proto b/crates/cli/protobuf/registry.proto index 5b57eb0..5edd854 100644 --- a/crates/cli/protobuf/registry.proto +++ b/crates/cli/protobuf/registry.proto @@ -1,11 +1,43 @@ syntax = "proto3"; +import "google/protobuf/timestamp.proto"; +// import "google/protobuf/empty.proto"; + package registry; -message HelloRequest {} +message Service { + string id = 1; + string name = 2; + string icon = 3; +} + +message Entity { + string id = 1; + string name = 2; + string icon = 3; +} + +message CheckRequest {} -message HelloResponse {} +message CheckResponse {} + +// The message format for requesting for the registry content. +message RegistryRequest {} + +// The message format for responding with the registry content. +message RegistryResponse { + google.protobuf.Timestamp created = 2; + google.protobuf.Timestamp updated = 3; + + repeated Service services = 11; + repeated Entity triggers = 12; + repeated Entity actions = 13; +} service Registry { - rpc Hello(HelloRequest) returns (HelloResponse); + // Comprehensive collection of available tasks and their metadata. + rpc Registry(registry.RegistryRequest) returns (registry.RegistryResponse); + + // Authentication, authorization, and health checks. 
+ rpc Check(registry.CheckRequest) returns (registry.CheckResponse); } diff --git a/crates/cli/service/app_config.rs b/crates/cli/service/config.rs similarity index 100% rename from crates/cli/service/app_config.rs rename to crates/cli/service/config.rs diff --git a/crates/cli/service/instance.rs b/crates/cli/service/instance.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/cli/service/mod.rs b/crates/cli/service/mod.rs index 960260f..46d2b0d 100644 --- a/crates/cli/service/mod.rs +++ b/crates/cli/service/mod.rs @@ -1,11 +1,11 @@ //! TODO. //! -pub use crate::service::app_config::{AppBuilder, AppConfig}; -pub use crate::service::serv_config::Args; +pub use crate::service::config::{AppBuilder, AppConfig}; -mod app_config; -mod serv_config; +mod config; +mod instance; +mod registry; /// Application state. /// diff --git a/crates/cli/service/registry.rs b/crates/cli/service/registry.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 7f3fc2c..2466e42 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "runtime-core" +name = "axiston-runtime-core" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -13,9 +13,13 @@ repository = { workspace = true } homepage = { workspace = true } documentation = { workspace = true } +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + [lib] path = "lib.rs" [dependencies] -runtime-task = { workspace = true } -runtime-type = { workspace = true } +axiston-runtime-jsvm = { workspace = true } +axiston-runtime-task = { workspace = true } diff --git a/crates/core/README.md b/crates/core/README.md index de96a7c..a99bbd1 100644 --- a/crates/core/README.md +++ b/crates/core/README.md @@ -1,4 +1,4 @@ -### axiston/core +### runtime/core Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. 
diff --git a/crates/core/lib.rs b/crates/core/lib.rs index eb6ed5e..62212a5 100644 --- a/crates/core/lib.rs +++ b/crates/core/lib.rs @@ -1,31 +1,10 @@ #![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("./README.md")] -//! TODO. +//! ### Examples +//! +//! ```rust +//! fn main() {} +//! ``` -use std::collections::HashMap; - -use runtime_task::routing::{Index, Router}; -use runtime_type::datatype::condition::{ConditionRequestData, ConditionResponseData}; -use runtime_type::datatype::operation::{OperationRequestData, OperationResponseData}; -use runtime_type::manifest::condition::ConditionManifest; -use runtime_type::manifest::operation::OperationManifest; - -#[derive(Debug, Default, Clone)] -pub struct AppRouter { - // groups: HashMap - conditions: Router, - operations: Router, -} - -impl AppRouter { - /// Returns an empty [`Router`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - pub fn retrieve_manifests() {} - - pub fn try_execute() {} -} diff --git a/crates/jsvm/Cargo.toml b/crates/jsvm/Cargo.toml new file mode 100644 index 0000000..0fcbbe9 --- /dev/null +++ b/crates/jsvm/Cargo.toml @@ -0,0 +1,52 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "axiston-runtime-jsvm" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = "lib.rs" + +[dependencies] +axiston-runtime-task = { workspace = true } + +tokio = { version = "1.36", features = [] } +serde = { version = "1.0", features = ["derive"] } +thiserror = { version = "1.0", features = [] } +tracing = { version = "0.1", features = [] } + +deno_core = { version = "0.308.0", features = [] } +deno_ast 
= { version = "0.42.0", features = [] } +deno_permissions = { version = "0.28.0", features = [] } + +deno_console = { version = "0.168.0", features = [] } +deno_crypto = { version = "0.182.0", features = [] } +deno_webidl = { version = "0.168.0", features = [] } +deno_url = { version = "0.168.0", features = [] } + +deno_fs = { version = "0.78.0", features = ["sync_fs"] } +deno_http = { version = "0.166.0", features = [] } +deno_io = { version = "0.78.0", features = [] } + +deno_fetch = { version = "0.192.0", features = [] } +deno_net = { version = "0.160.0", features = [] } +deno_web = { version = "0.199.0", features = [] } + +deno_tls = { version = "0.155.0", features = [] } +deno_websocket = { version = "0.173.0", features = [] } +deno_webstorage = { version = "0.163.0", features = [] } +deno_canvas = { version = "0.37.0", features = [] } +deno_webgpu = { version = "0.135.0", features = [] } diff --git a/crates/type/README.md b/crates/jsvm/README.md similarity index 85% rename from crates/type/README.md rename to crates/jsvm/README.md index de96a7c..a621b1e 100644 --- a/crates/type/README.md +++ b/crates/jsvm/README.md @@ -1,4 +1,4 @@ -### axiston/core +### runtime/jsvm Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. diff --git a/crates/jsvm/ext_deno/ext_canvas/init_canvas.js b/crates/jsvm/ext_deno/ext_canvas/init_canvas.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_canvas/mod.rs b/crates/jsvm/ext_deno/ext_canvas/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_canvas/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_console/init_console.js b/crates/jsvm/ext_deno/ext_console/init_console.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_console/mod.rs b/crates/jsvm/ext_deno/ext_console/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_console/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! 
diff --git a/crates/jsvm/ext_deno/ext_crypto/init_crypto.js b/crates/jsvm/ext_deno/ext_crypto/init_crypto.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_crypto/mod.rs b/crates/jsvm/ext_deno/ext_crypto/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_crypto/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_fetch/init_fetch.js b/crates/jsvm/ext_deno/ext_fetch/init_fetch.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_fetch/mod.rs b/crates/jsvm/ext_deno/ext_fetch/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_fetch/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_fs/init_fs.js b/crates/jsvm/ext_deno/ext_fs/init_fs.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_fs/mod.rs b/crates/jsvm/ext_deno/ext_fs/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_fs/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_io/init_io.js b/crates/jsvm/ext_deno/ext_io/init_io.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_io/mod.rs b/crates/jsvm/ext_deno/ext_io/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_io/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_net/init_net.js b/crates/jsvm/ext_deno/ext_net/init_net.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_net/mod.rs b/crates/jsvm/ext_deno/ext_net/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_net/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! 
diff --git a/crates/jsvm/ext_deno/ext_url/init_url.js b/crates/jsvm/ext_deno/ext_url/init_url.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_url/mod.rs b/crates/jsvm/ext_deno/ext_url/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_url/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_web/init_web.js b/crates/jsvm/ext_deno/ext_web/init_web.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_web/mod.rs b/crates/jsvm/ext_deno/ext_web/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_web/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js b/crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_webgpu/mod.rs b/crates/jsvm/ext_deno/ext_webgpu/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_webgpu/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_webidl/init_webidl.js b/crates/jsvm/ext_deno/ext_webidl/init_webidl.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_webidl/mod.rs b/crates/jsvm/ext_deno/ext_webidl/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_webidl/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/jsvm/ext_deno/ext_websocket/init_websocket.js b/crates/jsvm/ext_deno/ext_websocket/init_websocket.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/ext_deno/ext_websocket/mod.rs b/crates/jsvm/ext_deno/ext_websocket/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/ext_deno/ext_websocket/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! 
diff --git a/crates/jsvm/ext_deno/mod.rs b/crates/jsvm/ext_deno/mod.rs new file mode 100644 index 0000000..9248393 --- /dev/null +++ b/crates/jsvm/ext_deno/mod.rs @@ -0,0 +1,48 @@ +//! `deno_core::`[`extension`]s bundled with `Deno`. +//! + +mod ext_canvas; +mod ext_console; +mod ext_crypto; +mod ext_fetch; +mod ext_websocket; +mod ext_fs; +mod ext_url; +mod ext_webgpu; +mod ext_net; +mod ext_io; +mod ext_web; +mod ext_webidl; +mod permission; + + +// extension!( +// axiston_permission, +// options = { allow_net_access: bool, filter_net_access: Vec }, +// state = |state, options| { +// state.put::(MyPermission::new( +// options.allow_net_access, options.filter_net_access +// ) ); +// } +// ); +// +// extension!( +// axiston_init_fetch, +// deps = [rustyscript], +// esm_entry_point = "ext:ext_boot/init_fetch.js", +// esm = [ dir "ext_boot", "init_fetch.js" ], +// ); +// +// extension!( +// axiston_init_net, +// deps = [rustyscript], +// esm_entry_point = "ext:ext_boot/init_net.js", +// esm = [ dir "ext_boot", "init_net.js" ], +// ); +// +// extension!( +// axiston_init_web, +// deps = [rustyscript], +// esm_entry_point = "ext:ext_boot/init_web.js", +// esm = [ dir "ext_boot", "init_web.js" ], +// ); diff --git a/crates/jsvm/extension/mod.rs b/crates/jsvm/extension/mod.rs new file mode 100644 index 0000000..9defb80 --- /dev/null +++ b/crates/jsvm/extension/mod.rs @@ -0,0 +1,9 @@ +//! Runtime `deno_core::`[`extension`]s. +//! + +pub use crate::extension::trace::axis_tracing; +pub use crate::extension::route::axis_routing; + +mod route; +mod trace; +mod permission; diff --git a/crates/jsvm/extension/permission.rs b/crates/jsvm/extension/permission.rs new file mode 100644 index 0000000..fbef69c --- /dev/null +++ b/crates/jsvm/extension/permission.rs @@ -0,0 +1,19 @@ +use deno_permissions::NetDescriptor; + +/// TODO. 
+#[derive(Debug, Default, Clone)] +pub struct MyPermission { + allow_net: bool, + filter_net: Vec, +} + +impl MyPermission { + /// Returns a new [`MyPermission`]. + #[inline] + pub fn new(allow_net: bool, filter_net: Vec) -> Self { + Self { + allow_net, + filter_net, + } + } +} diff --git a/crates/jsvm/extension/route/datatype.rs b/crates/jsvm/extension/route/datatype.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/extension/route/internal.rs b/crates/jsvm/extension/route/internal.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/extension/route/mod.rs b/crates/jsvm/extension/route/mod.rs new file mode 100644 index 0000000..fe3dd42 --- /dev/null +++ b/crates/jsvm/extension/route/mod.rs @@ -0,0 +1,32 @@ +//! Runtime [`extension`] for the [`routing`] events. +//! +//! [`routing`]: axiston_runtime_task::registry::Registry + +mod datatype; +mod internal; +mod ops; + +use deno_core::extension; +use crate::extension::route::ops::{ + op_register_service, op_register_trigger, op_register_action +}; + +extension!( + axis_routing, + ops = [op_register_service, op_register_trigger, op_register_action], + esm_entry_point = "ext:extension/route/ops.js", + esm = [ dir "extension/route", "ops.js" ], +); + +/// Unrecoverable failure during routing ops. +/// +/// Includes all error types that may occur. +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { +} + +/// Specialized [`Result`] alias for [`Error`]. 
+/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/jsvm/extension/route/ops.js b/crates/jsvm/extension/route/ops.js new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/extension/route/ops.rs b/crates/jsvm/extension/route/ops.rs new file mode 100644 index 0000000..b40f870 --- /dev/null +++ b/crates/jsvm/extension/route/ops.rs @@ -0,0 +1,20 @@ +use deno_core::op2; +use crate::extension::route::Result; + +#[op2(fast)] +pub fn op_register_service( +) -> Result<()> { + Ok(()) +} + +#[op2(fast)] +pub fn op_register_trigger( +) -> Result<()> { + Ok(()) +} + +#[op2(fast)] +pub fn op_register_action( +) -> Result<()> { + Ok(()) +} diff --git a/crates/jsvm/extension/trace/datatype.rs b/crates/jsvm/extension/trace/datatype.rs new file mode 100644 index 0000000..8271ce5 --- /dev/null +++ b/crates/jsvm/extension/trace/datatype.rs @@ -0,0 +1,24 @@ +use serde::Deserialize; + +/// Deserializable options for a [`tracing_*`] op. +/// +/// [`tracing_*`]: crate::ext_tracing +#[derive(Debug, Default, Deserialize)] +#[must_use = "datatypes do nothing unless you deserialize them"] +pub struct TracingOptions { + pub target: Option, +} + +impl TracingOptions {} + +#[cfg(test)] +mod test { + use crate::extension::trace::datatype::TracingOptions; + use crate::extension::trace::Result; + + #[test] + fn instance() -> Result<()> { + let _ = TracingOptions::default(); + Ok(()) + } +} diff --git a/crates/jsvm/extension/trace/internal.rs b/crates/jsvm/extension/trace/internal.rs new file mode 100644 index 0000000..f807193 --- /dev/null +++ b/crates/jsvm/extension/trace/internal.rs @@ -0,0 +1,23 @@ +use tracing::{debug, error, info, trace, warn, Level}; +use crate::extension::trace::datatype::TracingOptions; +use crate::extension::trace::Result; + +/// TODO. 
+pub fn emit_op_tracing_event( + message: &str, + level: Level, + options: Option, +) -> Result<()> { + let options = options.unwrap_or_default(); + let target = options.target.unwrap_or_default(); + + match level { + Level::TRACE => trace!(message), + Level::DEBUG => debug!(message), + Level::INFO => info!(message), + Level::WARN => warn!(message), + Level::ERROR => error!(message), + }; + + Ok(()) +} diff --git a/crates/jsvm/extension/trace/mod.rs b/crates/jsvm/extension/trace/mod.rs new file mode 100644 index 0000000..151a5a6 --- /dev/null +++ b/crates/jsvm/extension/trace/mod.rs @@ -0,0 +1,39 @@ +//! Runtime [`extension`] for the [`tracing`] events. +//! + +use deno_core::extension; + +use crate::extension::trace::ops::{ + op_tracing_debug, op_tracing_debug_fast, op_tracing_error, op_tracing_error_fast, + op_tracing_info, op_tracing_info_fast, op_tracing_trace, op_tracing_trace_fast, + op_tracing_warn, op_tracing_warn_fast, +}; + +mod datatype; +mod internal; +mod ops; + +extension!( + axis_tracing, + ops = [ + op_tracing_trace_fast, op_tracing_trace, op_tracing_debug_fast, op_tracing_debug, + op_tracing_info_fast, op_tracing_info, op_tracing_warn_fast, op_tracing_warn, + op_tracing_error_fast, op_tracing_error], + esm_entry_point = "ext:extension/trace/ops.js", + esm = [ dir "extension/trace", "ops.js" ], +); + +/// Unrecoverable failure during tracing ops. +/// +/// Includes all error types that may occur. +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { + #[error("tracing target reused")] + ReuseTarget, +} + +/// Specialized [`Result`] alias for [`Error`]. 
+/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/jsvm/extension/trace/ops.js b/crates/jsvm/extension/trace/ops.js new file mode 100644 index 0000000..a897080 --- /dev/null +++ b/crates/jsvm/extension/trace/ops.js @@ -0,0 +1,17 @@ +globalThis.tracing = { + "trace": (args) => { + return Deno.core.ops.op_tracing_trace(args); + }, + "debug": (args) => { + return Deno.core.ops.op_tracing_debug(args); + }, + "info": (args) => { + return Deno.core.ops.op_tracing_info(args); + }, + "warn": (args) => { + return Deno.core.ops.op_tracing_warn(args); + }, + "error": (args) => { + return Deno.core.ops.op_tracing_error(args); + }, +}; diff --git a/crates/jsvm/extension/trace/ops.rs b/crates/jsvm/extension/trace/ops.rs new file mode 100644 index 0000000..91d70db --- /dev/null +++ b/crates/jsvm/extension/trace/ops.rs @@ -0,0 +1,71 @@ +use deno_core::op2; +use tracing::Level; + +use crate::extension::trace::datatype::TracingOptions; +use crate::extension::trace::internal::emit_op_tracing_event; +use crate::extension::trace::Result; + +#[op2(fast)] +pub fn op_tracing_trace_fast(#[string] message: &str) -> Result<()> { + emit_op_tracing_event(message, Level::TRACE, None) +} + +#[op2(fast(op_tracing_trace_fast))] +pub fn op_tracing_trace( + #[string] message: &str, + #[serde] options: Option, +) -> Result<()> { + emit_op_tracing_event(message, Level::TRACE, options) +} + +#[op2(fast)] +pub fn op_tracing_debug_fast(#[string] message: &str) -> Result<()> { + emit_op_tracing_event(message, Level::DEBUG, None) +} + +#[op2(fast(op_tracing_debug_fast))] +pub fn op_tracing_debug( + #[string] message: &str, + #[serde] options: Option, +) -> Result<()> { + emit_op_tracing_event(message, Level::DEBUG, options) +} + +#[op2(fast)] +pub fn op_tracing_info_fast(#[string] message: &str) -> Result<()> { + emit_op_tracing_event(message, Level::INFO, None) +} + +#[op2(fast(op_tracing_info_fast))] +pub fn op_tracing_info( + #[string] message: &str, + 
#[serde] options: Option, +) -> Result<()> { + emit_op_tracing_event(message, Level::INFO, options) +} + +#[op2(fast)] +pub fn op_tracing_warn_fast(#[string] message: &str) -> Result<()> { + emit_op_tracing_event(message, Level::WARN, None) +} + +#[op2(fast(op_tracing_warn_fast))] +pub fn op_tracing_warn( + #[string] message: &str, + #[serde] options: Option, +) -> Result<()> { + emit_op_tracing_event(message, Level::WARN, options) +} + +#[op2(fast)] +pub fn op_tracing_error_fast(#[string] message: &str) -> Result<()> { + emit_op_tracing_event(message, Level::ERROR, None) +} + +#[op2(fast(op_tracing_error_fast))] +pub fn op_tracing_error( + #[string] message: &str, + #[serde] options: Option, +) -> Result<()> { + emit_op_tracing_event(message, Level::ERROR, options) +} diff --git a/crates/jsvm/lib.rs b/crates/jsvm/lib.rs new file mode 100644 index 0000000..c514f13 --- /dev/null +++ b/crates/jsvm/lib.rs @@ -0,0 +1,28 @@ +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! ### Examples +//! +//! ```rust +//! fn main() {} +//! ``` + +mod extension; + +/// Unrecoverable failure of the [`Jsvm`]. +/// +/// Includes all error types that may occur. +/// +/// [`Jsvm`]: runtime::Jsvm +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { + // #[error("called task failure: {0}")] + // Task(#[from] context::TaskError), +} + +/// Specialized [`Result`] alias for the [`Error`] type. +/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/jsvm/runtime/cache/mod.rs b/crates/jsvm/runtime/cache/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/runtime/cache/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! 
diff --git a/crates/task/context/state.rs b/crates/jsvm/runtime/machine.rs similarity index 100% rename from crates/task/context/state.rs rename to crates/jsvm/runtime/machine.rs diff --git a/crates/jsvm/runtime/mod.rs b/crates/jsvm/runtime/mod.rs new file mode 100644 index 0000000..12a43ef --- /dev/null +++ b/crates/jsvm/runtime/mod.rs @@ -0,0 +1,52 @@ +//! TODO. +//! + +mod machine; +mod permission; + +use std::fmt; +use std::rc::Rc; + +use deno_core::{JsRuntime, RuntimeOptions}; +use tokio::runtime::Runtime as TokioRuntime; + +pub struct Jsvm { + inner: JsRuntime, +} + +impl Jsvm { + /// Returns a new [`Jsvm`]. + pub fn new(tokio_runtime: Rc) -> Self { + JsRuntime::init_platform(None, true); + + let options = RuntimeOptions { + extensions: vec![], + module_loader: None, + extension_transpiler: None, + ..RuntimeOptions::default() + }; + + let inner = JsRuntime::new(options); + + todo!() + } + + // run + // inspect +} + +impl fmt::Debug for Jsvm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Jsvm").finish_non_exhaustive() + } +} + +#[cfg(test)] +mod test { + use crate::Result; + + #[test] + fn build() -> Result<()> { + Ok(()) + } +} diff --git a/crates/jsvm/runtime/permission.rs b/crates/jsvm/runtime/permission.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/runtime/transpile/cache.rs b/crates/jsvm/runtime/transpile/cache.rs new file mode 100644 index 0000000..e5ee924 --- /dev/null +++ b/crates/jsvm/runtime/transpile/cache.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Deserialize, Serialize)] +struct EmitMetadata { + pub source_hash: u64, + pub target_hash: u64, +} + +struct EmitCache {} + +pub struct JsvmTranspile {} diff --git a/crates/jsvm/runtime/transpile/emit.rs b/crates/jsvm/runtime/transpile/emit.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/jsvm/runtime/transpile/mod.rs b/crates/jsvm/runtime/transpile/mod.rs new file mode 100644 index 
0000000..851e48e --- /dev/null +++ b/crates/jsvm/runtime/transpile/mod.rs @@ -0,0 +1,4 @@ +//! TODO. + +mod cache; +mod emit; diff --git a/crates/jsvm/runtime/util/mod.rs b/crates/jsvm/runtime/util/mod.rs new file mode 100644 index 0000000..8e7716c --- /dev/null +++ b/crates/jsvm/runtime/util/mod.rs @@ -0,0 +1,2 @@ +//! TODO. +//! diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index b3f6843..934011d 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "runtime-task" +name = "axiston-runtime-task" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -20,19 +20,15 @@ rustdoc-args = ["--cfg", "docsrs"] [lib] path = "lib.rs" -[features] -serde = ["dep:serde", "ecow/serde"] -trace = ["dep:tracing"] - [dependencies] +tower-path = { workspace = true, features = [] } +tower-task = { workspace = true, features = [] } + tower = { version = "0.5", features = ["load", "util"] } futures = { version = "0.3", features = [] } pin-project-lite = { version = "0.2", features = [] } -thiserror = { version = "1.0", features = [] } -anyhow = { version = "1.0", features = ["backtrace"] } -deunicode = { version = "1.6", features = [] } -ecow = { version = "0.2", features = [] } - -serde = { version = "1.0", optional = true, features = ["derive"] } -tracing = { version = "0.1", optional = true, features = [] } +serde = { version = "1.0", features = ["derive"] } +tracing = { version = "0.1", features = [] } +thiserror = { version = "1.0", features = [] } +ecow = { version = "0.2", features = ["serde"] } diff --git a/crates/task/datatype/action.rs b/crates/task/datatype/action.rs new file mode 100644 index 0000000..2297555 --- /dev/null +++ b/crates/task/datatype/action.rs @@ -0,0 +1,34 @@ +//! Operation [`Request`], [`Response`] and [`Manifest`] types. +//! +//! [`Request`]: ActionRequest +//! [`Response`]: ActionResponse +//! 
[`Manifest`]: ActionManifest + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct ActionRequest {} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ActionResponse {} + +/// Associated action metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct ActionManifest { + pub name: String, +} + +impl ActionManifest { + /// Returns a new [`ActionManifest`]. + /// + /// Used for testing. + #[inline] + pub fn new(name: &str) -> Self { + Self { + name: name.to_owned(), + } + } + + // pub fn index() -> Index {} +} diff --git a/crates/task/datatype/mod.rs b/crates/task/datatype/mod.rs new file mode 100644 index 0000000..c8dc605 --- /dev/null +++ b/crates/task/datatype/mod.rs @@ -0,0 +1,8 @@ +//! TODO. +//! + +pub mod action; +pub mod service; +pub mod trigger; + +// pub struct Graph {} diff --git a/crates/type/manifest/condition.rs b/crates/task/datatype/service.rs similarity index 53% rename from crates/type/manifest/condition.rs rename to crates/task/datatype/service.rs index ad23753..9b9b475 100644 --- a/crates/type/manifest/condition.rs +++ b/crates/task/datatype/service.rs @@ -1,19 +1,20 @@ -//! Associated [`ConditionManifest`] types. -//! - use serde::{Deserialize, Serialize}; -/// Associated trigger metadata. +/// Associated service metadata. #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] -pub struct ConditionManifest {} +pub struct ServiceManifest { + pub name: String, +} -impl ConditionManifest { - /// Returns a new [`ConditionManifest`]. +impl ServiceManifest { + /// Returns a new [`ServiceManifest`]. /// /// Used for testing. 
#[inline] pub fn new(name: &str) -> Self { - Self {} + Self { + name: name.to_owned(), + } } } diff --git a/crates/task/datatype/trigger.rs b/crates/task/datatype/trigger.rs new file mode 100644 index 0000000..eb319a6 --- /dev/null +++ b/crates/task/datatype/trigger.rs @@ -0,0 +1,35 @@ +//! Condition [`Request`], [`Response`] and [`Manifest`] types. +//! +//! [`Request`]: TriggerRequest +//! [`Response`]: TriggerResponse +//! [`Manifest`]: TriggerManifest + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct TriggerRequest {} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TriggerResponse { + pub should_trigger: bool, + pub ignore_retry_ms: u32, +} + +/// Associated trigger metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct TriggerManifest { + pub name: String, +} + +impl TriggerManifest { + /// Returns a new [`TriggerManifest`]. + /// + /// Used for testing. + #[inline] + pub fn new(name: &str) -> Self { + Self { + name: name.to_owned(), + } + } +} diff --git a/crates/task/handler/layer.rs b/crates/task/handler/layer.rs deleted file mode 100644 index d87d49b..0000000 --- a/crates/task/handler/layer.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::marker::PhantomData; - -use tower::{Layer, Service}; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::TaskHandler; - -/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. -pub struct TaskHandlerLayer { - manifest: M, - inner: PhantomData, - request: PhantomData, - response: PhantomData, -} - -impl TaskHandlerLayer { - /// Returns a new [`TaskHandlerLayer`]. 
- #[inline] - pub fn new(manifest: M) -> Self { - Self { - manifest, - inner: PhantomData, - request: PhantomData, - response: PhantomData, - } - } -} - -impl Default for TaskHandlerLayer -where - M: Default, -{ - #[inline] - fn default() -> Self { - Self { - manifest: M::default(), - inner: PhantomData, - request: PhantomData, - response: PhantomData, - } - } -} - -impl Layer for TaskHandlerLayer -where - M: Clone, - T: 'static, - U: 'static, - S: Service + Clone + Send + 'static, - Req: From> + 'static, - S::Response: Into> + 'static, - S::Error: Into + 'static, - S::Future: Send + 'static, -{ - type Service = TaskHandler; - - #[inline] - fn layer(&self, inner: S) -> Self::Service { - TaskHandler::new(self.manifest.clone(), inner) - } -} - -#[cfg(test)] -mod test { - use crate::handler::TaskHandlerLayer; - use crate::Result; - - #[test] - fn layer() -> Result<()> { - let _ = TaskHandlerLayer::<(), u32, u32>::new(()); - Ok(()) - } -} diff --git a/crates/task/handler/native.rs b/crates/task/handler/native.rs deleted file mode 100644 index e293739..0000000 --- a/crates/task/handler/native.rs +++ /dev/null @@ -1,70 +0,0 @@ -//! TODO. -//! - -use std::fmt; -use std::marker::PhantomData; -use std::task::{Context, Poll}; - -use tower::Service; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::future::TaskFuture; - -/// TODO. -#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] -pub struct NativeTask { - request: PhantomData, - response: PhantomData, -} - -impl NativeTask { - /// Returns a new [`NativeTask`]. 
- #[inline] - pub fn new() -> Self { - Self { - request: PhantomData, - response: PhantomData, - } - } -} - -impl Clone for NativeTask { - fn clone(&self) -> Self { - Self { - request: PhantomData, - response: PhantomData, - } - } -} - -impl fmt::Debug for NativeTask { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NativeTask").finish_non_exhaustive() - } -} - -impl Service> for NativeTask { - type Response = TaskResponse; - type Error = TaskError; - type Future = TaskFuture; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - todo!() - } - - #[inline] - fn call(&mut self, req: TaskRequest) -> Self::Future { - todo!() - } -} - -#[cfg(test)] -mod test { - use crate::Result; - - #[test] - fn build() -> Result<()> { - Ok(()) - } -} diff --git a/crates/task/lib.rs b/crates/task/lib.rs index 8a26ecb..6fdd15b 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -3,44 +3,21 @@ #![doc = include_str!("./README.md")] //! ```rust -//! use tower::{service_fn, Service}; -//! use runtime_task::context::{TaskError, TaskRequest, TaskResponse}; -//! use runtime_task::handler::TaskHandler; -//! use runtime_task::routing::{Index, Layers, Router}; -//! -//! async fn handle(request: TaskRequest) -> Result, TaskError> { -//! Ok(TaskResponse::new(request.into_inner())) -//! } -//! -//! let layers = Layers::default(); -//! let mut router = Router::new(layers); -//! -//! let index = Index::new("task01"); -//! let handler = TaskHandler::new((), service_fn(handle)); -//! router.route(index.clone(), handler); -//! -//! let fut = async { -//! let request = TaskRequest::new(index, 0u32); -//! let response = router.call(request).await; -//! }; -//! //! ``` -pub mod context; -pub mod handler; -pub mod routing; +pub mod datatype; +pub mod registry; /// Unrecoverable failure of the [`Router`]. /// /// Includes all error types that may occur. 
/// -/// [`Router`]: routing::Router +/// [`Router`]: registry::Router #[derive(Debug, thiserror::Error)] #[must_use = "errors do nothing unless you use them"] pub enum Error { - #[error("called task failure: {0}")] - Task(#[from] context::TaskError), // Task not found - // Mismatch arguments + // #[error("called task failure: {0}")] + // Task(#[from] context::TaskError), } /// Specialized [`Result`] alias for the [`Error`] type. diff --git a/crates/task/registry/cache.rs b/crates/task/registry/cache.rs new file mode 100644 index 0000000..d90179c --- /dev/null +++ b/crates/task/registry/cache.rs @@ -0,0 +1,6 @@ +//! TODO. +//! + +pub struct RegistryCache { + pub triggers: (), +} diff --git a/crates/task/registry/handler.rs b/crates/task/registry/handler.rs new file mode 100644 index 0000000..272a471 --- /dev/null +++ b/crates/task/registry/handler.rs @@ -0,0 +1,39 @@ +//! TODO. +//! + +use crate::datatype::action::{ActionManifest, ActionRequest, ActionResponse}; +use crate::datatype::trigger::{TriggerManifest, TriggerRequest, TriggerResponse}; +use tower_path::service::WithData; +use tower_task::handler::TaskHandler; + +/// TODO. +pub type TriggerHandler = WithData, TriggerManifest>; + +/// TODO. +pub type ActionHandler = WithData, ActionManifest>; + +#[cfg(test)] +mod test { + use crate::Result; + use tower::{service_fn, ServiceBuilder}; + use tower_path::service::{WithData, WithDataLayer}; + + async fn handle(request: u32) -> Result { + Ok(request) + } + + #[test] + fn service() -> tower_task::Result<()> { + let inner = service_fn(handle); + let _ = WithData::new(inner, 42u32); + Ok(()) + } + + #[test] + fn layer() -> tower_task::Result<()> { + let _ = ServiceBuilder::new() + .layer(WithDataLayer::new(42u32)) + .service(service_fn(handle)); + Ok(()) + } +} diff --git a/crates/task/registry/index.rs b/crates/task/registry/index.rs new file mode 100644 index 0000000..3f6cbe9 --- /dev/null +++ b/crates/task/registry/index.rs @@ -0,0 +1,39 @@ +//! TODO. +//! 
+ +use std::ops::Deref; +use ecow::EcoString; +use tower_path::routing::index::UniqueIndex; + +/// Opaque and unique entity identifier. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct UnderlyingIndex { + inner: EcoString +} + +impl UnderlyingIndex { + /// Returns a new [`UnderlyingIndex`]. + #[inline] + pub fn new (inner: impl AsRef) -> Self { + let inner = EcoString::from(inner.as_ref()); + Self { inner } + } +} + +impl Deref for UnderlyingIndex { + type Target = str; + + #[inline] + fn deref(&self) -> &Self::Target { + self.inner.as_str() + } +} + +/// TODO. +pub type ServiceIndex = UniqueIndex; + +/// TODO. +pub type TriggerIndex = UniqueIndex; + +/// TODO. +pub type ActionIndex = UniqueIndex; diff --git a/crates/task/registry/mod.rs b/crates/task/registry/mod.rs new file mode 100644 index 0000000..8872f98 --- /dev/null +++ b/crates/task/registry/mod.rs @@ -0,0 +1,55 @@ +//! TODO. +//! + +use std::collections::HashMap; +use std::hash::Hash; +use std::sync::{Arc, Mutex}; + +use tower_path::routing::Router; + +use crate::registry::handler::{ActionHandler, TriggerHandler}; +use crate::registry::index::{ActionIndex, ServiceIndex, TriggerIndex}; +use crate::Result; + +mod handler; +mod index; +mod cache; + +/// TODO. +#[derive(Debug, Default, Clone)] +pub struct Registry { + inner: Arc>, +} + +#[derive(Debug, Default)] +struct RegistryInner { + services: HashMap, + triggers: Router, + actions: Router, +} + +impl Registry { + /// Returns an empty [`Registry`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// TODO. + pub fn register_service(&self) -> Result<()> { + Ok(()) + } + + /// TODO. + pub fn register_trigger(&self) -> Result<()> { + Ok(()) + } + + /// TODO. 
+ pub fn register_action(&self) -> Result<()> { + Ok(()) + } +} + +#[cfg(test)] +mod test {} diff --git a/crates/task/routing/builder.rs b/crates/task/routing/builder.rs deleted file mode 100644 index a01e9c8..0000000 --- a/crates/task/routing/builder.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::fmt; -use std::marker::PhantomData; - -use tower::{ServiceBuilder, ServiceExt}; - -use crate::context::{TaskRequest, TaskResponse}; -use crate::handler::TaskHandler; -use crate::routing::Layers; -use crate::Result; - -/// Declarative `tower::`[`Layer`] builder. -/// -/// [`Layer`]: tower::Layer -pub struct LayerBuilder { - layers: Option, - manifest: PhantomData, - request: PhantomData, - response: PhantomData, -} - -impl LayerBuilder { - /// Returns a new [`LayerBuilder`] with default [`Layers`]. - #[inline] - pub fn new(layers: Layers) -> Self { - Self { - layers: Some(layers), - manifest: PhantomData, - request: PhantomData, - response: PhantomData, - } - } - - /// Inserts or replaces default [`Layers`]. - #[inline] - pub fn replace_layers(&mut self, layers: Layers) -> Option { - self.layers.replace(layers) - } - - /// Applies default [`Layers`] to the `handler`. - pub fn apply(&self, handler: TaskHandler) -> TaskHandler - where - T: 'static, - U: 'static, - { - let builder = ServiceBuilder::new(); - handler.map(|svc| builder.service(svc)) - } - - /// Merges default [`Layers`] with provided and applies to the `handler`. - pub fn apply_layers( - &self, - handler: TaskHandler, - layers: Layers, - ) -> TaskHandler - where - T: 'static, - U: 'static, - { - let builder = ServiceBuilder::new(); - handler.map(|svc| builder.service(svc)) - } - - /// Applies specified [`Layer`]s to the [`TaskHandler`]. 
- pub async fn execute( - &self, - handler: TaskHandler, - request: TaskRequest, - ) -> Result> - where - T: 'static, - U: 'static, - { - let handler = self.apply(handler); - let response = handler.oneshot(request).await; - response.map_err(Into::into) - } - - /// Applies specified [`Layer`]s to the [`TaskHandler`]. - pub async fn execute_with_layers( - &self, - handler: TaskHandler, - request: TaskRequest, - layers: Layers, - ) -> Result> - where - T: 'static, - U: 'static, - { - let handler = self.apply_layers(handler, layers); - let response = handler.oneshot(request).await; - response.map_err(Into::into) - } -} - -impl Default for LayerBuilder { - fn default() -> Self { - Self { - layers: None, - manifest: PhantomData, - request: PhantomData, - response: PhantomData, - } - } -} - -impl Clone for LayerBuilder { - fn clone(&self) -> Self { - Self { - layers: self.layers.clone(), - manifest: PhantomData, - request: PhantomData, - response: PhantomData, - } - } -} - -impl fmt::Debug for LayerBuilder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LayerBuilder").finish_non_exhaustive() - } -} diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs deleted file mode 100644 index 7db80b7..0000000 --- a/crates/task/routing/index.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::marker::PhantomData; -use std::ops::Deref; -use std::str::FromStr; - -use deunicode::deunicode_with_tofu; -use ecow::EcoString; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use crate::{Error, Result}; - -/// Opaque and unique [`TaskHandler`] identifier. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[must_use = "ids do nothing unless you use them"] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Index { - inner: EcoString, - marker: PhantomData, -} - -impl Index { - /// Returns a new [`Index`]. 
- #[inline] - pub fn new(inner: &str) -> Self { - Self { - inner: inner.into(), - marker: PhantomData, - } - } - - /// Parses a new [`Index`]. - #[inline] - pub fn parse(index: impl AsRef) -> Result { - index.as_ref().parse::>() - } - - /// Extracts a string slice containing the entire id. - #[inline] - pub fn as_str(&self) -> &str { - self.inner.as_str() - } -} - -impl FromStr for Index { - type Err = Error; - - fn from_str(inner: &str) -> Result { - let tofu = "\u{FFFD}"; - let deunicoded = deunicode_with_tofu(inner, tofu); - if deunicoded.contains(tofu) { - // return Err(Error::InvalidId(deunicoded)); - panic!("invalid id") - } - - Ok(deunicoded.to_lowercase().replace(' ', "-").into()) - } -} - -impl Clone for Index { - #[inline] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl From for Index { - #[inline] - fn from(value: String) -> Self { - Self::new(value.as_str()) - } -} - -impl From<&str> for Index { - #[inline] - fn from(value: &str) -> Self { - Self::new(value) - } -} - -impl Hash for Index { - #[inline] - fn hash(&self, state: &mut H) { - self.inner.hash(state) - } -} - -impl PartialEq for Index { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.inner.eq(&other.inner) - } -} - -impl Eq for Index {} - -impl Deref for Index { - type Target = str; - - #[inline] - fn deref(&self) -> &Self::Target { - self.as_str() - } -} - -impl fmt::Display for Index { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.as_str(), f) - } -} - -impl fmt::Debug for Index { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self.as_str(), f) - } -} - -#[cfg(test)] -mod test { - use crate::routing::Index; - use crate::Result; - - #[test] - pub fn instance() -> Result<()> { - let id = Index::<()>::new("service-entity"); - assert_eq!(id.as_str(), "service-entity"); - Ok(()) - } - - #[test] - pub fn parse() -> Result<()> { - let 
id = "Service Entity".parse::>()?; - assert_eq!(id.as_str(), "service-entity"); - Ok(()) - } -} diff --git a/crates/task/routing/layers.rs b/crates/task/routing/layers.rs deleted file mode 100644 index 92ca295..0000000 --- a/crates/task/routing/layers.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::time::Duration; - -/// Applied `tower::`[`Layer`]s configuration. -/// -/// [`Layer`]: tower::Layer -#[derive(Debug, Default, Clone)] -pub struct Layers { - pub timeout_start_to_close: Duration, - pub timeout_before_start: Duration, - pub timeout_before_close: Duration, - - pub limit_concurrency_task: u32, - pub limit_cpu_consumption: u32, - pub limit_ram_consumption: u32, - - pub retry_initial_interval: Duration, - pub retry_maximum_interval: Duration, - pub retry_backoff_coefficient: u32, - pub retry_maximum_attempts: u32, -} - -impl Layers { - /// Returns a new [`Layers`]. - #[inline] - pub fn new() -> Self { - Self::default() - } -} diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs deleted file mode 100644 index 24e82ff..0000000 --- a/crates/task/routing/mod.rs +++ /dev/null @@ -1,179 +0,0 @@ -//! Service [`Router`] and declarative [`Layers`]. -//! - -use std::collections::HashMap; -use std::fmt; -use std::sync::{Arc, Mutex}; -use std::task::{Context, Poll}; - -use tower::load::Load; -use tower::{Layer, Service, ServiceExt}; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::future::TaskFuture; -use crate::handler::metric::TaskMetric; -use crate::handler::TaskHandler; -use crate::routing::builder::LayerBuilder; -pub use crate::routing::index::Index; -pub use crate::routing::layers::Layers; - -mod builder; -mod index; -mod layers; - -/// Collection of all registered [`TaskHandler`]s. -pub struct Router { - inner: Arc>>, -} - -struct RouterInner { - builder: LayerBuilder, - tasks: HashMap>, -} - -impl Router { - /// Returns an empty [`Router`]. 
- pub fn new(layers: Layers) -> Self { - let inner = Arc::new(Mutex::new(RouterInner { - builder: LayerBuilder::new(layers), - tasks: HashMap::default(), - })); - - Self { inner } - } - - /// Inserts or replaces default [`Layers`]. - #[inline] - pub fn with_layers(&mut self, layers: Layers) { - let mut guard = self.inner.lock().unwrap(); - let _ = guard.builder.replace_layers(layers); - } - - /// Inserts or replaces [`TaskHandler`] at the [`Index`]. - #[inline] - pub fn route(&mut self, index: Index, handler: TaskHandler) { - let mut guard = self.inner.lock().unwrap(); - let _ = guard.tasks.insert(index, handler); - } - - /// Returns a [`TaskHandler`] corresponding to the [`Index`]. - #[inline] - pub fn find(&self, index: &Index) -> Option> - where - M: Clone, - T: 'static, - U: 'static, - { - let guard = self.inner.lock().unwrap(); - let handler = guard.tasks.get(index).cloned(); - handler.map(|handler| guard.builder.apply(handler)) - } - - /// Returns a [`TaskHandler`] with corresponding to the [`Index`]. - #[inline] - pub fn find_layered(&self, index: &Index, layers: Layers) -> Option> - where - M: Clone, - T: 'static, - U: 'static, - { - let guard = self.inner.lock().unwrap(); - let handler = guard.tasks.get(index).cloned(); - handler.map(|handler| guard.builder.apply_layers(handler, layers)) - } - - /// Returns the number of [`TaskHandler`]s in the [`Router`]. - #[inline] - pub fn len(&self) -> usize { - let guard = self.inner.lock().unwrap(); - guard.tasks.len() - } - - /// Returns `true` if the [`Router`] contains no [`TaskHandler`]s. 
- #[inline] - pub fn is_empty(&self) -> bool { - let guard = self.inner.lock().unwrap(); - guard.tasks.is_empty() - } -} - -impl Default for Router { - fn default() -> Self { - let inner = Arc::new(Mutex::new(RouterInner { - builder: LayerBuilder::default(), - tasks: HashMap::default(), - })); - - Self { inner } - } -} - -impl Clone for Router -where - M: Clone, -{ - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { inner } - } -} - -impl fmt::Debug for Router { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Router").finish_non_exhaustive() - } -} - -impl Service> for Router -where - M: Clone + Send + 'static, - T: Send + 'static, - U: 'static, -{ - type Response = TaskResponse; - type Error = TaskError; - type Future = TaskFuture; - - #[inline] - fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - #[inline] - fn call(&mut self, req: TaskRequest) -> Self::Future { - match self.find(req.index()) { - Some(handler) => { - let fut = async move { handler.oneshot(req).await }; - TaskFuture::new(fut) - } - None => { - // TODO: Use a properly formatted error. - let fut = async { Err(TaskError::new(())) }; - TaskFuture::new(fut) - } - } - } -} - -impl Load for Router { - type Metric = TaskMetric; - - #[inline] - fn load(&self) -> Self::Metric { - // TODO: Call .load() of the underlying service. 
- TaskMetric::new() - } -} - -#[cfg(test)] -mod test { - use crate::routing::{Layers, Router}; - use crate::Result; - - #[test] - fn build() -> Result<()> { - let layers = Layers::new(); - let _ = Router::<(), i32, i32>::new(layers); - Ok(()) - } -} diff --git a/crates/type/Cargo.toml b/crates/type/Cargo.toml deleted file mode 100644 index bad7cf2..0000000 --- a/crates/type/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[package] -name = "runtime-type" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -readme = "./README.md" - -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -documentation = { workspace = true } - -[lib] -path = "lib.rs" - -[dependencies] -serde = { version = "1.0", features = ["derive"] } diff --git a/crates/type/datatype/condition.rs b/crates/type/datatype/condition.rs deleted file mode 100644 index c36bb1b..0000000 --- a/crates/type/datatype/condition.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! [`ConditionRequestData`] and [`ConditionResponseData`] types. -//! - -use serde::{Deserialize, Serialize}; - -use crate::datatype::{RequestData, ResponseData}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct ConditionRequestData {} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ConditionResponseData {} diff --git a/crates/type/datatype/mod.rs b/crates/type/datatype/mod.rs deleted file mode 100644 index 3700abe..0000000 --- a/crates/type/datatype/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! [`RequestData`] and [`ResponseData`] types. -//! 
- -use serde::{Deserialize, Serialize}; - -use crate::datatype::condition::{ConditionRequestData, ConditionResponseData}; -use crate::datatype::operation::{OperationRequestData, OperationResponseData}; - -pub mod condition; -pub mod operation; - -#[derive(Debug, Serialize, Deserialize)] -pub enum RequestData { - Condition(ConditionRequestData), - Operation(OperationRequestData), -} - -impl RequestData { - /// Returns a new [`RequestData`]. - #[inline] - pub fn new(data: impl Into) -> Self { - data.into() - } -} - -impl From for RequestData { - #[inline] - fn from(value: ConditionRequestData) -> Self { - Self::Condition(value) - } -} - -impl From for RequestData { - #[inline] - fn from(value: OperationRequestData) -> Self { - Self::Operation(value) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum ResponseData { - Condition(ConditionResponseData), - Operation(OperationResponseData), -} - -impl ResponseData { - /// Returns a new [`ResponseData`]. - #[inline] - pub fn new(data: impl Into) -> Self { - data.into() - } -} - -impl From for ResponseData { - #[inline] - fn from(value: ConditionResponseData) -> Self { - Self::Condition(value) - } -} - -impl From for ResponseData { - #[inline] - fn from(value: OperationResponseData) -> Self { - Self::Operation(value) - } -} diff --git a/crates/type/datatype/operation.rs b/crates/type/datatype/operation.rs deleted file mode 100644 index 3817b40..0000000 --- a/crates/type/datatype/operation.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! [`OperationRequestData`] and [`OperationResponseData`] types. -//! - -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct OperationRequestData {} - -#[derive(Debug, Serialize, Deserialize)] -pub struct OperationResponseData {} diff --git a/crates/type/lib.rs b/crates/type/lib.rs deleted file mode 100644 index 0148b19..0000000 --- a/crates/type/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![forbid(unsafe_code)] -#![doc = include_str!("./README.md")] - -//! 
TODO. - -pub mod datatype; -pub mod manifest; diff --git a/crates/type/manifest/mod.rs b/crates/type/manifest/mod.rs deleted file mode 100644 index 48a7cf7..0000000 --- a/crates/type/manifest/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Associated [`Manifest`] types. -//! - -use serde::{Deserialize, Serialize}; - -use crate::manifest::condition::ConditionManifest; -use crate::manifest::operation::OperationManifest; - -pub mod condition; -pub mod operation; - -/// Associated metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum Manifest { - Condition(ConditionManifest), - Operation(OperationManifest), -} - -impl From for Manifest { - #[inline] - fn from(value: ConditionManifest) -> Self { - Self::Condition(value) - } -} - -impl From for Manifest { - #[inline] - fn from(value: OperationManifest) -> Self { - Self::Operation(value) - } -} diff --git a/crates/type/manifest/operation.rs b/crates/type/manifest/operation.rs deleted file mode 100644 index 62eb0f7..0000000 --- a/crates/type/manifest/operation.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Associated [`OperationManifest`] types. -//! - -use serde::{Deserialize, Serialize}; - -/// Associated action metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct OperationManifest {} - -impl OperationManifest { - /// Returns a new [`OperationManifest`]. - /// - /// Used for testing. 
- #[inline] - pub fn new(name: &str) -> Self { - Self {} - } -} diff --git a/deno.jsonc b/deno.jsonc new file mode 100644 index 0000000..6493337 --- /dev/null +++ b/deno.jsonc @@ -0,0 +1,25 @@ +{ + // https://docs.deno.com/runtime/manual/ + "compilerOptions": { + "strict": true + }, + "workspace": [ + "./modules/assert", + "./modules/runtime", + "./modules/testing" + ], + "lint": { + "include": ["./crates/", "./examples/", "./modules/"], + "exclude": ["./target/", "./debug/"], + "report": "compact" + }, + "test": { + "include": ["./examples/", "./modules/"], + "exclude": ["./target/", "./debug/"] + }, + "fmt": { + "include": ["./**/*.ts", "./**/*.js", "./**/*.md", "./*.json*"], + "exclude": ["./target/", "./debug/"], + "indentWidth": 4 + } +} diff --git a/examples/direct/deps.ts b/examples/direct/deps.ts new file mode 100644 index 0000000..e69de29 diff --git a/examples/direct/main.ts b/examples/direct/main.ts new file mode 100644 index 0000000..e69de29 diff --git a/examples/hello/deps.ts b/examples/hello/deps.ts new file mode 100644 index 0000000..e69de29 diff --git a/examples/hello/main.ts b/examples/hello/main.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/assert/README.md b/modules/assert/README.md new file mode 100644 index 0000000..3b49c9d --- /dev/null +++ b/modules/assert/README.md @@ -0,0 +1,14 @@ +### @axiston/assert + +Lorem ipsum. Lorem ipsum. Lorem ipsum. + +#### Features + +- Lorem ipsum. +- Lorem ipsum. +- Lorem ipsum. 
+ +#### Usage + +```typescript +``` diff --git a/modules/assert/deno.jsonc b/modules/assert/deno.jsonc new file mode 100644 index 0000000..8b49956 --- /dev/null +++ b/modules/assert/deno.jsonc @@ -0,0 +1,13 @@ +{ + "name": "@axiston/assert", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./match": "./match.ts" + }, + "imports": { + "@std/assert": "jsr:@std/assert@^1.0.0", + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } +} diff --git a/modules/assert/match.ts b/modules/assert/match.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/assert/match_test.ts b/modules/assert/match_test.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/assert/mod.ts b/modules/assert/mod.ts new file mode 100644 index 0000000..4e4de52 --- /dev/null +++ b/modules/assert/mod.ts @@ -0,0 +1 @@ +export * from "./match.ts"; diff --git a/modules/runtime/README.md b/modules/runtime/README.md new file mode 100644 index 0000000..e0ebc4a --- /dev/null +++ b/modules/runtime/README.md @@ -0,0 +1,14 @@ +### @axiston/runtime + +Lorem ipsum. Lorem ipsum. Lorem ipsum. + +#### Features + +- Lorem ipsum. +- Lorem ipsum. +- Lorem ipsum. 
+ +#### Usage + +```typescript +``` diff --git a/modules/runtime/deno.jsonc b/modules/runtime/deno.jsonc new file mode 100644 index 0000000..112abce --- /dev/null +++ b/modules/runtime/deno.jsonc @@ -0,0 +1,15 @@ +{ + "name": "@axiston/runtime", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./lifecycle": "./lifecycle.ts", + "./request": "./request.ts", + "./response": "./response.ts" + }, + "imports": { + "@std/assert": "jsr:@std/assert@^1.0.0", + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } +} diff --git a/modules/runtime/mod.ts b/modules/runtime/mod.ts new file mode 100644 index 0000000..ac5e76e --- /dev/null +++ b/modules/runtime/mod.ts @@ -0,0 +1,2 @@ +export * from "./request.ts"; +export * from "./response.ts"; diff --git a/modules/runtime/request.ts b/modules/runtime/request.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/runtime/request_test.ts b/modules/runtime/request_test.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/runtime/response.ts b/modules/runtime/response.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/runtime/response_test.ts b/modules/runtime/response_test.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/testing/README.md b/modules/testing/README.md new file mode 100644 index 0000000..021b0a3 --- /dev/null +++ b/modules/testing/README.md @@ -0,0 +1,14 @@ +### @axiston/testing + +Lorem ipsum. Lorem ipsum. Lorem ipsum. + +#### Features + +- Lorem ipsum. +- Lorem ipsum. +- Lorem ipsum. 
+ +#### Usage + +```typescript +``` diff --git a/modules/testing/deno.jsonc b/modules/testing/deno.jsonc new file mode 100644 index 0000000..a8aae78 --- /dev/null +++ b/modules/testing/deno.jsonc @@ -0,0 +1,12 @@ +{ + "name": "@axiston/testing", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./setup": "./setup.ts" + }, + "imports": { + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } +} diff --git a/modules/testing/mod.ts b/modules/testing/mod.ts new file mode 100644 index 0000000..ad29263 --- /dev/null +++ b/modules/testing/mod.ts @@ -0,0 +1 @@ +export * from "./setup.ts"; diff --git a/modules/testing/setup.ts b/modules/testing/setup.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/testing/setup_test.ts b/modules/testing/setup_test.ts new file mode 100644 index 0000000..e69de29 diff --git a/scripts/tower-path/.gitignore b/scripts/tower-path/.gitignore new file mode 100644 index 0000000..60593be --- /dev/null +++ b/scripts/tower-path/.gitignore @@ -0,0 +1,38 @@ +# OS +Thumbs.db +.DS_Store + +# Editors +.vs/ +.vscode/ +.idea/ +.fleet/ + +# Lang: Rust +debug/ +target/ +Cargo.lock +**/*.rs.bk +*.pdb + +# Output +dist/ +output/ +build/ + +# Binaries +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Environment +env/ +.env +.env* + +# Logs +logs/ +*.log +*.log* diff --git a/scripts/tower-path/Cargo.toml b/scripts/tower-path/Cargo.toml new file mode 100644 index 0000000..af11093 --- /dev/null +++ b/scripts/tower-path/Cargo.toml @@ -0,0 +1,33 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "tower-path" +version = { workspace = true } +edition = { workspace = true } +license = "MIT" +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = 
"lib.rs" + +[features] +serde = ["dep:serde"] +trace = ["dep:tracing"] + +[dependencies] +tower = { version = "0.5", features = ["load", "util"] } +futures = { version = "0.3", features = [] } +pin-project-lite = { version = "0.2", features = [] } + +serde = { version = "1.0", optional = true, features = ["derive"] } +tracing = { version = "0.1", optional = true, features = [] } diff --git a/scripts/tower-path/LICENSE.txt b/scripts/tower-path/LICENSE.txt new file mode 100644 index 0000000..568f639 --- /dev/null +++ b/scripts/tower-path/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Axiston + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/scripts/tower-path/README.md b/scripts/tower-path/README.md new file mode 100644 index 0000000..cce61ec --- /dev/null +++ b/scripts/tower-path/README.md @@ -0,0 +1,26 @@ +### tower/path + +**Also check out other `axiston` projects [here](https://github.com/axiston).** + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. 
+ +### Features + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. + +### Notes + +- The initial version of this project was developed as part of the + [axiston](https://github.com/axiston) project foundation. The source code is + currently hosted in the [axiston/runtime](https://github.com/axiston/runtime) + repository. +- Lorem Ipsum. +- Lorem Ipsum. + +### Examples + +```rust +fn main() {} +``` diff --git a/scripts/tower-path/handler/layer.rs b/scripts/tower-path/handler/layer.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/scripts/tower-path/handler/layer.rs @@ -0,0 +1 @@ + diff --git a/scripts/tower-path/handler/mod.rs b/scripts/tower-path/handler/mod.rs new file mode 100644 index 0000000..ac3435b --- /dev/null +++ b/scripts/tower-path/handler/mod.rs @@ -0,0 +1,4 @@ +//! TODO. +//! + +mod layer; diff --git a/scripts/tower-path/lib.rs b/scripts/tower-path/lib.rs new file mode 100644 index 0000000..4f51220 --- /dev/null +++ b/scripts/tower-path/lib.rs @@ -0,0 +1,18 @@ +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! ### Examples +//! +//! ```rust +//! fn main() {} +//! ``` + +pub mod handler; +pub mod routing; +pub mod service; + +// TODO: ServiceBuilderExt for TaskHandler. + +// TODO: Move tower-task into its own repository. +// .github/dependabot.yaml,.github/workflows, rustfmt.toml diff --git a/scripts/tower-path/routing/container.rs b/scripts/tower-path/routing/container.rs new file mode 100644 index 0000000..78b7c3d --- /dev/null +++ b/scripts/tower-path/routing/container.rs @@ -0,0 +1,55 @@ +use std::collections::HashMap; +use std::hash::Hash; + +/// TODO. +pub trait RouterContainer { + /// Inserts an index-handler pair into the [`RouterContainer`]. + fn route(&mut self, index: I, service: S) -> Option; + + /// Removes an index from the [`RouterContainer`]. 
+ fn forget(&mut self, index: I) -> Option; +} + +impl RouterContainer for () { + #[inline] + fn route(&mut self, _index: I, _service: S) -> Option { + None + } + + #[inline] + fn forget(&mut self, _index: I) -> Option { + None + } +} + +impl RouterContainer for HashMap +where + I: Eq + Hash, + S: Clone, +{ + #[inline] + fn route(&mut self, index: I, service: S) -> Option { + self.insert(index, service) + } + + #[inline] + fn forget(&mut self, index: I) -> Option { + self.remove(&index) + } +} + +impl RouterContainer for Vec +where + I: Into, + S: Clone, +{ + #[inline] + fn route(&mut self, index: I, service: S) -> Option { + todo!() + } + + #[inline] + fn forget(&mut self, index: I) -> Option { + todo!() + } +} diff --git a/scripts/tower-path/routing/index.rs b/scripts/tower-path/routing/index.rs new file mode 100644 index 0000000..10e9b8d --- /dev/null +++ b/scripts/tower-path/routing/index.rs @@ -0,0 +1,31 @@ +//! TODO. +//! + +use std::hash::Hash; + +/// TODO. +pub trait RouterIndex {} + +/// TODO. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct SegmentIndex { + inner: T, +} + +impl SegmentIndex {} + +impl RouterIndex for SegmentIndex where T: Eq + Hash {} + +/// TODO. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct UniqueIndex { + // TODO: P = () + inner: T, +} + +impl UniqueIndex {} + +impl RouterIndex for UniqueIndex where T: Eq + Hash {} + +/// The default type for router indices. +pub type DefaultIx = UniqueIndex; diff --git a/scripts/tower-path/routing/mod.rs b/scripts/tower-path/routing/mod.rs new file mode 100644 index 0000000..075fafa --- /dev/null +++ b/scripts/tower-path/routing/mod.rs @@ -0,0 +1,102 @@ +//! TODO. +//! + +use std::collections::HashMap; +use std::fmt; +use std::marker::PhantomData; +use std::task::{Context, Poll}; + +use tower::Service; + +use crate::routing::container::RouterContainer; +use crate::routing::index::{DefaultIx, RouterIndex}; + +pub mod container; +pub mod index; + +/// TODO. 
+#[derive(Clone)] +#[must_use = "routers do nothing unless you use them"] +pub struct Router> { + index: PhantomData, + service: PhantomData, + inner: Contain, +} + +impl Router { + /// Returns a new [`Router`]. + #[inline] + pub fn new(inner: Contain) -> Self { + Self { + index: PhantomData, + service: PhantomData, + inner, + } + } + + /// Returns the underlying route container. + #[inline] + pub fn into_inner(self) -> Contain { + self.inner + } +} + +impl Router +where + I: RouterIndex, + Contain: RouterContainer, +{ + /// Inserts an index-handler pair into the [`Router`]. + #[inline] + pub fn route(mut self, ix: I, route: S) -> Self { + let _ = self.inner.route(ix, route); + self + } + + /// Removes an index from the [`Router`]. + #[inline] + pub fn forget(mut self, ix: I) -> Self { + let _ = self.inner.forget(ix); + self + } +} + +impl Default for Router +where + Contain: Default, +{ + #[inline] + fn default() -> Self { + + Self::new(Contain::default()) + + } +} + +impl fmt::Debug for Router { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Router").finish_non_exhaustive() + } +} + +impl Service for Router +where + S: Service, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + todo!() + } + + #[inline] + fn call(&mut self, req: Req) -> Self::Future { + todo!() + } +} + +#[cfg(test)] +mod test {} diff --git a/scripts/tower-path/service/layer.rs b/scripts/tower-path/service/layer.rs new file mode 100644 index 0000000..f1010f0 --- /dev/null +++ b/scripts/tower-path/service/layer.rs @@ -0,0 +1,41 @@ +use tower::Layer; + +use crate::service::WithData; + +/// `tower::`[`Layer`] that produces a [`WithData`] services. +#[derive(Debug, Default, Clone)] +pub struct WithDataLayer { + manifest: M, +} + +impl WithDataLayer { + /// Returns a new [`WithDataLayer`]. 
+ #[inline] + pub fn new(manifest: M) -> Self { + Self { manifest } + } +} + +impl Layer for WithDataLayer +where + M: Clone, +{ + type Service = WithData; + + #[inline] + fn layer(&self, inner: S) -> Self::Service { + WithData::new(inner, self.manifest.clone()) + } +} + +#[cfg(test)] +mod test { + use crate::service::WithDataLayer; + use crate::Result; + + #[test] + fn layer() -> Result<()> { + let _ = WithDataLayer::new(42u32); + Ok(()) + } +} diff --git a/scripts/tower-path/service/mod.rs b/scripts/tower-path/service/mod.rs new file mode 100644 index 0000000..aeb6c40 --- /dev/null +++ b/scripts/tower-path/service/mod.rs @@ -0,0 +1,78 @@ +//! [`WithData`] and [`WithDataLayer`]. + +use std::fmt; +use std::ops::{Deref, DerefMut}; +use std::task::{Context, Poll}; + +use tower::Service; + +pub use crate::service::layer::WithDataLayer; +mod layer; + +/// Simple `tower::`[`Service`] for attaching additional metadata. +#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] +pub struct WithData { + inner: S, + data: M, +} + +impl WithData { + /// Returns a new [`WithData`]. + #[inline] + pub fn new(inner: S, data: M) -> Self { + Self { inner, data } + } + + /// Returns the underlying parts. 
+ #[inline] + pub fn into_inner(self) -> (S, M) { + (self.inner, self.data) + } +} + +impl Deref for WithData { + type Target = M; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for WithData { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + +impl fmt::Debug for WithData +where + M: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WithData") + .field("data", &self.data) + .finish_non_exhaustive() + } +} + +impl Service for WithData +where + S: Service, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + #[inline] + fn call(&mut self, req: Req) -> Self::Future { + self.inner.call(req) + } +} + diff --git a/scripts/tower-task/.gitignore b/scripts/tower-task/.gitignore new file mode 100644 index 0000000..60593be --- /dev/null +++ b/scripts/tower-task/.gitignore @@ -0,0 +1,38 @@ +# OS +Thumbs.db +.DS_Store + +# Editors +.vs/ +.vscode/ +.idea/ +.fleet/ + +# Lang: Rust +debug/ +target/ +Cargo.lock +**/*.rs.bk +*.pdb + +# Output +dist/ +output/ +build/ + +# Binaries +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Environment +env/ +.env +.env* + +# Logs +logs/ +*.log +*.log* diff --git a/scripts/tower-task/Cargo.toml b/scripts/tower-task/Cargo.toml new file mode 100644 index 0000000..88896e1 --- /dev/null +++ b/scripts/tower-task/Cargo.toml @@ -0,0 +1,34 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "tower-task" +version = { workspace = true } +edition = { workspace = true } +license = "MIT" +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] 
+path = "lib.rs" + +[features] +serde = ["dep:serde"] +trace = ["dep:tracing"] + +[dependencies] +tower = { version = "0.5", features = ["load", "util"] } +futures = { version = "0.3", features = [] } +pin-project-lite = { version = "0.2", features = [] } + +thiserror = { version = "1.0", features = [] } +serde = { version = "1.0", optional = true, features = ["derive"] } +tracing = { version = "0.1", optional = true, features = [] } diff --git a/scripts/tower-task/LICENSE.txt b/scripts/tower-task/LICENSE.txt new file mode 100644 index 0000000..568f639 --- /dev/null +++ b/scripts/tower-task/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Axiston + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/scripts/tower-task/README.md b/scripts/tower-task/README.md new file mode 100644 index 0000000..1dbf910 --- /dev/null +++ b/scripts/tower-task/README.md @@ -0,0 +1,20 @@ +### tower/task + +**Also check out other `axiston` projects [here](https://github.com/axiston).** + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +### Features + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. + +### Notes + +- The initial version of this crate was developed as part of the + [axiston](https://github.com/axiston) project. +- The source code is currently hosted in the + [axiston/runtime](https://github.com/axiston/runtime) repository. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/scripts/tower-task/compose/builder.rs b/scripts/tower-task/compose/builder.rs new file mode 100644 index 0000000..bb46e6d --- /dev/null +++ b/scripts/tower-task/compose/builder.rs @@ -0,0 +1,34 @@ +use crate::compose::LayersConfig; + +/// [`LayersConfig`] builder. +#[derive(Debug, Default, Clone)] +pub struct LayersBuilder {} + +impl LayersBuilder { + /// Returns a new [`LayersBuilder`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`LayersConfig`]. + #[inline] + pub fn build(self) -> LayersConfig { + LayersConfig {} + } +} + +#[cfg(test)] +mod test { + use crate::compose::{LayersBuilder, LayersConfig}; + + #[test] + fn from_default() { + let _ = LayersConfig::new(); + } + + #[test] + fn from_builder() { + let _ = LayersBuilder::new(); + } +} diff --git a/scripts/tower-task/compose/layers.rs b/scripts/tower-task/compose/layers.rs new file mode 100644 index 0000000..4d139d1 --- /dev/null +++ b/scripts/tower-task/compose/layers.rs @@ -0,0 +1,25 @@ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use crate::compose::LayersBuilder; + +/// [`LayerCompose`] configuration for a single service call. 
+/// +/// [`LayerCompose`]: crate::service::LayerCompose +#[derive(Debug, Default, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct LayersConfig {} + +impl LayersConfig { + /// Returns a new [`LayersConfig`]. + #[inline] + pub fn new() -> Self { + Self::new() + } + + /// Returns a new [`LayersBuilder`]. + #[inline] + pub fn builder() -> LayersBuilder { + LayersBuilder::new() + } +} diff --git a/scripts/tower-task/compose/mod.rs b/scripts/tower-task/compose/mod.rs new file mode 100644 index 0000000..b736776 --- /dev/null +++ b/scripts/tower-task/compose/mod.rs @@ -0,0 +1,13 @@ +//! [`LayerCompose`], [`LayersConfig`] and its [`LayersBuilder`]. + +pub use crate::compose::builder::LayersBuilder; +pub use crate::compose::layers::LayersConfig; + +mod builder; +mod layers; + +/// TODO. +#[derive(Debug)] +pub struct LayerCompose {} + +impl LayerCompose {} diff --git a/crates/task/context/failure.rs b/scripts/tower-task/context/failure.rs similarity index 52% rename from crates/task/context/failure.rs rename to scripts/tower-task/context/failure.rs index 94d28bc..c5b0824 100644 --- a/crates/task/context/failure.rs +++ b/scripts/tower-task/context/failure.rs @@ -1,5 +1,3 @@ -use std::error::Error; - /// Unrecoverable failure duration [`TaskHandler`] execution. /// /// [`TaskHandler`]: crate::handler::TaskHandler @@ -7,27 +5,14 @@ use std::error::Error; #[must_use = "errors do nothing unless you use them"] #[error("failure during `TaskHandler` execution")] pub struct TaskError { + // TODO: Implement From>. + // name: String, // message: String, // explain: String, } -impl TaskError { - /// Returns a new [`TaskError`]. 
- #[inline] - pub fn new(error: T) -> Self { - Self {} - } -} +impl TaskError {} #[cfg(test)] -mod test { - use crate::context::TaskError; - use crate::Result; - - #[test] - fn build() -> Result<()> { - let _ = TaskError::new(()); - Ok(()) - } -} +mod test {} diff --git a/crates/task/context/mod.rs b/scripts/tower-task/context/mod.rs similarity index 100% rename from crates/task/context/mod.rs rename to scripts/tower-task/context/mod.rs diff --git a/crates/task/context/request.rs b/scripts/tower-task/context/request.rs similarity index 70% rename from crates/task/context/request.rs rename to scripts/tower-task/context/request.rs index 406d973..265616e 100644 --- a/crates/task/context/request.rs +++ b/scripts/tower-task/context/request.rs @@ -4,8 +4,6 @@ use std::ops::{Deref, DerefMut}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use crate::routing::Index; - /// Serializable [`TaskHandler`] service request. /// /// [`TaskHandler`]: crate::handler::TaskHandler @@ -13,27 +11,21 @@ use crate::routing::Index; #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[must_use = "requests do nothing unless you serialize them"] pub struct TaskRequest { - index: Index, + // layers: LayersConfig, inner: T, } impl TaskRequest { /// Returns a new [`TaskRequest`]. #[inline] - pub fn new(index: Index, inner: T) -> Self { - Self { index, inner } + pub fn new(inner: T) -> Self { + Self { inner } } /// Returns a new [`TaskRequestBuilder`]. #[inline] - pub fn builder(index: Index, inner: T) -> TaskRequestBuilder { - TaskRequestBuilder::new(index, inner) - } - - /// Returns the reference to the [`Index`]. - #[inline] - pub fn index(&self) -> &Index { - &self.index + pub fn builder(inner: T) -> TaskRequestBuilder { + TaskRequestBuilder::new(inner) } /// Returns the inner data. 
@@ -71,36 +63,30 @@ impl fmt::Debug for TaskRequest { #[derive(Debug, Clone)] #[must_use = "requests do nothing unless you serialize them"] pub struct TaskRequestBuilder { - index: Index, inner: T, } impl TaskRequestBuilder { /// Returns a new [`TaskRequestBuilder`]. #[inline] - pub fn new(index: Index, inner: T) -> Self { - Self { index, inner } + pub fn new(inner: T) -> Self { + Self { inner } } /// Returns a new [`TaskRequest`]. pub fn build(self) -> TaskRequest { - TaskRequest { - index: self.index, - inner: self.inner, - } + TaskRequest { inner: self.inner } } } #[cfg(test)] mod test { use crate::context::TaskRequest; - use crate::routing::Index; use crate::Result; #[test] fn build() -> Result<()> { - let index = Index::new("request-id"); - let _ = TaskRequest::builder(index, 5).build(); + let _ = TaskRequest::builder(5).build(); Ok(()) } } diff --git a/crates/task/context/response.rs b/scripts/tower-task/context/response.rs similarity index 89% rename from crates/task/context/response.rs rename to scripts/tower-task/context/response.rs index e5df213..aa6a511 100644 --- a/crates/task/context/response.rs +++ b/scripts/tower-task/context/response.rs @@ -77,15 +77,3 @@ impl TaskResponseBuilder { TaskResponse { inner: self.inner } } } - -#[cfg(test)] -mod test { - use crate::context::TaskResponse; - use crate::Result; - - #[test] - fn build() -> Result<()> { - let _ = TaskResponse::builder(()).build(); - Ok(()) - } -} diff --git a/scripts/tower-task/context/state.rs b/scripts/tower-task/context/state.rs new file mode 100644 index 0000000..e37be40 --- /dev/null +++ b/scripts/tower-task/context/state.rs @@ -0,0 +1 @@ +// TODO: Implemented State with anymap3. 
diff --git a/crates/task/handler/future.rs b/scripts/tower-task/handler/future.rs similarity index 100% rename from crates/task/handler/future.rs rename to scripts/tower-task/handler/future.rs diff --git a/scripts/tower-task/handler/layer.rs b/scripts/tower-task/handler/layer.rs new file mode 100644 index 0000000..1069899 --- /dev/null +++ b/scripts/tower-task/handler/layer.rs @@ -0,0 +1,55 @@ +use std::marker::PhantomData; + +use tower::{Layer, Service}; + +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::TaskHandler; + +/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. +pub struct TaskHandlerLayer { + inner: PhantomData<(Req, T, U)>, +} + +impl TaskHandlerLayer { + /// Returns a new [`TaskHandlerLayer`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +impl Default for TaskHandlerLayer { + fn default() -> Self { + Self { inner: PhantomData } + } +} + +impl Layer for TaskHandlerLayer +where + T: 'static, + U: 'static, + S: Service + Clone + Send + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Service = TaskHandler; + + #[inline] + fn layer(&self, inner: S) -> Self::Service { + TaskHandler::new(inner) + } +} + +#[cfg(test)] +mod test { + use crate::handler::TaskHandlerLayer; + use crate::Result; + + #[test] + fn layer() -> Result<()> { + let _ = TaskHandlerLayer::::new(); + Ok(()) + } +} diff --git a/crates/task/handler/metric.rs b/scripts/tower-task/handler/metric.rs similarity index 84% rename from crates/task/handler/metric.rs rename to scripts/tower-task/handler/metric.rs index 1d5417b..45ea1f3 100644 --- a/crates/task/handler/metric.rs +++ b/scripts/tower-task/handler/metric.rs @@ -6,6 +6,10 @@ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +/// [`Load`] metric types for [`TaskHandler`]s. 
+/// +/// [`Load`]: tower::load::Load +/// [`TaskHandler`]: crate::handler::TaskHandler #[derive(Debug, Default, Clone, PartialOrd, PartialEq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[must_use = "metrics do nothing unless you serialize them"] diff --git a/crates/task/handler/mod.rs b/scripts/tower-task/handler/mod.rs similarity index 67% rename from crates/task/handler/mod.rs rename to scripts/tower-task/handler/mod.rs index 69bbf1e..6c9f971 100644 --- a/crates/task/handler/mod.rs +++ b/scripts/tower-task/handler/mod.rs @@ -1,4 +1,4 @@ -//! [`TaskHandler`] service, its future and metrics. +//! [`TaskHandler`], [`TaskHandlerLayer`], its future and metrics. use std::fmt; use std::task::{Context, Poll}; @@ -11,12 +11,10 @@ use crate::context::{TaskError, TaskRequest, TaskResponse}; use crate::handler::future::TaskFuture; pub use crate::handler::layer::TaskHandlerLayer; use crate::handler::metric::TaskMetric; -use crate::handler::native::NativeTask; pub mod future; mod layer; pub mod metric; -pub mod native; /// Unified `tower::`[`Service`] for executing [`tasks`]. /// @@ -24,15 +22,14 @@ pub mod native; /// /// [`tasks`]: crate::context #[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] -pub struct TaskHandler { - manifest: M, +pub struct TaskHandler { inner: BoxCloneService, TaskResponse, TaskError>, } -impl TaskHandler { +impl TaskHandler { /// Returns a new [`TaskHandler`]. #[inline] - pub fn new(manifest: M, inner: S) -> Self + pub fn new(inner: S) -> Self where T: 'static, U: 'static, @@ -49,41 +46,18 @@ impl TaskHandler { .service(inner); Self { - manifest, inner: BoxCloneService::new(inner), } } - /// Returns a new [`NativeTask`] wrapped in [`TaskHandler`]. - pub fn native(manifest: M) -> Self - where - T: Send + 'static, - U: Send + 'static, - { - Self::new(manifest, NativeTask::new()) - } - - /// Returns a reference to the manifest data. 
- #[inline] - pub fn manifest_ref(&self) -> &M { - &self.manifest - } - - /// Returns a mutable reference to the manifest data. - #[inline] - pub fn manifest_mut(&mut self) -> &mut M { - &mut self.manifest - } - - /// Maps an `TaskHandler` to `TaskHandler` by applying a function to a contained service. - pub fn map(self, f: F) -> TaskHandler + /// Maps a `TaskHandler` to `TaskHandler` by applying a function to a contained service. + pub fn map(self, f: F) -> TaskHandler< T2, U2> where F: FnOnce( BoxCloneService, TaskResponse, TaskError>, ) -> BoxCloneService, TaskResponse, TaskError>, { TaskHandler { - manifest: self.manifest, inner: f(self.inner), } } @@ -94,25 +68,21 @@ impl TaskHandler { } } -impl Clone for TaskHandler -where - M: Clone, -{ +impl Clone for TaskHandler { + #[inline] fn clone(&self) -> Self { - Self { - manifest: self.manifest.clone(), - inner: self.inner.clone(), - } + let inner = self.inner.clone(); + Self { inner } } } -impl fmt::Debug for TaskHandler { +impl fmt::Debug for TaskHandler { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TaskHandler").finish_non_exhaustive() } } -impl Service> for TaskHandler { +impl Service> for TaskHandler { type Response = TaskResponse; type Error = TaskError; type Future = TaskFuture; @@ -128,7 +98,7 @@ impl Service> for TaskHandler { } } -impl Load for TaskHandler { +impl Load for TaskHandler { type Metric = TaskMetric; #[inline] @@ -151,14 +121,15 @@ mod test { #[test] fn service() -> Result<()> { - let _ = TaskHandler::new((), service_fn(handle)); + let inner = service_fn(handle); + let _ = TaskHandler::new(inner); Ok(()) } #[test] fn layer() -> Result<()> { let _ = ServiceBuilder::new() - .layer(TaskHandlerLayer::new(())) + .layer(TaskHandlerLayer::new()) .service(service_fn(handle)); Ok(()) } diff --git a/scripts/tower-task/lib.rs b/scripts/tower-task/lib.rs new file mode 100644 index 0000000..6ce9107 --- /dev/null +++ b/scripts/tower-task/lib.rs @@ -0,0 +1,26 @@ 
+#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! ### Examples +//! +//! ```rust +//! use tower_task::Result; +//! +//! fn main() -> Result<()> { +//! Ok(()) +//! } +//! ``` + +pub mod context; +pub mod handler; +pub mod compose; + +/// Specialized [`Result`] alias for the [`TaskError`] type. +/// +/// [`TaskError`]: crate::context::TaskError +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; + +// TODO: Move tower-task into its own repository. +// .github/dependabot.yaml,.github/workflows, rustfmt.toml From 720aad74ee0392ae60c64fa8b3a63bd07b0108ea Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Tue, 10 Dec 2024 09:49:59 +0100 Subject: [PATCH 03/11] feat(all): impl 1/n --- .github/dependabot.yaml | 14 +- .github/workflows/build.yaml | 47 +++ .gitignore | 10 +- Cargo.toml | 36 +- crates/cli/Cargo.toml | 29 +- crates/cli/README.md | 11 + crates/cli/build.rs | 20 -- .../runtime/machine.rs => cli/config/mod.rs} | 0 crates/cli/main.rs | 9 +- crates/cli/middleware/mod.rs | 1 + crates/cli/middleware/utility.rs | 1 + .../layer.rs => crates/cli/server/mod.rs | 0 crates/cli/service/instance.rs | 0 crates/core/README.md | 9 - crates/jsvm/Cargo.toml | 20 +- crates/jsvm/README.md | 14 + .../jsvm/ext_deno/ext_canvas/init_canvas.js | 0 crates/jsvm/ext_deno/ext_canvas/mod.rs | 2 - .../jsvm/ext_deno/ext_console/init_console.js | 0 crates/jsvm/ext_deno/ext_console/mod.rs | 2 - .../jsvm/ext_deno/ext_crypto/init_crypto.js | 0 crates/jsvm/ext_deno/ext_crypto/mod.rs | 2 - crates/jsvm/ext_deno/ext_fetch/init_fetch.js | 0 crates/jsvm/ext_deno/ext_fetch/mod.rs | 2 - crates/jsvm/ext_deno/ext_fs/init_fs.js | 0 crates/jsvm/ext_deno/ext_fs/mod.rs | 2 - crates/jsvm/ext_deno/ext_io/init_io.js | 0 crates/jsvm/ext_deno/ext_io/mod.rs | 2 - crates/jsvm/ext_deno/ext_net/init_net.js | 0 crates/jsvm/ext_deno/ext_net/mod.rs | 2 - crates/jsvm/ext_deno/ext_url/init_url.js | 0 crates/jsvm/ext_deno/ext_url/mod.rs | 2 
- crates/jsvm/ext_deno/ext_web/init_web.js | 0 crates/jsvm/ext_deno/ext_web/mod.rs | 2 - .../jsvm/ext_deno/ext_webgpu/init_webgpu.js | 0 crates/jsvm/ext_deno/ext_webgpu/mod.rs | 2 - .../jsvm/ext_deno/ext_webidl/init_webidl.js | 0 crates/jsvm/ext_deno/ext_webidl/mod.rs | 2 - .../ext_deno/ext_websocket/init_websocket.js | 0 crates/jsvm/ext_deno/ext_websocket/mod.rs | 2 - crates/jsvm/ext_deno/mod.rs | 48 --- crates/jsvm/extension/mod.rs | 3 +- crates/jsvm/extension/permission.rs | 19 - crates/jsvm/extension/route/datatype.rs | 1 + crates/jsvm/extension/route/internal.rs | 1 + crates/jsvm/extension/route/mod.rs | 8 +- crates/jsvm/extension/route/ops.rs | 10 +- crates/jsvm/extension/trace/internal.rs | 1 + crates/jsvm/lib.rs | 1 + crates/jsvm/runtime/cert_provider.rs | 23 ++ crates/jsvm/runtime/deno_runtime.rs | 175 ++++++++++ crates/jsvm/runtime/filesystem/compile_fs.rs | 270 ++++++++++++++ crates/jsvm/runtime/filesystem/mod.rs | 9 + crates/jsvm/runtime/filesystem/static_fs.rs | 240 +++++++++++++ crates/jsvm/runtime/filesystem/virtual_fs.rs | 330 ++++++++++++++++++ crates/jsvm/runtime/mod.rs | 29 +- crates/jsvm/runtime/module_loader.rs | 34 ++ crates/jsvm/runtime/permission.rs | 0 crates/jsvm/runtime/permissions.rs | 135 +++++++ crates/jsvm/runtime/transpile/cache.rs | 11 - crates/jsvm/runtime/transpile/disk_cache.rs | 134 +++++++ crates/jsvm/runtime/transpile/emit.rs | 0 crates/jsvm/runtime/transpile/emit_cache.rs | 104 ++++++ crates/jsvm/runtime/transpile/mod.rs | 15 +- crates/{core => schema}/Cargo.toml | 19 +- crates/schema/README.md | 20 ++ crates/schema/build.rs | 27 ++ crates/schema/lib.rs | 23 ++ .../{cli => schema}/protobuf/instance.proto | 10 +- .../{cli => schema}/protobuf/registry.proto | 0 crates/server/Cargo.toml | 40 +++ crates/server/README.md | 20 ++ crates/{cli => server}/handler/instance.rs | 13 +- crates/{cli => server}/handler/mod.rs | 0 crates/{cli => server}/handler/registry.rs | 10 +- crates/{core => server}/lib.rs | 3 + 
crates/server/middleware/mod.rs | 1 + .../registry => server/service}/cache.rs | 0 crates/{cli => server}/service/config.rs | 0 crates/server/service/instance.rs | 12 + crates/{cli => server}/service/mod.rs | 0 crates/server/service/registry.rs | 1 + crates/task/Cargo.toml | 21 +- crates/task/README.md | 11 + crates/task/context/failure.rs | 60 ++++ crates/task/context/mod.rs | 29 ++ crates/task/context/request.rs | 163 +++++++++ .../task}/context/response.rs | 74 ++-- crates/task/datatype/action.rs | 34 -- crates/task/datatype/mod.rs | 8 - crates/task/datatype/trigger.rs | 35 -- .../task}/handler/future.rs | 13 +- .../task}/handler/metric.rs | 18 +- .../tower-task => crates/task}/handler/mod.rs | 78 +++-- crates/task/lib.rs | 19 +- crates/task/registry/action.rs | 76 ++++ crates/task/registry/custom_serde.rs | 62 ++++ crates/task/registry/handler.rs | 39 --- crates/task/registry/mod.rs | 47 ++- crates/task/{datatype => registry}/service.rs | 12 +- crates/task/registry/trigger.rs | 88 +++++ crates/task/{registry => routing}/index.rs | 22 +- crates/task/routing/layer_compose.rs | 77 ++++ crates/task/routing/mod.rs | 79 +++++ .../{cli/service => task/routing}/registry.rs | 0 crates/task/routing/route_index.rs | 47 +++ scripts/tower-path/.gitignore | 38 -- scripts/tower-path/Cargo.toml | 33 -- scripts/tower-path/LICENSE.txt | 21 -- scripts/tower-path/README.md | 26 -- scripts/tower-path/handler/mod.rs | 4 - scripts/tower-path/lib.rs | 18 - scripts/tower-path/routing/container.rs | 55 --- scripts/tower-path/routing/index.rs | 31 -- scripts/tower-path/routing/mod.rs | 102 ------ scripts/tower-path/service/layer.rs | 41 --- scripts/tower-path/service/mod.rs | 78 ----- scripts/tower-task/.gitignore | 38 -- scripts/tower-task/Cargo.toml | 34 -- scripts/tower-task/LICENSE.txt | 21 -- scripts/tower-task/README.md | 20 -- scripts/tower-task/compose/builder.rs | 34 -- scripts/tower-task/compose/layers.rs | 25 -- scripts/tower-task/compose/mod.rs | 13 - 
scripts/tower-task/context/failure.rs | 18 - scripts/tower-task/context/mod.rs | 20 -- scripts/tower-task/context/request.rs | 92 ----- scripts/tower-task/context/state.rs | 1 - scripts/tower-task/handler/layer.rs | 55 --- scripts/tower-task/lib.rs | 26 -- 130 files changed, 2682 insertions(+), 1319 deletions(-) create mode 100644 .github/workflows/build.yaml delete mode 100644 crates/cli/build.rs rename crates/{jsvm/runtime/machine.rs => cli/config/mod.rs} (100%) rename scripts/tower-path/handler/layer.rs => crates/cli/server/mod.rs (100%) delete mode 100644 crates/cli/service/instance.rs delete mode 100644 crates/core/README.md delete mode 100644 crates/jsvm/ext_deno/ext_canvas/init_canvas.js delete mode 100644 crates/jsvm/ext_deno/ext_canvas/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_console/init_console.js delete mode 100644 crates/jsvm/ext_deno/ext_console/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_crypto/init_crypto.js delete mode 100644 crates/jsvm/ext_deno/ext_crypto/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_fetch/init_fetch.js delete mode 100644 crates/jsvm/ext_deno/ext_fetch/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_fs/init_fs.js delete mode 100644 crates/jsvm/ext_deno/ext_fs/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_io/init_io.js delete mode 100644 crates/jsvm/ext_deno/ext_io/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_net/init_net.js delete mode 100644 crates/jsvm/ext_deno/ext_net/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_url/init_url.js delete mode 100644 crates/jsvm/ext_deno/ext_url/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_web/init_web.js delete mode 100644 crates/jsvm/ext_deno/ext_web/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js delete mode 100644 crates/jsvm/ext_deno/ext_webgpu/mod.rs delete mode 100644 crates/jsvm/ext_deno/ext_webidl/init_webidl.js delete mode 100644 crates/jsvm/ext_deno/ext_webidl/mod.rs delete mode 100644 
crates/jsvm/ext_deno/ext_websocket/init_websocket.js delete mode 100644 crates/jsvm/ext_deno/ext_websocket/mod.rs delete mode 100644 crates/jsvm/ext_deno/mod.rs delete mode 100644 crates/jsvm/extension/permission.rs create mode 100644 crates/jsvm/runtime/cert_provider.rs create mode 100644 crates/jsvm/runtime/deno_runtime.rs create mode 100644 crates/jsvm/runtime/filesystem/compile_fs.rs create mode 100644 crates/jsvm/runtime/filesystem/mod.rs create mode 100644 crates/jsvm/runtime/filesystem/static_fs.rs create mode 100644 crates/jsvm/runtime/filesystem/virtual_fs.rs create mode 100644 crates/jsvm/runtime/module_loader.rs delete mode 100644 crates/jsvm/runtime/permission.rs create mode 100644 crates/jsvm/runtime/permissions.rs delete mode 100644 crates/jsvm/runtime/transpile/cache.rs create mode 100644 crates/jsvm/runtime/transpile/disk_cache.rs delete mode 100644 crates/jsvm/runtime/transpile/emit.rs create mode 100644 crates/jsvm/runtime/transpile/emit_cache.rs rename crates/{core => schema}/Cargo.toml (53%) create mode 100644 crates/schema/README.md create mode 100644 crates/schema/build.rs create mode 100644 crates/schema/lib.rs rename crates/{cli => schema}/protobuf/instance.proto (87%) rename crates/{cli => schema}/protobuf/registry.proto (100%) create mode 100644 crates/server/Cargo.toml create mode 100644 crates/server/README.md rename crates/{cli => server}/handler/instance.rs (74%) rename crates/{cli => server}/handler/mod.rs (100%) rename crates/{cli => server}/handler/registry.rs (74%) rename crates/{core => server}/lib.rs (74%) create mode 100644 crates/server/middleware/mod.rs rename crates/{task/registry => server/service}/cache.rs (100%) rename crates/{cli => server}/service/config.rs (100%) create mode 100644 crates/server/service/instance.rs rename crates/{cli => server}/service/mod.rs (100%) create mode 100644 crates/server/service/registry.rs create mode 100644 crates/task/context/failure.rs create mode 100644 crates/task/context/mod.rs create 
mode 100644 crates/task/context/request.rs rename {scripts/tower-task => crates/task}/context/response.rs (53%) delete mode 100644 crates/task/datatype/action.rs delete mode 100644 crates/task/datatype/mod.rs delete mode 100644 crates/task/datatype/trigger.rs rename {scripts/tower-task => crates/task}/handler/future.rs (77%) rename {scripts/tower-task => crates/task}/handler/metric.rs (55%) rename {scripts/tower-task => crates/task}/handler/mod.rs (64%) create mode 100644 crates/task/registry/action.rs create mode 100644 crates/task/registry/custom_serde.rs delete mode 100644 crates/task/registry/handler.rs rename crates/task/{datatype => registry}/service.rs (66%) create mode 100644 crates/task/registry/trigger.rs rename crates/task/{registry => routing}/index.rs (50%) create mode 100644 crates/task/routing/layer_compose.rs create mode 100644 crates/task/routing/mod.rs rename crates/{cli/service => task/routing}/registry.rs (100%) create mode 100644 crates/task/routing/route_index.rs delete mode 100644 scripts/tower-path/.gitignore delete mode 100644 scripts/tower-path/Cargo.toml delete mode 100644 scripts/tower-path/LICENSE.txt delete mode 100644 scripts/tower-path/README.md delete mode 100644 scripts/tower-path/handler/mod.rs delete mode 100644 scripts/tower-path/lib.rs delete mode 100644 scripts/tower-path/routing/container.rs delete mode 100644 scripts/tower-path/routing/index.rs delete mode 100644 scripts/tower-path/routing/mod.rs delete mode 100644 scripts/tower-path/service/layer.rs delete mode 100644 scripts/tower-path/service/mod.rs delete mode 100644 scripts/tower-task/.gitignore delete mode 100644 scripts/tower-task/Cargo.toml delete mode 100644 scripts/tower-task/LICENSE.txt delete mode 100644 scripts/tower-task/README.md delete mode 100644 scripts/tower-task/compose/builder.rs delete mode 100644 scripts/tower-task/compose/layers.rs delete mode 100644 scripts/tower-task/compose/mod.rs delete mode 100644 scripts/tower-task/context/failure.rs delete mode 
100644 scripts/tower-task/context/mod.rs delete mode 100644 scripts/tower-task/context/request.rs delete mode 100644 scripts/tower-task/context/state.rs delete mode 100644 scripts/tower-task/handler/layer.rs delete mode 100644 scripts/tower-task/lib.rs diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index b1e5606..73d670d 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -1,10 +1,10 @@ version: 2 updates: - - package-ecosystem: "cargo" - directory: "/" - schedule: - interval: "weekly" - timezone: "Europe/Warsaw" - day: "friday" - time: "18:00" + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + timezone: "Europe/Warsaw" + day: "friday" + time: "18:00" diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 0000000..8cf358c --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,47 @@ +name: ci & cd + +on: + push: + branches: + - "main" # Trigger on main branch. + tags: + - "v*.*.*" # Trigger on semantic version tags. + pull_request: # Validation only (without pushing). 
+ +jobs: + build: + runs-on: ubuntu-22.04 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + + - name: Run Cargo:fmt + run: cargo +nightly fmt --all -- --check + + - name: Run Cargo:clippy + run: cargo clippy --all-features -- -D warnings + + - name: Run Cargo:test + run: cargo test --verbose --all-features + + publish: + runs-on: ubuntu-22.04 + if: github.event_name == 'push' + steps: + - name: Check out + uses: actions/checkout@v3 + + - name: Set up Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + + - name: Publish + run: cargo publish --token ${CRATES_TOKEN} + env: + CRATES_TOKEN: ${{ secrets.CRATES_TOKEN }} diff --git a/.gitignore b/.gitignore index 706bbd8..d0768bf 100644 --- a/.gitignore +++ b/.gitignore @@ -21,13 +21,6 @@ dist/ output/ build/ -# Binaries -*.exe -*.exe~ -*.dll -*.so -*.dylib - # Environment env/ .env @@ -37,3 +30,6 @@ env/ logs/ *.log *.log* + +# Generated +/crates/schema/generated/ diff --git a/Cargo.toml b/Cargo.toml index fe72885..ba73a79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,11 +4,10 @@ resolver = "2" members = [ "./crates/cli", - "./crates/core", "./crates/jsvm", + "./crates/schema", + "./crates/server", "./crates/task", - "./scripts/tower-path", - "./scripts/tower-task", ] [workspace.package] @@ -23,8 +22,29 @@ homepage = "https://github.com/axiston/runtime" documentation = "https://docs.rs/axiston" [workspace.dependencies] -axiston-runtime-core = { path = "./crates/core", version = "0.1.0" } -axiston-runtime-jsvm = { path = "./crates/jsvm", version = "0.1.0" } -axiston-runtime-task = { path = "./crates/task", version = "0.1.0" } -tower-path = { path = "./scripts/tower-path", version = "0.1.0" } -tower-task = { path = "./scripts/tower-task", version = "0.1.0" } +axiston-rt-jsvm = { path = "./crates/jsvm", version = "0.1.0" } +axiston-rt-schema = { path = "./crates/schema", version = "0.1.0", 
features = ["server"] } +axiston-rt-server = { path = "./crates/server", version = "0.1.0" } +axiston-rt-task = { path = "./crates/task", version = "0.1.0" } + +tokio = { version = "1.36", features = ["macros", "rt-multi-thread"] } +tokio-stream = { version = "0.1", features = [] } +pin-project-lite = { version = "0.2", features = [] } +futures = { version = "0.3", features = [] } +thiserror = { version = "2.0", features = [] } +anyhow = { version = "1.0", features = ["backtrace"] } + +tracing = { version = "0.1", features = [] } +derive_more = { version = "1.0", features = ["full"] } +serde = { version = "1.0", features = ["derive"] } +ecow = { version = "0.2", features = ["serde"] } + +tonic = { version = "0.12", features = [] } +prost = { version = "0.13", features = [] } +tonic-types = { version = "0.12", features = [] } +prost-types = { version = "0.13", features = [] } +tonic-build = { version = "0.12", features = [] } +prost-build = { version = "0.13", features = [] } + +tower = { version = "0.5", features = ["full"] } +tower-http = { version = "0.6", features = ["full"] } diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 8e68b84..d1fd2be 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "axiston-runtime-cli" +name = "axiston-rt-cli" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -18,28 +18,19 @@ name = "axiston" path = "main.rs" [dependencies] -axiston-runtime-core = { workspace = true } +axiston-rt-server = { workspace = true } clap = { version = "4.5", features = ["derive"] } -tokio = { version = "1.36", features = ["macros", "rt-multi-thread"] } -tokio-stream = { version = "0.1", features = [] } -futures = { version = "0.3", features = [] } -anyhow = { version = "1.0", features = ["backtrace"] } +tokio = { workspace = true } +tokio-stream = { workspace = true } +futures = { workspace = 
true } +anyhow = { workspace = true } -tracing = { version = "0.1", features = [] } +tracing = { workspace = true } tracing-subscriber = { version = "0.3", features = ["env-filter", "time"] } -tracing-opentelemetry = { version = "0.26.0", features = [] } -opentelemetry = { version = "0.25.0", features = [] } - -tonic = { version = "0.12", features = [] } -prost = { version = "0.13", features = [] } -tonic-types = { version = "0.12", features = [] } -prost-types = { version = "0.13", features = [] } +tracing-opentelemetry = { version = "0.26", features = [] } +opentelemetry = { version = "0.25", features = [] } +tonic = { workspace = true } tower = { version = "0.4", features = ["full"] } tower-http = { version = "0.5", features = ["full"] } - -[build-dependencies] -anyhow = { version = "1.0", features = ["backtrace"] } -tonic-build = { version = "0.12", features = [] } -prost-build = { version = "0.13", features = [] } diff --git a/crates/cli/README.md b/crates/cli/README.md index 444657f..fac5fbb 100644 --- a/crates/cli/README.md +++ b/crates/cli/README.md @@ -1,5 +1,16 @@ ### runtime/cli +[![Build Status][action-badge]][action-url] +[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-cli.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-cli +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-cli?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-cli + Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. 
#### Notes diff --git a/crates/cli/build.rs b/crates/cli/build.rs deleted file mode 100644 index 93d6e34..0000000 --- a/crates/cli/build.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![forbid(unsafe_code)] - -use std::path::PathBuf; - -fn main() -> anyhow::Result<()> { - let builder = tonic_build::configure() - .build_transport(true) - .build_server(true) - .build_client(false); - - let dir = PathBuf::from("./protobuf/"); - let instance = dir.join("./instance.proto"); - let registry = dir.join("./registry.proto"); - - let protos = [instance.as_path(), registry.as_path()]; - let includes = [dir.as_path()]; - builder.compile(&protos, &includes)?; - - Ok(()) -} diff --git a/crates/jsvm/runtime/machine.rs b/crates/cli/config/mod.rs similarity index 100% rename from crates/jsvm/runtime/machine.rs rename to crates/cli/config/mod.rs diff --git a/crates/cli/main.rs b/crates/cli/main.rs index 7128be0..f2dd0de 100644 --- a/crates/cli/main.rs +++ b/crates/cli/main.rs @@ -2,15 +2,14 @@ use std::net::{Ipv4Addr, SocketAddr}; +use axiston_rt_server::handler::{InstanceService, RegistryService}; +use axiston_rt_server::service::{AppConfig, AppState}; use clap::Parser; use tonic::transport::Server; -use crate::handler::{InstanceService, RegistryService}; -use crate::service::{AppConfig, AppState}; - -mod handler; +mod config; mod middleware; -mod service; +mod server; /// Command-line arguments. #[derive(Debug, Parser)] diff --git a/crates/cli/middleware/mod.rs b/crates/cli/middleware/mod.rs index a05b5b4..c665ba1 100644 --- a/crates/cli/middleware/mod.rs +++ b/crates/cli/middleware/mod.rs @@ -2,6 +2,7 @@ //! 
use tower::ServiceBuilder; + pub use crate::middleware::observability::initialize_tracing; mod observability; mod utility; diff --git a/crates/cli/middleware/utility.rs b/crates/cli/middleware/utility.rs index e69de29..8b13789 100644 --- a/crates/cli/middleware/utility.rs +++ b/crates/cli/middleware/utility.rs @@ -0,0 +1 @@ + diff --git a/scripts/tower-path/handler/layer.rs b/crates/cli/server/mod.rs similarity index 100% rename from scripts/tower-path/handler/layer.rs rename to crates/cli/server/mod.rs diff --git a/crates/cli/service/instance.rs b/crates/cli/service/instance.rs deleted file mode 100644 index e69de29..0000000 diff --git a/crates/core/README.md b/crates/core/README.md deleted file mode 100644 index a99bbd1..0000000 --- a/crates/core/README.md +++ /dev/null @@ -1,9 +0,0 @@ -### runtime/core - -Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - -#### Notes - -- Lorem Ipsum. -- Lorem Ipsum. -- Lorem Ipsum. diff --git a/crates/jsvm/Cargo.toml b/crates/jsvm/Cargo.toml index 0fcbbe9..e417db5 100644 --- a/crates/jsvm/Cargo.toml +++ b/crates/jsvm/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "axiston-runtime-jsvm" +name = "axiston-rt-jsvm" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -21,16 +21,21 @@ rustdoc-args = ["--cfg", "docsrs"] path = "lib.rs" [dependencies] -axiston-runtime-task = { workspace = true } +axiston-rt-task = { workspace = true } tokio = { version = "1.36", features = [] } -serde = { version = "1.0", features = ["derive"] } -thiserror = { version = "1.0", features = [] } tracing = { version = "0.1", features = [] } +async-trait = { version = "0.1", features = [] } +ctor = { version = "0.2", features = [] } + +thiserror = { version = "1.0", features = [] } +serde = { version = "1.0", features = ["derive"] } +rand = { version = "0.8", features = [] } -deno_core = { version = "0.308.0", features = [] } -deno_ast = { version = "0.42.0", features 
= [] } +deno_core = { version = "0.307.0", features = [] } +deno_ast = { version = "0.42.2", features = [] } deno_permissions = { version = "0.28.0", features = [] } +deno_cache_dir = { version = "0.10.3", features = [] } deno_console = { version = "0.168.0", features = [] } deno_crypto = { version = "0.182.0", features = [] } @@ -38,13 +43,12 @@ deno_webidl = { version = "0.168.0", features = [] } deno_url = { version = "0.168.0", features = [] } deno_fs = { version = "0.78.0", features = ["sync_fs"] } -deno_http = { version = "0.166.0", features = [] } deno_io = { version = "0.78.0", features = [] } - deno_fetch = { version = "0.192.0", features = [] } deno_net = { version = "0.160.0", features = [] } deno_web = { version = "0.199.0", features = [] } +deno_http = { version = "0.166.0", features = [] } deno_tls = { version = "0.155.0", features = [] } deno_websocket = { version = "0.173.0", features = [] } deno_webstorage = { version = "0.163.0", features = [] } diff --git a/crates/jsvm/README.md b/crates/jsvm/README.md index a621b1e..6c867e7 100644 --- a/crates/jsvm/README.md +++ b/crates/jsvm/README.md @@ -1,7 +1,21 @@ ### runtime/jsvm +[![Build Status][action-badge]][action-url] +[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-jsvm.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-jsvm +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-jsvm?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-jsvm + Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. +Implementation is based on supabase/edge-runtime and deno/deno crates. Mention +rustyscript. + #### Notes - Lorem Ipsum. 
diff --git a/crates/jsvm/ext_deno/ext_canvas/init_canvas.js b/crates/jsvm/ext_deno/ext_canvas/init_canvas.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_canvas/mod.rs b/crates/jsvm/ext_deno/ext_canvas/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_canvas/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_console/init_console.js b/crates/jsvm/ext_deno/ext_console/init_console.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_console/mod.rs b/crates/jsvm/ext_deno/ext_console/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_console/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_crypto/init_crypto.js b/crates/jsvm/ext_deno/ext_crypto/init_crypto.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_crypto/mod.rs b/crates/jsvm/ext_deno/ext_crypto/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_crypto/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_fetch/init_fetch.js b/crates/jsvm/ext_deno/ext_fetch/init_fetch.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_fetch/mod.rs b/crates/jsvm/ext_deno/ext_fetch/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_fetch/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_fs/init_fs.js b/crates/jsvm/ext_deno/ext_fs/init_fs.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_fs/mod.rs b/crates/jsvm/ext_deno/ext_fs/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_fs/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! 
diff --git a/crates/jsvm/ext_deno/ext_io/init_io.js b/crates/jsvm/ext_deno/ext_io/init_io.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_io/mod.rs b/crates/jsvm/ext_deno/ext_io/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_io/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_net/init_net.js b/crates/jsvm/ext_deno/ext_net/init_net.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_net/mod.rs b/crates/jsvm/ext_deno/ext_net/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_net/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_url/init_url.js b/crates/jsvm/ext_deno/ext_url/init_url.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_url/mod.rs b/crates/jsvm/ext_deno/ext_url/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_url/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_web/init_web.js b/crates/jsvm/ext_deno/ext_web/init_web.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_web/mod.rs b/crates/jsvm/ext_deno/ext_web/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_web/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js b/crates/jsvm/ext_deno/ext_webgpu/init_webgpu.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_webgpu/mod.rs b/crates/jsvm/ext_deno/ext_webgpu/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_webgpu/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! 
diff --git a/crates/jsvm/ext_deno/ext_webidl/init_webidl.js b/crates/jsvm/ext_deno/ext_webidl/init_webidl.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_webidl/mod.rs b/crates/jsvm/ext_deno/ext_webidl/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_webidl/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/ext_websocket/init_websocket.js b/crates/jsvm/ext_deno/ext_websocket/init_websocket.js deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/ext_deno/ext_websocket/mod.rs b/crates/jsvm/ext_deno/ext_websocket/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/ext_deno/ext_websocket/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/jsvm/ext_deno/mod.rs b/crates/jsvm/ext_deno/mod.rs deleted file mode 100644 index 9248393..0000000 --- a/crates/jsvm/ext_deno/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! `deno_core::`[`extension`]s bundled with `Deno`. -//! 
- -mod ext_canvas; -mod ext_console; -mod ext_crypto; -mod ext_fetch; -mod ext_websocket; -mod ext_fs; -mod ext_url; -mod ext_webgpu; -mod ext_net; -mod ext_io; -mod ext_web; -mod ext_webidl; -mod permission; - - -// extension!( -// axiston_permission, -// options = { allow_net_access: bool, filter_net_access: Vec }, -// state = |state, options| { -// state.put::(MyPermission::new( -// options.allow_net_access, options.filter_net_access -// ) ); -// } -// ); -// -// extension!( -// axiston_init_fetch, -// deps = [rustyscript], -// esm_entry_point = "ext:ext_boot/init_fetch.js", -// esm = [ dir "ext_boot", "init_fetch.js" ], -// ); -// -// extension!( -// axiston_init_net, -// deps = [rustyscript], -// esm_entry_point = "ext:ext_boot/init_net.js", -// esm = [ dir "ext_boot", "init_net.js" ], -// ); -// -// extension!( -// axiston_init_web, -// deps = [rustyscript], -// esm_entry_point = "ext:ext_boot/init_web.js", -// esm = [ dir "ext_boot", "init_web.js" ], -// ); diff --git a/crates/jsvm/extension/mod.rs b/crates/jsvm/extension/mod.rs index 9defb80..9633c9c 100644 --- a/crates/jsvm/extension/mod.rs +++ b/crates/jsvm/extension/mod.rs @@ -1,9 +1,8 @@ //! Runtime `deno_core::`[`extension`]s. //! -pub use crate::extension::trace::axis_tracing; pub use crate::extension::route::axis_routing; +pub use crate::extension::trace::axis_tracing; mod route; mod trace; -mod permission; diff --git a/crates/jsvm/extension/permission.rs b/crates/jsvm/extension/permission.rs deleted file mode 100644 index fbef69c..0000000 --- a/crates/jsvm/extension/permission.rs +++ /dev/null @@ -1,19 +0,0 @@ -use deno_permissions::NetDescriptor; - -/// TODO. -#[derive(Debug, Default, Clone)] -pub struct MyPermission { - allow_net: bool, - filter_net: Vec, -} - -impl MyPermission { - /// Returns a new [`MyPermission`]. 
- #[inline] - pub fn new(allow_net: bool, filter_net: Vec) -> Self { - Self { - allow_net, - filter_net, - } - } -} diff --git a/crates/jsvm/extension/route/datatype.rs b/crates/jsvm/extension/route/datatype.rs index e69de29..8b13789 100644 --- a/crates/jsvm/extension/route/datatype.rs +++ b/crates/jsvm/extension/route/datatype.rs @@ -0,0 +1 @@ + diff --git a/crates/jsvm/extension/route/internal.rs b/crates/jsvm/extension/route/internal.rs index e69de29..8b13789 100644 --- a/crates/jsvm/extension/route/internal.rs +++ b/crates/jsvm/extension/route/internal.rs @@ -0,0 +1 @@ + diff --git a/crates/jsvm/extension/route/mod.rs b/crates/jsvm/extension/route/mod.rs index fe3dd42..bf6bbc8 100644 --- a/crates/jsvm/extension/route/mod.rs +++ b/crates/jsvm/extension/route/mod.rs @@ -7,9 +7,8 @@ mod internal; mod ops; use deno_core::extension; -use crate::extension::route::ops::{ - op_register_service, op_register_trigger, op_register_action -}; + +use crate::extension::route::ops::{op_register_action, op_register_service, op_register_trigger}; extension!( axis_routing, @@ -23,8 +22,7 @@ extension!( /// Includes all error types that may occur. #[derive(Debug, thiserror::Error)] #[must_use = "errors do nothing unless you use them"] -pub enum Error { -} +pub enum Error {} /// Specialized [`Result`] alias for [`Error`]. 
/// diff --git a/crates/jsvm/extension/route/ops.rs b/crates/jsvm/extension/route/ops.rs index b40f870..225b3d9 100644 --- a/crates/jsvm/extension/route/ops.rs +++ b/crates/jsvm/extension/route/ops.rs @@ -1,20 +1,18 @@ use deno_core::op2; + use crate::extension::route::Result; #[op2(fast)] -pub fn op_register_service( -) -> Result<()> { +pub fn op_register_service() -> Result<()> { Ok(()) } #[op2(fast)] -pub fn op_register_trigger( -) -> Result<()> { +pub fn op_register_trigger() -> Result<()> { Ok(()) } #[op2(fast)] -pub fn op_register_action( -) -> Result<()> { +pub fn op_register_action() -> Result<()> { Ok(()) } diff --git a/crates/jsvm/extension/trace/internal.rs b/crates/jsvm/extension/trace/internal.rs index f807193..ef9c6fc 100644 --- a/crates/jsvm/extension/trace/internal.rs +++ b/crates/jsvm/extension/trace/internal.rs @@ -1,4 +1,5 @@ use tracing::{debug, error, info, trace, warn, Level}; + use crate::extension::trace::datatype::TracingOptions; use crate::extension::trace::Result; diff --git a/crates/jsvm/lib.rs b/crates/jsvm/lib.rs index c514f13..07548ab 100644 --- a/crates/jsvm/lib.rs +++ b/crates/jsvm/lib.rs @@ -9,6 +9,7 @@ //! ``` mod extension; +mod runtime; /// Unrecoverable failure of the [`Jsvm`]. /// diff --git a/crates/jsvm/runtime/cert_provider.rs b/crates/jsvm/runtime/cert_provider.rs new file mode 100644 index 0000000..c152e40 --- /dev/null +++ b/crates/jsvm/runtime/cert_provider.rs @@ -0,0 +1,23 @@ +use deno_core::error::AnyError; +use deno_tls::rustls::RootCertStore; +use deno_tls::RootCertStoreProvider; + +#[derive(Debug, Clone)] +pub struct MyRootCertStoreProvider { + root_cert_store: RootCertStore, +} + +impl MyRootCertStoreProvider { + /// Returns a new [`MyRootCertStoreProvider`]. 
+ #[inline] + pub fn new(root_cert_store: RootCertStore) -> Self { + Self { root_cert_store } + } +} + +impl RootCertStoreProvider for MyRootCertStoreProvider { + #[inline] + fn get_or_try_init(&self) -> Result<&RootCertStore, AnyError> { + Ok(&self.root_cert_store) + } +} diff --git a/crates/jsvm/runtime/deno_runtime.rs b/crates/jsvm/runtime/deno_runtime.rs new file mode 100644 index 0000000..33b8a03 --- /dev/null +++ b/crates/jsvm/runtime/deno_runtime.rs @@ -0,0 +1,175 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. + +use std::rc::Rc; +use std::sync::{Arc, LazyLock}; + +use deno_core::anyhow::{self, anyhow}; +use deno_core::{JsRuntime, ModuleCodeString, ModuleLoader, RuntimeOptions}; +use deno_http::DefaultHttpPropertyExtractor; +use deno_io::Stdio; +use deno_tls::rustls::RootCertStore; +use deno_tls::RootCertStoreProvider; + +use crate::runtime::cert_provider::MyRootCertStoreProvider; +use crate::runtime::filesystem::{CompileFs, FileBackedVfs, StaticFs}; +use crate::runtime::module_loader::MyModuleLoader; +use crate::runtime::{axis_permissions, MyPermission}; + +/// Header value of the `User-Agent` key. +static AXISTON_UA: LazyLock = LazyLock::new(|| { + format!( + "Deno/{} (Variant; Axiston/{})", + option_env!("CARGO_PKG_VERSION").unwrap_or("1.0"), + env!("CARGO_PKG_VERSION"), + ) +}); + +#[ctor::ctor] +fn init_v8_platform() { + // Initialize V8 flags. + let v8_flags = std::env::var("V8_FLAGS").unwrap_or_default(); + + if !v8_flags.is_empty() { + let flags: Vec<_> = v8_flags.split(' ').map(|x| x.to_owned()).collect(); + let flags = deno_core::v8_set_flags(flags).iter(); + for flag in flags.filter(|flag| flag.is_empty()) { + // TODO: Setup console tracing_subscriber. 
+ tracing::error!(target: "v8:init", flag = flag, "flag unrecognized"); + } + } + + // NOTE(denoland/deno/20495): Due to new PKU feature introduced in V8 11.6 we need + // to initialize the V8 platform on a parent thread of all threads that will spawn + // V8 isolates. + JsRuntime::init_platform(None, false); +} + +pub struct DenoRuntime { + js_runtime: JsRuntime, +} + +impl DenoRuntime { + async fn new(maybe_seed: Option) -> Self { + let mut root_cert_store = RootCertStore::empty(); + let root_cert_store_provider: Arc = + Arc::new(MyRootCertStoreProvider::new(root_cert_store.clone())); + let mut stdio = Some(Stdio::default()); + + // let op_fs = { + // if is_user_worker { + // Arc::new(StaticFs::new( + // static_files, + // base_dir_path, + // vfs_path, + // vfs, + // npm_snapshot, + // )) as Arc + // } else { + // Arc::new(CompileFs::from_rc(vfs)) as Arc + // } + // }; + + let file_backed_vfs = FileBackedVfs::new(); + let op_fs = Arc::new(CompileFs::new(file_backed_vfs)) as Arc; + + let extensions = vec![ + axis_permissions::init_ops(true, None), + deno_webidl::deno_webidl::init_ops(), + deno_console::deno_console::init_ops(), + deno_url::deno_url::init_ops(), + deno_web::deno_web::init_ops::( + Arc::new(deno_web::BlobStore::default()), + None, + ), + deno_webgpu::deno_webgpu::init_ops(), + deno_canvas::deno_canvas::init_ops(), + deno_fetch::deno_fetch::init_ops::(deno_fetch::Options { + user_agent: AXISTON_UA.clone(), + root_cert_store_provider: Some(root_cert_store_provider.clone()), + ..Default::default() + }), + deno_websocket::deno_websocket::init_ops::( + AXISTON_UA.clone(), + Some(root_cert_store_provider.clone()), + None, + ), + deno_crypto::deno_crypto::init_ops(maybe_seed), + deno_net::deno_net::init_ops::(Some(root_cert_store_provider), None), + deno_tls::deno_tls::init_ops(), + deno_http::deno_http::init_ops::(), + deno_io::deno_io::init_ops(stdio), + deno_fs::deno_fs::init_ops::(op_fs), + // deno_node::init_ops::(Some(node_resolver), 
Some(npm_resolver), op_fs), + ]; + + let module_loader = Rc::new(MyModuleLoader::new()) as Rc; + + let runtime_options = RuntimeOptions { + extensions, + is_main: true, + module_loader: Some(module_loader), + ..RuntimeOptions::default() + }; + + let mut js_runtime = JsRuntime::new(runtime_options); + + let bootstrap_script = ""; + let bootstrap_module = ModuleCodeString::from(bootstrap_script); + + js_runtime + .execute_script(deno_core::located_script_name!(), bootstrap_module) + .expect("Failed to execute bootstrap script"); + + Self { js_runtime } + } + + pub async fn run(&mut self) -> anyhow::Result<()> { + todo!() + } + + pub async fn inspector(&self) { + todo!() + } + + /// Returns a new [`DenoRuntimeBuilder`]. + #[inline] + pub fn builder() -> DenoRuntimeBuilder { + DenoRuntimeBuilder::new() + } +} + +/// [`DenoRuntime`] builder. +#[must_use = "runtime does nothing unless you use itt"] +#[derive(Debug, Default)] +pub struct DenoRuntimeBuilder { + maybe_seed: Option, +} + +impl DenoRuntimeBuilder { + /// Returns a new [`DenoRuntimeBuilder`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Adds or overrides the initial seed for the crypto extension. + pub fn with_crypto_seed(mut self, seed: u64) -> Self { + self.maybe_seed = Some(seed); + self + } + + pub fn build(self) -> DenoRuntime { + let extensions = vec![]; + + let options = RuntimeOptions { + extensions, + ..RuntimeOptions::default() + }; + + todo!() + } +} + +#[cfg(test)] +mod test {} diff --git a/crates/jsvm/runtime/filesystem/compile_fs.rs b/crates/jsvm/runtime/filesystem/compile_fs.rs new file mode 100644 index 0000000..2abd698 --- /dev/null +++ b/crates/jsvm/runtime/filesystem/compile_fs.rs @@ -0,0 +1,270 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. 
+ +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::sync::Arc; + +use deno_fs::{AccessCheckCb, FileSystem, FsDirEntry, FsFileType, OpenOptions, RealFs}; +use deno_io::fs::{File, FsError, FsResult, FsStat}; + +use crate::runtime::filesystem::FileBackedVfs; + +#[derive(Debug, Clone)] +pub struct CompileFs { + vfs: Arc, +} + +impl CompileFs { + /// Returns a new [`CompileFs`]. + #[inline] + pub fn new(vfs: FileBackedVfs) -> Self { + Self::from_rc(Arc::new(vfs)) + } + + /// Returns a new [`CompileFs`]. + #[inline] + pub fn from_rc(vfs: Arc) -> Self { + Self { vfs } + } + + #[inline] + pub fn file_backed_vfs(&self) -> Arc { + self.vfs.clone() + } + + fn error_if_in_vfs(&self, path: &Path) -> FsResult<()> { + if self.vfs.is_path_within(path) { + Err(FsError::NotSupported) + } else { + Ok(()) + } + } +} + +#[async_trait::async_trait(?Send)] +impl FileSystem for CompileFs { + fn cwd(&self) -> FsResult { + todo!() + } + + fn tmp_dir(&self) -> FsResult { + todo!() + } + + fn chdir(&self, path: &Path) -> FsResult<()> { + todo!() + } + + fn umask(&self, mask: Option) -> FsResult { + todo!() + } + + fn open_sync( + &self, + path: &Path, + options: OpenOptions, + access_check: Option, + ) -> FsResult> { + todo!() + } + + async fn open_async<'a>( + &'a self, + path: PathBuf, + options: OpenOptions, + access_check: Option>, + ) -> FsResult> { + todo!() + } + + fn mkdir_sync(&self, path: &Path, recursive: bool, mode: u32) -> FsResult<()> { + todo!() + } + + async fn mkdir_async(&self, path: PathBuf, recursive: bool, mode: u32) -> FsResult<()> { + todo!() + } + + fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> { + todo!() + } + + async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> { + todo!() + } + + fn chown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { + todo!() + } + + async fn chown_async(&self, path: PathBuf, uid: Option, gid: Option) -> FsResult<()> { + todo!() + } + + fn lchown_sync(&self, path: &Path, uid: 
Option, gid: Option) -> FsResult<()> { + todo!() + } + + async fn lchown_async( + &self, + path: PathBuf, + uid: Option, + gid: Option, + ) -> FsResult<()> { + todo!() + } + + fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> { + todo!() + } + + async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> { + todo!() + } + + fn copy_file_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn copy_file_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn cp_sync(&self, path: &Path, new_path: &Path) -> FsResult<()> { + todo!() + } + + async fn cp_async(&self, path: PathBuf, new_path: PathBuf) -> FsResult<()> { + todo!() + } + + fn stat_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn stat_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn lstat_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn lstat_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn realpath_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn realpath_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn read_dir_sync(&self, path: &Path) -> FsResult> { + todo!() + } + + async fn read_dir_async(&self, path: PathBuf) -> FsResult> { + todo!() + } + + fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn rename_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn link_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn symlink_sync( + &self, + oldpath: &Path, + newpath: &Path, + file_type: Option, + ) -> FsResult<()> { + todo!() + } + + async fn symlink_async( + &self, + old_path: PathBuf, + new_path: PathBuf, + file_type: Option, + ) -> FsResult<()> { + todo!() + } + + fn read_link_sync(&self, path: &Path) -> FsResult 
{ + todo!() + } + + async fn read_link_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> { + todo!() + } + + async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> { + todo!() + } + + fn utime_sync( + &self, + path: &Path, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + async fn utime_async( + &self, + path: PathBuf, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + fn lutime_sync( + &self, + path: &Path, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + async fn lutime_async( + &self, + path: PathBuf, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + self.error_if_in_vfs(&path)?; + RealFs + .lutime_async(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos) + .await + } +} diff --git a/crates/jsvm/runtime/filesystem/mod.rs b/crates/jsvm/runtime/filesystem/mod.rs new file mode 100644 index 0000000..e0af754 --- /dev/null +++ b/crates/jsvm/runtime/filesystem/mod.rs @@ -0,0 +1,9 @@ +//! TODO. + +pub use crate::runtime::filesystem::compile_fs::CompileFs; +pub use crate::runtime::filesystem::static_fs::StaticFs; +pub use crate::runtime::filesystem::virtual_fs::{FileBackedVfs, FileBackedVfsFile}; + +mod compile_fs; +mod static_fs; +mod virtual_fs; diff --git a/crates/jsvm/runtime/filesystem/static_fs.rs b/crates/jsvm/runtime/filesystem/static_fs.rs new file mode 100644 index 0000000..aa51f31 --- /dev/null +++ b/crates/jsvm/runtime/filesystem/static_fs.rs @@ -0,0 +1,240 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. 
+ +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::sync::Arc; + +use deno_fs::{AccessCheckCb, FileSystem, FsDirEntry, FsFileType, OpenOptions}; +use deno_io::fs::{File, FsResult, FsStat}; + +use crate::runtime::filesystem::FileBackedVfs; + +#[derive(Debug, Clone)] +pub struct StaticFs { + inner: Arc, +} + +#[async_trait::async_trait(?Send)] +impl FileSystem for StaticFs { + fn cwd(&self) -> FsResult { + todo!() + } + + fn tmp_dir(&self) -> FsResult { + todo!() + } + + fn chdir(&self, path: &Path) -> FsResult<()> { + todo!() + } + + fn umask(&self, mask: Option) -> FsResult { + todo!() + } + + fn open_sync( + &self, + path: &Path, + options: OpenOptions, + access_check: Option, + ) -> FsResult> { + todo!() + } + + async fn open_async<'a>( + &'a self, + path: PathBuf, + options: OpenOptions, + access_check: Option>, + ) -> FsResult> { + todo!() + } + + fn mkdir_sync(&self, path: &Path, recursive: bool, mode: u32) -> FsResult<()> { + todo!() + } + + async fn mkdir_async(&self, path: PathBuf, recursive: bool, mode: u32) -> FsResult<()> { + todo!() + } + + fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> { + todo!() + } + + async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> { + todo!() + } + + fn chown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { + todo!() + } + + async fn chown_async(&self, path: PathBuf, uid: Option, gid: Option) -> FsResult<()> { + todo!() + } + + fn lchown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { + todo!() + } + + async fn lchown_async( + &self, + path: PathBuf, + uid: Option, + gid: Option, + ) -> FsResult<()> { + todo!() + } + + fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> { + todo!() + } + + async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> { + todo!() + } + + fn copy_file_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn copy_file_async(&self, oldpath: PathBuf, 
newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn cp_sync(&self, path: &Path, new_path: &Path) -> FsResult<()> { + todo!() + } + + async fn cp_async(&self, path: PathBuf, new_path: PathBuf) -> FsResult<()> { + todo!() + } + + fn stat_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn stat_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn lstat_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn lstat_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn realpath_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn realpath_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn read_dir_sync(&self, path: &Path) -> FsResult> { + todo!() + } + + async fn read_dir_async(&self, path: PathBuf) -> FsResult> { + todo!() + } + + fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn rename_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { + todo!() + } + + async fn link_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { + todo!() + } + + fn symlink_sync( + &self, + oldpath: &Path, + newpath: &Path, + file_type: Option, + ) -> FsResult<()> { + todo!() + } + + async fn symlink_async( + &self, + oldpath: PathBuf, + newpath: PathBuf, + file_type: Option, + ) -> FsResult<()> { + todo!() + } + + fn read_link_sync(&self, path: &Path) -> FsResult { + todo!() + } + + async fn read_link_async(&self, path: PathBuf) -> FsResult { + todo!() + } + + fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> { + todo!() + } + + async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> { + todo!() + } + + fn utime_sync( + &self, + path: &Path, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + async fn utime_async( + &self, + path: PathBuf, + atime_secs: i64, + 
atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + fn lutime_sync( + &self, + path: &Path, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } + + async fn lutime_async( + &self, + path: PathBuf, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + todo!() + } +} diff --git a/crates/jsvm/runtime/filesystem/virtual_fs.rs b/crates/jsvm/runtime/filesystem/virtual_fs.rs new file mode 100644 index 0000000..820cf28 --- /dev/null +++ b/crates/jsvm/runtime/filesystem/virtual_fs.rs @@ -0,0 +1,330 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. + +use std::io::SeekFrom; +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::rc::Rc; + +use deno_core::{BufMutView, BufView, ResourceHandleFd, WriteOutcome}; +use deno_io::fs::{File, FsError, FsResult, FsStat}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug)] +pub struct FileBackedVfsRoot { + pub directory: VirtualDirectory, + pub root_path: PathBuf, +} + +#[derive(Debug)] +pub struct FileBackedVfs { + fs_root: FileBackedVfsRoot, +} + +impl FileBackedVfs { + /// Returns a new [`FileBackedVfs`]. 
+ pub fn new() -> Self { + todo!() + } + + #[inline] + pub fn is_path_within(&self, path: &Path) -> bool { + path.starts_with(&self.fs_root.root_path) + } +} + +#[derive(Debug, Clone)] +pub struct FileBackedVfsFile {} + +impl FileBackedVfsFile {} + +#[async_trait::async_trait(?Send)] +impl File for FileBackedVfsFile { + fn read_sync(self: Rc, buf: &mut [u8]) -> FsResult { + Err(FsError::NotSupported) + } + + async fn read_byob(self: Rc, buf: BufMutView) -> FsResult<(usize, BufMutView)> { + Err(FsError::NotSupported) + } + + fn write_sync(self: Rc, buf: &[u8]) -> FsResult { + Err(FsError::NotSupported) + } + + async fn write(self: Rc, buf: BufView) -> FsResult { + Err(FsError::NotSupported) + } + + fn write_all_sync(self: Rc, buf: &[u8]) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn write_all(self: Rc, buf: BufView) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn read_all_sync(self: Rc) -> FsResult> { + Err(FsError::NotSupported) + } + + async fn read_all_async(self: Rc) -> FsResult> { + Err(FsError::NotSupported) + } + + fn chmod_sync(self: Rc, path_mode: u32) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn chmod_async(self: Rc, mode: u32) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn seek_sync(self: Rc, pos: SeekFrom) -> FsResult { + Err(FsError::NotSupported) + } + + async fn seek_async(self: Rc, pos: SeekFrom) -> FsResult { + Err(FsError::NotSupported) + } + + fn datasync_sync(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn datasync_async(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn sync_sync(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn sync_async(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn stat_sync(self: Rc) -> FsResult { + Err(FsError::NotSupported) + } + + async fn stat_async(self: Rc) -> FsResult { + Err(FsError::NotSupported) + } + + fn lock_sync(self: Rc, exclusive: bool) -> FsResult<()> { + 
Err(FsError::NotSupported) + } + + async fn lock_async(self: Rc, exclusive: bool) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn unlock_sync(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn unlock_async(self: Rc) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn truncate_sync(self: Rc, len: u64) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn truncate_async(self: Rc, len: u64) -> FsResult<()> { + Err(FsError::NotSupported) + } + + fn utime_sync( + self: Rc, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + Err(FsError::NotSupported) + } + + async fn utime_async( + self: Rc, + atime_secs: i64, + atime_nanos: u32, + mtime_secs: i64, + mtime_nanos: u32, + ) -> FsResult<()> { + Err(FsError::NotSupported) + } + + #[inline] + fn as_stdio(self: Rc) -> FsResult { + Err(FsError::NotSupported) + } + + #[inline] + fn backing_fd(self: Rc) -> Option { + None + } + + #[inline] + fn try_clone_inner(self: Rc) -> FsResult> { + Ok(self) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum VfsEntry { + Directory(VirtualDirectory), + File(VirtualFile), + Symlink(VirtualSymlink), +} + +#[derive(Debug)] +enum VfsEntryRef<'a> { + Directory(&'a VirtualDirectory), + File(&'a VirtualFile), + Symlink(&'a VirtualSymlink), +} + +impl<'a> VfsEntryRef<'a> { + /// Returns a new [`FsStat`]. + pub fn as_fs_stat(&self) -> FsStat { + match self { + VfsEntryRef::Directory(x) => x.as_fs_stat(), + VfsEntryRef::File(x) => x.as_fs_stat(), + VfsEntryRef::Symlink(x) => x.as_fs_stat(), + } + } +} + +impl VfsEntry { + /// Returns a reference to the entries name. + pub fn name(&self) -> &str { + match self { + VfsEntry::Directory(dir) => &dir.name, + VfsEntry::File(file) => &file.name, + VfsEntry::Symlink(sm) => &sm.name, + } + } + + /// Returns a new [`VfsEntryRef`] from this entry. 
+ pub fn as_entry_ref(&self) -> VfsEntryRef<'_> { + match self { + VfsEntry::Directory(dir) => VfsEntryRef::Directory(dir), + VfsEntry::File(file) => VfsEntryRef::File(file), + VfsEntry::Symlink(sm) => VfsEntryRef::Symlink(sm), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct VirtualDirectory { + pub name: String, + // Should be sorted by name. + pub entries: Vec, +} + +impl VirtualDirectory { + /// Returns a new [`FsStat`]. + pub fn as_fs_stat(&self) -> FsStat { + FsStat { + is_file: false, + is_directory: true, + is_symlink: false, + size: 0, + mtime: None, + atime: None, + birthtime: None, + dev: 0, + ino: 0, + mode: 0, + nlink: 0, + uid: 0, + gid: 0, + rdev: 0, + blksize: 0, + blocks: 0, + is_block_device: false, + is_char_device: false, + is_fifo: false, + is_socket: false, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct VirtualFile { + pub name: String, + pub offset: u64, + pub len: u64, +} + +impl VirtualFile { + /// Returns a new [`FsStat`]. + pub fn as_fs_stat(&self) -> FsStat { + FsStat { + is_file: true, + is_directory: false, + is_symlink: false, + size: 0, + mtime: None, + atime: None, + birthtime: None, + dev: 0, + ino: 0, + mode: 0, + nlink: 0, + uid: 0, + gid: 0, + rdev: 0, + blksize: 0, + blocks: 0, + is_block_device: false, + is_char_device: false, + is_fifo: false, + is_socket: false, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct VirtualSymlink { + pub name: String, + pub dest_parts: Vec, +} + +impl VirtualSymlink { + /// Returns a new [`FsStat`]. 
+ pub fn as_fs_stat(&self) -> FsStat { + FsStat { + is_file: false, + is_directory: false, + is_symlink: true, + size: 0, + mtime: None, + atime: None, + birthtime: None, + dev: 0, + ino: 0, + mode: 0, + nlink: 0, + uid: 0, + gid: 0, + rdev: 0, + blksize: 0, + blocks: 0, + is_block_device: false, + is_char_device: false, + is_fifo: false, + is_socket: false, + } + } + + pub fn resolve_dest_from_root(&self, root: &Path) -> PathBuf { + let mut dest = root.to_path_buf(); + for part in &self.dest_parts { + dest.push(part); + } + dest + } +} diff --git a/crates/jsvm/runtime/mod.rs b/crates/jsvm/runtime/mod.rs index 12a43ef..57aa338 100644 --- a/crates/jsvm/runtime/mod.rs +++ b/crates/jsvm/runtime/mod.rs @@ -1,38 +1,33 @@ //! TODO. //! -mod machine; -mod permission; +mod cert_provider; +mod deno_runtime; +mod filesystem; +mod module_loader; +mod permissions; +mod transpile; use std::fmt; use std::rc::Rc; -use deno_core::{JsRuntime, RuntimeOptions}; use tokio::runtime::Runtime as TokioRuntime; +use crate::runtime::deno_runtime::DenoRuntime; +pub use crate::runtime::permissions::{axis_permissions, MyPermission}; + pub struct Jsvm { - inner: JsRuntime, + deno_runtime: DenoRuntime, + tokio_runtime: TokioRuntime, } impl Jsvm { /// Returns a new [`Jsvm`]. 
pub fn new(tokio_runtime: Rc) -> Self { - JsRuntime::init_platform(None, true); - - let options = RuntimeOptions { - extensions: vec![], - module_loader: None, - extension_transpiler: None, - ..RuntimeOptions::default() - }; - - let inner = JsRuntime::new(options); - todo!() } - // run - // inspect + // TODO: load modules } impl fmt::Debug for Jsvm { diff --git a/crates/jsvm/runtime/module_loader.rs b/crates/jsvm/runtime/module_loader.rs new file mode 100644 index 0000000..2a848df --- /dev/null +++ b/crates/jsvm/runtime/module_loader.rs @@ -0,0 +1,34 @@ +use deno_core::anyhow::Error; +use deno_core::{ + ModuleLoadResponse, ModuleLoader, ModuleSpecifier, RequestedModuleType, ResolutionKind, +}; + +pub struct MyModuleLoader {} + +impl MyModuleLoader { + /// Returns a new [`MyModuleLoader`]. + pub fn new() -> Self { + Self {} + } +} + +impl ModuleLoader for MyModuleLoader { + fn resolve( + &self, + specifier: &str, + referrer: &str, + kind: ResolutionKind, + ) -> Result { + todo!() + } + + fn load( + &self, + module_specifier: &ModuleSpecifier, + maybe_referrer: Option<&ModuleSpecifier>, + is_dyn_import: bool, + requested_module_type: RequestedModuleType, + ) -> ModuleLoadResponse { + todo!() + } +} diff --git a/crates/jsvm/runtime/permission.rs b/crates/jsvm/runtime/permission.rs deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/runtime/permissions.rs b/crates/jsvm/runtime/permissions.rs new file mode 100644 index 0000000..29f7f0e --- /dev/null +++ b/crates/jsvm/runtime/permissions.rs @@ -0,0 +1,135 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. 
+ +use std::borrow::Cow; +use std::path::Path; + +use deno_core::error::AnyError; +use deno_core::url::Url; +use deno_fetch::FetchPermissions; +use deno_fs::FsPermissions; +use deno_io::fs::FsError; +use deno_net::NetPermissions; +use deno_permissions::NetDescriptor; +use deno_web::TimersPermission; +use deno_websocket::WebSocketPermissions; + +/// TODO. +#[derive(Debug, Default, Clone)] +pub struct MyPermission { + allow_net: bool, + filter_net: Option>, +} + +deno_core::extension!( + axis_permissions, + options = { allow_net: bool, filter_net: Option> }, + state = |state, options| { + state.put::( + MyPermission::new(options.allow_net, options.filter_net) + ); + } +); + +impl MyPermission { + /// Returns a new [`MyPermission`]. + #[inline] + pub fn new(allow_net: bool, filter_net: Option>) -> Self { + Self { + allow_net, + filter_net, + } + } +} + +impl TimersPermission for MyPermission { + #[inline] + fn allow_hrtime(&mut self) -> bool { + false + } +} + +impl FetchPermissions for MyPermission { + fn check_net_url(&mut self, _url: &Url, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_read(&mut self, _p: &Path, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } +} + +impl NetPermissions for MyPermission { + fn check_net>( + &mut self, + _host: &(T, Option), + _api_name: &str, + ) -> Result<(), AnyError> { + Ok(()) + } + + fn check_read(&mut self, _p: &Path, _api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_write(&mut self, _p: &Path, _api_name: &str) -> Result<(), AnyError> { + Ok(()) + } +} + +impl WebSocketPermissions for MyPermission { + fn check_net_url(&mut self, _url: &Url, _api_name: &str) -> Result<(), AnyError> { + Ok(()) + } +} + +impl FsPermissions for MyPermission { + fn check_open<'a>( + &mut self, + resolved: bool, + read: bool, + write: bool, + path: &'a Path, + api_name: &str, + ) -> Result, FsError> { + Ok(Cow::Borrowed(path)) + } + + fn check_read(&mut self, path: &Path, api_name: &str) -> Result<(), 
AnyError> { + Ok(()) + } + + fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_read_blind( + &mut self, + p: &Path, + display: &str, + api_name: &str, + ) -> Result<(), AnyError> { + Ok(()) + } + + fn check_write(&mut self, path: &Path, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_write_partial(&mut self, path: &Path, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError> { + Ok(()) + } + + fn check_write_blind( + &mut self, + p: &Path, + display: &str, + api_name: &str, + ) -> Result<(), AnyError> { + Ok(()) + } +} diff --git a/crates/jsvm/runtime/transpile/cache.rs b/crates/jsvm/runtime/transpile/cache.rs deleted file mode 100644 index e5ee924..0000000 --- a/crates/jsvm/runtime/transpile/cache.rs +++ /dev/null @@ -1,11 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, Serialize)] -struct EmitMetadata { - pub source_hash: u64, - pub target_hash: u64, -} - -struct EmitCache {} - -pub struct JsvmTranspile {} diff --git a/crates/jsvm/runtime/transpile/disk_cache.rs b/crates/jsvm/runtime/transpile/disk_cache.rs new file mode 100644 index 0000000..1ef54d1 --- /dev/null +++ b/crates/jsvm/runtime/transpile/disk_cache.rs @@ -0,0 +1,134 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. + +use std::ffi::OsStr; +use std::fs::File; +use std::io::{Read, Result, Write}; +#[cfg(target_os = "windows")] +use std::path::{Component, Prefix}; +use std::path::{Path, PathBuf}; + +use deno_cache_dir::url_to_filename; +#[cfg(target_os = "windows")] +use deno_core::url::Host; +use deno_core::url::Url; +#[cfg(target_os = "windows")] +use serde::{Deserialize, Serialize}; + +/// On-disk storage for previously emitted files. 
+pub struct DiskCache { + root_path: PathBuf, +} + +impl DiskCache { + /// Returns a new [`DiskCache`]. + pub fn new(path: impl AsRef) -> Self { + let path = path.as_ref().to_owned(); + assert!(path.is_absolute()); + Self { root_path: path } + } + + /// Returns the reference to the root path. + #[inline] + pub fn root_path(&self) -> &Path { + self.root_path.as_path() + } + + fn get_filename(&self, url: &Url) -> Option { + let mut out = PathBuf::new(); + let scheme = url.scheme(); + out.push(scheme); + + match scheme { + "wasm" => { + let host = url.host_str()?; + // Windows doesn't support ":" in filenames, so we + // represent port using a special string. + let host_port = url + .port() + .map(|port| format!("{host}_PORT{port}")) + .unwrap_or_else(|| host.to_string()); + out.push(host_port); + out.extend(url.path_segments()?); + } + "http" | "https" | "data" | "blob" => return url_to_filename(url).ok(), + "file" => { + let path = url.to_file_path().ok()?; + let mut components = path.components(); + + // Windows doesn't support ":" in filenames, so we need to extract disk + // prefix, e.g.: file:///C:/deno/js/unit_test_runner.ts should produce: + // file\c\deno\js\unit_test_runner.ts + #[cfg(target_os = "windows")] + if let Some(Component::Prefix(prefix)) = components.next() { + match prefix.kind() { + Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => { + let disk = (disk_byte as char).to_string(); + out.push(disk); + } + Prefix::UNC(server, share) | Prefix::VerbatimUNC(server, share) => { + out.push("UNC"); + let host = Host::parse(server.to_str()?).ok()?; + let host = host.to_string().replace(':', "_"); + out.push(host); + out.push(share); + } + _ => unreachable!(), + } + } + + // Must be relative, so strip forward slash. 
+ let without_forward_slash = components.as_path().strip_prefix("/"); + out = out.join(without_forward_slash.unwrap_or(components.as_path())); + } + _ => return None, + }; + + Some(out) + } + + pub fn get_filename_with_extension(&self, url: &Url, extension: &str) -> Option { + let base = self.get_filename(url)?; + + match base.extension() { + None => Some(base.with_extension(extension)), + Some(ext) => { + let original_extension = OsStr::to_str(ext).unwrap_or("tmp"); + let final_extension = format!("{original_extension}.{extension}"); + Some(base.with_extension(final_extension)) + } + } + } + + /// Reads the entire contents of a file into a bytes vector. + pub fn read(&self, path: impl AsRef) -> Result> { + let buf = std::fs::read(path)?; + Ok(buf) + } + + /// Writes an entire buffer into the temporary file and then rename the file. + pub fn write(&self, path: impl AsRef, buf: impl AsRef<[u8]>) -> Result<()> { + let path = path.as_ref().to_owned(); + std::fs::create_dir_all(&path)?; + + let temp_path = self.gen_temp_path(&path); + let mut file = File::open(&temp_path)?; + file.write_all(buf.as_ref())?; + + let file_path = self.root_path.join(path); + std::fs::rename(&temp_path, &file_path)?; + Ok(()) + } + + /// Returns the temporary file path. + fn gen_temp_path(&self, path: &Path) -> PathBuf { + let seq: String = (0..4) + .map(|_| format!("{:02x}", rand::random::())) + .collect(); + + self.root_path + .join(path) + .with_file_name(seq) + .with_extension("tmp") + } +} diff --git a/crates/jsvm/runtime/transpile/emit.rs b/crates/jsvm/runtime/transpile/emit.rs deleted file mode 100644 index e69de29..0000000 diff --git a/crates/jsvm/runtime/transpile/emit_cache.rs b/crates/jsvm/runtime/transpile/emit_cache.rs new file mode 100644 index 0000000..9572f0d --- /dev/null +++ b/crates/jsvm/runtime/transpile/emit_cache.rs @@ -0,0 +1,104 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright 2023-2024 the Supabase authors. 
All rights reserved. MIT license. + +use std::path::{Path, PathBuf}; + +use deno_ast::ModuleSpecifier; +use deno_core::anyhow::anyhow; +use deno_core::error::AnyError; +use deno_core::serde_json; +use serde::{Deserialize, Serialize}; + +use crate::runtime::transpile::DiskCache; + +#[derive(Debug, Deserialize, Serialize)] +struct EmitMetadata { + pub source_hash: u64, + pub target_hash: u64, +} + +/// Cache for previously emitted files. +pub struct EmitCache { + disk_cache: DiskCache, + cli_version: &'static str, +} + +impl EmitCache { + /// Returns a new [`EmitCache`]. + pub fn new(path: impl AsRef) -> Self { + Self { + disk_cache: DiskCache::new(path), + cli_version: "", + } + } + + pub fn read_emit(&self, specifier: &ModuleSpecifier, source_hash: u64) -> Option { + let meta_filename = self.get_meta_filename(specifier)?; + let emit_filename = self.get_emit_filename(specifier)?; + + // Load and verify the metadata file is for this source and CLI version. + let bytes = self.disk_cache.read(&meta_filename).ok()?; + let meta: EmitMetadata = serde_json::from_slice(&bytes).ok()?; + if meta.source_hash != source_hash { + return None; + } + + // Load and verify the compilation result is for the metadata. + let emit_bytes = self.disk_cache.read(&emit_filename).ok()?; + if meta.target_hash != compute_emit_hash(&emit_bytes, self.cli_version) { + return None; + } + + String::from_utf8(emit_bytes).ok() + } + + pub fn write_emit(&self, specifier: &ModuleSpecifier, source_hash: u64, target_code: &str) { + if let Err(err) = self.write_emit_inner(specifier, source_hash, target_code) { + // Should never error here, but if it ever does don't fail. 
+ if cfg!(debug_assertions) { + panic!("Error saving emit data ({specifier}): {err}"); + } else { + // log::debug!("Error saving emit data({}): {}", specifier, err); + } + } + } + + fn write_emit_inner( + &self, + specifier: &ModuleSpecifier, + source_hash: u64, + code: &str, + ) -> Result<(), AnyError> { + let meta_filename = self + .get_meta_filename(specifier) + .ok_or_else(|| anyhow!("Could not get meta filename."))?; + let emit_filename = self + .get_emit_filename(specifier) + .ok_or_else(|| anyhow!("Could not get emit filename."))?; + + let target_hash = compute_emit_hash(code.as_bytes(), self.cli_version); + let metadata = EmitMetadata { + source_hash, + target_hash, + }; + + let metadata = serde_json::to_vec(&metadata)?; + self.disk_cache.write(&meta_filename, &metadata)?; + self.disk_cache.write(&emit_filename, code.as_bytes())?; + + Ok(()) + } + + fn get_meta_filename(&self, specifier: &ModuleSpecifier) -> Option { + self.disk_cache + .get_filename_with_extension(specifier, "meta") + } + + fn get_emit_filename(&self, specifier: &ModuleSpecifier) -> Option { + self.disk_cache.get_filename_with_extension(specifier, "js") + } +} + +fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> u64 { + todo!() +} diff --git a/crates/jsvm/runtime/transpile/mod.rs b/crates/jsvm/runtime/transpile/mod.rs index 851e48e..12bf7eb 100644 --- a/crates/jsvm/runtime/transpile/mod.rs +++ b/crates/jsvm/runtime/transpile/mod.rs @@ -1,4 +1,15 @@ //! TODO. 
-mod cache; -mod emit; +pub use crate::runtime::transpile::disk_cache::DiskCache; +pub use crate::runtime::transpile::emit_cache::EmitCache; + +mod disk_cache; +mod emit_cache; + +pub struct Transpiler {} + +impl Transpiler {} + +pub struct Emitter {} + +impl Emitter {} diff --git a/crates/core/Cargo.toml b/crates/schema/Cargo.toml similarity index 53% rename from crates/core/Cargo.toml rename to crates/schema/Cargo.toml index 2466e42..aa3043d 100644 --- a/crates/core/Cargo.toml +++ b/crates/schema/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "axiston-runtime-core" +name = "axiston-rt-schema" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -20,6 +20,19 @@ rustdoc-args = ["--cfg", "docsrs"] [lib] path = "lib.rs" +[features] +# Enables or disables gRPC client code generation. +client = [] +# Enables or disables gRPC server code generation. +server = [] + [dependencies] -axiston-runtime-jsvm = { workspace = true } -axiston-runtime-task = { workspace = true } +tonic = { workspace = true } +prost = { workspace = true } +tonic-types = { workspace = true } +prost-types = { workspace = true } + +[build-dependencies] +anyhow = { workspace = true } +tonic-build = { workspace = true } +prost-build = { workspace = true } diff --git a/crates/schema/README.md b/crates/schema/README.md new file mode 100644 index 0000000..2fc8313 --- /dev/null +++ b/crates/schema/README.md @@ -0,0 +1,20 @@ +### runtime/task + +[![Build Status][action-badge]][action-url] +[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-schema.svg?logo=rust&style=flat-square +[crates-url]: 
https://crates.io/crates/axiston-rt-schema +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-schema?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-schema + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/crates/schema/build.rs b/crates/schema/build.rs new file mode 100644 index 0000000..2fc49f0 --- /dev/null +++ b/crates/schema/build.rs @@ -0,0 +1,27 @@ +#![forbid(unsafe_code)] + +use std::fs::create_dir_all; +use std::path::PathBuf; + +fn main() -> anyhow::Result<()> { + let generate_client = cfg!(feature = "client"); + let generate_server = cfg!(feature = "server"); + + let builder = tonic_build::configure() + .build_server(generate_server) + .build_client(generate_client) + .build_transport(true); + + let input_dir = PathBuf::from("./protobuf/"); + let instance = input_dir.join("./instance.proto"); + let registry = input_dir.join("./registry.proto"); + + let output_dir = PathBuf::from("./generated/"); + create_dir_all(output_dir.as_path())?; + + let protos = [instance.as_path(), registry.as_path()]; + let includes = [input_dir.as_path()]; + builder.out_dir(output_dir).compile(&protos, &includes)?; + + Ok(()) +} diff --git a/crates/schema/lib.rs b/crates/schema/lib.rs new file mode 100644 index 0000000..7afd028 --- /dev/null +++ b/crates/schema/lib.rs @@ -0,0 +1,23 @@ +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! ### Examples +//! +//! ```rust +//! fn main() {} +//! ``` + +pub mod instance { + //! Includes files generated by `prost`. + //! Built from `instance.proto`. + + include!("./generated/instance.rs"); +} + +pub mod registry { + //! Includes files generated by `prost`. + //! Built from `registry.proto`. 
+ + include!("./generated/registry.rs"); +} diff --git a/crates/cli/protobuf/instance.proto b/crates/schema/protobuf/instance.proto similarity index 87% rename from crates/cli/protobuf/instance.proto rename to crates/schema/protobuf/instance.proto index eeb0e23..bfc5d7e 100644 --- a/crates/cli/protobuf/instance.proto +++ b/crates/schema/protobuf/instance.proto @@ -79,7 +79,15 @@ message CloseRequest { message CloseResponse {} +message StatusRequest {} + +message StatusResponse {} + service Instance { - // Bidirectional streaming RPC for continuous communication between the gateway and runtime. + // TODO. + rpc Status(StatusRequest) returns (StatusResponse); + + // Bidirectional event streaming RPC for continuous communication + // between the gateway (as a client) and the runtime (as a server). rpc Connect(stream EventRequest) returns (stream EventResponse); } diff --git a/crates/cli/protobuf/registry.proto b/crates/schema/protobuf/registry.proto similarity index 100% rename from crates/cli/protobuf/registry.proto rename to crates/schema/protobuf/registry.proto diff --git a/crates/server/Cargo.toml b/crates/server/Cargo.toml new file mode 100644 index 0000000..ced13f4 --- /dev/null +++ b/crates/server/Cargo.toml @@ -0,0 +1,40 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "axiston-rt-server" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = "lib.rs" + +[dependencies] +# axiston-rt-jsvm = { workspace = true } +axiston-rt-schema = { workspace = true } +# axiston-rt-task = { workspace = true } + +tokio = { workspace = true } +tokio-stream = { workspace = true } +futures = { workspace = true } 
+anyhow = { workspace = true } +tracing = { workspace = true } + +tonic = { workspace = true } +prost = { workspace = true } +tonic-types = { workspace = true } +prost-types = { workspace = true } + +tower = { version = "0.4", features = ["full"] } +tower-http = { version = "0.5", features = ["full"] } diff --git a/crates/server/README.md b/crates/server/README.md new file mode 100644 index 0000000..a31cdb2 --- /dev/null +++ b/crates/server/README.md @@ -0,0 +1,20 @@ +### runtime/server + +[![Build Status][action-badge]][action-url] +[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-server.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-server +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-server?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-server + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. 
diff --git a/crates/cli/handler/instance.rs b/crates/server/handler/instance.rs similarity index 74% rename from crates/cli/handler/instance.rs rename to crates/server/handler/instance.rs index f5f8578..b4b3024 100644 --- a/crates/cli/handler/instance.rs +++ b/crates/server/handler/instance.rs @@ -1,17 +1,13 @@ +use axiston_rt_schema::instance::instance_server::{Instance, InstanceServer}; +use axiston_rt_schema::instance::{EventRequest, EventResponse}; use futures::stream::BoxStream; use futures::StreamExt; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status, Streaming}; -use crate::handler::instance::instance_proto::instance_server::{Instance, InstanceServer}; -use crate::handler::instance::instance_proto::{EventRequest, EventResponse}; use crate::service::AppState; -pub mod instance_proto { - tonic::include_proto!("instance"); -} - /// TODO. pub struct InstanceService { state: AppState, @@ -42,10 +38,7 @@ impl Instance for InstanceService { let mut request = request.into_inner(); let (tx, rx) = mpsc::channel(128); - tokio::spawn(async move { - while let Some(event) = request.next().await { - }; - }); + tokio::spawn(async move { while let Some(event) = request.next().await {} }); let rx = ReceiverStream::new(rx); Ok(Response::new(Box::pin(rx))) diff --git a/crates/cli/handler/mod.rs b/crates/server/handler/mod.rs similarity index 100% rename from crates/cli/handler/mod.rs rename to crates/server/handler/mod.rs diff --git a/crates/cli/handler/registry.rs b/crates/server/handler/registry.rs similarity index 74% rename from crates/cli/handler/registry.rs rename to crates/server/handler/registry.rs index cc5ec73..55ceb1c 100644 --- a/crates/cli/handler/registry.rs +++ b/crates/server/handler/registry.rs @@ -1,15 +1,9 @@ +use axiston_rt_schema::registry::registry_server::{Registry, RegistryServer}; +use axiston_rt_schema::registry::{CheckRequest, CheckResponse, RegistryRequest, RegistryResponse}; use tonic::{Request, 
Response, Status}; -use crate::handler::registry::registry_proto::registry_server::{Registry, RegistryServer}; -use crate::handler::registry::registry_proto::{ - CheckRequest, CheckResponse, RegistryRequest, RegistryResponse, -}; use crate::service::AppState; -pub mod registry_proto { - tonic::include_proto!("registry"); -} - /// TODO. pub struct RegistryService { state: AppState, diff --git a/crates/core/lib.rs b/crates/server/lib.rs similarity index 74% rename from crates/core/lib.rs rename to crates/server/lib.rs index 62212a5..51c5295 100644 --- a/crates/core/lib.rs +++ b/crates/server/lib.rs @@ -8,3 +8,6 @@ //! fn main() {} //! ``` +pub mod handler; +pub mod middleware; +pub mod service; diff --git a/crates/server/middleware/mod.rs b/crates/server/middleware/mod.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/crates/server/middleware/mod.rs @@ -0,0 +1 @@ + diff --git a/crates/task/registry/cache.rs b/crates/server/service/cache.rs similarity index 100% rename from crates/task/registry/cache.rs rename to crates/server/service/cache.rs diff --git a/crates/cli/service/config.rs b/crates/server/service/config.rs similarity index 100% rename from crates/cli/service/config.rs rename to crates/server/service/config.rs diff --git a/crates/server/service/instance.rs b/crates/server/service/instance.rs new file mode 100644 index 0000000..9a20ff1 --- /dev/null +++ b/crates/server/service/instance.rs @@ -0,0 +1,12 @@ +/// TODO. 
+#[derive(Debug, Clone)] +pub struct InstanceService {} + +impl InstanceService {} + +#[cfg(test)] +mod test { + fn build_default() -> anyhow::Result<()> { + Ok(()) + } +} diff --git a/crates/cli/service/mod.rs b/crates/server/service/mod.rs similarity index 100% rename from crates/cli/service/mod.rs rename to crates/server/service/mod.rs diff --git a/crates/server/service/registry.rs b/crates/server/service/registry.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/crates/server/service/registry.rs @@ -0,0 +1 @@ + diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 934011d..9dbf48c 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -1,7 +1,7 @@ # https://doc.rust-lang.org/cargo/reference/manifest.html [package] -name = "axiston-runtime-task" +name = "axiston-rt-task" version = { workspace = true } edition = { workspace = true } license = { workspace = true } @@ -21,14 +21,15 @@ rustdoc-args = ["--cfg", "docsrs"] path = "lib.rs" [dependencies] -tower-path = { workspace = true, features = [] } -tower-task = { workspace = true, features = [] } +futures = { workspace = true } +pin-project-lite = { workspace = true } +thiserror = { workspace = true } -tower = { version = "0.5", features = ["load", "util"] } -futures = { version = "0.3", features = [] } -pin-project-lite = { version = "0.2", features = [] } +derive_more = { workspace = true } +serde = { workspace = true } +tracing = { workspace = true } +ecow = { workspace = true } +tower = { workspace = true } -serde = { version = "1.0", features = ["derive"] } -tracing = { version = "0.1", features = [] } -thiserror = { version = "1.0", features = [] } -ecow = { version = "0.2", features = ["serde"] } +[dev-dependencies] +tokio = { workspace = true } diff --git a/crates/task/README.md b/crates/task/README.md index c824ab5..8734298 100644 --- a/crates/task/README.md +++ b/crates/task/README.md @@ -1,5 +1,16 @@ ### runtime/task +[![Build Status][action-badge]][action-url] 
+[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-task.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-task +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-task?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-task + Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. #### Notes diff --git a/crates/task/context/failure.rs b/crates/task/context/failure.rs new file mode 100644 index 0000000..60d76f4 --- /dev/null +++ b/crates/task/context/failure.rs @@ -0,0 +1,60 @@ +use std::error::Error; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +// TODO: wrap box error instead + +/// Unrecoverable failure duration [`TaskHandler`] execution. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Error, Serialize, Deserialize)] +#[must_use = "errors do nothing unless you use them"] +#[error("failure during `TaskHandler` execution")] +pub struct TaskError { + name: String, + message: String, +} + +impl TaskError { + /// Returns a new [`TaskError`]. + pub fn new() -> Self { + Self { + name: "".to_owned(), + message: "".to_owned(), + } + } + + /// Returns the underlying error's name. + #[inline] + pub fn name(&self) -> &str { + &self.name + } + + /// Returns the underlying error's message. + #[inline] + pub fn message(&self) -> &str { + &self.message + } +} + +impl From> for TaskError { + fn from(value: Box) -> Self { + todo!() + } +} + +/// Specialized [`Result`] alias for the [`TaskError`] type. 
+pub type TaskResult = Result; + +#[cfg(test)] +mod test { + use crate::context::TaskError; + use crate::Result; + + #[test] + fn instance() -> Result<()> { + let _ = TaskError::new(); + Ok(()) + } +} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs new file mode 100644 index 0000000..156f8d9 --- /dev/null +++ b/crates/task/context/mod.rs @@ -0,0 +1,29 @@ +//! [`TaskRequest`], [`TaskResponse`] and [`TaskError`]. + +pub mod builders { + //! [`TaskRequest`] and [`TaskResponse`] builders. + //! + //! [`TaskRequest`]: crate::context::TaskRequest + //! [`TaskResponse`]: crate::context::TaskResponse + + pub use super::request::TaskRequestBuilder; + pub use super::response::TaskResponseBuilder; +} + +pub mod storages { + //! [`TaskRequest`] and [`TaskResponse`] storages. + //! + //! [`TaskRequest`]: crate::context::TaskRequest + //! [`TaskResponse`]: crate::context::TaskResponse + + pub use super::request::{Fields, Secrets}; + pub use super::response::Metrics; +} + +pub use crate::context::failure::{TaskError, TaskResult}; +pub use crate::context::request::TaskRequest; +pub use crate::context::response::TaskResponse; + +mod failure; +mod request; +mod response; diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs new file mode 100644 index 0000000..5e8494d --- /dev/null +++ b/crates/task/context/request.rs @@ -0,0 +1,163 @@ +use std::collections::HashMap; +use std::fmt; + +use derive_more::{Deref, DerefMut, From}; +use serde::{Deserialize, Serialize}; + +use crate::routing::Layers; + +/// TODO. +#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct Fields { + inner: HashMap, +} + +impl Fields { + /// Returns an empty [`Fields`] store. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +/// TODO. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct Secrets { + inner: HashMap, +} + +impl Secrets { + /// Returns an empty [`Secrets`] store. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +/// Serializable [`TaskHandler`] service request. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct TaskRequest { + #[deref] + #[deref_mut] + inner: T, + + fields: Fields, + secrets: Secrets, + layers: Layers, +} + +impl TaskRequest { + /// Returns a new [`TaskRequest`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { + inner, + fields: Fields::new(), + secrets: Secrets::new(), + layers: Layers::new(), + } + } + + /// Returns a new [`TaskRequestBuilder`]. + #[inline] + pub fn builder(inner: T) -> TaskRequestBuilder { + TaskRequestBuilder::new(inner) + } + + /// Returns the inner data. + #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl fmt::Debug for TaskRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskRequest") + .field("fields", &self.fields) + .field("secrets", &self.secrets) + .finish_non_exhaustive() + } +} + +/// [`TaskHandler`] service request builder. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Clone)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct TaskRequestBuilder { + inner: T, + fields: Option, + secrets: Option, + layers: Option, +} + +impl TaskRequestBuilder { + /// Returns a new [`TaskRequestBuilder`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { + inner, + fields: None, + secrets: None, + layers: None, + } + } + + // TODO: Method to add a single field. + // TODO: Method to add a single secret. + + /// Overrides the default value of [`TaskRequest`]`::fields`. 
+ #[inline] + pub fn with_fields(mut self, fields: Fields) -> Self { + self.fields = Some(fields); + self + } + + /// Overrides the default value of [`TaskRequest`]`::secrets`. + #[inline] + pub fn with_secrets(mut self, secrets: Secrets) -> Self { + self.secrets = Some(secrets); + self + } + + /// Overrides the default value of [`TaskRequest`]`::layers`. + #[inline] + pub fn with_layers(mut self, layers: Layers) -> Self { + self.layers = Some(layers); + self + } + + /// Returns a new [`TaskRequest`]. + pub fn build(self) -> TaskRequest { + TaskRequest { + inner: self.inner, + fields: self.fields.unwrap_or_default(), + secrets: self.secrets.unwrap_or_default(), + layers: self.layers.unwrap_or_default(), + } + } +} + +#[cfg(test)] +mod test { + use crate::context::storages::{Fields, Secrets}; + use crate::context::TaskRequest; + use crate::routing::Layers; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let _request = TaskRequest::builder(5) + .with_fields(Fields::new()) + .with_secrets(Secrets::new()) + .with_layers(Layers::new()) + .build(); + Ok(()) + } +} diff --git a/scripts/tower-task/context/response.rs b/crates/task/context/response.rs similarity index 53% rename from scripts/tower-task/context/response.rs rename to crates/task/context/response.rs index aa6a511..969f92c 100644 --- a/scripts/tower-task/context/response.rs +++ b/crates/task/context/response.rs @@ -1,24 +1,45 @@ +use std::collections::HashMap; use std::fmt; -use std::ops::{Deref, DerefMut}; -#[cfg(feature = "serde")] +use derive_more::{Deref, DerefMut, From}; use serde::{Deserialize, Serialize}; +/// TODO. +#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] +#[must_use = "responses do nothing unless you serialize them"] +pub struct Metrics { + inner: HashMap, +} + +impl Metrics { + /// Returns an empty [`Metrics`] store. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + /// Deserializable [`TaskHandler`] service response. 
/// /// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Clone)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] #[must_use = "responses do nothing unless you serialize them"] pub struct TaskResponse { + #[deref] + #[deref_mut] inner: T, + + metrics: Metrics, } impl TaskResponse { /// Returns a new [`TaskResponse`]. #[inline] pub fn new(inner: T) -> Self { - Self { inner } + Self { + inner, + metrics: Metrics::default(), + } } /// Returns a new [`TaskResponseBuilder`]. @@ -34,25 +55,11 @@ impl TaskResponse { } } -impl Deref for TaskResponse { - type Target = T; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for TaskResponse { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - impl fmt::Debug for TaskResponse { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskResponse").finish_non_exhaustive() + f.debug_struct("TaskResponse") + .field("metrics", &self.metrics) + .finish_non_exhaustive() } } @@ -63,17 +70,36 @@ impl fmt::Debug for TaskResponse { #[must_use = "responses do nothing unless you serialize them"] pub struct TaskResponseBuilder { inner: T, + metrics: Option, } impl TaskResponseBuilder { /// Returns a new [`TaskResponseBuilder`]. #[inline] pub fn new(inner: T) -> Self { - Self { inner } + Self { + inner, + metrics: None, + } } /// Returns a new [`TaskResponse`]. 
pub fn build(self) -> TaskResponse { - TaskResponse { inner: self.inner } + TaskResponse { + inner: self.inner, + metrics: self.metrics.unwrap_or_default(), + } + } +} + +#[cfg(test)] +mod test { + use crate::context::TaskResponse; + use crate::Result; + + #[test] + fn build() -> Result<()> { + let _response = TaskResponse::builder(5).build(); + Ok(()) } } diff --git a/crates/task/datatype/action.rs b/crates/task/datatype/action.rs deleted file mode 100644 index 2297555..0000000 --- a/crates/task/datatype/action.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! Operation [`Request`], [`Response`] and [`Manifest`] types. -//! -//! [`Request`]: ActionRequest -//! [`Response`]: ActionResponse -//! [`Manifest`]: ActionManifest - -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct ActionRequest {} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ActionResponse {} - -/// Associated action metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct ActionManifest { - pub name: String, -} - -impl ActionManifest { - /// Returns a new [`ActionManifest`]. - /// - /// Used for testing. - #[inline] - pub fn new(name: &str) -> Self { - Self { - name: name.to_owned(), - } - } - - // pub fn index() -> Index {} -} diff --git a/crates/task/datatype/mod.rs b/crates/task/datatype/mod.rs deleted file mode 100644 index c8dc605..0000000 --- a/crates/task/datatype/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! TODO. -//! - -pub mod action; -pub mod service; -pub mod trigger; - -// pub struct Graph {} diff --git a/crates/task/datatype/trigger.rs b/crates/task/datatype/trigger.rs deleted file mode 100644 index eb319a6..0000000 --- a/crates/task/datatype/trigger.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! Condition [`Request`], [`Response`] and [`Manifest`] types. -//! -//! [`Request`]: TriggerRequest -//! [`Response`]: TriggerResponse -//! 
[`Manifest`]: TriggerManifest - -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct TriggerRequest {} - -#[derive(Debug, Serialize, Deserialize)] -pub struct TriggerResponse { - pub should_trigger: bool, - pub ignore_retry_ms: u32, -} - -/// Associated trigger metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct TriggerManifest { - pub name: String, -} - -impl TriggerManifest { - /// Returns a new [`TriggerManifest`]. - /// - /// Used for testing. - #[inline] - pub fn new(name: &str) -> Self { - Self { - name: name.to_owned(), - } - } -} diff --git a/scripts/tower-task/handler/future.rs b/crates/task/handler/future.rs similarity index 77% rename from scripts/tower-task/handler/future.rs rename to crates/task/handler/future.rs index 42cb4fe..a64ee32 100644 --- a/scripts/tower-task/handler/future.rs +++ b/crates/task/handler/future.rs @@ -13,9 +13,11 @@ use pin_project_lite::pin_project; use crate::context::{TaskError, TaskResponse}; pin_project! { - /// Opaque [`Future`] return type for [`Task::call`]. + /// Opaque [`Future`] return type for [`TaskHandler::call`]. /// - /// [`Task::call`]: crate::task::Task::call + /// Opaque `futures::`[`BoxFuture`]. 
+ /// + /// [`TaskHandler::call`]: crate::context::TaskHandler #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct TaskFuture { #[pin] fut: BoxFuture<'static, Result, TaskError>>, @@ -53,10 +55,15 @@ impl Future for TaskFuture { #[cfg(test)] mod test { + use crate::context::TaskResponse; + use crate::handler::future::TaskFuture; use crate::Result; #[test] - fn build() -> Result<()> { + fn future_from_block() -> Result<()> { + let fut = async move { Ok(TaskResponse::new(5)) }; + let _fut = TaskFuture::new(fut); + Ok(()) } } diff --git a/scripts/tower-task/handler/metric.rs b/crates/task/handler/metric.rs similarity index 55% rename from scripts/tower-task/handler/metric.rs rename to crates/task/handler/metric.rs index 45ea1f3..b1b9d1d 100644 --- a/scripts/tower-task/handler/metric.rs +++ b/crates/task/handler/metric.rs @@ -3,20 +3,18 @@ //! [`Load`]: tower::load::Load //! [`TaskHandler`]: crate::handler::TaskHandler -#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -/// [`Load`] metric types for [`TaskHandler`]s. +/// `tower::load::`[`Load`] metrics for [`TaskHandler`]s. /// /// [`Load`]: tower::load::Load /// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Default, Clone, PartialOrd, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Default, Clone, PartialOrd, PartialEq, Serialize, Deserialize)] #[must_use = "metrics do nothing unless you serialize them"] -pub struct TaskMetric {} +pub struct TaskMetrics {} -impl TaskMetric { - /// Returns a new [`TaskMetric`]. +impl TaskMetrics { + /// Returns a new [`TaskMetrics`]. 
#[inline] pub fn new() -> Self { Self::default() @@ -25,12 +23,12 @@ impl TaskMetric { #[cfg(test)] mod test { - use crate::handler::metric::TaskMetric; + use crate::handler::metric::TaskMetrics; use crate::Result; #[test] - fn build() -> Result<()> { - let _ = TaskMetric::new(); + fn from_default() -> Result<()> { + let _ = TaskMetrics::new(); Ok(()) } } diff --git a/scripts/tower-task/handler/mod.rs b/crates/task/handler/mod.rs similarity index 64% rename from scripts/tower-task/handler/mod.rs rename to crates/task/handler/mod.rs index 6c9f971..1d4d0a3 100644 --- a/scripts/tower-task/handler/mod.rs +++ b/crates/task/handler/mod.rs @@ -1,29 +1,29 @@ //! [`TaskHandler`], [`TaskHandlerLayer`], its future and metrics. use std::fmt; +use std::marker::PhantomData; +use std::ops::Deref; +use std::sync::Arc; use std::task::{Context, Poll}; use tower::load::Load; use tower::util::BoxCloneService; -use tower::{Service, ServiceBuilder}; +use tower::{Layer, Service, ServiceBuilder}; use crate::context::{TaskError, TaskRequest, TaskResponse}; use crate::handler::future::TaskFuture; -pub use crate::handler::layer::TaskHandlerLayer; -use crate::handler::metric::TaskMetric; +use crate::handler::metric::TaskMetrics; pub mod future; -mod layer; pub mod metric; -/// Unified `tower::`[`Service`] for executing [`tasks`]. +/// Unified `tower::`[`Service`] for executing tasks. /// /// Opaque [`BoxCloneService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. -/// -/// [`tasks`]: crate::context #[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] pub struct TaskHandler { inner: BoxCloneService, TaskResponse, TaskError>, + metrics: Arc, } impl TaskHandler { @@ -47,11 +47,12 @@ impl TaskHandler { Self { inner: BoxCloneService::new(inner), + metrics: Arc::new(TaskMetrics::default()), } } /// Maps a `TaskHandler` to `TaskHandler` by applying a function to a contained service. 
- pub fn map(self, f: F) -> TaskHandler< T2, U2> + pub fn map(self, f: F) -> TaskHandler where F: FnOnce( BoxCloneService, TaskResponse, TaskError>, @@ -59,20 +60,18 @@ impl TaskHandler { { TaskHandler { inner: f(self.inner), + metrics: self.metrics, } } - - /// Estimates the service's current load. - pub fn metrics(&self) -> TaskMetric { - TaskMetric::new() - } } impl Clone for TaskHandler { #[inline] fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { inner } + Self { + inner: self.inner.clone(), + metrics: self.metrics.clone(), + } } } @@ -99,11 +98,48 @@ impl Service> for TaskHandler { } impl Load for TaskHandler { - type Metric = TaskMetric; + type Metric = TaskMetrics; #[inline] fn load(&self) -> Self::Metric { - self.metrics() + self.metrics.deref().clone() + } +} + +/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. +pub struct TaskHandlerLayer { + inner: PhantomData<(Req, T, U)>, +} + +impl TaskHandlerLayer { + /// Returns a new [`TaskHandlerLayer`]. 
+ #[inline] + pub fn new() -> Self { + Self::default() + } +} + +impl Default for TaskHandlerLayer { + fn default() -> Self { + Self { inner: PhantomData } + } +} + +impl Layer for TaskHandlerLayer +where + T: 'static, + U: 'static, + S: Service + Clone + Send + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Service = TaskHandler; + + #[inline] + fn layer(&self, inner: S) -> Self::Service { + TaskHandler::new(inner) } } @@ -120,15 +156,15 @@ mod test { } #[test] - fn service() -> Result<()> { + fn manual_compose() -> Result<()> { let inner = service_fn(handle); - let _ = TaskHandler::new(inner); + let _service = TaskHandler::new(inner); Ok(()) } #[test] - fn layer() -> Result<()> { - let _ = ServiceBuilder::new() + fn service_builder() -> Result<()> { + let _service = ServiceBuilder::new() .layer(TaskHandlerLayer::new()) .service(service_fn(handle)); Ok(()) diff --git a/crates/task/lib.rs b/crates/task/lib.rs index 6fdd15b..a8d3aab 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -3,21 +3,30 @@ #![doc = include_str!("./README.md")] //! ```rust +//! use axiston_rt_task::routing::Router; +//! use axiston_rt_task::Result; +//! +//! fn main() -> Result<()> { +//! let router = Router::default(); +//! Ok(()) +//! } //! ``` -pub mod datatype; +pub mod context; +pub mod handler; pub mod registry; +pub mod routing; -/// Unrecoverable failure of the [`Router`]. +/// Unrecoverable failure of the [`Registry`]. /// /// Includes all error types that may occur. /// -/// [`Router`]: registry::Router +/// [`Registry`]: registry::Registry #[derive(Debug, thiserror::Error)] #[must_use = "errors do nothing unless you use them"] pub enum Error { - // #[error("called task failure: {0}")] - // Task(#[from] context::TaskError), + #[error("called task failure: {0}")] + Task(#[from] context::TaskError), } /// Specialized [`Result`] alias for the [`Error`] type. 
diff --git a/crates/task/registry/action.rs b/crates/task/registry/action.rs new file mode 100644 index 0000000..1fb3caf --- /dev/null +++ b/crates/task/registry/action.rs @@ -0,0 +1,76 @@ +//! Operation [`Request`], [`Response`] and [`Manifest`] types. +//! +//! [`Request`]: ActionRequest +//! [`Response`]: ActionResponse +//! [`Manifest`]: ActionManifest + +use serde::{Deserialize, Serialize}; + +/// TODO. +#[derive(Debug, Serialize, Deserialize)] +pub struct ActionRequest {} + +impl ActionRequest { + /// Returns a new [`ActionRequest`]. + pub fn new() -> Self { + Self {} + } +} + +/// TODO. +#[derive(Debug, Serialize, Deserialize)] +pub struct ActionResponse {} + +impl ActionResponse { + /// Returns a new [`ActionResponse`]. + pub fn new() -> Self { + Self {} + } +} + +/// Associated action metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct ActionManifest { + pub name: String, +} + +impl ActionManifest { + /// Returns a new [`ActionManifest`]. + /// + /// Used for testing. 
+ #[inline] + pub fn new(name: &str) -> Self { + Self { + name: name.to_owned(), + } + } + + // pub fn index() -> Index {} +} + +#[cfg(test)] +mod test { + use tower::{service_fn, ServiceBuilder}; + + use crate::context::{TaskError, TaskRequest, TaskResponse}; + use crate::handler::TaskHandlerLayer; + use crate::registry::action::{ActionRequest, ActionResponse}; + use crate::Result; + + async fn action_handle( + request: TaskRequest, + ) -> Result, TaskError> { + Ok(TaskResponse::new(ActionResponse::new())) + } + + #[tokio::test] + async fn native_action_handle() -> Result<()> { + let req = TaskRequest::new(ActionRequest::new()); + let svc = ServiceBuilder::new() + .layer(TaskHandlerLayer::new()) + .service(service_fn(action_handle)); + + Ok(()) + } +} diff --git a/crates/task/registry/custom_serde.rs b/crates/task/registry/custom_serde.rs new file mode 100644 index 0000000..fd8bdc8 --- /dev/null +++ b/crates/task/registry/custom_serde.rs @@ -0,0 +1,62 @@ +pub mod serde_option_duration_ms { + //! Serializing and deserializing `Option` as milliseconds. + //! + //! # Example: + //! + //! - `{"duration": 1500}` for `Some(Duration::from_millis(1500))` + //! - `{"duration": null}` for `None` + + use std::time::Duration; + + use serde::{Deserialize, Deserializer, Serializer}; + + /// Serializes an `Option` into milliseconds. + pub fn serialize(option: &Option, serializer: S) -> Result + where + S: Serializer, + { + match option { + Some(duration) => serializer.serialize_some(&(duration.as_millis() as u64)), + None => serializer.serialize_none(), + } + } + + /// Deserializes an `Option` from milliseconds. + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let millis: Option = Option::deserialize(deserializer)?; + Ok(millis.map(Duration::from_millis)) + } +} + +pub mod serde_duration_ms { + //! Serializing and deserializing `Duration` as milliseconds. + //! + //! # Example: + //! + //! 
- `{"duration": 1500}` for `Duration::from_millis(1500)` + + use std::time::Duration; + + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + /// Serializes a `Duration` into milliseconds. + pub fn serialize(duration: &Duration, serializer: S) -> Result + where + S: Serializer, + { + let millis = duration.as_millis() as u64; + millis.serialize(serializer) + } + + /// Deserializes a `Duration` from milliseconds. + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let millis = u64::deserialize(deserializer)?; + Ok(Duration::from_millis(millis)) + } +} diff --git a/crates/task/registry/handler.rs b/crates/task/registry/handler.rs deleted file mode 100644 index 272a471..0000000 --- a/crates/task/registry/handler.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! TODO. -//! - -use crate::datatype::action::{ActionManifest, ActionRequest, ActionResponse}; -use crate::datatype::trigger::{TriggerManifest, TriggerRequest, TriggerResponse}; -use tower_path::service::WithData; -use tower_task::handler::TaskHandler; - -/// TODO. -pub type TriggerHandler = WithData, TriggerManifest>; - -/// TODO. -pub type ActionHandler = WithData, ActionManifest>; - -#[cfg(test)] -mod test { - use crate::Result; - use tower::{service_fn, ServiceBuilder}; - use tower_path::service::{WithData, WithDataLayer}; - - async fn handle(request: u32) -> Result { - Ok(request) - } - - #[test] - fn service() -> tower_task::Result<()> { - let inner = service_fn(handle); - let _ = WithData::new(inner, 42u32); - Ok(()) - } - - #[test] - fn layer() -> tower_task::Result<()> { - let _ = ServiceBuilder::new() - .layer(WithDataLayer::new(42u32)) - .service(service_fn(handle)); - Ok(()) - } -} diff --git a/crates/task/registry/mod.rs b/crates/task/registry/mod.rs index 8872f98..dadccf2 100644 --- a/crates/task/registry/mod.rs +++ b/crates/task/registry/mod.rs @@ -1,21 +1,18 @@ -//! TODO. +//! TODO. [`Registry`]. //! 
-use std::collections::HashMap; -use std::hash::Hash; use std::sync::{Arc, Mutex}; -use tower_path::routing::Router; +use crate::registry::action::{ActionManifest, ActionRequest, ActionResponse}; +use crate::registry::trigger::{TriggerManifest, TriggerRequest, TriggerResponse}; +use crate::routing::Router; -use crate::registry::handler::{ActionHandler, TriggerHandler}; -use crate::registry::index::{ActionIndex, ServiceIndex, TriggerIndex}; -use crate::Result; - -mod handler; -mod index; -mod cache; +mod action; +mod custom_serde; +mod trigger; /// TODO. +#[must_use = "routers do nothing unless you use them"] #[derive(Debug, Default, Clone)] pub struct Registry { inner: Arc>, @@ -23,9 +20,9 @@ pub struct Registry { #[derive(Debug, Default)] struct RegistryInner { - services: HashMap, - triggers: Router, - actions: Router, + // registered_services: Router, + registered_triggers: Router, + registered_actions: Router, } impl Registry { @@ -35,21 +32,17 @@ impl Registry { Self::default() } - /// TODO. - pub fn register_service(&self) -> Result<()> { - Ok(()) - } + // pub fn register_action() +} - /// TODO. - pub fn register_trigger(&self) -> Result<()> { - Ok(()) - } +#[cfg(test)] +mod test { + use crate::registry::Registry; + use crate::Result; - /// TODO. 
- pub fn register_action(&self) -> Result<()> { + #[test] + fn build_empty() -> Result<()> { + let _ = Registry::new(); Ok(()) } } - -#[cfg(test)] -mod test {} diff --git a/crates/task/datatype/service.rs b/crates/task/registry/service.rs similarity index 66% rename from crates/task/datatype/service.rs rename to crates/task/registry/service.rs index 9b9b475..91a2c1b 100644 --- a/crates/task/datatype/service.rs +++ b/crates/task/registry/service.rs @@ -5,6 +5,9 @@ use serde::{Deserialize, Serialize}; #[must_use = "manifests do nothing unless you serialize them"] pub struct ServiceManifest { pub name: String, + pub description: String, + pub icon: Option, + pub version: String, } impl ServiceManifest { @@ -13,8 +16,11 @@ impl ServiceManifest { /// Used for testing. #[inline] pub fn new(name: &str) -> Self { - Self { - name: name.to_owned(), - } + // Self { + // name: name.to_owned(), + // icon: None, + // } + + todo!() } } diff --git a/crates/task/registry/trigger.rs b/crates/task/registry/trigger.rs new file mode 100644 index 0000000..445f49f --- /dev/null +++ b/crates/task/registry/trigger.rs @@ -0,0 +1,88 @@ +//! Condition [`Request`], [`Response`] and [`Manifest`] types. +//! +//! [`Request`]: TriggerRequest +//! [`Response`]: TriggerResponse +//! [`Manifest`]: TriggerManifest + +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +/// TODO. +#[derive(Debug, Serialize, Deserialize)] +pub struct TriggerRequest {} + +impl TriggerRequest { + /// Returns a new [`TriggerRequest`]. + pub fn new() -> Self { + Self {} + } +} + +/// TODO. +#[derive(Debug, Serialize, Deserialize)] +pub struct TriggerResponse { + pub should_trigger: bool, + pub ignore_retry_ms: Option, +} + +impl TriggerResponse { + /// Returns a new [`TriggerResponse`]. + pub fn new(should_trigger: bool) -> Self { + Self { + should_trigger, + ignore_retry_ms: None, + } + } + + pub fn with_ignore_retry(self) -> Self { + todo!() + } +} + +/// Associated trigger metadata. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct TriggerManifest { + pub name: String, +} + +impl TriggerManifest { + /// Returns a new [`TriggerManifest`]. + /// + /// Used for testing. + #[inline] + pub fn new(name: &str) -> Self { + Self { + name: name.to_owned(), + } + } +} + +#[cfg(test)] +mod test { + use tower::{service_fn, Service, ServiceBuilder}; + + use crate::context::{TaskError, TaskRequest, TaskResponse}; + use crate::handler::TaskHandlerLayer; + use crate::registry::trigger::{TriggerRequest, TriggerResponse}; + use crate::Result; + + async fn trigger_handle( + request: TaskRequest, + ) -> Result, TaskError> { + let resp = TriggerResponse::new(true); + Ok(TaskResponse::builder(resp).build()) + } + + #[tokio::test] + async fn native_trigger_handle() -> Result<()> { + let req = TaskRequest::new(TriggerRequest::new()); + let mut svc = ServiceBuilder::new() + .layer(TaskHandlerLayer::new()) + .service(service_fn(trigger_handle)); + + let _resp = svc.call(req).await?; + Ok(()) + } +} diff --git a/crates/task/registry/index.rs b/crates/task/routing/index.rs similarity index 50% rename from crates/task/registry/index.rs rename to crates/task/routing/index.rs index 3f6cbe9..62ffd08 100644 --- a/crates/task/registry/index.rs +++ b/crates/task/routing/index.rs @@ -1,26 +1,23 @@ -//! TODO. -//! - use std::ops::Deref; + use ecow::EcoString; -use tower_path::routing::index::UniqueIndex; -/// Opaque and unique entity identifier. +/// Opaque and unique identifier. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct UnderlyingIndex { - inner: EcoString + inner: EcoString, } impl UnderlyingIndex { /// Returns a new [`UnderlyingIndex`]. 
#[inline] - pub fn new (inner: impl AsRef) -> Self { + pub fn new(inner: impl AsRef) -> Self { let inner = EcoString::from(inner.as_ref()); Self { inner } } } -impl Deref for UnderlyingIndex { +impl Deref for UnderlyingIndex { type Target = str; #[inline] @@ -28,12 +25,3 @@ impl Deref for UnderlyingIndex { self.inner.as_str() } } - -/// TODO. -pub type ServiceIndex = UniqueIndex; - -/// TODO. -pub type TriggerIndex = UniqueIndex; - -/// TODO. -pub type ActionIndex = UniqueIndex; diff --git a/crates/task/routing/layer_compose.rs b/crates/task/routing/layer_compose.rs new file mode 100644 index 0000000..2f38fd7 --- /dev/null +++ b/crates/task/routing/layer_compose.rs @@ -0,0 +1,77 @@ +//! [`LayerCompose`], [`Layers`] and its [`LayersBuilder`]. + +use serde::{Deserialize, Serialize}; + +/// TODO. +#[derive(Debug, Default, Clone)] +pub struct LayerCompose { + layers: Option, +} + +impl LayerCompose { + /// Returns a new [`LayerCompose`]. + #[inline] + pub fn new(layers: Layers) -> Self { + Self { + layers: Some(layers), + } + } +} + +/// Declarative `tower::`[`Layer`]s configuration. +/// +/// [`Layer`]: tower::Layer +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Layers {} + +impl Layers { + /// Returns a new [`Layers`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`LayersBuilder`]. + #[inline] + pub fn builder() -> LayersBuilder { + LayersBuilder::new() + } +} + +/// [`Layers`] builder. +#[derive(Debug, Default, Clone)] +pub struct LayersBuilder {} + +impl LayersBuilder { + /// Returns a new [`LayersBuilder`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`Layers`]. 
+ #[inline] + pub fn build(self) -> Layers { + Layers {} + } +} + +#[cfg(test)] +mod test { + use crate::routing::layer_compose::{LayerCompose, Layers, LayersBuilder}; + use crate::Result; + + #[test] + fn from_default() -> Result<()> { + let config = Layers::new(); + let _compose = LayerCompose::new(config); + Ok(()) + } + + #[test] + fn from_builder() -> Result<()> { + let config = LayersBuilder::new().build(); + let _compose = LayerCompose::new(config); + Ok(()) + } +} diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs new file mode 100644 index 0000000..d99d4bc --- /dev/null +++ b/crates/task/routing/mod.rs @@ -0,0 +1,79 @@ +//! [`Router`], [`RouteIndex`] and [`compose`] utilities. +//! +//! [`compose`]: Layers + +use std::collections::HashMap; +use std::fmt; +use std::hash::Hash; + +use crate::handler::TaskHandler; +use crate::routing::layer_compose::LayerCompose; +pub use crate::routing::layer_compose::{Layers, LayersBuilder}; +pub use crate::routing::route_index::RouteIndex; +use crate::Result; + +mod index; +mod layer_compose; +mod route_index; + +/// TODO. +#[must_use = "routers do nothing unless you use them"] +pub struct Router { + layer_compose: LayerCompose, + route_services: HashMap>, +} + +impl Router { + /// Returns an empty [`Router`]. + #[inline] + pub fn new(layers: Layers) -> Self { + Self { + layer_compose: LayerCompose::new(layers), + route_services: HashMap::new(), + } + } + + /// Overrides the default value of [`Router`]`::layers`. 
+ #[inline] + pub fn with_layers(mut self, layers: Layers) -> Self { + self.layer_compose = LayerCompose::new(layers); + self + } +} + +impl fmt::Debug for Router { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Router").finish_non_exhaustive() + } +} + +impl Default for Router { + fn default() -> Self { + Self { + layer_compose: LayerCompose::default(), + route_services: HashMap::default(), + } + } +} + +impl Clone for Router { + fn clone(&self) -> Self { + Self { + layer_compose: self.layer_compose.clone(), + route_services: self.route_services.clone(), + } + } +} + +#[cfg(test)] +mod test { + use crate::routing::{Layers, Router}; + use crate::Result; + + #[test] + fn build_default_router() -> Result<()> { + // TODO. + // let _ = Router::new(Layers::new()); + Ok(()) + } +} diff --git a/crates/cli/service/registry.rs b/crates/task/routing/registry.rs similarity index 100% rename from crates/cli/service/registry.rs rename to crates/task/routing/registry.rs diff --git a/crates/task/routing/route_index.rs b/crates/task/routing/route_index.rs new file mode 100644 index 0000000..b0279bd --- /dev/null +++ b/crates/task/routing/route_index.rs @@ -0,0 +1,47 @@ +use std::ops::{Deref, DerefMut}; + +/// Opaque and unique identifier. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct RouteIndex { + inner: T, +} + +impl RouteIndex { + /// Returns a new [`RouteIndex`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { inner } + } + + /// Returns the underlying index. 
+ #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl Deref for RouteIndex { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for RouteIndex { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +#[cfg(test)] +mod test { + use crate::Result; + + #[test] + fn index_from_string() -> Result<()> { + Ok(()) + } +} diff --git a/scripts/tower-path/.gitignore b/scripts/tower-path/.gitignore deleted file mode 100644 index 60593be..0000000 --- a/scripts/tower-path/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# OS -Thumbs.db -.DS_Store - -# Editors -.vs/ -.vscode/ -.idea/ -.fleet/ - -# Lang: Rust -debug/ -target/ -Cargo.lock -**/*.rs.bk -*.pdb - -# Output -dist/ -output/ -build/ - -# Binaries -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Environment -env/ -.env -.env* - -# Logs -logs/ -*.log -*.log* diff --git a/scripts/tower-path/Cargo.toml b/scripts/tower-path/Cargo.toml deleted file mode 100644 index af11093..0000000 --- a/scripts/tower-path/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[package] -name = "tower-path" -version = { workspace = true } -edition = { workspace = true } -license = "MIT" -publish = { workspace = true } -readme = "./README.md" - -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -documentation = { workspace = true } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lib] -path = "lib.rs" - -[features] -serde = ["dep:serde"] -trace = ["dep:tracing"] - -[dependencies] -tower = { version = "0.5", features = ["load", "util"] } -futures = { version = "0.3", features = [] } -pin-project-lite = { version = "0.2", features = [] } - -serde = { version = "1.0", optional = true, features = ["derive"] } -tracing = { version = "0.1", optional = true, features = [] } diff --git a/scripts/tower-path/LICENSE.txt 
b/scripts/tower-path/LICENSE.txt deleted file mode 100644 index 568f639..0000000 --- a/scripts/tower-path/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 Axiston - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/scripts/tower-path/README.md b/scripts/tower-path/README.md deleted file mode 100644 index cce61ec..0000000 --- a/scripts/tower-path/README.md +++ /dev/null @@ -1,26 +0,0 @@ -### tower/path - -**Also check out other `axiston` projects [here](https://github.com/axiston).** - -Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - -### Features - -- Lorem Ipsum. -- Lorem Ipsum. -- Lorem Ipsum. - -### Notes - -- The initial version of this project was developed as part of the - [axiston](https://github.com/axiston) project foundation. The source code is - currently hosted in the [axiston/runtime](https://github.com/axiston/runtime) - repository. -- Lorem Ipsum. -- Lorem Ipsum. 
- -### Examples - -```rust -fn main() {} -``` diff --git a/scripts/tower-path/handler/mod.rs b/scripts/tower-path/handler/mod.rs deleted file mode 100644 index ac3435b..0000000 --- a/scripts/tower-path/handler/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! TODO. -//! - -mod layer; diff --git a/scripts/tower-path/lib.rs b/scripts/tower-path/lib.rs deleted file mode 100644 index 4f51220..0000000 --- a/scripts/tower-path/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -#![forbid(unsafe_code)] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc = include_str!("./README.md")] - -//! ### Examples -//! -//! ```rust -//! fn main() {} -//! ``` - -pub mod handler; -pub mod routing; -pub mod service; - -// TODO: ServiceBuilderExt for TaskHandler. - -// TODO: Move tower-task into its own repository. -// .github/dependabot.yaml,.github/workflows, rustfmt.toml diff --git a/scripts/tower-path/routing/container.rs b/scripts/tower-path/routing/container.rs deleted file mode 100644 index 78b7c3d..0000000 --- a/scripts/tower-path/routing/container.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::collections::HashMap; -use std::hash::Hash; - -/// TODO. -pub trait RouterContainer { - /// Inserts an index-handler pair into the [`RouterContainer`]. - fn route(&mut self, index: I, service: S) -> Option; - - /// Removes an index from the [`RouterContainer`]. 
- fn forget(&mut self, index: I) -> Option; -} - -impl RouterContainer for () { - #[inline] - fn route(&mut self, _index: I, _service: S) -> Option { - None - } - - #[inline] - fn forget(&mut self, _index: I) -> Option { - None - } -} - -impl RouterContainer for HashMap -where - I: Eq + Hash, - S: Clone, -{ - #[inline] - fn route(&mut self, index: I, service: S) -> Option { - self.insert(index, service) - } - - #[inline] - fn forget(&mut self, index: I) -> Option { - self.remove(&index) - } -} - -impl RouterContainer for Vec -where - I: Into, - S: Clone, -{ - #[inline] - fn route(&mut self, index: I, service: S) -> Option { - todo!() - } - - #[inline] - fn forget(&mut self, index: I) -> Option { - todo!() - } -} diff --git a/scripts/tower-path/routing/index.rs b/scripts/tower-path/routing/index.rs deleted file mode 100644 index 10e9b8d..0000000 --- a/scripts/tower-path/routing/index.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! TODO. -//! - -use std::hash::Hash; - -/// TODO. -pub trait RouterIndex {} - -/// TODO. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub struct SegmentIndex { - inner: T, -} - -impl SegmentIndex {} - -impl RouterIndex for SegmentIndex where T: Eq + Hash {} - -/// TODO. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub struct UniqueIndex { - // TODO: P = () - inner: T, -} - -impl UniqueIndex {} - -impl RouterIndex for UniqueIndex where T: Eq + Hash {} - -/// The default type for router indices. -pub type DefaultIx = UniqueIndex; diff --git a/scripts/tower-path/routing/mod.rs b/scripts/tower-path/routing/mod.rs deleted file mode 100644 index 075fafa..0000000 --- a/scripts/tower-path/routing/mod.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! TODO. -//! - -use std::collections::HashMap; -use std::fmt; -use std::marker::PhantomData; -use std::task::{Context, Poll}; - -use tower::Service; - -use crate::routing::container::RouterContainer; -use crate::routing::index::{DefaultIx, RouterIndex}; - -pub mod container; -pub mod index; - -/// TODO. 
-#[derive(Clone)] -#[must_use = "routers do nothing unless you use them"] -pub struct Router> { - index: PhantomData, - service: PhantomData, - inner: Contain, -} - -impl Router { - /// Returns a new [`Router`]. - #[inline] - pub fn new(inner: Contain) -> Self { - Self { - index: PhantomData, - service: PhantomData, - inner, - } - } - - /// Returns the underlying route container. - #[inline] - pub fn into_inner(self) -> Contain { - self.inner - } -} - -impl Router -where - I: RouterIndex, - Contain: RouterContainer, -{ - /// Inserts an index-handler pair into the [`Router`]. - #[inline] - pub fn route(mut self, ix: I, route: S) -> Self { - let _ = self.inner.route(ix, route); - self - } - - /// Removes an index from the [`Router`]. - #[inline] - pub fn forget(mut self, ix: I) -> Self { - let _ = self.inner.forget(ix); - self - } -} - -impl Default for Router -where - Contain: Default, -{ - #[inline] - fn default() -> Self { - - Self::new(Contain::default()) - - } -} - -impl fmt::Debug for Router { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Router").finish_non_exhaustive() - } -} - -impl Service for Router -where - S: Service, -{ - type Response = S::Response; - type Error = S::Error; - type Future = S::Future; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - todo!() - } - - #[inline] - fn call(&mut self, req: Req) -> Self::Future { - todo!() - } -} - -#[cfg(test)] -mod test {} diff --git a/scripts/tower-path/service/layer.rs b/scripts/tower-path/service/layer.rs deleted file mode 100644 index f1010f0..0000000 --- a/scripts/tower-path/service/layer.rs +++ /dev/null @@ -1,41 +0,0 @@ -use tower::Layer; - -use crate::service::WithData; - -/// `tower::`[`Layer`] that produces a [`WithData`] services. -#[derive(Debug, Default, Clone)] -pub struct WithDataLayer { - manifest: M, -} - -impl WithDataLayer { - /// Returns a new [`WithDataLayer`]. 
- #[inline] - pub fn new(manifest: M) -> Self { - Self { manifest } - } -} - -impl Layer for WithDataLayer -where - M: Clone, -{ - type Service = WithData; - - #[inline] - fn layer(&self, inner: S) -> Self::Service { - WithData::new(inner, self.manifest.clone()) - } -} - -#[cfg(test)] -mod test { - use crate::service::WithDataLayer; - use crate::Result; - - #[test] - fn layer() -> Result<()> { - let _ = WithDataLayer::new(42u32); - Ok(()) - } -} diff --git a/scripts/tower-path/service/mod.rs b/scripts/tower-path/service/mod.rs deleted file mode 100644 index aeb6c40..0000000 --- a/scripts/tower-path/service/mod.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! [`WithData`] and [`WithDataLayer`]. - -use std::fmt; -use std::ops::{Deref, DerefMut}; -use std::task::{Context, Poll}; - -use tower::Service; - -pub use crate::service::layer::WithDataLayer; -mod layer; - -/// Simple `tower::`[`Service`] for attaching additional metadata. -#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] -pub struct WithData { - inner: S, - data: M, -} - -impl WithData { - /// Returns a new [`WithData`]. - #[inline] - pub fn new(inner: S, data: M) -> Self { - Self { inner, data } - } - - /// Returns the underlying parts. 
- #[inline] - pub fn into_inner(self) -> (S, M) { - (self.inner, self.data) - } -} - -impl Deref for WithData { - type Target = M; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.data - } -} - -impl DerefMut for WithData { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.data - } -} - -impl fmt::Debug for WithData -where - M: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WithData") - .field("data", &self.data) - .finish_non_exhaustive() - } -} - -impl Service for WithData -where - S: Service, -{ - type Response = S::Response; - type Error = S::Error; - type Future = S::Future; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready(cx) - } - - #[inline] - fn call(&mut self, req: Req) -> Self::Future { - self.inner.call(req) - } -} - diff --git a/scripts/tower-task/.gitignore b/scripts/tower-task/.gitignore deleted file mode 100644 index 60593be..0000000 --- a/scripts/tower-task/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# OS -Thumbs.db -.DS_Store - -# Editors -.vs/ -.vscode/ -.idea/ -.fleet/ - -# Lang: Rust -debug/ -target/ -Cargo.lock -**/*.rs.bk -*.pdb - -# Output -dist/ -output/ -build/ - -# Binaries -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Environment -env/ -.env -.env* - -# Logs -logs/ -*.log -*.log* diff --git a/scripts/tower-task/Cargo.toml b/scripts/tower-task/Cargo.toml deleted file mode 100644 index 88896e1..0000000 --- a/scripts/tower-task/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[package] -name = "tower-task" -version = { workspace = true } -edition = { workspace = true } -license = "MIT" -publish = { workspace = true } -readme = "./README.md" - -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -documentation = { workspace = true } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - 
-[lib] -path = "lib.rs" - -[features] -serde = ["dep:serde"] -trace = ["dep:tracing"] - -[dependencies] -tower = { version = "0.5", features = ["load", "util"] } -futures = { version = "0.3", features = [] } -pin-project-lite = { version = "0.2", features = [] } - -thiserror = { version = "1.0", features = [] } -serde = { version = "1.0", optional = true, features = ["derive"] } -tracing = { version = "0.1", optional = true, features = [] } diff --git a/scripts/tower-task/LICENSE.txt b/scripts/tower-task/LICENSE.txt deleted file mode 100644 index 568f639..0000000 --- a/scripts/tower-task/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2024 Axiston - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/scripts/tower-task/README.md b/scripts/tower-task/README.md deleted file mode 100644 index 1dbf910..0000000 --- a/scripts/tower-task/README.md +++ /dev/null @@ -1,20 +0,0 @@ -### tower/task - -**Also check out other `axiston` projects [here](https://github.com/axiston).** - -Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - -### Features - -- Lorem Ipsum. -- Lorem Ipsum. -- Lorem Ipsum. - -### Notes - -- The initial version of this crate was developed as part of the - [axiston](https://github.com/axiston) project. -- The source code is currently hosted in the - [axiston/runtime](https://github.com/axiston/runtime) repository. -- Lorem Ipsum. -- Lorem Ipsum. diff --git a/scripts/tower-task/compose/builder.rs b/scripts/tower-task/compose/builder.rs deleted file mode 100644 index bb46e6d..0000000 --- a/scripts/tower-task/compose/builder.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::compose::LayersConfig; - -/// [`LayersConfig`] builder. -#[derive(Debug, Default, Clone)] -pub struct LayersBuilder {} - -impl LayersBuilder { - /// Returns a new [`LayersBuilder`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Returns a new [`LayersConfig`]. - #[inline] - pub fn build(self) -> LayersConfig { - LayersConfig {} - } -} - -#[cfg(test)] -mod test { - use crate::compose::{LayersBuilder, LayersConfig}; - - #[test] - fn from_default() { - let _ = LayersConfig::new(); - } - - #[test] - fn from_builder() { - let _ = LayersBuilder::new(); - } -} diff --git a/scripts/tower-task/compose/layers.rs b/scripts/tower-task/compose/layers.rs deleted file mode 100644 index 4d139d1..0000000 --- a/scripts/tower-task/compose/layers.rs +++ /dev/null @@ -1,25 +0,0 @@ -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use crate::compose::LayersBuilder; - -/// [`LayerCompose`] configuration for a single service call. 
-/// -/// [`LayerCompose`]: crate::service::LayerCompose -#[derive(Debug, Default, Clone, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct LayersConfig {} - -impl LayersConfig { - /// Returns a new [`LayersConfig`]. - #[inline] - pub fn new() -> Self { - Self::new() - } - - /// Returns a new [`LayersBuilder`]. - #[inline] - pub fn builder() -> LayersBuilder { - LayersBuilder::new() - } -} diff --git a/scripts/tower-task/compose/mod.rs b/scripts/tower-task/compose/mod.rs deleted file mode 100644 index b736776..0000000 --- a/scripts/tower-task/compose/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -//! [`LayerCompose`], [`LayersConfig`] and its [`LayersBuilder`]. - -pub use crate::compose::builder::LayersBuilder; -pub use crate::compose::layers::LayersConfig; - -mod builder; -mod layers; - -/// TODO. -#[derive(Debug)] -pub struct LayerCompose {} - -impl LayerCompose {} diff --git a/scripts/tower-task/context/failure.rs b/scripts/tower-task/context/failure.rs deleted file mode 100644 index c5b0824..0000000 --- a/scripts/tower-task/context/failure.rs +++ /dev/null @@ -1,18 +0,0 @@ -/// Unrecoverable failure duration [`TaskHandler`] execution. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -#[error("failure during `TaskHandler` execution")] -pub struct TaskError { - // TODO: Implement From>. - - // name: String, - // message: String, - // explain: String, -} - -impl TaskError {} - -#[cfg(test)] -mod test {} diff --git a/scripts/tower-task/context/mod.rs b/scripts/tower-task/context/mod.rs deleted file mode 100644 index bf9c828..0000000 --- a/scripts/tower-task/context/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! [`TaskRequest`] and [`TaskResponse`] types. - -pub use crate::context::failure::TaskError; -pub use crate::context::request::TaskRequest; -pub use crate::context::response::TaskResponse; - -pub mod builder { - //! 
[`TaskRequest`] and [`TaskResponse`] builders. - //! - //! [`TaskRequest`]: crate::context::TaskRequest - //! [`TaskResponse`]: crate::context::TaskResponse - - pub use super::request::TaskRequestBuilder; - pub use super::response::TaskResponseBuilder; -} - -mod failure; -mod request; -mod response; -mod state; diff --git a/scripts/tower-task/context/request.rs b/scripts/tower-task/context/request.rs deleted file mode 100644 index 265616e..0000000 --- a/scripts/tower-task/context/request.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::fmt; -use std::ops::{Deref, DerefMut}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -/// Serializable [`TaskHandler`] service request. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Clone)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[must_use = "requests do nothing unless you serialize them"] -pub struct TaskRequest { - // layers: LayersConfig, - inner: T, -} - -impl TaskRequest { - /// Returns a new [`TaskRequest`]. - #[inline] - pub fn new(inner: T) -> Self { - Self { inner } - } - - /// Returns a new [`TaskRequestBuilder`]. - #[inline] - pub fn builder(inner: T) -> TaskRequestBuilder { - TaskRequestBuilder::new(inner) - } - - /// Returns the inner data. - #[inline] - pub fn into_inner(self) -> T { - self.inner - } -} - -impl Deref for TaskRequest { - type Target = T; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for TaskRequest { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -impl fmt::Debug for TaskRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskRequest").finish_non_exhaustive() - } -} - -/// [`TaskHandler`] service request builder. 
-/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Clone)] -#[must_use = "requests do nothing unless you serialize them"] -pub struct TaskRequestBuilder { - inner: T, -} - -impl TaskRequestBuilder { - /// Returns a new [`TaskRequestBuilder`]. - #[inline] - pub fn new(inner: T) -> Self { - Self { inner } - } - - /// Returns a new [`TaskRequest`]. - pub fn build(self) -> TaskRequest { - TaskRequest { inner: self.inner } - } -} - -#[cfg(test)] -mod test { - use crate::context::TaskRequest; - use crate::Result; - - #[test] - fn build() -> Result<()> { - let _ = TaskRequest::builder(5).build(); - Ok(()) - } -} diff --git a/scripts/tower-task/context/state.rs b/scripts/tower-task/context/state.rs deleted file mode 100644 index e37be40..0000000 --- a/scripts/tower-task/context/state.rs +++ /dev/null @@ -1 +0,0 @@ -// TODO: Implemented State with anymap3. diff --git a/scripts/tower-task/handler/layer.rs b/scripts/tower-task/handler/layer.rs deleted file mode 100644 index 1069899..0000000 --- a/scripts/tower-task/handler/layer.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::marker::PhantomData; - -use tower::{Layer, Service}; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::TaskHandler; - -/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. -pub struct TaskHandlerLayer { - inner: PhantomData<(Req, T, U)>, -} - -impl TaskHandlerLayer { - /// Returns a new [`TaskHandlerLayer`]. 
- #[inline] - pub fn new() -> Self { - Self::default() - } -} - -impl Default for TaskHandlerLayer { - fn default() -> Self { - Self { inner: PhantomData } - } -} - -impl Layer for TaskHandlerLayer -where - T: 'static, - U: 'static, - S: Service + Clone + Send + 'static, - Req: From> + 'static, - S::Response: Into> + 'static, - S::Error: Into + 'static, - S::Future: Send + 'static, -{ - type Service = TaskHandler; - - #[inline] - fn layer(&self, inner: S) -> Self::Service { - TaskHandler::new(inner) - } -} - -#[cfg(test)] -mod test { - use crate::handler::TaskHandlerLayer; - use crate::Result; - - #[test] - fn layer() -> Result<()> { - let _ = TaskHandlerLayer::::new(); - Ok(()) - } -} diff --git a/scripts/tower-task/lib.rs b/scripts/tower-task/lib.rs deleted file mode 100644 index 6ce9107..0000000 --- a/scripts/tower-task/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![forbid(unsafe_code)] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc = include_str!("./README.md")] - -//! ### Examples -//! -//! ```rust -//! use tower_task::Result; -//! -//! fn main() -> Result<()> { -//! Ok(()) -//! } -//! ``` - -pub mod context; -pub mod handler; -pub mod compose; - -/// Specialized [`Result`] alias for the [`TaskError`] type. -/// -/// [`TaskError`]: crate::context::TaskError -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; - -// TODO: Move tower-task into its own repository. 
-// .github/dependabot.yaml,.github/workflows, rustfmt.toml From bd11c99a2b83cc6bc21d4d58ab58103444a5fb7b Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Wed, 11 Dec 2024 09:08:31 +0100 Subject: [PATCH 04/11] feat(all): schema 1/n --- Cargo.toml | 2 +- Makefile | 11 ++ crates/schema/Cargo.toml | 2 + crates/schema/lib.rs | 47 ++++++- crates/schema/protobuf/entity.proto | 82 +++++++++++ crates/schema/protobuf/event/request.proto | 99 +++++++++++++ crates/schema/protobuf/event/response.proto | 131 ++++++++++++++++++ crates/schema/protobuf/instance.proto | 114 ++++++++------- .../schema/protobuf/internal/jsonschema.proto | 38 +++++ .../schema/protobuf/internal/jsonvalue.proto | 38 +++++ crates/schema/protobuf/registry.proto | 75 ++++++---- crates/server/Cargo.toml | 4 +- crates/server/handler/instance.rs | 24 +++- .../service/{config.rs => app_config.rs} | 0 crates/server/service/cache.rs | 6 - crates/server/service/mod.rs | 4 +- crates/task/lib.rs | 3 + 17 files changed, 579 insertions(+), 101 deletions(-) create mode 100644 Makefile create mode 100644 crates/schema/protobuf/entity.proto create mode 100644 crates/schema/protobuf/event/request.proto create mode 100644 crates/schema/protobuf/event/response.proto create mode 100644 crates/schema/protobuf/internal/jsonschema.proto create mode 100644 crates/schema/protobuf/internal/jsonvalue.proto rename crates/server/service/{config.rs => app_config.rs} (100%) delete mode 100644 crates/server/service/cache.rs diff --git a/Cargo.toml b/Cargo.toml index ba73a79..b138e33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ documentation = "https://docs.rs/axiston" [workspace.dependencies] axiston-rt-jsvm = { path = "./crates/jsvm", version = "0.1.0" } -axiston-rt-schema = { path = "./crates/schema", version = "0.1.0", features = ["server"] } +axiston-rt-schema = { path = "./crates/schema", version = "0.1.0" } axiston-rt-server = { path = "./crates/server", version = "0.1.0" } axiston-rt-task = { path = 
"./crates/task", version = "0.1.0" } diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..b8e0a56 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +# Makefile for client & server GRPC Generation +# https://github.com/hyperium/tonic + +# Environment Variables +SCHEMA_OUTPUT = ./crates/schema/generated/ + +.PHONY: clean +clean: ## Deletes the output directory. + $(call print-info, "Cleaning project...") + rm -f $(SCHEMA_OUTPUT) + $(call print-success, "Project cleaned.") diff --git a/crates/schema/Cargo.toml b/crates/schema/Cargo.toml index aa3043d..571d081 100644 --- a/crates/schema/Cargo.toml +++ b/crates/schema/Cargo.toml @@ -21,6 +21,8 @@ rustdoc-args = ["--cfg", "docsrs"] path = "lib.rs" [features] +default = ["client", "server"] + # Enables or disables gRPC client code generation. client = [] # Enables or disables gRPC server code generation. diff --git a/crates/schema/lib.rs b/crates/schema/lib.rs index 7afd028..1e3864a 100644 --- a/crates/schema/lib.rs +++ b/crates/schema/lib.rs @@ -2,22 +2,55 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("./README.md")] -//! ### Examples -//! -//! ```rust -//! fn main() {} -//! ``` +pub mod json { + //! Includes files generated by `prost`. + + pub mod schema { + //! Includes files generated by `prost`. + //! Built from `jsonschema.proto`. + + include!("./generated/rt.json.schema.rs"); + } + + pub mod value { + //! Includes files generated by `prost`. + //! Built from `jsonvalue.proto`. + + include!("./generated/rt.json.value.rs"); + } +} pub mod instance { //! Includes files generated by `prost`. //! Built from `instance.proto`. - include!("./generated/instance.rs"); + include!("./generated/rt.instance.rs"); } pub mod registry { //! Includes files generated by `prost`. //! Built from `registry.proto`. - include!("./generated/registry.rs"); + include!("./generated/rt.registry.rs"); +} + +pub mod entity { + //! Includes files generated by `prost`. + //! Built from `entity.proto`. 
+ + include!("./generated/rt.entity.rs"); +} + +pub mod request { + //! Includes files generated by `prost`. + //! Built from `request.proto`. + + include!("./generated/rt.request.rs"); +} + +pub mod response { + //! Includes files generated by `prost`. + //! Built from `response.proto`. + + include!("./generated/rt.response.rs"); } diff --git a/crates/schema/protobuf/entity.proto b/crates/schema/protobuf/entity.proto new file mode 100644 index 0000000..8f954be --- /dev/null +++ b/crates/schema/protobuf/entity.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; +import "internal/jsonschema.proto"; +import "internal/jsonvalue.proto"; + +package rt.entity; + +// Describes service details. +message Service { + // Unique identifier for the service. + string service_id = 1; + // Name of the service (e.g., Google Email). + string name = 21; + // Unique identifier for the service's icon. + string icon = 22; + // Brief description of the service. + string description = 4; +} + +// Describes action or trigger details. +message Entity { + string entity_id = 1; + string service_id = 2; + + string name = 21; + string icon = 22; + + // Input specifications for this entity. + repeated Input inputs = 7; + // Output specifications for this entity. + repeated Output outputs = 8; + // Possible error codes this entity might return. + repeated Error errors = 9; +} + +// Describes secrets required by a entity. +message Secret { + // Unique identifier for the secret. + string id = 1; + // Name of the secret (e.g., "API Key"). + string name = 2; + // Description of the secret. + string description = 3; + // Whether the secret is mandatory for the service. + bool is_required = 4; +} + +// Describes the input requirements for an entity. +message Input { + // Name of the input (e.g., "recipient"). + string name = 1; + // Data type of the input (e.g., "string", "int"). 
+ rt.json.schema.JsonSchema data_type = 2; + // Description of the input. + string description = 3; + // Whether this input is mandatory. + bool is_required = 4; + // Default value for the input, if applicable. + rt.json.value.JsonValue default_value = 5; +} + +// Describes the output generated by an entity. +message Output { + // Name of the output (e.g., "message_id"). + string name = 1; + // Data type of the output (e.g., "string", "int"). + rt.json.schema.JsonSchema data_type = 2; + // Description of the output. + string description = 3; +} + +// Describes potential errors an entity can return. +message Error { + // Unique error code (e.g., "ERR_401"). + string code = 1; + // Human-readable error message. + string message = 2; + // Description or guidance for resolving the error. + string resolution = 3; +} diff --git a/crates/schema/protobuf/event/request.proto b/crates/schema/protobuf/event/request.proto new file mode 100644 index 0000000..920ec71 --- /dev/null +++ b/crates/schema/protobuf/event/request.proto @@ -0,0 +1,99 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +package rt.request; + +// Request to open a connection to the event bus. +// Indicates the Gateway is ready accept events. +message OpenRequest { + // Token for authenticating the Gateway. + string authentication_token = 3; + // Required runtime capabilities. + repeated string runtime_capabilities = 4; + // Preferred communication protocols. + repeated string preferred_protocols = 5; + + // Constraints on resources for the task. + optional ResourceLimits resource_limits = 26; +} + +// Request to submit a task for execution by the Runtime. +message ExecuteRequest { + // Unique identifier for the task. + string task_id = 1; + // Custom task parameters as key-value pairs. + map task_fields = 2; + // Sensitive task-specific data (e.g., API keys). + map task_secrets = 3; + + // Priority level of the task (higher is more important). 
+ optional int32 priority = 23; + // Deadline for the task completion. + optional google.protobuf.Timestamp deadline = 24; + // Whether task dependencies are cached. + optional bool cache_deps = 25; + + // Policy for retrying failed tasks. + optional RetryPolicy retry_policy = 31; + // Policy for handling task timeouts. + optional TimeoutPolicy timeout_policy = 32; +} + +// Request to close the connection, preventing the Runtime from accepting new tasks. +message CloseRequest { + // Force immediate closure without waiting. + // Ignores pending tasks (from this connection) before closing. + optional bool force_close = 2; + // Reason for closing the connection. + optional string reason = 3; + // Require acknowledgment before closing. + optional bool ack_required = 4; +} + +// Policy for retrying failed tasks. +message RetryPolicy { + // Maximum number of retry attempts. + uint32 max_retries = 1; + // Base delay between consecutive retries. + google.protobuf.Duration base_backoff = 2; + // Multiplier for exponential backoff. + optional double exponential_multiplier = 3; + // Maximum delay between consecutive retries. + google.protobuf.Duration max_backoff = 4; +} + +// Policy for handling task timeouts. +message TimeoutPolicy { + // Maximum execution time allowed for the task. + google.protobuf.Duration execution_timeout = 1; + // Whether to forcibly terminate the task on timeout. + bool terminate_on_timeout = 2; + // Action to take on timeout (e.g., "retry", "terminate"). + TimeoutAction timeout_action = 3; + // Extra time given before final termination after timeout. + optional google.protobuf.Duration grace_period = 4; + // Frequency of checking for timeout conditions. + optional google.protobuf.Duration monitor_interval = 5; +} + +// Lists all of possible timeout actions. +enum TimeoutAction { + // Default value, action unspecified. + TIMEOUT_ACTION_UNSPECIFIED = 0; + // Task is considered to be failed. Retry the task. 
+ TIMEOUT_ACTION_RETRY = 1; + // Task is considered to be failed. Do not retry the task. + TIMEOUT_ACTION_TERMINATE = 2; +} + +// Limits runtime resources. +message ResourceLimits { + // Maximum used CPU percentage. + uint32 max_cpu_percent = 1; + // Maximum used RAM in MB. + uint32 max_ram_mb = 2; + // Maximum used disk in MB. + uint64 max_disk_mb = 3; +} diff --git a/crates/schema/protobuf/event/response.proto b/crates/schema/protobuf/event/response.proto new file mode 100644 index 0000000..d8e4de5 --- /dev/null +++ b/crates/schema/protobuf/event/response.proto @@ -0,0 +1,131 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +package rt.response; + +// Start execution response message. +message OpenResponse { + google.protobuf.Timestamp started = 1; + optional google.protobuf.Duration estimate = 2; +} + +// Response message containing the result of task execution. +message ExecuteResponse { + // Unique identifier for the task. + string task_id = 1; + // Task's return code indicating success or failure. + uint32 return_code = 2; + + // When the task started. + google.protobuf.Timestamp start_time = 4; + // When the task completed. + google.protobuf.Timestamp end_time = 5; + // Total time taken for execution. + google.protobuf.Duration execution_time = 6; +} + +// Response to acknowledge that no new tasks will be accepted. +message CloseResponse { + // Indicates if it's safe to terminate the connection. + bool is_safe_to_close = 1; + // Number of tasks still in the queue or running. + int32 remaining_tasks = 3; +} + +// Intermediate task status notification sent by the runtime. +message NotifyResponse { + oneof status_info { + // Details for the "waiting" status. + WaitingStatus waiting = 21; + // Details for the "pre-running" status. + PreRunningStatus pre_running = 22; + // Details for the "running" status. + RunningStatus running = 23; + // Details for the "post-running" status. 
+ PostRunningStatus post_running = 24; + } +} + +// Waiting status information. +message WaitingStatus { + // Number of tasks ahead in the queue. + uint32 tasks_before = 1; + // Total number of tasks in the queue. + uint32 queue_size = 2; + // Maximum capacity of the queue. + uint32 queue_capacity = 3; + // Estimated wait time before task starts. + google.protobuf.Duration wait_time = 4; +} + +// Pre-running status information. +message PreRunningStatus { + // Size (in bytes) of data to serialize. + uint64 input_bytes = 1; + // Version of the Runtime. + uint64 runtime_version = 2; + // File system type used ("virtual" or "physical"). + FileSystemType fs_type = 3; + // Whether task dependencies are cached. + bool cache_deps = 4; + // Allocated runtime resources. + ResourceAllocated resource = 5; +} + +// Running status information. +message RunningStatus { + // Identifier of the thread running this task. + int32 thread_id = 8; + // Estimated remaining run time. + google.protobuf.Duration run_time = 1; + // Current progress checkpoint. + int32 checkpoint = 6; +} + +// Post-running status information. +message PostRunningStatus { + // Task's return code indicating success or failure. + uint32 return_code = 1; + // Total bytes read during task execution. + uint64 read_bytes = 2; + // Total bytes written during task execution. + uint64 written_bytes = 3; + // Size (in bytes) of data to deserialize. + uint64 output_bytes = 4; + // Peak or maximum recorded resource usage. + ResourceUsage resource_usage = 5; +} + +// Represents the type of filesystem used. +enum FileSystemType { + // Default value, should not be used. + FILESYSTEM_UNSPECIFIED = 0; + // Virtual filesystem (e.g., in-memory, network-based). + FILESYSTEM_VIRTUAL = 1; + // Physical filesystem (e.g., SSD, HDD). + FILESYSTEM_PHYSICAL = 2; +} + +// Allocated runtime resources. +message ResourceAllocated { + // Allocated CPU cores. + uint32 allocated_cpu_cores = 21; + // Allocated RAM in megabytes. 
+ uint32 allocated_ram_mb = 22; + // Allocated HDD in megabytes. + uint32 allocated_disk_mb = 23; +} + +// Peak or maximum recorded resource usage. +message ResourceUsage { + // Peak CPU usage as a percentage. + uint32 peak_cpu_percent = 21; + // Peak RAM usage in megabytes. + uint32 peak_ram_mb = 22; + // Peak disk usage in megabytes. + uint32 peak_disk_mb = 23; + // Peak GPU usage as a percentage. + uint32 peak_gpu_percent = 24; +} diff --git a/crates/schema/protobuf/instance.proto b/crates/schema/protobuf/instance.proto index bfc5d7e..caee5ec 100644 --- a/crates/schema/protobuf/instance.proto +++ b/crates/schema/protobuf/instance.proto @@ -2,15 +2,17 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; import "google/protobuf/duration.proto"; +import "event/request.proto"; +import "event/response.proto"; -package instance; +package rt.instance; -// The message format for sending events. +// Describes the message format for sending events. message EventRequest { // The unique ID of the request message. - string id = 1; + uint32 request_id = 1; // The unique ID of the message group. - string group = 2; + uint32 group_id = 2; // When the event was recv by the gateway. google.protobuf.Timestamp recv = 3; @@ -19,23 +21,23 @@ message EventRequest { // The content of the message. oneof payload { - // Step 1.1 - OpenRequest Open = 10; - // Step 2.2 - NotifyResponse AckNotify = 11; - // Step 3.2 - CloseResponse AckClose = 12; + // Step 1.1: Gateway requests to open a connection. + rt.request.OpenRequest open_request = 11; + // Step 2.1: Gateway submits a task for the execution. + rt.request.ExecuteRequest execute_request = 12; + // Step 3.1: Gateway requests to close the connection. + rt.request.CloseRequest close_request = 13; } } -// The message format for receiving events. +// Describes the message format for receiving events. message EventResponse { + // The unique ID of the request message. 
+ uint32 request_id = 1; // The unique ID of the response message. - uint32 id = 1; + uint32 response_id = 2; // The unique ID of the message group. - uint32 group = 2; - // The unique ID of the request message. - uint32 reply = 3; + uint32 group_id = 3; // When the event was recv by the runtime. google.protobuf.Timestamp recv = 4; @@ -44,50 +46,64 @@ message EventResponse { // The content of the message. oneof payload { - // Step 1.2 - OpenResponse AckOpen = 10; - // Step 2.1 - NotifyRequest Notify = 11; - // Step 3.1 - CloseRequest Close = 12; + // Step 1.2: Runtime acknowledges that the connection is open. + rt.response.OpenResponse open_response = 11; + // Step 2.2: Runtime notifies the Gateway about the task's status change. + rt.response.NotifyResponse notify_response = 12; + // Step 2.3: Runtime responds with the result of executing the task. + rt.response.ExecuteResponse execute_response = 13; + // Step 3.2: Runtime confirms the connection is closed. + rt.response.CloseResponse close_response = 14; } } -// Start execution request message. -message OpenRequest { - string task = 1; - - map fields = 2; - map secrets = 3; -} - -// Start execution response message. -message OpenResponse { - google.protobuf.Timestamp started = 1; - optional google.protobuf.Duration estimate = 2; -} +// Requests service status and metrics. +message StatusRequest { + // Forces retrieval of the latest metrics. + optional bool current = 11; + // Includes detailed metrics in the response. + optional bool verbose = 12; -message NotifyRequest { - string task = 1; + // Sliding window length (used by metrics). + optional int32 window = 21; } -message NotifyResponse {} - -message CloseRequest { - string task = 1; +// Details service status and performance metrics. +message StatusResponse { + // Task-related metrics: + + // Number of tasks waiting in the queue to be processed. + int64 tasks_waiting = 11; + // Number of tasks currently being processed. 
+ int64 tasks_running = 12; + // Total number of tasks that have been completed successfully. + int64 tasks_done = 13; + + // Time-related metrics: + + // Total time the service has been running since startup (human-readable format). + google.protobuf.Duration total_uptime = 21; + // Cumulative time the service has spent idle (not processing any tasks). + google.protobuf.Duration total_idle_time = 22; + // Cumulative time the service has been overwhelmed and tasks have been queued due to load. + google.protobuf.Duration total_wait_time = 23; + + // Average processing time for tasks in the most recent window. + google.protobuf.Duration avg_recent_time = 31; + // Overall average processing time since the service started. + google.protobuf.Duration avg_total_time = 32; + // Average processing time for tasks that failed. + google.protobuf.Duration avg_failure_time = 33; + // Average processing time for tasks that succeeded. + google.protobuf.Duration avg_success_time = 34; } -message CloseResponse {} - -message StatusRequest {} - -message StatusResponse {} - +// Provides runtime instance management. service Instance { - // TODO. + // Retrieves detailed service health and performance metrics. rpc Status(StatusRequest) returns (StatusResponse); - // Bidirectional event streaming RPC for continuous communication + // Provides a bidirectional event streaming RPC for continuous communication // between the gateway (as a client) and the runtime (as a server). - rpc Connect(stream EventRequest) returns (stream EventResponse); + rpc Bus(stream EventRequest) returns (stream EventResponse); } diff --git a/crates/schema/protobuf/internal/jsonschema.proto b/crates/schema/protobuf/internal/jsonschema.proto new file mode 100644 index 0000000..78b02be --- /dev/null +++ b/crates/schema/protobuf/internal/jsonschema.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; + +package rt.json.schema; + +// Represents any valid `JSON` schema type. 
+message JsonSchema { + // Specifies that the value field can hold one of several possible types. + oneof value { + // Represents a string type, e.g. `"hello"` or `"world"`. + google.protobuf.Empty string_type = 1; + // Represents an integer type e.g. `4` or `138253`. + google.protobuf.Empty integer_type = 2; + // Represents a float type e.g. `2.6` or `6.0004`. + google.protobuf.Empty float_type = 3; + // Represents a boolean type i.e. `true`/`false`. + google.protobuf.Empty boolean_value = 4; + // Represents a null value i.e. `null`. + google.protobuf.Empty null_value = 5; + // Represents an object type e.g. `{ "key": "value" }`. + JsonSchemaObject object_value = 6; + // Represents an array type e.g. `["key", "value"]`. + JsonSchemaArray array_value = 7; + } +} + +// Defines the schema for a `JSON` object, which consists of key-value pairs. +message JsonSchemaObject { + // Represents the map of the field names and their types. + map fields = 1; +} + +// Defines the schema for a `JSON` array, which contains a list of elements. +message JsonSchemaArray { + // Represents the types of the elements in the array. + repeated JsonSchema elements = 1; +} diff --git a/crates/schema/protobuf/internal/jsonvalue.proto b/crates/schema/protobuf/internal/jsonvalue.proto new file mode 100644 index 0000000..d4c6825 --- /dev/null +++ b/crates/schema/protobuf/internal/jsonvalue.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; + +package rt.json.value; + +// Represents different types of `JSON` values. +message JsonValue { + // Specifies that the value field can hold one of several possible types. + oneof value { + // Represents a string type, e.g. `"hello"` or `"world"`. + string string_type = 1; + // Represents an integer type e.g. `4` or `138253`. + int32 integer_type = 2; + // Represents a float type e.g. `2.6` or `6.0004`. + float float_type = 3; + // Represents a boolean type i.e. `true`/`false`. 
+ bool boolean_value = 4; + // Represents a null value i.e. `null`. + google.protobuf.Empty null_value = 5; + // Represents an object type e.g. `{ "key": "value" }`. + JsonObject object_value = 6; + // Represents an array type e.g. `["key", "value"]`. + JsonArray array_value = 7; + } +} + +// Contains the values of a map of `JSON` values. +message JsonObject { + // Contains the map of the field names and their values. + map fields = 1; +} + +// Contains the values of an array of `JSON` values. +message JsonArray { + // Contains the values of the elements in the array. + repeated JsonValue items = 1; +} diff --git a/crates/schema/protobuf/registry.proto b/crates/schema/protobuf/registry.proto index 5edd854..4de490f 100644 --- a/crates/schema/protobuf/registry.proto +++ b/crates/schema/protobuf/registry.proto @@ -1,43 +1,62 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; -// import "google/protobuf/empty.proto"; +import "google/protobuf/empty.proto"; +import "entity.proto"; -package registry; +package rt.registry; -message Service { - string id = 1; - string name = 2; - string icon = 3; -} - -message Entity { - string id = 1; - string name = 2; - string icon = 3; -} - -message CheckRequest {} - -message CheckResponse {} - -// The message format for requesting for the registry content. +// Retrieves the registry details. message RegistryRequest {} -// The message format for responding with the registry content. +// Contains the registry details. message RegistryResponse { - google.protobuf.Timestamp created = 2; - google.protobuf.Timestamp updated = 3; + // Total number of registered services. + uint32 total_services = 1; + // Total number of registered actions. + uint32 total_actions = 2; + // Total number of registered triggers. + uint32 total_triggers = 3; + + // Registry registration startup timestamp. + google.protobuf.Timestamp first_updated_at = 4; + // Registry registration shutdown timestamp. 
+ google.protobuf.Timestamp last_updated_at = 5; + + // List of registered services. + repeated rt.entity.Service services = 11; + // List of registered actions. + repeated rt.entity.Entity actions = 12; + // List of registered triggers. + repeated rt.entity.Entity triggers = 13; +} + +message SearchRequest { + uint32 query_id = 11; + // Search term to match all registered entities to. + string query = 12; + // Filter by associated tags. + repeated string tags = 13; + + // Limit on the number of search results. + uint32 max_results = 21; + // Include deprecated entities in search results if true. + bool include_deprecated = 22; +} - repeated Service services = 11; - repeated Entity triggers = 12; - repeated Entity actions = 13; +message SearchResponse { + // Entities matching the search criteria. + repeated rt.entity.Entity matching_entities = 1; + // Total number of matches found. + uint32 total_matches = 2; + // True if results were truncated due to max_results. + bool truncated = 3; } service Registry { // Comprehensive collection of available tasks and their metadata. - rpc Registry(registry.RegistryRequest) returns (registry.RegistryResponse); + rpc Registry(RegistryRequest) returns (RegistryResponse); - // Authentication, authorization, and health checks. - rpc Check(registry.CheckRequest) returns (registry.CheckResponse); + // Searches for specific services in the registry. 
+ rpc Search(SearchRequest) returns (SearchResponse); } diff --git a/crates/server/Cargo.toml b/crates/server/Cargo.toml index ced13f4..56f9157 100644 --- a/crates/server/Cargo.toml +++ b/crates/server/Cargo.toml @@ -36,5 +36,5 @@ prost = { workspace = true } tonic-types = { workspace = true } prost-types = { workspace = true } -tower = { version = "0.4", features = ["full"] } -tower-http = { version = "0.5", features = ["full"] } +tower = { workspace = true } +tower-http = { workspace = true } diff --git a/crates/server/handler/instance.rs b/crates/server/handler/instance.rs index b4b3024..cdfe6bc 100644 --- a/crates/server/handler/instance.rs +++ b/crates/server/handler/instance.rs @@ -1,5 +1,5 @@ use axiston_rt_schema::instance::instance_server::{Instance, InstanceServer}; -use axiston_rt_schema::instance::{EventRequest, EventResponse}; +use axiston_rt_schema::instance::{EventRequest, EventResponse, StatusRequest, StatusResponse}; use futures::stream::BoxStream; use futures::StreamExt; use tokio::sync::mpsc; @@ -29,16 +29,28 @@ impl InstanceService { #[tonic::async_trait] impl Instance for InstanceService { - type ConnectStream = BoxStream<'static, Result>; + async fn status( + &self, + request: Request, + ) -> Result, Status> { + todo!() + } + + type BusStream = BoxStream<'static, Result>; - async fn connect( + async fn bus( &self, request: Request>, - ) -> Result, Status> { + ) -> Result, Status> { let mut request = request.into_inner(); - let (tx, rx) = mpsc::channel(128); - tokio::spawn(async move { while let Some(event) = request.next().await {} }); + // TODO: Create a new queue. 
+ + let _handle = tokio::spawn(async move { + while let Some(event) = request.next().await { + let _ = event; + } + }); let rx = ReceiverStream::new(rx); Ok(Response::new(Box::pin(rx))) diff --git a/crates/server/service/config.rs b/crates/server/service/app_config.rs similarity index 100% rename from crates/server/service/config.rs rename to crates/server/service/app_config.rs diff --git a/crates/server/service/cache.rs b/crates/server/service/cache.rs deleted file mode 100644 index d90179c..0000000 --- a/crates/server/service/cache.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! TODO. -//! - -pub struct RegistryCache { - pub triggers: (), -} diff --git a/crates/server/service/mod.rs b/crates/server/service/mod.rs index 46d2b0d..a9bd65e 100644 --- a/crates/server/service/mod.rs +++ b/crates/server/service/mod.rs @@ -1,9 +1,9 @@ //! TODO. //! -pub use crate::service::config::{AppBuilder, AppConfig}; +pub use crate::service::app_config::{AppBuilder, AppConfig}; -mod config; +mod app_config; mod instance; mod registry; diff --git a/crates/task/lib.rs b/crates/task/lib.rs index a8d3aab..f80fc2b 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -33,3 +33,6 @@ pub enum Error { /// /// [`Result`]: std::result::Result pub type Result = std::result::Result; + +// TODO: Is there any real reason to make a different between action and trigger? +// Make trigger dynamically determined if the action returns a single boolean. 
From 7c532bb4e831f2d394ea1979282c9d0d80101acb Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Thu, 12 Dec 2024 11:49:16 +0100 Subject: [PATCH 05/11] feat(all): impl 2/n --- .../extension}/custom_serde.rs | 0 crates/task/Cargo.toml | 5 +- crates/task/context/failure.rs | 51 ++------ crates/task/context/mod.rs | 10 -- crates/task/context/request.rs | 78 +++++------- crates/task/context/response.rs | 56 ++++++--- crates/task/handler/metric.rs | 2 +- crates/task/lib.rs | 32 ++++- crates/task/registry/action.rs | 76 ------------ crates/task/registry/mod.rs | 48 -------- crates/task/registry/service.rs | 26 ---- crates/task/registry/trigger.rs | 88 -------------- crates/task/routing/index.rs | 59 +++++++-- .../routing/{layer_compose.rs => layers.rs} | 34 ++++-- crates/task/routing/manifest.rs | 39 ++++++ crates/task/routing/mod.rs | 112 ++++++++++++++---- crates/task/routing/registry.rs | 0 crates/task/routing/route.rs | 84 +++++++++++++ crates/task/routing/route_index.rs | 47 -------- 19 files changed, 391 insertions(+), 456 deletions(-) rename crates/{task/registry => jsvm/extension}/custom_serde.rs (100%) delete mode 100644 crates/task/registry/action.rs delete mode 100644 crates/task/registry/mod.rs delete mode 100644 crates/task/registry/service.rs delete mode 100644 crates/task/registry/trigger.rs rename crates/task/routing/{layer_compose.rs => layers.rs} (60%) create mode 100644 crates/task/routing/manifest.rs delete mode 100644 crates/task/routing/registry.rs create mode 100644 crates/task/routing/route.rs delete mode 100644 crates/task/routing/route_index.rs diff --git a/crates/task/registry/custom_serde.rs b/crates/jsvm/extension/custom_serde.rs similarity index 100% rename from crates/task/registry/custom_serde.rs rename to crates/jsvm/extension/custom_serde.rs diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 9dbf48c..82b45ff 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -25,8 +25,11 @@ futures = { workspace = true } 
pin-project-lite = { workspace = true } thiserror = { workspace = true } -derive_more = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } +jsonschema = { workspace = true } + +derive_more = { workspace = true } tracing = { workspace = true } ecow = { workspace = true } tower = { workspace = true } diff --git a/crates/task/context/failure.rs b/crates/task/context/failure.rs index 60d76f4..a1ca010 100644 --- a/crates/task/context/failure.rs +++ b/crates/task/context/failure.rs @@ -1,60 +1,27 @@ use std::error::Error; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -// TODO: wrap box error instead - /// Unrecoverable failure duration [`TaskHandler`] execution. /// /// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Error, Serialize, Deserialize)] +#[derive(Debug, thiserror::Error)] +#[error("internal handler error")] #[must_use = "errors do nothing unless you use them"] -#[error("failure during `TaskHandler` execution")] pub struct TaskError { - name: String, - message: String, + inner: Box, } impl TaskError { /// Returns a new [`TaskError`]. - pub fn new() -> Self { + #[inline] + pub fn new(inner: E) -> Self + where + E: Error + 'static, + { Self { - name: "".to_owned(), - message: "".to_owned(), + inner: Box::new(inner), } } - - /// Returns the underlying error's name. - #[inline] - pub fn name(&self) -> &str { - &self.name - } - - /// Returns the underlying error's message. - #[inline] - pub fn message(&self) -> &str { - &self.message - } -} - -impl From> for TaskError { - fn from(value: Box) -> Self { - todo!() - } } /// Specialized [`Result`] alias for the [`TaskError`] type. 
pub type TaskResult = Result; - -#[cfg(test)] -mod test { - use crate::context::TaskError; - use crate::Result; - - #[test] - fn instance() -> Result<()> { - let _ = TaskError::new(); - Ok(()) - } -} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs index 156f8d9..2d8654b 100644 --- a/crates/task/context/mod.rs +++ b/crates/task/context/mod.rs @@ -10,16 +10,6 @@ pub mod builders { pub use super::response::TaskResponseBuilder; } -pub mod storages { - //! [`TaskRequest`] and [`TaskResponse`] storages. - //! - //! [`TaskRequest`]: crate::context::TaskRequest - //! [`TaskResponse`]: crate::context::TaskResponse - - pub use super::request::{Fields, Secrets}; - pub use super::response::Metrics; -} - pub use crate::context::failure::{TaskError, TaskResult}; pub use crate::context::request::TaskRequest; pub use crate::context::response::TaskResponse; diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs index 5e8494d..a45aaad 100644 --- a/crates/task/context/request.rs +++ b/crates/task/context/request.rs @@ -1,40 +1,18 @@ -use std::collections::HashMap; use std::fmt; use derive_more::{Deref, DerefMut, From}; use serde::{Deserialize, Serialize}; +use serde_json::Value; -use crate::routing::Layers; +use crate::routing::layers::Layers; /// TODO. -#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] -#[must_use = "requests do nothing unless you serialize them"] -pub struct Fields { - inner: HashMap, -} - -impl Fields { - /// Returns an empty [`Fields`] store. - #[inline] - pub fn new() -> Self { - Self::default() - } -} +#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] +pub struct Inputs(pub Value); /// TODO. -#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] -#[must_use = "requests do nothing unless you serialize them"] -pub struct Secrets { - inner: HashMap, -} - -impl Secrets { - /// Returns an empty [`Secrets`] store. 
- #[inline] - pub fn new() -> Self { - Self::default() - } -} +#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] +pub struct Secrets(pub Value); /// Serializable [`TaskHandler`] service request. /// @@ -46,9 +24,9 @@ pub struct TaskRequest { #[deref_mut] inner: T, - fields: Fields, - secrets: Secrets, - layers: Layers, + pub(crate) inputs: Inputs, + pub(crate) secrets: Secrets, + pub(crate) layers: Layers, } impl TaskRequest { @@ -57,8 +35,8 @@ impl TaskRequest { pub fn new(inner: T) -> Self { Self { inner, - fields: Fields::new(), - secrets: Secrets::new(), + inputs: Inputs(Value::default()), + secrets: Secrets(Value::default()), layers: Layers::new(), } } @@ -79,7 +57,7 @@ impl TaskRequest { impl fmt::Debug for TaskRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TaskRequest") - .field("fields", &self.fields) + .field("inputs", &self.inputs) .field("secrets", &self.secrets) .finish_non_exhaustive() } @@ -92,8 +70,8 @@ impl fmt::Debug for TaskRequest { #[must_use = "requests do nothing unless you serialize them"] pub struct TaskRequestBuilder { inner: T, - fields: Option, - secrets: Option, + inputs: Option, + secrets: Option, layers: Option, } @@ -103,26 +81,23 @@ impl TaskRequestBuilder { pub fn new(inner: T) -> Self { Self { inner, - fields: None, + inputs: None, secrets: None, layers: None, } } - // TODO: Method to add a single field. - // TODO: Method to add a single secret. - - /// Overrides the default value of [`TaskRequest`]`::fields`. + /// Overrides the default value of [`TaskRequest`]`::inputs`. #[inline] - pub fn with_fields(mut self, fields: Fields) -> Self { - self.fields = Some(fields); + pub fn with_inputs(mut self, json: Value) -> Self { + self.inputs = Some(json); self } /// Overrides the default value of [`TaskRequest`]`::secrets`. 
#[inline] - pub fn with_secrets(mut self, secrets: Secrets) -> Self { - self.secrets = Some(secrets); + pub fn with_secrets(mut self, json: Value) -> Self { + self.secrets = Some(json); self } @@ -137,8 +112,8 @@ impl TaskRequestBuilder { pub fn build(self) -> TaskRequest { TaskRequest { inner: self.inner, - fields: self.fields.unwrap_or_default(), - secrets: self.secrets.unwrap_or_default(), + inputs: Inputs(self.inputs.unwrap_or_default()), + secrets: Secrets(self.secrets.unwrap_or_default()), layers: self.layers.unwrap_or_default(), } } @@ -146,16 +121,17 @@ impl TaskRequestBuilder { #[cfg(test)] mod test { - use crate::context::storages::{Fields, Secrets}; + use serde_json::Value; + use crate::context::TaskRequest; use crate::routing::Layers; use crate::Result; #[test] - fn build() -> Result<()> { + fn build_empty_request() -> Result<()> { let _request = TaskRequest::builder(5) - .with_fields(Fields::new()) - .with_secrets(Secrets::new()) + .with_inputs(Value::default()) + .with_secrets(Value::default()) .with_layers(Layers::new()) .build(); Ok(()) diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs index 969f92c..1426e5c 100644 --- a/crates/task/context/response.rs +++ b/crates/task/context/response.rs @@ -1,23 +1,16 @@ -use std::collections::HashMap; use std::fmt; use derive_more::{Deref, DerefMut, From}; use serde::{Deserialize, Serialize}; +use serde_json::Value; /// TODO. -#[derive(Debug, Default, Clone, Serialize, Deserialize, From)] -#[must_use = "responses do nothing unless you serialize them"] -pub struct Metrics { - inner: HashMap, -} +#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] +pub struct Outputs(pub Value); -impl Metrics { - /// Returns an empty [`Metrics`] store. - #[inline] - pub fn new() -> Self { - Self::default() - } -} +/// TODO. +#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] +pub struct Metrics(pub Value); /// Deserializable [`TaskHandler`] service response. 
/// @@ -29,7 +22,8 @@ pub struct TaskResponse { #[deref_mut] inner: T, - metrics: Metrics, + pub(crate) outputs: Outputs, + pub(crate) metrics: Metrics, } impl TaskResponse { @@ -38,7 +32,8 @@ impl TaskResponse { pub fn new(inner: T) -> Self { Self { inner, - metrics: Metrics::default(), + outputs: Outputs(Value::default()), + metrics: Metrics(Value::default()), } } @@ -58,6 +53,7 @@ impl TaskResponse { impl fmt::Debug for TaskResponse { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TaskResponse") + .field("outputs", &self.outputs) .field("metrics", &self.metrics) .finish_non_exhaustive() } @@ -70,7 +66,8 @@ impl fmt::Debug for TaskResponse { #[must_use = "responses do nothing unless you serialize them"] pub struct TaskResponseBuilder { inner: T, - metrics: Option, + outputs: Option, + metrics: Option, } impl TaskResponseBuilder { @@ -79,27 +76,48 @@ impl TaskResponseBuilder { pub fn new(inner: T) -> Self { Self { inner, + outputs: None, metrics: None, } } + /// Overrides the default value of [`TaskResponse`]`::outputs`. + #[inline] + pub fn with_outputs(mut self, json: Value) -> Self { + self.outputs = Some(json); + self + } + + /// Overrides the default value of [`TaskResponse`]`::metrics`. + #[inline] + pub fn with_metrics(mut self, json: Value) -> Self { + self.metrics = Some(json); + self + } + /// Returns a new [`TaskResponse`]. 
pub fn build(self) -> TaskResponse { TaskResponse { inner: self.inner, - metrics: self.metrics.unwrap_or_default(), + outputs: Outputs(self.outputs.unwrap_or_default()), + metrics: Metrics(self.metrics.unwrap_or_default()), } } } #[cfg(test)] mod test { + use serde_json::Value; + use crate::context::TaskResponse; use crate::Result; #[test] - fn build() -> Result<()> { - let _response = TaskResponse::builder(5).build(); + fn build_empty_response() -> Result<()> { + let _response = TaskResponse::builder(5) + .with_outputs(Value::default()) + .with_metrics(Value::default()) + .build(); Ok(()) } } diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs index b1b9d1d..3d13763 100644 --- a/crates/task/handler/metric.rs +++ b/crates/task/handler/metric.rs @@ -28,7 +28,7 @@ mod test { #[test] fn from_default() -> Result<()> { - let _ = TaskMetrics::new(); + let _metrics = TaskMetrics::new(); Ok(()) } } diff --git a/crates/task/lib.rs b/crates/task/lib.rs index f80fc2b..af879b6 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -7,28 +7,48 @@ //! use axiston_rt_task::Result; //! //! fn main() -> Result<()> { -//! let router = Router::default(); +//! let router: Router = Router::default(); //! Ok(()) //! } //! ``` +use std::borrow::Cow; + +use jsonschema::ValidationError; + pub mod context; pub mod handler; -pub mod registry; pub mod routing; -/// Unrecoverable failure of the [`Registry`]. +/// Unrecoverable failure of the [`Router`]. /// /// Includes all error types that may occur. /// -/// [`Registry`]: registry::Registry +/// [`Router`]: routing::Router #[derive(Debug, thiserror::Error)] #[must_use = "errors do nothing unless you use them"] pub enum Error { - #[error("called task failure: {0}")] + /// Task validation failure. + #[error("task validation failure: {0}")] + Validate(ValidationError<'static>), + /// Task execution failure. 
+ #[error("task execution failure: {0}")] Task(#[from] context::TaskError), } +impl<'a> From> for Error { + fn from(validation_error: ValidationError<'a>) -> Self { + let validation_error = ValidationError { + instance: Cow::Owned(validation_error.instance.into_owned()), + kind: validation_error.kind, + instance_path: validation_error.instance_path, + schema_path: validation_error.schema_path, + }; + + Self::Validate(validation_error) + } +} + /// Specialized [`Result`] alias for the [`Error`] type. /// /// [`Result`]: std::result::Result @@ -36,3 +56,5 @@ pub type Result = std::result::Result; // TODO: Is there any real reason to make a different between action and trigger? // Make trigger dynamically determined if the action returns a single boolean. + +// TODO: Manifests are defined in proto files with serde derive. diff --git a/crates/task/registry/action.rs b/crates/task/registry/action.rs deleted file mode 100644 index 1fb3caf..0000000 --- a/crates/task/registry/action.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! Operation [`Request`], [`Response`] and [`Manifest`] types. -//! -//! [`Request`]: ActionRequest -//! [`Response`]: ActionResponse -//! [`Manifest`]: ActionManifest - -use serde::{Deserialize, Serialize}; - -/// TODO. -#[derive(Debug, Serialize, Deserialize)] -pub struct ActionRequest {} - -impl ActionRequest { - /// Returns a new [`ActionRequest`]. - pub fn new() -> Self { - Self {} - } -} - -/// TODO. -#[derive(Debug, Serialize, Deserialize)] -pub struct ActionResponse {} - -impl ActionResponse { - /// Returns a new [`ActionResponse`]. - pub fn new() -> Self { - Self {} - } -} - -/// Associated action metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct ActionManifest { - pub name: String, -} - -impl ActionManifest { - /// Returns a new [`ActionManifest`]. - /// - /// Used for testing. 
- #[inline] - pub fn new(name: &str) -> Self { - Self { - name: name.to_owned(), - } - } - - // pub fn index() -> Index {} -} - -#[cfg(test)] -mod test { - use tower::{service_fn, ServiceBuilder}; - - use crate::context::{TaskError, TaskRequest, TaskResponse}; - use crate::handler::TaskHandlerLayer; - use crate::registry::action::{ActionRequest, ActionResponse}; - use crate::Result; - - async fn action_handle( - request: TaskRequest, - ) -> Result, TaskError> { - Ok(TaskResponse::new(ActionResponse::new())) - } - - #[tokio::test] - async fn native_action_handle() -> Result<()> { - let req = TaskRequest::new(ActionRequest::new()); - let svc = ServiceBuilder::new() - .layer(TaskHandlerLayer::new()) - .service(service_fn(action_handle)); - - Ok(()) - } -} diff --git a/crates/task/registry/mod.rs b/crates/task/registry/mod.rs deleted file mode 100644 index dadccf2..0000000 --- a/crates/task/registry/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! TODO. [`Registry`]. -//! - -use std::sync::{Arc, Mutex}; - -use crate::registry::action::{ActionManifest, ActionRequest, ActionResponse}; -use crate::registry::trigger::{TriggerManifest, TriggerRequest, TriggerResponse}; -use crate::routing::Router; - -mod action; -mod custom_serde; -mod trigger; - -/// TODO. -#[must_use = "routers do nothing unless you use them"] -#[derive(Debug, Default, Clone)] -pub struct Registry { - inner: Arc>, -} - -#[derive(Debug, Default)] -struct RegistryInner { - // registered_services: Router, - registered_triggers: Router, - registered_actions: Router, -} - -impl Registry { - /// Returns an empty [`Registry`]. 
- #[inline] - pub fn new() -> Self { - Self::default() - } - - // pub fn register_action() -} - -#[cfg(test)] -mod test { - use crate::registry::Registry; - use crate::Result; - - #[test] - fn build_empty() -> Result<()> { - let _ = Registry::new(); - Ok(()) - } -} diff --git a/crates/task/registry/service.rs b/crates/task/registry/service.rs deleted file mode 100644 index 91a2c1b..0000000 --- a/crates/task/registry/service.rs +++ /dev/null @@ -1,26 +0,0 @@ -use serde::{Deserialize, Serialize}; - -/// Associated service metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct ServiceManifest { - pub name: String, - pub description: String, - pub icon: Option, - pub version: String, -} - -impl ServiceManifest { - /// Returns a new [`ServiceManifest`]. - /// - /// Used for testing. - #[inline] - pub fn new(name: &str) -> Self { - // Self { - // name: name.to_owned(), - // icon: None, - // } - - todo!() - } -} diff --git a/crates/task/registry/trigger.rs b/crates/task/registry/trigger.rs deleted file mode 100644 index 445f49f..0000000 --- a/crates/task/registry/trigger.rs +++ /dev/null @@ -1,88 +0,0 @@ -//! Condition [`Request`], [`Response`] and [`Manifest`] types. -//! -//! [`Request`]: TriggerRequest -//! [`Response`]: TriggerResponse -//! [`Manifest`]: TriggerManifest - -use std::time::Duration; - -use serde::{Deserialize, Serialize}; - -/// TODO. -#[derive(Debug, Serialize, Deserialize)] -pub struct TriggerRequest {} - -impl TriggerRequest { - /// Returns a new [`TriggerRequest`]. - pub fn new() -> Self { - Self {} - } -} - -/// TODO. -#[derive(Debug, Serialize, Deserialize)] -pub struct TriggerResponse { - pub should_trigger: bool, - pub ignore_retry_ms: Option, -} - -impl TriggerResponse { - /// Returns a new [`TriggerResponse`]. 
- pub fn new(should_trigger: bool) -> Self { - Self { - should_trigger, - ignore_retry_ms: None, - } - } - - pub fn with_ignore_retry(self) -> Self { - todo!() - } -} - -/// Associated trigger metadata. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "manifests do nothing unless you serialize them"] -pub struct TriggerManifest { - pub name: String, -} - -impl TriggerManifest { - /// Returns a new [`TriggerManifest`]. - /// - /// Used for testing. - #[inline] - pub fn new(name: &str) -> Self { - Self { - name: name.to_owned(), - } - } -} - -#[cfg(test)] -mod test { - use tower::{service_fn, Service, ServiceBuilder}; - - use crate::context::{TaskError, TaskRequest, TaskResponse}; - use crate::handler::TaskHandlerLayer; - use crate::registry::trigger::{TriggerRequest, TriggerResponse}; - use crate::Result; - - async fn trigger_handle( - request: TaskRequest, - ) -> Result, TaskError> { - let resp = TriggerResponse::new(true); - Ok(TaskResponse::builder(resp).build()) - } - - #[tokio::test] - async fn native_trigger_handle() -> Result<()> { - let req = TaskRequest::new(TriggerRequest::new()); - let mut svc = ServiceBuilder::new() - .layer(TaskHandlerLayer::new()) - .service(service_fn(trigger_handle)); - - let _resp = svc.call(req).await?; - Ok(()) - } -} diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs index 62ffd08..639c156 100644 --- a/crates/task/routing/index.rs +++ b/crates/task/routing/index.rs @@ -1,27 +1,64 @@ -use std::ops::Deref; +//! [`RouteIndex`] and [`ServiceIndex`]. +use derive_more::{Deref, DerefMut}; use ecow::EcoString; -/// Opaque and unique identifier. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub struct UnderlyingIndex { +/// Opaque and unique [`Service`] identifier. 
+/// +/// [`Service`]: crate::routing::manifest::ServiceManifest +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] +#[must_use = "indexes do nothing unless you serialize them"] +pub struct ServiceIndex { inner: EcoString, } -impl UnderlyingIndex { - /// Returns a new [`UnderlyingIndex`]. +impl ServiceIndex { + /// Returns a new [`ServiceIndex`]. #[inline] - pub fn new(inner: impl AsRef) -> Self { + pub fn new>(inner: S) -> Self { let inner = EcoString::from(inner.as_ref()); Self { inner } } + + /// Returns the underlying index. + #[inline] + pub fn into_inner(self) -> EcoString { + self.inner.clone() + } } -impl Deref for UnderlyingIndex { - type Target = str; +/// Opaque and unique [`Route`] identifier. +/// +/// [`Route`]: crate::routing::Route +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] +#[must_use = "indexes do nothing unless you serialize them"] +pub struct RouteIndex { + inner: EcoString, +} + +impl RouteIndex { + /// Returns a new [`RouteIndex`]. + #[inline] + pub fn new>(inner: S) -> Self { + let inner = EcoString::from(inner.as_ref()); + Self { inner } + } + /// Returns the underlying index. 
#[inline] - fn deref(&self) -> &Self::Target { - self.inner.as_str() + pub fn into_inner(self) -> EcoString { + self.inner.clone() + } +} + +#[cfg(test)] +mod test { + use crate::routing::RouteIndex; + use crate::Result; + + #[test] + fn index_from_string() -> Result<()> { + let _ = RouteIndex::new("index"); + Ok(()) } } diff --git a/crates/task/routing/layer_compose.rs b/crates/task/routing/layers.rs similarity index 60% rename from crates/task/routing/layer_compose.rs rename to crates/task/routing/layers.rs index 2f38fd7..0aba0a6 100644 --- a/crates/task/routing/layer_compose.rs +++ b/crates/task/routing/layers.rs @@ -22,7 +22,10 @@ impl LayerCompose { /// /// [`Layer`]: tower::Layer #[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub struct Layers {} +pub struct Layers { + timeout_policy: Option<()>, + retry_policy: Option<()>, +} impl Layers { /// Returns a new [`Layers`]. @@ -40,7 +43,10 @@ impl Layers { /// [`Layers`] builder. #[derive(Debug, Default, Clone)] -pub struct LayersBuilder {} +pub struct LayersBuilder { + timeout_policy: Option<()>, + retry_policy: Option<()>, +} impl LayersBuilder { /// Returns a new [`LayersBuilder`]. @@ -49,27 +55,41 @@ impl LayersBuilder { Self::default() } + /// Overrides the default value of [`LayersBuilder`]`::timeout_policy`. + pub fn with_timeout_policy(mut self, timeout_policy: ()) -> Self { + self.timeout_policy = Some(timeout_policy); + self + } + + /// Overrides the default value of [`LayersBuilder`]`::retry_policy`. + pub fn with_retry_policy(mut self, retry_policy: ()) -> Self { + self.retry_policy = Some(retry_policy); + self + } + /// Returns a new [`Layers`]. 
- #[inline] pub fn build(self) -> Layers { - Layers {} + Layers { + timeout_policy: self.timeout_policy, + retry_policy: self.retry_policy, + } } } #[cfg(test)] mod test { - use crate::routing::layer_compose::{LayerCompose, Layers, LayersBuilder}; + use crate::routing::layers::{LayerCompose, Layers, LayersBuilder}; use crate::Result; #[test] - fn from_default() -> Result<()> { + fn with_default_layers() -> Result<()> { let config = Layers::new(); let _compose = LayerCompose::new(config); Ok(()) } #[test] - fn from_builder() -> Result<()> { + fn from_layers_builder() -> Result<()> { let config = LayersBuilder::new().build(); let _compose = LayerCompose::new(config); Ok(()) diff --git a/crates/task/routing/manifest.rs b/crates/task/routing/manifest.rs new file mode 100644 index 0000000..233f700 --- /dev/null +++ b/crates/task/routing/manifest.rs @@ -0,0 +1,39 @@ +//! [`RouteManifest`] and [`ServiceManifest`]. + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +/// TODO. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct ServiceManifest { + pub(crate) service_id: String, +} + +impl ServiceManifest { + /// Returns a new [`ServiceManifest`]. + pub fn new() -> Self { + todo!() + } +} + +/// TODO. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct RouteManifest { + pub(crate) route_id: String, + pub(crate) service_id: String, + pub(crate) inputs_schema: Value, + pub(crate) outputs_schema: Value, + pub(crate) errors_schema: Value, +} + +impl RouteManifest { + /// Returns a new [`RouteManifest`]. 
+ pub fn new() -> Self { + todo!() + } +} + +#[cfg(test)] +mod test {} diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs index d99d4bc..9d4d7ee 100644 --- a/crates/task/routing/mod.rs +++ b/crates/task/routing/mod.rs @@ -4,41 +4,92 @@ use std::collections::HashMap; use std::fmt; -use std::hash::Hash; +use std::sync::Arc; -use crate::handler::TaskHandler; -use crate::routing::layer_compose::LayerCompose; -pub use crate::routing::layer_compose::{Layers, LayersBuilder}; -pub use crate::routing::route_index::RouteIndex; -use crate::Result; +use crate::routing::index::{RouteIndex, ServiceIndex}; +use crate::routing::layers::{LayerCompose, Layers}; +use crate::routing::manifest::{RouteManifest, ServiceManifest}; +pub use crate::routing::route::Route; -mod index; -mod layer_compose; -mod route_index; +pub mod index; +pub mod layers; +pub mod manifest; +mod route; /// TODO. -#[must_use = "routers do nothing unless you use them"] -pub struct Router { +pub type RouteRequest = (); + +/// TODO. +pub type RouteResponse = (); + +/// TODO. +#[must_use = "routes do nothing unless you use them"] +pub struct Router { + router_inner: Arc>, +} + +struct RouterInner { layer_compose: LayerCompose, - route_services: HashMap>, + service_manifests: HashMap, + route_handlers: HashMap>, } impl Router { /// Returns an empty [`Router`]. #[inline] pub fn new(layers: Layers) -> Self { - Self { + let router_inner = RouterInner { layer_compose: LayerCompose::new(layers), - route_services: HashMap::new(), + service_manifests: HashMap::default(), + route_handlers: HashMap::new(), + }; + + Self { + router_inner: Arc::new(router_inner), } } - /// Overrides the default value of [`Router`]`::layers`. - #[inline] - pub fn with_layers(mut self, layers: Layers) -> Self { - self.layer_compose = LayerCompose::new(layers); - self + /// Overrides the default value of [`Router`]`::layer_compose`. 
+ pub fn with_layers(self, layers: Layers) -> Self { + let mut inner = Arc::try_unwrap(self.router_inner) + .unwrap_or_else(|router_handler| (*router_handler).clone()); + inner.layer_compose = LayerCompose::new(layers); + + Self { + router_inner: Arc::new(inner), + } } + + /// Registers another [`ServiceManifest`] by its [`ServiceIndex`]. + pub fn with_service( + self, + service_index: ServiceIndex, + service_manifest: ServiceManifest, + ) -> Self { + let mut inner = Arc::try_unwrap(self.router_inner) + .unwrap_or_else(|router_handler| (*router_handler).clone()); + let _ = inner + .service_manifests + .insert(service_index, service_manifest); + + Self { + router_inner: Arc::new(inner), + } + } + + /// Registers another [`Route`] by its [`RouteIndex`]. + pub fn with_route(self, route_index: RouteIndex, route: Route) -> Self { + let mut inner = Arc::try_unwrap(self.router_inner) + .unwrap_or_else(|router_handler| (*router_handler).clone()); + let _ = inner.route_handlers.insert(route_index, route); + + Self { + router_inner: Arc::new(inner), + } + } + + // TODO: Method to return the whole registry. + // TODO: Method to execute a single route. 
} impl fmt::Debug for Router { @@ -49,18 +100,32 @@ impl fmt::Debug for Router { impl Default for Router { fn default() -> Self { - Self { + let router_handler = RouterInner { layer_compose: LayerCompose::default(), - route_services: HashMap::default(), + service_manifests: HashMap::default(), + route_handlers: HashMap::default(), + }; + + Self { + router_inner: Arc::new(router_handler), } } } impl Clone for Router { + fn clone(&self) -> Self { + Self { + router_inner: self.router_inner.clone(), + } + } +} + +impl Clone for RouterInner { fn clone(&self) -> Self { Self { layer_compose: self.layer_compose.clone(), - route_services: self.route_services.clone(), + service_manifests: self.service_manifests.clone(), + route_handlers: self.route_handlers.clone(), } } } @@ -72,8 +137,7 @@ mod test { #[test] fn build_default_router() -> Result<()> { - // TODO. - // let _ = Router::new(Layers::new()); + let _router: Router = Router::new(Layers::new()); Ok(()) } } diff --git a/crates/task/routing/registry.rs b/crates/task/routing/registry.rs deleted file mode 100644 index e69de29..0000000 diff --git a/crates/task/routing/route.rs b/crates/task/routing/route.rs new file mode 100644 index 0000000..a311ce7 --- /dev/null +++ b/crates/task/routing/route.rs @@ -0,0 +1,84 @@ +use std::sync::Arc; + +use jsonschema::{draft202012, Validator}; +use tower::load::Load; +use tower::Service; + +use crate::context::{TaskRequest, TaskResponse}; +use crate::handler::metric::TaskMetrics; +use crate::handler::TaskHandler; +use crate::routing::layers::LayerCompose; +use crate::routing::RouteManifest; +use crate::Result; + +/// TODO. 
+#[must_use = "routes do nothing unless you use them"] +pub struct Route { + pub(crate) route_handler: Arc>, +} + +struct RouteHandler { + pub(crate) route_task_handler: TaskHandler, + pub(crate) route_manifest: RouteManifest, + pub(crate) inputs_schema_validator: Validator, + pub(crate) outputs_schema_validator: Validator, + pub(crate) errors_schema_validator: Validator, +} + +impl Route { + /// Returns a new [`Route`]. + pub fn new( + route_task_handler: TaskHandler, + layer_compose: Option, + route_manifest: RouteManifest, + ) -> Result { + let route_handler = RouteHandler { + inputs_schema_validator: draft202012::new(&route_manifest.inputs_schema)?, + outputs_schema_validator: draft202012::new(&route_manifest.outputs_schema)?, + errors_schema_validator: draft202012::new(&route_manifest.errors_schema)?, + route_task_handler, + route_manifest, + }; + + Ok(Self { + route_handler: Arc::new(route_handler), + }) + } + + /// Returns the underlying `tower::`[`Service`]. + #[inline] + fn task_handler(&self) -> TaskHandler { + self.route_handler.route_task_handler.clone() + } + + /// Returns the underlying `tower::`[`Service`]'s metrics. + #[inline] + pub fn task_handler_metrics(&self) -> TaskMetrics { + self.route_handler.route_task_handler.load() + } + + /// Processes the request and returns the response asynchronously. + pub async fn execute(&self, task_request: TaskRequest) -> Result> { + // TODO: Apply layers. 
+ // let _ = &task_request.layers; + + self.route_handler + .inputs_schema_validator + .validate(&task_request.inputs)?; + let mut task_handler = self.route_handler.route_task_handler.clone(); + let task_response = task_handler.call(task_request).await?; + self.route_handler + .outputs_schema_validator + .validate(&task_response.outputs)?; + Ok(task_response) + } +} + +impl Clone for Route { + fn clone(&self) -> Self { + todo!() + } +} + +#[cfg(test)] +mod test {} diff --git a/crates/task/routing/route_index.rs b/crates/task/routing/route_index.rs deleted file mode 100644 index b0279bd..0000000 --- a/crates/task/routing/route_index.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::ops::{Deref, DerefMut}; - -/// Opaque and unique identifier. -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub struct RouteIndex { - inner: T, -} - -impl RouteIndex { - /// Returns a new [`RouteIndex`]. - #[inline] - pub fn new(inner: T) -> Self { - Self { inner } - } - - /// Returns the underlying index. - #[inline] - pub fn into_inner(self) -> T { - self.inner - } -} - -impl Deref for RouteIndex { - type Target = T; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl DerefMut for RouteIndex { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -#[cfg(test)] -mod test { - use crate::Result; - - #[test] - fn index_from_string() -> Result<()> { - Ok(()) - } -} From 6d60115272ef666fd0f5316bd21aa5c93c5add3a Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Sat, 14 Dec 2024 15:58:13 +0100 Subject: [PATCH 06/11] feat(all): impl 3/n --- Cargo.toml | 9 +- crates/cli/config/mod.rs | 16 ++ crates/cli/main.rs | 13 +- crates/jsvm/Cargo.toml | 52 +++--- crates/schema/Cargo.toml | 4 + crates/schema/build.rs | 6 +- crates/schema/lib.rs | 35 ++-- crates/schema/protobuf/entity.proto | 82 --------- crates/schema/protobuf/event/request.proto | 99 ---------- crates/schema/protobuf/instance.proto | 58 +++--- .../internal/{jsonvalue.proto => 
json.proto} | 14 +- .../schema/protobuf/internal/jsonschema.proto | 38 ---- crates/schema/protobuf/message/entity.proto | 51 ++++++ crates/schema/protobuf/message/request.proto | 66 +++++++ .../{event => message}/response.proto | 2 - crates/schema/protobuf/policy/retry.proto | 18 ++ crates/schema/protobuf/policy/timeout.proto | 30 +++ crates/schema/protobuf/registry.proto | 78 ++++---- crates/server/Cargo.toml | 10 +- crates/server/handler/instance.rs | 118 ++++++++++-- crates/server/handler/mod.rs | 133 +++++++++++++- crates/server/handler/registry.rs | 19 +- crates/server/service/instance.rs | 12 -- crates/server/service/mod.rs | 23 ++- .../service/{registry.rs => task_metrics.rs} | 0 crates/server/service/task_queue.rs | 171 ++++++++++++++++++ crates/task/context/failure.rs | 64 ++++++- crates/task/context/mod.rs | 1 + crates/task/context/request.rs | 24 +-- crates/task/context/response.rs | 30 ++- crates/task/handler/future.rs | 26 ++- crates/task/handler/metric.rs | 43 ++++- crates/task/handler/mod.rs | 77 +++++--- crates/task/lib.rs | 30 ++- crates/task/routing/manifest.rs | 13 +- crates/task/routing/mod.rs | 83 ++++++--- crates/task/routing/route.rs | 37 +++- 37 files changed, 1073 insertions(+), 512 deletions(-) delete mode 100644 crates/schema/protobuf/entity.proto delete mode 100644 crates/schema/protobuf/event/request.proto rename crates/schema/protobuf/internal/{jsonvalue.proto => json.proto} (83%) delete mode 100644 crates/schema/protobuf/internal/jsonschema.proto create mode 100644 crates/schema/protobuf/message/entity.proto create mode 100644 crates/schema/protobuf/message/request.proto rename crates/schema/protobuf/{event => message}/response.proto (97%) create mode 100644 crates/schema/protobuf/policy/retry.proto create mode 100644 crates/schema/protobuf/policy/timeout.proto delete mode 100644 crates/server/service/instance.rs rename crates/server/service/{registry.rs => task_metrics.rs} (100%) create mode 100644 crates/server/service/task_queue.rs 
diff --git a/Cargo.toml b/Cargo.toml index b138e33..5d29424 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ members = [ version = "0.1.0" edition = "2021" license = "Axiston License 1.0" -publish = false +publish = true authors = ["Axiston "] repository = "https://github.com/axiston/runtime" @@ -34,10 +34,15 @@ futures = { version = "0.3", features = [] } thiserror = { version = "2.0", features = [] } anyhow = { version = "1.0", features = ["backtrace"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0", features = [] } +jsonschema = { version = "0.26", features = [] } + tracing = { version = "0.1", features = [] } derive_more = { version = "1.0", features = ["full"] } -serde = { version = "1.0", features = ["derive"] } ecow = { version = "0.2", features = ["serde"] } +time = { version = "0.3", features = ["serde"] } +uuid = { version = "1.11", features = ["serde", "v4", "v7"] } tonic = { version = "0.12", features = [] } prost = { version = "0.13", features = [] } diff --git a/crates/cli/config/mod.rs b/crates/cli/config/mod.rs index 8b13789..df4e4e4 100644 --- a/crates/cli/config/mod.rs +++ b/crates/cli/config/mod.rs @@ -1 +1,17 @@ +use clap::Parser; +/// Command-line arguments. +#[derive(Debug, Parser)] +pub struct Args { + /// Bound server port. + #[arg(short, long, default_value_t = 3000)] + pub port: u16, +} + +impl Args { + /// Returns a new [`Args`]. + #[inline] + pub fn new() -> Self { + Self::parse() + } +} diff --git a/crates/cli/main.rs b/crates/cli/main.rs index f2dd0de..79f37d1 100644 --- a/crates/cli/main.rs +++ b/crates/cli/main.rs @@ -4,24 +4,17 @@ use std::net::{Ipv4Addr, SocketAddr}; use axiston_rt_server::handler::{InstanceService, RegistryService}; use axiston_rt_server::service::{AppConfig, AppState}; -use clap::Parser; use tonic::transport::Server; +use crate::config::Args; + mod config; mod middleware; mod server; -/// Command-line arguments. 
-#[derive(Debug, Parser)] -pub struct Args { - /// Bound server port. - #[arg(short, long, default_value_t = 3000)] - pub port: u16, -} - #[tokio::main] async fn main() -> anyhow::Result<()> { - let args = Args::parse(); + let args = Args::new(); middleware::initialize_tracing().await?; // Service. diff --git a/crates/jsvm/Cargo.toml b/crates/jsvm/Cargo.toml index e417db5..af162ec 100644 --- a/crates/jsvm/Cargo.toml +++ b/crates/jsvm/Cargo.toml @@ -21,36 +21,38 @@ rustdoc-args = ["--cfg", "docsrs"] path = "lib.rs" [dependencies] -axiston-rt-task = { workspace = true } +# Make sure tokio, async_trait and deno_* version are the same. +# https://github.com/denoland/deno/blob/main/Cargo.toml +axiston-rt-task = { workspace = true } tokio = { version = "1.36", features = [] } tracing = { version = "0.1", features = [] } async-trait = { version = "0.1", features = [] } ctor = { version = "0.2", features = [] } -thiserror = { version = "1.0", features = [] } -serde = { version = "1.0", features = ["derive"] } +thiserror = { workspace = true } +serde = { workspace = true } rand = { version = "0.8", features = [] } -deno_core = { version = "0.307.0", features = [] } -deno_ast = { version = "0.42.2", features = [] } -deno_permissions = { version = "0.28.0", features = [] } -deno_cache_dir = { version = "0.10.3", features = [] } - -deno_console = { version = "0.168.0", features = [] } -deno_crypto = { version = "0.182.0", features = [] } -deno_webidl = { version = "0.168.0", features = [] } -deno_url = { version = "0.168.0", features = [] } - -deno_fs = { version = "0.78.0", features = ["sync_fs"] } -deno_io = { version = "0.78.0", features = [] } -deno_fetch = { version = "0.192.0", features = [] } -deno_net = { version = "0.160.0", features = [] } -deno_web = { version = "0.199.0", features = [] } - -deno_http = { version = "0.166.0", features = [] } -deno_tls = { version = "0.155.0", features = [] } -deno_websocket = { version = "0.173.0", features = [] } -deno_webstorage = 
{ version = "0.163.0", features = [] } -deno_canvas = { version = "0.37.0", features = [] } -deno_webgpu = { version = "0.135.0", features = [] } +deno_core = { version = "0.324.0", features = [] } +deno_ast = { version = "0.44.0", features = ["transpiling"] } +deno_permissions = { version = "0.42.0", features = [] } +deno_cache_dir = { version = "0.14.0", features = [] } + +deno_console = { version = "0.182.0", features = [] } +deno_crypto = { version = "0.196.0", features = [] } +deno_webidl = { version = "0.182.0", features = [] } +deno_url = { version = "0.182.0", features = [] } + +deno_fs = { version = "0.92.0", features = [] } +deno_io = { version = "0.92.0", features = [] } +deno_fetch = { version = "0.206.0", features = [] } +deno_net = { version = "0.174.0", features = [] } +deno_web = { version = "0.213.0", features = [] } + +deno_http = { version = "0.180.0", features = [] } +deno_tls = { version = "0.169.0", features = [] } +deno_websocket = { version = "0.187.0", features = [] } +deno_webstorage = { version = "0.177.0", features = [] } +deno_canvas = { version = "0.51.0", features = [] } +deno_webgpu = { version = "0.149.0", features = [] } diff --git a/crates/schema/Cargo.toml b/crates/schema/Cargo.toml index 571d081..35c1a70 100644 --- a/crates/schema/Cargo.toml +++ b/crates/schema/Cargo.toml @@ -29,6 +29,10 @@ client = [] server = [] [dependencies] +serde = { workspace = true } +serde_json = { workspace = true } +jsonschema = { workspace = true } + tonic = { workspace = true } prost = { workspace = true } tonic-types = { workspace = true } diff --git a/crates/schema/build.rs b/crates/schema/build.rs index 2fc49f0..8264a2b 100644 --- a/crates/schema/build.rs +++ b/crates/schema/build.rs @@ -1,6 +1,5 @@ #![forbid(unsafe_code)] -use std::fs::create_dir_all; use std::path::PathBuf; fn main() -> anyhow::Result<()> { @@ -16,12 +15,9 @@ fn main() -> anyhow::Result<()> { let instance = input_dir.join("./instance.proto"); let registry = 
input_dir.join("./registry.proto"); - let output_dir = PathBuf::from("./generated/"); - create_dir_all(output_dir.as_path())?; - let protos = [instance.as_path(), registry.as_path()]; let includes = [input_dir.as_path()]; - builder.out_dir(output_dir).compile(&protos, &includes)?; + builder.compile_protos(&protos, &includes)?; Ok(()) } diff --git a/crates/schema/lib.rs b/crates/schema/lib.rs index 1e3864a..2052c04 100644 --- a/crates/schema/lib.rs +++ b/crates/schema/lib.rs @@ -2,21 +2,32 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("./README.md")] -pub mod json { +pub mod policy { //! Includes files generated by `prost`. - pub mod schema { + pub mod retry { //! Includes files generated by `prost`. - //! Built from `jsonschema.proto`. + //! Built from `policy/retry.proto`. - include!("./generated/rt.json.schema.rs"); + tonic::include_proto!("rt.policy.retry"); } - pub mod value { + pub mod timeout { //! Includes files generated by `prost`. - //! Built from `jsonvalue.proto`. + //! Built from `policy/timeout.proto`. - include!("./generated/rt.json.value.rs"); + tonic::include_proto!("rt.policy.timeout"); + } +} + +pub mod internal { + //! Includes files generated by `prost`. + + pub mod json { + //! Includes files generated by `prost`. + //! Built from `internal/json.proto`. + + tonic::include_proto!("rt.internal.json"); } } @@ -24,33 +35,33 @@ pub mod instance { //! Includes files generated by `prost`. //! Built from `instance.proto`. - include!("./generated/rt.instance.rs"); + tonic::include_proto!("rt.instance"); } pub mod registry { //! Includes files generated by `prost`. //! Built from `registry.proto`. - include!("./generated/rt.registry.rs"); + tonic::include_proto!("rt.registry"); } pub mod entity { //! Includes files generated by `prost`. //! Built from `entity.proto`. - include!("./generated/rt.entity.rs"); + tonic::include_proto!("rt.entity"); } pub mod request { //! Includes files generated by `prost`. //! Built from `request.proto`. 
- include!("./generated/rt.request.rs"); + tonic::include_proto!("rt.request"); } pub mod response { //! Includes files generated by `prost`. //! Built from `response.proto`. - include!("./generated/rt.response.rs"); + tonic::include_proto!("rt.response"); } diff --git a/crates/schema/protobuf/entity.proto b/crates/schema/protobuf/entity.proto deleted file mode 100644 index 8f954be..0000000 --- a/crates/schema/protobuf/entity.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/empty.proto"; -import "internal/jsonschema.proto"; -import "internal/jsonvalue.proto"; - -package rt.entity; - -// Describes service details. -message Service { - // Unique identifier for the service. - string service_id = 1; - // Name of the service (e.g., Google Email). - string name = 21; - // Unique identifier for the service's icon. - string icon = 22; - // Brief description of the service. - string description = 4; -} - -// Describes action or trigger details. -message Entity { - string entity_id = 1; - string service_id = 2; - - string name = 21; - string icon = 22; - - // Input specifications for this entity. - repeated Input inputs = 7; - // Output specifications for this entity. - repeated Output outputs = 8; - // Possible error codes this entity might return. - repeated Error errors = 9; -} - -// Describes secrets required by a entity. -message Secret { - // Unique identifier for the secret. - string id = 1; - // Name of the secret (e.g., "API Key"). - string name = 2; - // Description of the secret. - string description = 3; - // Whether the secret is mandatory for the service. - bool is_required = 4; -} - -// Describes the input requirements for an entity. -message Input { - // Name of the input (e.g., "recipient"). - string name = 1; - // Data type of the input (e.g., "string", "int"). - rt.json.schema.JsonSchema data_type = 2; - // Description of the input. 
- string description = 3; - // Whether this input is mandatory. - bool is_required = 4; - // Default value for the input, if applicable. - rt.json.value.JsonValue default_value = 5; -} - -// Describes the output generated by an entity. -message Output { - // Name of the output (e.g., "message_id"). - string name = 1; - // Data type of the output (e.g., "string", "int"). - rt.json.schema.JsonSchema data_type = 2; - // Description of the output. - string description = 3; -} - -// Describes potential errors an entity can return. -message Error { - // Unique error code (e.g., "ERR_401"). - string code = 1; - // Human-readable error message. - string message = 2; - // Description or guidance for resolving the error. - string resolution = 3; -} diff --git a/crates/schema/protobuf/event/request.proto b/crates/schema/protobuf/event/request.proto deleted file mode 100644 index 920ec71..0000000 --- a/crates/schema/protobuf/event/request.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/duration.proto"; - -package rt.request; - -// Request to open a connection to the event bus. -// Indicates the Gateway is ready accept events. -message OpenRequest { - // Token for authenticating the Gateway. - string authentication_token = 3; - // Required runtime capabilities. - repeated string runtime_capabilities = 4; - // Preferred communication protocols. - repeated string preferred_protocols = 5; - - // Constraints on resources for the task. - optional ResourceLimits resource_limits = 26; -} - -// Request to submit a task for execution by the Runtime. -message ExecuteRequest { - // Unique identifier for the task. - string task_id = 1; - // Custom task parameters as key-value pairs. - map task_fields = 2; - // Sensitive task-specific data (e.g., API keys). - map task_secrets = 3; - - // Priority level of the task (higher is more important). - optional int32 priority = 23; - // Deadline for the task completion. 
- optional google.protobuf.Timestamp deadline = 24; - // Whether task dependencies are cached. - optional bool cache_deps = 25; - - // Policy for retrying failed tasks. - optional RetryPolicy retry_policy = 31; - // Policy for handling task timeouts. - optional TimeoutPolicy timeout_policy = 32; -} - -// Request to close the connection, preventing the Runtime from accepting new tasks. -message CloseRequest { - // Force immediate closure without waiting. - // Ignores pending tasks (from this connection) before closing. - optional bool force_close = 2; - // Reason for closing the connection. - optional string reason = 3; - // Require acknowledgment before closing. - optional bool ack_required = 4; -} - -// Policy for retrying failed tasks. -message RetryPolicy { - // Maximum number of retry attempts. - uint32 max_retries = 1; - // Base delay between consecutive retries. - google.protobuf.Duration base_backoff = 2; - // Multiplier for exponential backoff. - optional double exponential_multiplier = 3; - // Maximum delay between consecutive retries. - google.protobuf.Duration max_backoff = 4; -} - -// Policy for handling task timeouts. -message TimeoutPolicy { - // Maximum execution time allowed for the task. - google.protobuf.Duration execution_timeout = 1; - // Whether to forcibly terminate the task on timeout. - bool terminate_on_timeout = 2; - // Action to take on timeout (e.g., "retry", "terminate"). - TimeoutAction timeout_action = 3; - // Extra time given before final termination after timeout. - optional google.protobuf.Duration grace_period = 4; - // Frequency of checking for timeout conditions. - optional google.protobuf.Duration monitor_interval = 5; -} - -// Lists all of possible timeout actions. -enum TimeoutAction { - // Default value, action unspecified. - TIMEOUT_ACTION_UNSPECIFIED = 0; - // Task is considered to be failed. Retry the task. - TIMEOUT_ACTION_RETRY = 1; - // Task is considered to be failed. Do not retry the task. 
- TIMEOUT_ACTION_TERMINATE = 2; -} - -// Limits runtime resources. -message ResourceLimits { - // Maximum used CPU percentage. - uint32 max_cpu_percent = 1; - // Maximum used RAM in MB. - uint32 max_ram_mb = 2; - // Maximum used disk in MB. - uint64 max_disk_mb = 3; -} diff --git a/crates/schema/protobuf/instance.proto b/crates/schema/protobuf/instance.proto index caee5ec..668d372 100644 --- a/crates/schema/protobuf/instance.proto +++ b/crates/schema/protobuf/instance.proto @@ -2,8 +2,8 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; import "google/protobuf/duration.proto"; -import "event/request.proto"; -import "event/response.proto"; +import "message/request.proto"; +import "message/response.proto"; package rt.instance; @@ -23,10 +23,12 @@ message EventRequest { oneof payload { // Step 1.1: Gateway requests to open a connection. rt.request.OpenRequest open_request = 11; + // Step 1.3: Gateway request to update policy. + rt.request.PolicyRequest policy_request = 12; // Step 2.1: Gateway submits a task for the execution. - rt.request.ExecuteRequest execute_request = 12; + rt.request.ExecuteRequest execute_request = 13; // Step 3.1: Gateway requests to close the connection. - rt.request.CloseRequest close_request = 13; + rt.request.CloseRequest close_request = 14; } } @@ -58,52 +60,42 @@ message EventResponse { } // Requests service status and metrics. -message StatusRequest { - // Forces retrieval of the latest metrics. - optional bool current = 11; +message GetStatusRequest { // Includes detailed metrics in the response. - optional bool verbose = 12; - + optional bool verbose_metrics = 1; + // Forces retrieval of the latest metrics. + optional bool force_latest = 2; // Sliding window length (used by metrics). - optional int32 window = 21; + optional uint32 sliding_window = 3; } -// Details service status and performance metrics. -message StatusResponse { +// Contains service status and performance metrics. 
+message GetStatusResponse { // Task-related metrics: // Number of tasks waiting in the queue to be processed. - int64 tasks_waiting = 11; + uint64 tasks_waiting = 11; // Number of tasks currently being processed. - int64 tasks_running = 12; - // Total number of tasks that have been completed successfully. - int64 tasks_done = 13; + uint64 tasks_running = 12; // Time-related metrics: - // Total time the service has been running since startup (human-readable format). - google.protobuf.Duration total_uptime = 21; - // Cumulative time the service has spent idle (not processing any tasks). - google.protobuf.Duration total_idle_time = 22; - // Cumulative time the service has been overwhelmed and tasks have been queued due to load. - google.protobuf.Duration total_wait_time = 23; - - // Average processing time for tasks in the most recent window. - google.protobuf.Duration avg_recent_time = 31; - // Overall average processing time since the service started. - google.protobuf.Duration avg_total_time = 32; - // Average processing time for tasks that failed. - google.protobuf.Duration avg_failure_time = 33; - // Average processing time for tasks that succeeded. - google.protobuf.Duration avg_success_time = 34; + // Average waiting time for tasks in the most recent window. + google.protobuf.Duration recent_waiting_time = 21; + // Average running time for tasks in the most recent window. + google.protobuf.Duration recent_running_time = 22; + // Overall average waiting time since the service started. + google.protobuf.Duration average_waiting_time = 23; + // Overall average running time since the service started. + google.protobuf.Duration average_running_time = 24; } // Provides runtime instance management. service Instance { // Retrieves detailed service health and performance metrics. 
- rpc Status(StatusRequest) returns (StatusResponse); + rpc GetStatus(GetStatusRequest) returns (GetStatusResponse); // Provides a bidirectional event streaming RPC for continuous communication // between the gateway (as a client) and the runtime (as a server). - rpc Bus(stream EventRequest) returns (stream EventResponse); + rpc ConnectWorker(stream EventRequest) returns (stream EventResponse); } diff --git a/crates/schema/protobuf/internal/jsonvalue.proto b/crates/schema/protobuf/internal/json.proto similarity index 83% rename from crates/schema/protobuf/internal/jsonvalue.proto rename to crates/schema/protobuf/internal/json.proto index d4c6825..dc62b7b 100644 --- a/crates/schema/protobuf/internal/jsonvalue.proto +++ b/crates/schema/protobuf/internal/json.proto @@ -2,18 +2,18 @@ syntax = "proto3"; import "google/protobuf/empty.proto"; -package rt.json.value; +package rt.internal.json; // Represents different types of `JSON` values. -message JsonValue { +message JsonData { // Specifies that the value field can hold one of several possible types. oneof value { // Represents a string type, e.g. `"hello"` or `"world"`. - string string_type = 1; + string string_value = 1; // Represents an integer type e.g. `4` or `138253`. - int32 integer_type = 2; + int32 integer_value = 2; // Represents a float type e.g. `2.6` or `6.0004`. - float float_type = 3; + float float_value = 3; // Represents a boolean type i.e. `true`/`false`. bool boolean_value = 4; // Represents a null value i.e. `null`. @@ -28,11 +28,11 @@ message JsonValue { // Contains the values of a map of `JSON` values. message JsonObject { // Contains the map of the field names and their values. - map fields = 1; + map fields = 1; } // Contains the values of an array of `JSON` values. message JsonArray { // Contains the values of the elements in the array. 
- repeated JsonValue items = 1; + repeated JsonData items = 1; } diff --git a/crates/schema/protobuf/internal/jsonschema.proto b/crates/schema/protobuf/internal/jsonschema.proto deleted file mode 100644 index 78b02be..0000000 --- a/crates/schema/protobuf/internal/jsonschema.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/empty.proto"; - -package rt.json.schema; - -// Represents any valid `JSON` schema type. -message JsonSchema { - // Specifies that the value field can hold one of several possible types. - oneof value { - // Represents a string type, e.g. `"hello"` or `"world"`. - google.protobuf.Empty string_type = 1; - // Represents an integer type e.g. `4` or `138253`. - google.protobuf.Empty integer_type = 2; - // Represents a float type e.g. `2.6` or `6.0004`. - google.protobuf.Empty float_type = 3; - // Represents a boolean type i.e. `true`/`false`. - google.protobuf.Empty boolean_value = 4; - // Represents a null value i.e. `null`. - google.protobuf.Empty null_value = 5; - // Represents an object type e.g. `{ "key": "value" }`. - JsonSchemaObject object_value = 6; - // Represents an array type e.g. `["key", "value"]`. - JsonSchemaArray array_value = 7; - } -} - -// Defines the schema for a `JSON` object, which consists of key-value pairs. -message JsonSchemaObject { - // Represents the map of the field names and their types. - map fields = 1; -} - -// Defines the schema for a `JSON` array, which contains a list of elements. -message JsonSchemaArray { - // Represents the types of the elements in the array. 
- repeated JsonSchema elements = 1; -} diff --git a/crates/schema/protobuf/message/entity.proto b/crates/schema/protobuf/message/entity.proto new file mode 100644 index 0000000..1f0f06b --- /dev/null +++ b/crates/schema/protobuf/message/entity.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; +import "internal/json.proto"; + +package rt.entity; + +// Describes service details. +message Service { + // Unique identifier for the service. + string service_id = 1; + // Name of the service (e.g., Google Email). + string name = 21; + // Unique identifier for the service's icon. + string icon = 22; + // Brief description of the service. + string description = 4; + + string version = 5; +} + +// Describes action or trigger details. +message Entity { + // Unique identifier for the entity. + string entity_id = 1; + // Unique identifier for the service. + string service_id = 2; + + // Name of the entity (e.g., Send via Google Email). + string name = 21; + // Unique identifier for the entity's icon. + string icon = 22; + + // Input `JSON` Schema for this entity. + repeated rt.internal.json.JsonData inputs = 7; + // Output `JSON` Schema for this entity. + repeated rt.internal.json.JsonData outputs = 8; + // Error `JSON` Schema for this entity. + repeated rt.internal.json.JsonData errors = 9; +} + +// Describes secrets required by a entity. +message Secret { + // Unique identifier for the secret. + string secret_id = 1; + // Name of the secret (e.g., "API Key"). + string name = 2; + // Description of the secret. 
+ string description = 3; +} diff --git a/crates/schema/protobuf/message/request.proto b/crates/schema/protobuf/message/request.proto new file mode 100644 index 0000000..1efaf03 --- /dev/null +++ b/crates/schema/protobuf/message/request.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "policy/retry.proto"; +import "policy/timeout.proto"; + +package rt.request; + +// Indicates the gateway is ready accept events. +message OpenRequest { + // Token for authenticating the gateway. + string authentication_token = 3; + // Required runtime capabilities. + repeated string runtime_capabilities = 4; + // Preferred communication protocols. + repeated string preferred_protocols = 5; + + // Constraints on resources for the task. + optional ResourceLimits resource_limits = 26; +} + +// Request to update the middleware policy by the Runtime. +message PolicyRequest { + // Policy for retrying failed tasks. + optional rt.policy.retry.RetryPolicy retry_policy = 31; + // Policy for handling task timeouts. + optional rt.policy.timeout.TimeoutPolicy timeout_policy = 32; +} + +// Request to submit a task for execution by the Runtime. +message ExecuteRequest { + // Unique identifier for the task. + string task_id = 1; + // Custom task parameters as key-value pairs. + map task_fields = 2; + // Sensitive task-specific data (e.g., API keys). + map task_secrets = 3; + + // Priority level of the task (higher is more important). + optional int32 priority = 23; + // Deadline for the task completion. + optional google.protobuf.Timestamp deadline = 24; + // Whether task dependencies are cached. + optional bool cache_deps = 25; +} + +// Request to close the connection, blocking the Runtime queue. +message CloseRequest { + // Forces immediate closure without waiting. + optional bool force_close = 2; + // Reason for closing the connection. + optional string reason = 3; + // Require acknowledgment before closing. 
+ optional bool ack_required = 4; +} + +// Limits runtime resources. +message ResourceLimits { + // Maximum used CPU percentage. + uint32 max_cpu_percent = 1; + // Maximum used RAM in MB. + uint32 max_ram_mb = 2; + // Maximum used disk in MB. + uint64 max_disk_mb = 3; +} diff --git a/crates/schema/protobuf/event/response.proto b/crates/schema/protobuf/message/response.proto similarity index 97% rename from crates/schema/protobuf/event/response.proto rename to crates/schema/protobuf/message/response.proto index d8e4de5..7454675 100644 --- a/crates/schema/protobuf/event/response.proto +++ b/crates/schema/protobuf/message/response.proto @@ -7,8 +7,6 @@ package rt.response; // Start execution response message. message OpenResponse { - google.protobuf.Timestamp started = 1; - optional google.protobuf.Duration estimate = 2; } // Response message containing the result of task execution. diff --git a/crates/schema/protobuf/policy/retry.proto b/crates/schema/protobuf/policy/retry.proto new file mode 100644 index 0000000..4d9b4f6 --- /dev/null +++ b/crates/schema/protobuf/policy/retry.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +package rt.policy.retry; + +// Policy for retrying failed tasks. +message RetryPolicy { + // Maximum number of retry attempts. + uint32 max_retries = 1; + // Base delay between consecutive retries. + google.protobuf.Duration base_backoff = 2; + // Multiplier for exponential backoff. + optional double exponential_multiplier = 3; + // Maximum delay between consecutive retries. 
+ google.protobuf.Duration max_backoff = 4; +} diff --git a/crates/schema/protobuf/policy/timeout.proto b/crates/schema/protobuf/policy/timeout.proto new file mode 100644 index 0000000..64601e1 --- /dev/null +++ b/crates/schema/protobuf/policy/timeout.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +package rt.policy.timeout; + +// Policy for handling task timeouts. +message TimeoutPolicy { + // Maximum execution time allowed for the task. + google.protobuf.Duration execution_timeout = 1; + // Whether to forcibly terminate the task on timeout. + bool terminate_on_timeout = 2; + // Action to take on timeout (e.g., "retry", "terminate"). + TimeoutAction timeout_action = 3; + // Extra time given before final termination after timeout. + optional google.protobuf.Duration grace_period = 4; + // Frequency of checking for timeout conditions. + optional google.protobuf.Duration monitor_interval = 5; +} + +// Lists all of possible timeout actions. +enum TimeoutAction { + // Default value, action unspecified. + TIMEOUT_ACTION_UNSPECIFIED = 0; + // Task is considered to be failed. Retry the task. + TIMEOUT_ACTION_RETRY = 1; + // Task is considered to be failed. Do not retry the task. + TIMEOUT_ACTION_TERMINATE = 2; +} diff --git a/crates/schema/protobuf/registry.proto b/crates/schema/protobuf/registry.proto index 4de490f..c2877ec 100644 --- a/crates/schema/protobuf/registry.proto +++ b/crates/schema/protobuf/registry.proto @@ -2,61 +2,63 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; -import "entity.proto"; +import "message/entity.proto"; package rt.registry; -// Retrieves the registry details. -message RegistryRequest {} - // Contains the registry details. -message RegistryResponse { +message RegistryContentResponse { // Total number of registered services. uint32 total_services = 1; - // Total number of registered actions. 
- uint32 total_actions = 2; - // Total number of registered triggers. - uint32 total_triggers = 3; - - // Registry registration startup timestamp. - google.protobuf.Timestamp first_updated_at = 4; - // Registry registration shutdown timestamp. - google.protobuf.Timestamp last_updated_at = 5; + // Total number of registered entities. + uint32 total_entities = 2; // List of registered services. repeated rt.entity.Service services = 11; - // List of registered actions. - repeated rt.entity.Entity actions = 12; - // List of registered triggers. - repeated rt.entity.Entity triggers = 13; + // List of registered entities. + repeated rt.entity.Entity entities = 12; + + // Registry registration startup timestamp. + google.protobuf.Timestamp first_updated_at = 21; + // Registry registration shutdown timestamp. + google.protobuf.Timestamp last_updated_at = 22; } -message SearchRequest { - uint32 query_id = 11; - // Search term to match all registered entities to. - string query = 12; - // Filter by associated tags. - repeated string tags = 13; - - // Limit on the number of search results. - uint32 max_results = 21; - // Include deprecated entities in search results if true. - bool include_deprecated = 22; +// Defines the format of the search query. +message FindServicesRequest { + // Unique identifier for the query. + string query_id = 1; + // Searches the query to match services or entities. + string query = 2; + // Filters by associated tags (if any). + repeated string tags = 3; + + // Limits on the number of search results. + uint32 max_results = 11; + // Includes deprecated entities in search results. + bool include_deprecated = 12; } -message SearchResponse { +// Defines the format of the search results. +message FindServicesResponse { + // Unique identifier for the query. + string query_id = 1; + + // Services matching the search criteria. + repeated rt.entity.Service matching_services = 2; // Entities matching the search criteria. 
- repeated rt.entity.Entity matching_entities = 1; + repeated rt.entity.Entity matching_entities = 3; + // Total number of matches found. - uint32 total_matches = 2; - // True if results were truncated due to max_results. - bool truncated = 3; + uint32 total_matches = 11; + // Indicates if results were truncated. + bool is_truncated = 12; } service Registry { - // Comprehensive collection of available tasks and their metadata. - rpc Registry(RegistryRequest) returns (RegistryResponse); + // Retrieves all available tasks and their metadata. + rpc GetRegistryContent(google.protobuf.Empty) returns (RegistryContentResponse); - // Searches for specific services in the registry. - rpc Search(SearchRequest) returns (SearchResponse); + // Searches for specific services and entities in the registry. + rpc FindServices(FindServicesRequest) returns (FindServicesResponse); } diff --git a/crates/server/Cargo.toml b/crates/server/Cargo.toml index 56f9157..be8ac02 100644 --- a/crates/server/Cargo.toml +++ b/crates/server/Cargo.toml @@ -23,7 +23,7 @@ path = "lib.rs" [dependencies] # axiston-rt-jsvm = { workspace = true } axiston-rt-schema = { workspace = true } -# axiston-rt-task = { workspace = true } +axiston-rt-task = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } @@ -31,6 +31,14 @@ futures = { workspace = true } anyhow = { workspace = true } tracing = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +jsonschema = { workspace = true } + +derive_more = { workspace = true } +time = { workspace = true } +uuid = { workspace = true } + tonic = { workspace = true } prost = { workspace = true } tonic-types = { workspace = true } diff --git a/crates/server/handler/instance.rs b/crates/server/handler/instance.rs index cdfe6bc..bcaf09b 100644 --- a/crates/server/handler/instance.rs +++ b/crates/server/handler/instance.rs @@ -1,14 +1,25 @@ +use std::num::{NonZero, NonZeroU32}; + +use 
axiston_rt_schema::instance::event_request::Payload as RequestPayload; +use axiston_rt_schema::instance::event_response::Payload as ResponsePayload; use axiston_rt_schema::instance::instance_server::{Instance, InstanceServer}; -use axiston_rt_schema::instance::{EventRequest, EventResponse, StatusRequest, StatusResponse}; +use axiston_rt_schema::instance::{ + EventRequest, EventResponse, GetStatusRequest, GetStatusResponse, +}; +use axiston_rt_schema::response::OpenResponse; use futures::stream::BoxStream; -use futures::StreamExt; +use futures::{Stream, StreamExt}; +use prost_types::Timestamp; +use time::OffsetDateTime; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status, Streaming}; +use crate::handler::ErrorKind; use crate::service::AppState; -/// TODO. +/// Implements [`Instance`] service for the [`InstanceService`]. +#[derive(Clone)] pub struct InstanceService { state: AppState, } @@ -25,33 +36,114 @@ impl InstanceService { pub fn into_server(self) -> InstanceServer { InstanceServer::new(self) } + + /// TODO. + async fn process_event_payload( + &self, + payload: RequestPayload, + ) -> Result { + match payload { + RequestPayload::OpenRequest(_) => { + // let _guard = task_counter.guard_running_tasks(); + // task_counter. 
+ } + RequestPayload::PolicyRequest(_) => {} + RequestPayload::ExecuteRequest(_) => {} + RequestPayload::CloseRequest(_) => {} + } + + todo!() + } } #[tonic::async_trait] impl Instance for InstanceService { - async fn status( + async fn get_status( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { + let (metadata, extension, request) = request.into_parts(); + + let sliding_window = request.sliding_window.and_then(NonZeroU32::new); + // let snapshot = self.state.task_counter.get_snapshot(sliding_window); + + let message = GetStatusResponse { + tasks_waiting: 0, + tasks_running: 0, + recent_waiting_time: None, + recent_running_time: None, + average_waiting_time: None, + average_running_time: None, + }; + + // Ok(Response::new(message)) + todo!() } - type BusStream = BoxStream<'static, Result>; + type ConnectWorkerStream = BoxStream<'static, Result>; - async fn bus( + async fn connect_worker( &self, request: Request>, - ) -> Result, Status> { + ) -> Result, Status> { let mut request = request.into_inner(); let (tx, rx) = mpsc::channel(128); + // TODO: Create a new queue. 
+ // let task_queue = self.state.task_queue.clone(); + // let task_counter = self.state.task_counter.clone(); + + let this = self.clone(); + let fut = async move { + while let Some(request) = request.next().await { + let recv_data_time = OffsetDateTime::now_utc(); + let recv_timestamp = Timestamp { + seconds: recv_data_time.unix_timestamp(), + nanos: recv_data_time.nanosecond() as i32, + }; + + let event_request = match request { + Ok(event_request) => event_request, + Err(event_status) => todo!(), + }; + + let Some(request_payload) = event_request.payload else { + tx.send(Err(ErrorKind::Unknown.into_status())).await; + continue; + }; - let _handle = tokio::spawn(async move { - while let Some(event) = request.next().await { - let _ = event; + let response_payload = match request_payload { + RequestPayload::OpenRequest(_) => { + ResponsePayload::OpenResponse(OpenResponse {}) + } + RequestPayload::PolicyRequest(_) => todo!(), + RequestPayload::ExecuteRequest(_) => todo!(), + RequestPayload::CloseRequest(_) => todo!(), + }; + + let send_data_time = OffsetDateTime::now_utc(); + let send_timestamp = Timestamp { + seconds: send_data_time.unix_timestamp(), + nanos: send_data_time.nanosecond() as i32, + }; + + let event_response = EventResponse { + request_id: event_request.request_id, + group_id: event_request.group_id, + response_id: 0, + recv: Some(recv_timestamp), + send: Some(send_timestamp), + payload: Some(response_payload), + }; + + tx.send(Ok(event_response)).await; } - }); + // Client closed connection. + }; + + let _handle = tokio::spawn(fut); let rx = ReceiverStream::new(rx); Ok(Response::new(Box::pin(rx))) } diff --git a/crates/server/handler/mod.rs b/crates/server/handler/mod.rs index 59b404e..b0372b9 100644 --- a/crates/server/handler/mod.rs +++ b/crates/server/handler/mod.rs @@ -1,8 +1,139 @@ -//! TODO. +//! All `tonic::`[`Server`]s with related handlers. //! +//! 
[`Server`]: tonic::transport::Server + +use std::borrow::Cow; + +use derive_more::From; +use tonic::{Code, Status}; pub use crate::handler::instance::InstanceService; pub use crate::handler::registry::RegistryService; mod instance; mod registry; + +/// The error type for [`Server`]s. +/// +/// [`Server`]: tonic::transport::Server +#[derive(Debug, Default, From)] +pub struct Error { + kind: ErrorKind, +} + +impl Error { + /// Returns a new [`Error`]. + #[inline] + pub fn new(kind: ErrorKind) -> Self { + Self { kind } + } + + /// Returns a new [`Status`]. + pub fn into_status(self) -> Status { + todo!() + } +} + +impl From for Status { + #[inline] + fn from(value: Error) -> Self { + value.into_status() + } +} + +/// Comprehensive list of all possible [`Error`]s. +#[derive(Debug, Default, Clone, Copy)] +#[must_use = "errors do nothing unless you use them"] +pub enum ErrorKind { + #[default] + Unknown, + Aborted, +} + +impl ErrorKind { + /// Explicitly converts into the [`Error`]. + #[inline] + pub fn into_error(self) -> Error { + self.into() + } + + /// Returns a new [`ErrorRepr`]. + pub fn into_repr(self) -> ErrorRepr<'static> { + match self { + ErrorKind::Unknown => ErrorRepr::INTERNAL_SERVICE_ERROR, + ErrorKind::Aborted => ErrorRepr::SERVICE_WAS_ABORTED, + } + } + + /// Returns a new [`Status`]. + pub fn into_status(self) -> Status { + self.into_repr().into_status() + } +} + +/// Internal representation of a serialized [`Error`] response. +#[derive(Debug, Clone)] +#[must_use = "errors do nothing unless you use them"] +struct ErrorRepr<'a> { + pub message: Cow<'a, str>, + pub code: Code, +} + +impl<'a> ErrorRepr<'a> { + const INTERNAL_SERVICE_ERROR: Self = Self::new( + "Internal service error. Unknown underlying error or panic.", + Code::Unknown, + ); + + const SERVICE_WAS_ABORTED: Self = Self::new( + "Request processing was aborted by either client or server.", + Code::Unknown, + ); + + /// Returns a new [`ErrorRepr`]. 
+ #[inline] + pub const fn new(message: &'a str, code: Code) -> Self { + Self { + message: Cow::Borrowed(message), + code, + } + } + + /// Returns a new [`Status`]. + #[inline] + pub fn into_status(self) -> Status { + Status::new(self.code, self.message) + } +} + +impl From> for Status { + #[inline] + fn from(value: ErrorRepr<'_>) -> Self { + value.into_status() + } +} + +/// A specialized [`Result`] type for the [`Error`] type. +/// +/// Used by [`Server`]s. +/// +/// [`Result`]: std::result::Result +/// [`Server`]: tonic::transport::Server +pub type Result = std::result::Result; + +#[cfg(test)] +mod test { + use crate::handler::{Error, ErrorKind}; + + #[test] + fn build_default_error() { + let error = Error::default(); + let _ = error.into_status(); + } + + #[test] + fn build_error_kind() { + let error = Error::new(ErrorKind::default()); + let _ = error.into_status(); + } +} diff --git a/crates/server/handler/registry.rs b/crates/server/handler/registry.rs index 55ceb1c..7ff7ace 100644 --- a/crates/server/handler/registry.rs +++ b/crates/server/handler/registry.rs @@ -1,10 +1,13 @@ use axiston_rt_schema::registry::registry_server::{Registry, RegistryServer}; -use axiston_rt_schema::registry::{CheckRequest, CheckResponse, RegistryRequest, RegistryResponse}; +use axiston_rt_schema::registry::{ + FindServicesRequest, FindServicesResponse, RegistryContentResponse, +}; +// use axiston_rt_schema::registry::{CheckRequest, CheckResponse, RegistryRequest, RegistryResponse}; use tonic::{Request, Response, Status}; use crate::service::AppState; -/// TODO. +/// Implements [`Registry`] service for the [`RegistryServer`]. 
pub struct RegistryService { state: AppState, } @@ -25,17 +28,17 @@ impl RegistryService { #[tonic::async_trait] impl Registry for RegistryService { - async fn registry( + async fn get_registry_content( &self, - request: Request, - ) -> Result, Status> { + request: Request<()>, + ) -> Result, Status> { todo!() } - async fn check( + async fn find_services( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { todo!() } } diff --git a/crates/server/service/instance.rs b/crates/server/service/instance.rs deleted file mode 100644 index 9a20ff1..0000000 --- a/crates/server/service/instance.rs +++ /dev/null @@ -1,12 +0,0 @@ -/// TODO. -#[derive(Debug, Clone)] -pub struct InstanceService {} - -impl InstanceService {} - -#[cfg(test)] -mod test { - fn build_default() -> anyhow::Result<()> { - Ok(()) - } -} diff --git a/crates/server/service/mod.rs b/crates/server/service/mod.rs index a9bd65e..a1f7d63 100644 --- a/crates/server/service/mod.rs +++ b/crates/server/service/mod.rs @@ -1,11 +1,14 @@ -//! TODO. -//! +//! Application state and dependency injection. + +use axiston_rt_task::routing::layers::Layers; +use axiston_rt_task::routing::Router; pub use crate::service::app_config::{AppBuilder, AppConfig}; +pub use crate::service::task_queue::TaskQueue; mod app_config; -mod instance; -mod registry; +mod task_metrics; +mod task_queue; /// Application state. /// @@ -15,13 +18,21 @@ mod registry; #[derive(Debug, Clone)] #[must_use = "state does nothing unless you use it"] pub struct AppState { - // runtime: Rc, + pub task_router: Router, + pub task_queue: TaskQueue, + // pub task_counter: TaskStatus, + // pub runtime: Rc, } impl AppState { /// Creates a new [`AppState`]. 
#[inline] pub fn new(config: AppConfig) -> Self { - Self {} + let layers = Layers::builder().build(); + + Self { + task_router: Router::new(layers), + task_queue: TaskQueue::new(), + } } } diff --git a/crates/server/service/registry.rs b/crates/server/service/task_metrics.rs similarity index 100% rename from crates/server/service/registry.rs rename to crates/server/service/task_metrics.rs diff --git a/crates/server/service/task_queue.rs b/crates/server/service/task_queue.rs new file mode 100644 index 0000000..67c7c6e --- /dev/null +++ b/crates/server/service/task_queue.rs @@ -0,0 +1,171 @@ +use std::cmp::{Ordering, Reverse}; +use std::collections::{BinaryHeap, HashMap, HashSet}; +use std::fmt; +use std::sync::{Arc, Mutex}; + +use derive_more::{Deref, DerefMut}; +use time::OffsetDateTime; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use uuid::{NoContext, Timestamp, Uuid}; + +/// TODO. +#[derive(Default, Clone)] +pub struct TaskQueue { + inner: Arc>, +} + +#[derive(Default, Deref, DerefMut)] +struct TaskQueueInner { + tasks: BinaryHeap, +} + +/// Represents a single change in task execution status. +#[derive(Debug, Clone)] +pub enum TaskQueueEvent { + Waiting, + PreRunning, + Running, + PostRunning, +} + +impl TaskQueue { + /// Returns a new [`TaskQueue`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`TaskQueueHandler`]. + #[inline] + pub fn handler(&self) -> TaskQueueHandler { + TaskQueueHandler::new(self.clone(), 128) + } + + /// Adds the task into the task queue. + fn add_task(&self, tx: Sender) -> Uuid { + let utc_datetime = OffsetDateTime::now_utc(); + let uuid_timestamp = Timestamp::from_unix( + NoContext, + utc_datetime.unix_timestamp() as u64, + utc_datetime.nanosecond(), + ); + + let mut guard = self.inner.lock().expect("should not be held"); + + // Makes sure that UUIDv7 is not duplicated. 
+ let mut task_id = Uuid::new_v7(uuid_timestamp); + while guard.iter().any(|task| task.id.0 == task_id) { + task_id = Uuid::new_v7(uuid_timestamp); + } + + guard.push(TaskData::new(task_id, tx)); + task_id + } + + /// Removes the task from the task queue. + pub fn remove_task(&self, id: Uuid) { + todo!() + } +} + +impl fmt::Debug for TaskQueue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskQueue").finish_non_exhaustive() + } +} + +/// Contains all the data required to execute a single task. +struct TaskData { + /// - [`BinaryHeap`] is a max-heap by default, so `cmp::`[`Reverse`] is used. + /// - UUID `v7` should be used to remain sortable by a timestamp. + id: Reverse, + tx: Sender, +} + +impl TaskData { + /// Returns a new [`TaskData`]. + #[inline] + pub fn new(id: Uuid, tx: Sender) -> Self { + Self { + id: Reverse(id), + tx, + } + } +} + +impl PartialEq for TaskData { + #[inline] + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(&self.id, &other.id) + } +} + +impl Eq for TaskData {} + +impl PartialOrd for TaskData { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + PartialOrd::partial_cmp(&self.id, &other.id) + } +} + +impl Ord for TaskData { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + Ord::cmp(&self.id, &other.id) + } +} + +/// TODO. +#[derive(Deref, DerefMut)] +pub struct TaskQueueHandler { + task_queue: TaskQueue, + send_event: Sender, + task_ids: HashSet, + + #[deref] + #[deref_mut] + recv_event: Receiver, +} + +impl TaskQueueHandler { + /// Returns a new [`TaskQueueHandler`]. + fn new(task_queue: TaskQueue, channel_cap: usize) -> Self { + let (tx, rx) = channel::(channel_cap); + Self { + task_queue, + send_event: tx.clone(), + task_ids: HashSet::new(), + recv_event: rx, + } + } + + /// Adds the task into the task queue. + #[inline] + pub fn add_task(&self) -> Uuid { + self.task_queue.add_task(self.send_event.clone()) + } + + /// Removes the task from the task queue. 
+ #[inline] + pub fn remove_task(&self, id: Uuid) { + self.task_queue.remove_task(id) + } +} + +impl fmt::Debug for TaskQueueHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NotifyGuard").finish_non_exhaustive() + } +} + +impl Drop for TaskQueueHandler { + fn drop(&mut self) { + let inner = &self.task_queue.inner; + let mut guard = inner.lock().expect("should not be held"); + guard.tasks.retain(|data| { + let Reverse(id) = data.id; + !self.task_ids.contains(&id) + }); + } +} diff --git a/crates/task/context/failure.rs b/crates/task/context/failure.rs index a1ca010..df1cc4f 100644 --- a/crates/task/context/failure.rs +++ b/crates/task/context/failure.rs @@ -1,27 +1,79 @@ use std::error::Error; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + /// Unrecoverable failure duration [`TaskHandler`] execution. /// /// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error, Serialize, Deserialize)] #[error("internal handler error")] #[must_use = "errors do nothing unless you use them"] pub struct TaskError { - inner: Box, + pub(crate) values: Value, } impl TaskError { /// Returns a new [`TaskError`]. #[inline] - pub fn new(inner: E) -> Self + pub fn new(values: Value) -> Self where E: Error + 'static, { - Self { - inner: Box::new(inner), - } + Self { values } + } + + /// Returns a new [`TaskErrorBuilder`]. + #[inline] + pub fn builder() -> TaskErrorBuilder { + TaskErrorBuilder::new() } } /// Specialized [`Result`] alias for the [`TaskError`] type. pub type TaskResult = Result; + +/// [`TaskHandler`] service error builder. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Clone)] +#[must_use = "errors do nothing unless you serialize them"] +pub struct TaskErrorBuilder { + values: Option, +} + +impl TaskErrorBuilder { + /// Returns a new [`TaskErrorBuilder`]. 
+ #[inline] + pub fn new() -> Self { + Self { values: None } + } + + /// Overrides the default value of [`TaskError`]`::values`. + #[inline] + pub fn with_values(mut self, values: Value) -> Self { + self.values = Some(values); + self + } + + /// Returns a new [`TaskError`]. + pub fn build(self) -> TaskError { + TaskError { + values: self.values.unwrap_or_default(), + } + } +} + +#[cfg(test)] +mod test { + use serde_json::Value; + + use crate::context::TaskError; + + #[test] + fn build_empty_error() -> crate::Result<()> { + let _error = TaskError::builder().with_values(Value::default()).build(); + + Ok(()) + } +} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs index 2d8654b..116796a 100644 --- a/crates/task/context/mod.rs +++ b/crates/task/context/mod.rs @@ -6,6 +6,7 @@ pub mod builders { //! [`TaskRequest`]: crate::context::TaskRequest //! [`TaskResponse`]: crate::context::TaskResponse + pub use super::failure::TaskErrorBuilder; pub use super::request::TaskRequestBuilder; pub use super::response::TaskResponseBuilder; } diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs index a45aaad..bbf4128 100644 --- a/crates/task/context/request.rs +++ b/crates/task/context/request.rs @@ -1,19 +1,11 @@ use std::fmt; -use derive_more::{Deref, DerefMut, From}; +use derive_more::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; use serde_json::Value; use crate::routing::layers::Layers; -/// TODO. -#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] -pub struct Inputs(pub Value); - -/// TODO. -#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] -pub struct Secrets(pub Value); - /// Serializable [`TaskHandler`] service request. 
/// /// [`TaskHandler`]: crate::handler::TaskHandler @@ -24,8 +16,8 @@ pub struct TaskRequest { #[deref_mut] inner: T, - pub(crate) inputs: Inputs, - pub(crate) secrets: Secrets, + pub(crate) inputs: Value, + pub(crate) secrets: Value, pub(crate) layers: Layers, } @@ -35,8 +27,8 @@ impl TaskRequest { pub fn new(inner: T) -> Self { Self { inner, - inputs: Inputs(Value::default()), - secrets: Secrets(Value::default()), + inputs: Value::default(), + secrets: Value::default(), layers: Layers::new(), } } @@ -112,8 +104,8 @@ impl TaskRequestBuilder { pub fn build(self) -> TaskRequest { TaskRequest { inner: self.inner, - inputs: Inputs(self.inputs.unwrap_or_default()), - secrets: Secrets(self.secrets.unwrap_or_default()), + inputs: self.inputs.unwrap_or_default(), + secrets: self.secrets.unwrap_or_default(), layers: self.layers.unwrap_or_default(), } } @@ -124,7 +116,7 @@ mod test { use serde_json::Value; use crate::context::TaskRequest; - use crate::routing::Layers; + use crate::routing::layers::Layers; use crate::Result; #[test] diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs index 1426e5c..84b9360 100644 --- a/crates/task/context/response.rs +++ b/crates/task/context/response.rs @@ -1,17 +1,9 @@ use std::fmt; -use derive_more::{Deref, DerefMut, From}; +use derive_more::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; use serde_json::Value; -/// TODO. -#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] -pub struct Outputs(pub Value); - -/// TODO. -#[derive(Debug, Clone, Serialize, Deserialize, Deref, From)] -pub struct Metrics(pub Value); - /// Deserializable [`TaskHandler`] service response. 
/// /// [`TaskHandler`]: crate::handler::TaskHandler @@ -22,8 +14,8 @@ pub struct TaskResponse { #[deref_mut] inner: T, - pub(crate) outputs: Outputs, - pub(crate) metrics: Metrics, + pub(crate) outputs: Value, + pub(crate) metrics: Value, } impl TaskResponse { @@ -32,8 +24,8 @@ impl TaskResponse { pub fn new(inner: T) -> Self { Self { inner, - outputs: Outputs(Value::default()), - metrics: Metrics(Value::default()), + outputs: Value::default(), + metrics: Value::default(), } } @@ -83,15 +75,15 @@ impl TaskResponseBuilder { /// Overrides the default value of [`TaskResponse`]`::outputs`. #[inline] - pub fn with_outputs(mut self, json: Value) -> Self { - self.outputs = Some(json); + pub fn with_outputs(mut self, values: Value) -> Self { + self.outputs = Some(values); self } /// Overrides the default value of [`TaskResponse`]`::metrics`. #[inline] - pub fn with_metrics(mut self, json: Value) -> Self { - self.metrics = Some(json); + pub fn with_metrics(mut self, values: Value) -> Self { + self.metrics = Some(values); self } @@ -99,8 +91,8 @@ impl TaskResponseBuilder { pub fn build(self) -> TaskResponse { TaskResponse { inner: self.inner, - outputs: Outputs(self.outputs.unwrap_or_default()), - metrics: Metrics(self.metrics.unwrap_or_default()), + outputs: self.outputs.unwrap_or_default(), + metrics: self.metrics.unwrap_or_default(), } } } diff --git a/crates/task/handler/future.rs b/crates/task/handler/future.rs index a64ee32..e0be0ea 100644 --- a/crates/task/handler/future.rs +++ b/crates/task/handler/future.rs @@ -11,6 +11,7 @@ use futures::FutureExt; use pin_project_lite::pin_project; use crate::context::{TaskError, TaskResponse}; +use crate::handler::metric::LockTaskMetrics; pin_project! { /// Opaque [`Future`] return type for [`TaskHandler::call`]. @@ -21,6 +22,7 @@ pin_project! 
{ #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct TaskFuture { #[pin] fut: BoxFuture<'static, Result, TaskError>>, + metrics: Option, } } @@ -29,17 +31,31 @@ impl TaskFuture { #[inline] pub fn new(fut: F) -> Self where - F: Future, TaskError>>, - F: Sized + Send + 'static, + F: Future, TaskError>> + Sized + Send + 'static, { - Self { fut: fut.boxed() } + Self { + fut: fut.boxed(), + metrics: None, + } + } + + /// Returns a new [`TaskFuture`]. + #[inline] + pub fn with_metrics(fut: F, metrics: LockTaskMetrics) -> Self + where + F: Future, TaskError>> + Sized + Send + 'static, + { + Self { + fut: fut.boxed(), + metrics: Some(metrics), + } } } impl From, TaskError>>> for TaskFuture { #[inline] fn from(fut: BoxFuture<'static, Result, TaskError>>) -> Self { - Self { fut } + Self { fut, metrics: None } } } @@ -60,7 +76,7 @@ mod test { use crate::Result; #[test] - fn future_from_block() -> Result<()> { + fn from_async_block() -> Result<()> { let fut = async move { Ok(TaskResponse::new(5)) }; let _fut = TaskFuture::new(fut); diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs index 3d13763..7ecc4d9 100644 --- a/crates/task/handler/metric.rs +++ b/crates/task/handler/metric.rs @@ -3,15 +3,46 @@ //! [`Load`]: tower::load::Load //! [`TaskHandler`]: crate::handler::TaskHandler +use std::sync::{Arc, Mutex}; + use serde::{Deserialize, Serialize}; +/// Reference-counting utility for [`TaskMetrics`]. +#[derive(Debug, Default, Clone)] +#[must_use = "metrics do nothing unless you serialize them"] +pub struct LockTaskMetrics { + inner: Arc>, +} + +impl LockTaskMetrics { + /// Returns a new [`LockTaskMetrics`]. + #[inline] + pub fn new(metrics: TaskMetrics) -> Self { + Self { + inner: Arc::new(Mutex::new(metrics)), + } + } + + /// Returns a new [`TaskMetrics`]. + pub fn snapshot(&self) -> TaskMetrics { + TaskMetrics {} + } +} + /// `tower::load::`[`Load`] metrics for [`TaskHandler`]s. 
/// /// [`Load`]: tower::load::Load /// [`TaskHandler`]: crate::handler::TaskHandler #[derive(Debug, Default, Clone, PartialOrd, PartialEq, Serialize, Deserialize)] #[must_use = "metrics do nothing unless you serialize them"] -pub struct TaskMetrics {} +pub struct TaskMetrics { + // TODO: pub average_waiting_time: Duration, + // TODO: pub average_recent_waiting_time: Duration, + // TODO: pub average_running_time: Duration, + // TODO: pub average_recent_running_time: Duration, + // TODO: pub total_success_runs: u32, + // TODO: pub total_failure_runs: u32, +} impl TaskMetrics { /// Returns a new [`TaskMetrics`]. @@ -23,11 +54,17 @@ impl TaskMetrics { #[cfg(test)] mod test { - use crate::handler::metric::TaskMetrics; + use crate::handler::metric::{LockTaskMetrics, TaskMetrics}; use crate::Result; #[test] - fn from_default() -> Result<()> { + fn metrics_lock() -> Result<()> { + let _metrics = LockTaskMetrics::default(); + Ok(()) + } + + #[test] + fn default_metrics() -> Result<()> { let _metrics = TaskMetrics::new(); Ok(()) } diff --git a/crates/task/handler/mod.rs b/crates/task/handler/mod.rs index 1d4d0a3..8a39bab 100644 --- a/crates/task/handler/mod.rs +++ b/crates/task/handler/mod.rs @@ -2,38 +2,51 @@ use std::fmt; use std::marker::PhantomData; -use std::ops::Deref; -use std::sync::Arc; use std::task::{Context, Poll}; use tower::load::Load; -use tower::util::BoxCloneService; +use tower::util::BoxCloneSyncService; use tower::{Layer, Service, ServiceBuilder}; use crate::context::{TaskError, TaskRequest, TaskResponse}; use crate::handler::future::TaskFuture; -use crate::handler::metric::TaskMetrics; +use crate::handler::metric::{LockTaskMetrics, TaskMetrics}; pub mod future; pub mod metric; /// Unified `tower::`[`Service`] for executing tasks. /// -/// Opaque [`BoxCloneService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. +/// Opaque [`BoxCloneSyncService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. 
#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] pub struct TaskHandler { - inner: BoxCloneService, TaskResponse, TaskError>, - metrics: Arc, + inner: BoxCloneSyncService, TaskResponse, TaskError>, + metrics: LockTaskMetrics, } impl TaskHandler { /// Returns a new [`TaskHandler`]. - #[inline] pub fn new(inner: S) -> Self where T: 'static, U: 'static, - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, + { + Self::with_metrics(inner, LockTaskMetrics::default()) + } + + /// Returns a new [`TaskHandler`] with provided [`LockTaskMetrics`]. + /// + /// Allows to share [`LockTaskMetrics`] and the inner [`TaskMetrics`]. + pub fn with_metrics(inner: S, metrics: LockTaskMetrics) -> Self + where + T: 'static, + U: 'static, + S: Service + Clone + Send + Sync + 'static, Req: From> + 'static, S::Response: Into> + 'static, S::Error: Into + 'static, @@ -46,8 +59,8 @@ impl TaskHandler { .service(inner); Self { - inner: BoxCloneService::new(inner), - metrics: Arc::new(TaskMetrics::default()), + inner: BoxCloneSyncService::new(inner), + metrics, } } @@ -55,14 +68,20 @@ impl TaskHandler { pub fn map(self, f: F) -> TaskHandler where F: FnOnce( - BoxCloneService, TaskResponse, TaskError>, - ) -> BoxCloneService, TaskResponse, TaskError>, + BoxCloneSyncService, TaskResponse, TaskError>, + ) -> BoxCloneSyncService, TaskResponse, TaskError>, { TaskHandler { inner: f(self.inner), metrics: self.metrics, } } + + /// Returns a new [`TaskMetrics`]. 
+ #[inline] + pub fn snapshot(&self) -> TaskMetrics { + self.metrics.snapshot() + } } impl Clone for TaskHandler { @@ -81,7 +100,11 @@ impl fmt::Debug for TaskHandler { } } -impl Service> for TaskHandler { +impl Service> for TaskHandler +where + T: 'static, + U: 'static, +{ type Response = TaskResponse; type Error = TaskError; type Future = TaskFuture; @@ -93,7 +116,7 @@ impl Service> for TaskHandler { #[inline] fn call(&mut self, req: TaskRequest) -> Self::Future { - self.inner.call(req).into() + TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()) } } @@ -102,26 +125,34 @@ impl Load for TaskHandler { #[inline] fn load(&self) -> Self::Metric { - self.metrics.deref().clone() + self.metrics.snapshot() } } /// `tower::`[`Layer`] that produces a [`TaskHandler`] services. pub struct TaskHandlerLayer { + metrics: LockTaskMetrics, inner: PhantomData<(Req, T, U)>, } impl TaskHandlerLayer { /// Returns a new [`TaskHandlerLayer`]. #[inline] - pub fn new() -> Self { - Self::default() + pub fn new(metrics: LockTaskMetrics) -> Self { + Self { + metrics, + inner: PhantomData, + } } } impl Default for TaskHandlerLayer { + #[inline] fn default() -> Self { - Self { inner: PhantomData } + Self { + metrics: LockTaskMetrics::default(), + inner: PhantomData, + } } } @@ -129,7 +160,7 @@ impl Layer for TaskHandlerLayer where T: 'static, U: 'static, - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, Req: From> + 'static, S::Response: Into> + 'static, S::Error: Into + 'static, @@ -139,7 +170,7 @@ where #[inline] fn layer(&self, inner: S) -> Self::Service { - TaskHandler::new(inner) + TaskHandler::with_metrics(inner, self.metrics.clone()) } } @@ -156,7 +187,7 @@ mod test { } #[test] - fn manual_compose() -> Result<()> { + fn service_compose() -> Result<()> { let inner = service_fn(handle); let _service = TaskHandler::new(inner); Ok(()) @@ -165,7 +196,7 @@ mod test { #[test] fn service_builder() -> Result<()> { let _service = 
ServiceBuilder::new() - .layer(TaskHandlerLayer::new()) + .layer(TaskHandlerLayer::default()) .service(service_fn(handle)); Ok(()) } diff --git a/crates/task/lib.rs b/crates/task/lib.rs index af879b6..2e33bbc 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -13,27 +13,44 @@ //! ``` use std::borrow::Cow; +use std::collections::HashMap; +use derive_more::From; use jsonschema::ValidationError; +use crate::routing::index::{RouteIndex, ServiceIndex}; +use crate::routing::manifest::{RouteManifest, ServiceManifest}; + pub mod context; pub mod handler; pub mod routing; +/// TODO. +#[derive(Debug)] +pub struct Registry { + services: HashMap, + routes: HashMap, +} + /// Unrecoverable failure of the [`Router`]. /// /// Includes all error types that may occur. /// /// [`Router`]: routing::Router -#[derive(Debug, thiserror::Error)] +#[derive(Debug, thiserror::Error, From)] #[must_use = "errors do nothing unless you use them"] pub enum Error { + /// Route with the index not found. + #[error("index not found")] + Index(RouteIndex), /// Task validation failure. + #[from(ignore)] #[error("task validation failure: {0}")] - Validate(ValidationError<'static>), + Validation(ValidationError<'static>), /// Task execution failure. + #[from(ignore)] #[error("task execution failure: {0}")] - Task(#[from] context::TaskError), + Execution(#[from] context::TaskError), } impl<'a> From> for Error { @@ -45,7 +62,7 @@ impl<'a> From> for Error { schema_path: validation_error.schema_path, }; - Self::Validate(validation_error) + Self::Validation(validation_error) } } @@ -53,8 +70,3 @@ impl<'a> From> for Error { /// /// [`Result`]: std::result::Result pub type Result = std::result::Result; - -// TODO: Is there any real reason to make a different between action and trigger? -// Make trigger dynamically determined if the action returns a single boolean. - -// TODO: Manifests are defined in proto files with serde derive. 
diff --git a/crates/task/routing/manifest.rs b/crates/task/routing/manifest.rs index 233f700..19ecf65 100644 --- a/crates/task/routing/manifest.rs +++ b/crates/task/routing/manifest.rs @@ -7,7 +7,7 @@ use serde_json::Value; #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] pub struct ServiceManifest { - pub(crate) service_id: String, + pub service_id: String, } impl ServiceManifest { @@ -21,11 +21,12 @@ impl ServiceManifest { #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] pub struct RouteManifest { - pub(crate) route_id: String, - pub(crate) service_id: String, - pub(crate) inputs_schema: Value, - pub(crate) outputs_schema: Value, - pub(crate) errors_schema: Value, + pub route_id: String, + // version, deprecation notice + pub service_id: String, + pub inputs_schema: Value, + pub outputs_schema: Value, + pub errors_schema: Value, } impl RouteManifest { diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs index 9d4d7ee..c70ae35 100644 --- a/crates/task/routing/mod.rs +++ b/crates/task/routing/mod.rs @@ -6,10 +6,12 @@ use std::collections::HashMap; use std::fmt; use std::sync::Arc; +use crate::context::{TaskRequest, TaskResponse}; use crate::routing::index::{RouteIndex, ServiceIndex}; use crate::routing::layers::{LayerCompose, Layers}; use crate::routing::manifest::{RouteManifest, ServiceManifest}; pub use crate::routing::route::Route; +use crate::{Registry, Result}; pub mod index; pub mod layers; @@ -25,13 +27,15 @@ pub type RouteResponse = (); /// TODO. #[must_use = "routes do nothing unless you use them"] pub struct Router { - router_inner: Arc>, + inner: Arc>, } +// TODO: Should route manifest be inside of route handler? 
+ struct RouterInner { - layer_compose: LayerCompose, + layer_compose: Option, service_manifests: HashMap, - route_handlers: HashMap>, + routes: HashMap>, } impl Router { @@ -39,24 +43,23 @@ impl Router { #[inline] pub fn new(layers: Layers) -> Self { let router_inner = RouterInner { - layer_compose: LayerCompose::new(layers), + layer_compose: Some(LayerCompose::new(layers)), service_manifests: HashMap::default(), - route_handlers: HashMap::new(), + routes: HashMap::new(), }; Self { - router_inner: Arc::new(router_inner), + inner: Arc::new(router_inner), } } /// Overrides the default value of [`Router`]`::layer_compose`. pub fn with_layers(self, layers: Layers) -> Self { - let mut inner = Arc::try_unwrap(self.router_inner) - .unwrap_or_else(|router_handler| (*router_handler).clone()); - inner.layer_compose = LayerCompose::new(layers); + let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); + inner.layer_compose = Some(LayerCompose::new(layers)); Self { - router_inner: Arc::new(inner), + inner: Arc::new(inner), } } @@ -66,30 +69,66 @@ impl Router { service_index: ServiceIndex, service_manifest: ServiceManifest, ) -> Self { - let mut inner = Arc::try_unwrap(self.router_inner) - .unwrap_or_else(|router_handler| (*router_handler).clone()); + let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); let _ = inner .service_manifests .insert(service_index, service_manifest); Self { - router_inner: Arc::new(inner), + inner: Arc::new(inner), } } /// Registers another [`Route`] by its [`RouteIndex`]. 
pub fn with_route(self, route_index: RouteIndex, route: Route) -> Self { - let mut inner = Arc::try_unwrap(self.router_inner) - .unwrap_or_else(|router_handler| (*router_handler).clone()); - let _ = inner.route_handlers.insert(route_index, route); + let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); + let _ = inner.routes.insert(route_index, route); Self { - router_inner: Arc::new(inner), + inner: Arc::new(inner), } } // TODO: Method to return the whole registry. // TODO: Method to execute a single route. + + /// Returns the reference to the [`ServiceManifest`]. + pub fn get_service_manifest(&self, service_index: &ServiceIndex) -> Option<&ServiceManifest> { + self.inner.service_manifests.get(service_index) + } + + /// Returns the reference to the [`RouteManifest`]. + pub fn get_route_manifest(&self, route_index: &RouteIndex) -> Option<&RouteManifest> { + let route = self.inner.routes.get(route_index); + route.map(|x| &x.route_handler.route_manifest) + } + + /// Returns all [`ServiceManifest`]s and [`RouteManifest`]s. + pub fn get_registry(&self) -> Registry { + let routes = self.inner.routes.iter(); + Registry { + services: self.inner.service_manifests.clone(), + routes: routes + .map(|(i, r)| (i.clone(), r.route_handler.route_manifest.clone())) + .collect(), + } + } + + /// TODO. 
+ pub async fn route( + &self, + route_index: &RouteIndex, + task_request: TaskRequest, + ) -> Result> + where + T: 'static, + U: 'static, + { + let route_handler = self.inner.routes.get(route_index); + let route_handler = route_handler.ok_or_else(|| route_index.clone())?; + let layer_compose = self.inner.layer_compose.as_ref(); + route_handler.route(task_request, layer_compose).await + } } impl fmt::Debug for Router { @@ -101,13 +140,13 @@ impl fmt::Debug for Router { impl Default for Router { fn default() -> Self { let router_handler = RouterInner { - layer_compose: LayerCompose::default(), + layer_compose: None, service_manifests: HashMap::default(), - route_handlers: HashMap::default(), + routes: HashMap::default(), }; Self { - router_inner: Arc::new(router_handler), + inner: Arc::new(router_handler), } } } @@ -115,7 +154,7 @@ impl Default for Router { impl Clone for Router { fn clone(&self) -> Self { Self { - router_inner: self.router_inner.clone(), + inner: self.inner.clone(), } } } @@ -125,7 +164,7 @@ impl Clone for RouterInner { Self { layer_compose: self.layer_compose.clone(), service_manifests: self.service_manifests.clone(), - route_handlers: self.route_handlers.clone(), + routes: self.routes.clone(), } } } diff --git a/crates/task/routing/route.rs b/crates/task/routing/route.rs index a311ce7..3a9e510 100644 --- a/crates/task/routing/route.rs +++ b/crates/task/routing/route.rs @@ -17,7 +17,7 @@ pub struct Route { pub(crate) route_handler: Arc>, } -struct RouteHandler { +pub(crate) struct RouteHandler { pub(crate) route_task_handler: TaskHandler, pub(crate) route_manifest: RouteManifest, pub(crate) inputs_schema_validator: Validator, @@ -29,7 +29,6 @@ impl Route { /// Returns a new [`Route`]. pub fn new( route_task_handler: TaskHandler, - layer_compose: Option, route_manifest: RouteManifest, ) -> Result { let route_handler = RouteHandler { @@ -58,25 +57,45 @@ impl Route { } /// Processes the request and returns the response asynchronously. 
- pub async fn execute(&self, task_request: TaskRequest) -> Result> { + pub async fn route( + &self, + task_request: TaskRequest, + layer_compose: Option<&LayerCompose>, + ) -> Result> + where + T: 'static, + U: 'static, + { // TODO: Apply layers. // let _ = &task_request.layers; self.route_handler .inputs_schema_validator .validate(&task_request.inputs)?; + let mut task_handler = self.route_handler.route_task_handler.clone(); - let task_response = task_handler.call(task_request).await?; - self.route_handler - .outputs_schema_validator - .validate(&task_response.outputs)?; - Ok(task_response) + match task_handler.call(task_request).await { + Ok(task_response) => { + self.route_handler + .outputs_schema_validator + .validate(&task_response.outputs)?; + Ok(task_response) + } + Err(task_error) => { + self.route_handler + .errors_schema_validator + .validate(&task_error.values)?; + Err(task_error.into()) + } + } } } impl Clone for Route { fn clone(&self) -> Self { - todo!() + Self { + route_handler: self.route_handler.clone(), + } } } From 0e4ea0acdbdd2a9616e2df64659475cd29892512 Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Wed, 25 Dec 2024 11:38:01 +0100 Subject: [PATCH 07/11] feat(all): client --- crates/client/Cargo.toml | 38 ++++++ crates/client/README.md | 20 +++ crates/client/config/custom_hooks.rs | 39 ++++++ crates/client/config/mod.rs | 115 +++++++++++++++++ crates/client/config/pool_config.rs | 67 ++++++++++ crates/client/lib.rs | 84 ++++++++++++ crates/client/manager/instance_client.rs | 89 +++++++++++++ crates/client/manager/manager_config.rs | 58 +++++++++ crates/client/manager/mod.rs | 148 ++++++++++++++++++++++ crates/client/manager/runtime_endpoint.rs | 110 ++++++++++++++++ crates/client/middleware/future.rs | 35 +++++ crates/client/middleware/mod.rs | 41 ++++++ crates/jsvm/extension/route/datatype.rs | 1 - 13 files changed, 844 insertions(+), 1 deletion(-) create mode 100644 crates/client/Cargo.toml create mode 100644 crates/client/README.md 
create mode 100644 crates/client/config/custom_hooks.rs create mode 100644 crates/client/config/mod.rs create mode 100644 crates/client/config/pool_config.rs create mode 100644 crates/client/lib.rs create mode 100644 crates/client/manager/instance_client.rs create mode 100644 crates/client/manager/manager_config.rs create mode 100644 crates/client/manager/mod.rs create mode 100644 crates/client/manager/runtime_endpoint.rs create mode 100644 crates/client/middleware/future.rs create mode 100644 crates/client/middleware/mod.rs delete mode 100644 crates/jsvm/extension/route/datatype.rs diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml new file mode 100644 index 0000000..9530f29 --- /dev/null +++ b/crates/client/Cargo.toml @@ -0,0 +1,38 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "axiston-rt-client" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = "lib.rs" + +[dependencies] +axiston-rt-schema = { workspace = true, features = ["client"] } +deadpool = { version = "0.12", features = ["managed", "rt_tokio_1"] } + +tokio = { workspace = true } +http = { version = "1.2", features = [] } +tracing = { workspace = true } +thiserror = { workspace = true } +tonic = { workspace = true } +prost = { workspace = true } +tower = { workspace = true } +tower-http = { workspace = true } + +derive_more = { workspace = true } +serde = { workspace = true } +uuid = { workspace = true } diff --git a/crates/client/README.md b/crates/client/README.md new file mode 100644 index 0000000..f819023 --- /dev/null +++ b/crates/client/README.md @@ -0,0 +1,20 @@ +### runtime/client + +[![Build 
Status][action-badge]][action-url] +[![Crate Docs][docs-badge]][docs-url] +[![Crate Version][crates-badge]][crates-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-client.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-client +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-client?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-client + +Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. + +#### Notes + +- Lorem Ipsum. +- Lorem Ipsum. +- Lorem Ipsum. diff --git a/crates/client/config/custom_hooks.rs b/crates/client/config/custom_hooks.rs new file mode 100644 index 0000000..157a4b9 --- /dev/null +++ b/crates/client/config/custom_hooks.rs @@ -0,0 +1,39 @@ +use deadpool::managed::{HookResult, Metrics}; + +use crate::manager::{RuntimeClient, RuntimeError}; + +/// Custom hook called after a new connection has been established. +/// +/// See [`PoolBuilder`] for more details. +/// +/// [`PoolBuilder`]: deadpool::managed::PoolBuilder +pub fn post_create(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { + tracing::trace!(target: "runtime", "post_create"); + + // Note: should never return an error. + Ok(()) +} + +/// Custom hook called before a connection has been recycled. +/// +/// See [`PoolBuilder`] for more details. +/// +/// [`PoolBuilder`]: deadpool::managed::PoolBuilder +pub fn pre_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { + tracing::trace!(target: "runtime", "pre_recycle"); + + // Note: should never return an error. + Ok(()) +} + +/// Custom hook called after a connection has been recycled. +/// +/// See [`PoolBuilder`] for more details.
+/// +/// [`PoolBuilder`]: deadpool::managed::PoolBuilder +pub fn post_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { + tracing::trace!(target: "runtime", "post_recycle"); + + // Note: should never return an error. + Ok(()) +} diff --git a/crates/client/config/mod.rs b/crates/client/config/mod.rs new file mode 100644 index 0000000..b29752c --- /dev/null +++ b/crates/client/config/mod.rs @@ -0,0 +1,115 @@ +use std::fmt; + +use deadpool::managed::{Hook, Object, Pool}; +use derive_more::{Deref, DerefMut, From}; +use uuid::Uuid; + +use crate::config::custom_hooks::{post_create, post_recycle, pre_recycle}; +pub use crate::config::pool_config::RuntimeConfig; +use crate::manager::{RuntimeEndpoint, RuntimeManager, RuntimeManagerConfig}; +use crate::Result; + +mod custom_hooks; +mod pool_config; + +/// Asynchronous `runtime` connection pool. +/// +/// - Implemented with [`tonic`] and [`deadpool`]. +/// - Includes predefined create/recycle hooks. +/// - Emits traces on lifecycle events. +/// - Uses [`RuntimeConfig`] for configuration. +#[derive(Clone)] +pub struct Runtime { + inner: Pool, +} + +/// `RuntimeConnection` wrapper. +/// +/// Hides connection pool manager types. +#[derive(Debug, From, Deref, DerefMut)] +pub struct RuntimeObject { + inner_object: Object, +} + +impl RuntimeObject { + /// Removes this runtime endpoint from the pool. + pub async fn unregister_self(&self) -> Result<()> { + let Some(runtime_pool) = Object::pool(&self.inner_object) else { + return Ok(()); + }; + + let runtime_manager = runtime_pool.manager(); + runtime_manager + .unregister_endpoint(self.as_endpoint_id()) + .await?; + + Ok(()) + } +} + +impl Runtime { + /// Returns a new [`Runtime`]. 
+ pub fn new(config: RuntimeConfig) -> Self { + let manager_config = + RuntimeManagerConfig::new().with_recycling_method(config.recycling_method); + let manager = RuntimeManager::new(manager_config); + let pool = Pool::builder(manager) + .max_size(config.max_conn.unwrap_or(64)) + .create_timeout(config.create_timeout) + .wait_timeout(config.wait_timeout) + .recycle_timeout(config.recycle_timeout) + .post_create(Hook::sync_fn(post_create)) + .pre_recycle(Hook::sync_fn(pre_recycle)) + .post_recycle(Hook::sync_fn(post_recycle)) + .runtime(deadpool::Runtime::Tokio1); + + let pool = pool.build().expect("should not require runtime"); + Self { inner: pool } + } + + /// Adds the runtime endpoint into the pool. + pub async fn register_endpoint>(&self, rt: E) -> Result<()> { + self.inner + .manager() + .register_endpoint(rt.into()) + .await + .map_err(Into::into) + } + + /// Removes the runtime endpoint from the pool. + pub async fn unregister_endpoint>(&self, rt: E) -> Result<()> { + self.inner + .manager() + .unregister_endpoint(&rt.into()) + .await + .map_err(Into::into) + } + + pub async fn get_connection(&self) -> Result { + self.inner.get().await.map(Into::into).map_err(Into::into) + } +} + +impl Default for Runtime { + fn default() -> Self { + Self::new(RuntimeConfig::default()) + } +} + +impl fmt::Debug for Runtime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Runtime").finish_non_exhaustive() + } +} + +#[cfg(test)] +mod test { + use crate::{Result, Runtime, RuntimeConfig}; + + #[test] + fn build_default_runtime() -> Result<()> { + let config = RuntimeConfig::new(); + let _runtime = Runtime::new(config); + Ok(()) + } +} diff --git a/crates/client/config/pool_config.rs b/crates/client/config/pool_config.rs new file mode 100644 index 0000000..7cbc6c8 --- /dev/null +++ b/crates/client/config/pool_config.rs @@ -0,0 +1,67 @@ +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +use crate::manager::RecyclingMethod; + +/// 
Configures [`Runtime`] for one or more runtimes. +/// +/// [`Runtime`]: crate::Runtime +#[derive(Debug, Default, Serialize, Deserialize)] +#[must_use = "configs do nothing unless you use them"] +pub struct RuntimeConfig { + pub(crate) max_conn: Option, + pub(crate) create_timeout: Option, + pub(crate) wait_timeout: Option, + pub(crate) recycle_timeout: Option, + pub(crate) recycling_method: RecyclingMethod, +} + +impl RuntimeConfig { + /// Creates a new [`RuntimeConfig`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Overwrites the default value of [`RuntimeConfig`]`::max_conn`. + pub fn with_max_conn(mut self, max_conn: usize) -> Self { + self.max_conn = Some(max_conn); + self + } + + /// Overwrites the default value of [`RuntimeConfig`]`::create_timeout`. + pub fn with_create_timeout(mut self, create_timeout: Duration) -> Self { + self.create_timeout = Some(create_timeout); + self + } + + /// Overwrites the default value of [`RuntimeConfig`]`::wait_timeout`. + pub fn with_wait_timeout(mut self, wait_timeout: Duration) -> Self { + self.wait_timeout = Some(wait_timeout); + self + } + + /// Overwrites the default value of [`RuntimeConfig`]`::recycle_timeout`. + pub fn with_recycle_timeout(mut self, recycle_timeout: Duration) -> Self { + self.recycle_timeout = Some(recycle_timeout); + self + } + + /// Overrides the value of [`RuntimeConfig`]`::recycling_method`. + pub fn with_recycling_method(mut self, recycling_method: RecyclingMethod) -> Self { + self.recycling_method = recycling_method; + self + } +} + +#[cfg(test)] +mod test { + use crate::{Result, RuntimeConfig}; + + #[test] + fn build_default_settings() -> Result<()> { + let _config = RuntimeConfig::new(); + Ok(()) + } +} diff --git a/crates/client/lib.rs b/crates/client/lib.rs new file mode 100644 index 0000000..d21e8af --- /dev/null +++ b/crates/client/lib.rs @@ -0,0 +1,84 @@ +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("./README.md")] + +//! 
### Examples +//! +//! ```rust +//! use axiston_rt_client::{Runtime, Result, RuntimeEndpoint}; +//! +//! +//! #[tokio::main] +//! async fn main() -> Result<()> { +//! let addr = "https://example.com/"; +//! let endpoint = RuntimeEndpoint::from_bytes(addr.into())?; +//! +//! let runtime = Runtime::default(); +//! runtime.register_endpoint(endpoint).await?; +//! let _conn = runtime.get_connection().await?; +//! +//! Ok(()) +//! } +//! ``` + +mod config; +mod manager; +mod middleware; + +use deadpool::managed::PoolError; +use derive_more::From; + +pub use crate::config::{Runtime, RuntimeConfig, RuntimeObject}; +pub use crate::manager::RuntimeEndpoint; +use crate::manager::RuntimeError; + +/// Unrecoverable failure of the [`Runtime`]. +/// +/// Includes all error types that may occur. +#[non_exhaustive] +#[derive(Debug, From, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { + /// Timeout happened. + #[error("timeout happened")] + Timout(deadpool::managed::TimeoutType), + + /// Runtime: All endpoints have reached the limit. + #[error("runtime: all endpoints have reached the limit")] + EndpointsLimit, + + /// Runtime: Connection pool has no endpoints. + #[error("runtime: connection pool has no endpoints")] + NoEndpoints, + + /// Runtime: Transport failure (from the client or server). 
+ #[error("runtime: transport failure: {0}")] + Transport(tonic::transport::Error), +} + +impl From for Error { + fn from(runtime_connection_error: RuntimeError) -> Self { + match runtime_connection_error { + RuntimeError::Transport(transport_failure) => Self::Transport(transport_failure), + RuntimeError::EndpointsLimit => Self::EndpointsLimit, + RuntimeError::NoEndpoints => Self::NoEndpoints, + } + } +} + +impl From> for Error { + fn from(value: PoolError) -> Self { + match value { + PoolError::Timeout(timeout_type) => Self::Timout(timeout_type), + PoolError::Backend(backend_error) => backend_error.into(), + PoolError::Closed => unreachable!(), + PoolError::NoRuntimeSpecified => unreachable!(), + PoolError::PostCreateHook(_) => unreachable!(), + } + } +} + +/// Specialized [`Result`] alias for the [`Error`] type. +/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/client/manager/instance_client.rs b/crates/client/manager/instance_client.rs new file mode 100644 index 0000000..1ac60cf --- /dev/null +++ b/crates/client/manager/instance_client.rs @@ -0,0 +1,89 @@ +use std::fmt; + +use axiston_rt_schema::instance::instance_client::InstanceClient; +use axiston_rt_schema::registry::registry_client::RegistryClient; +use derive_more::From; +use tonic::transport::{Channel, Endpoint}; +use uuid::Uuid; +use crate::middleware::RtChannel; + +/// Represents a client for interacting with runtime services. +/// +/// The `RuntimeClient` is responsible for managing communication with instance +/// and registry services, identified by a unique endpoint ID. It wraps generated +/// gRPC clients for both instance and registry operations, providing a cohesive +/// interface for runtime service interactions. +pub struct RuntimeClient { + pub(crate) endpoint_id: Uuid, + pub(crate) instance_client: InstanceClient, + pub(crate) registry_client: RegistryClient, +} + +impl RuntimeClient { + /// Returns a new [`RuntimeClient`]. 
+ #[inline] + pub fn new(id: Uuid, channel: Channel) -> Self { + let rt_channel = RtChannel::new(channel); + + Self { + endpoint_id: id, + instance_client: InstanceClient::new(rt_channel.clone()), + registry_client: RegistryClient::new(rt_channel), + } + } + + /// Returns a new [`RuntimeClient`]. + pub async fn connect(id: Uuid, endpoint: Endpoint) -> RuntimeResult { + let channel = endpoint.connect().await?; + Ok(Self::new(id, channel)) + } + + /// Returns the reference to the underlying unique endpoint identifier. + #[inline] + pub(crate) fn as_endpoint_id(&self) -> &Uuid { + &self.endpoint_id + } + + /// Returns the reference to the underlying (generated) instance client. + #[inline] + pub(crate) fn as_instance_client(&self) -> &InstanceClient { + &self.instance_client + } + + /// Returns the reference to the underlying (generated) registry client. + #[inline] + pub(crate) fn as_registry_client(&self) -> &RegistryClient { + &self.registry_client + } +} + +impl fmt::Debug for RuntimeClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RuntimeClient").finish_non_exhaustive() + } +} + +/// Unrecoverable failure of the [`RuntimeClient`]. +/// +/// Includes all error types that may occur. +/// Used to remap from [`PoolError`]. +/// +/// [`PoolError`]: deadpool::managed::PoolError +#[derive(Debug, From, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum RuntimeError { + /// All endpoints have reached the limit. + #[error("all endpoints have reached the limit")] + EndpointsLimit, + + /// Connection pool has no endpoints. + #[error("connection pool has no endpoints")] + NoEndpoints, + + /// Transport failure (from the client or server). + #[error("transport failure: {0}")] + Transport(tonic::transport::Error), +} + +/// Specialized [`Result`] alias for the [`RuntimeError`] type.
+pub type RuntimeResult = Result; diff --git a/crates/client/manager/manager_config.rs b/crates/client/manager/manager_config.rs new file mode 100644 index 0000000..3ae2863 --- /dev/null +++ b/crates/client/manager/manager_config.rs @@ -0,0 +1,58 @@ +use serde::{Deserialize, Serialize}; + +/// Configures `RuntimeManager` for one or more runtimes. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[must_use = "configs do nothing unless you use them"] +pub struct RuntimeManagerConfig { + /// Method of how a connection is recycled. + /// + /// See [`RecyclingMethod`]. + pub recycling_method: RecyclingMethod, +} + +impl RuntimeManagerConfig { + /// Returns a new [`RuntimeManagerConfig`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Overrides the value of [`RuntimeManagerConfig`]`::recycling_method`. + pub fn with_recycling_method(mut self, recycling_method: RecyclingMethod) -> Self { + self.recycling_method = recycling_method; + self + } +} + +/// Possible methods of how a connection is recycled. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub enum RecyclingMethod { + /// Only checks for an open event bus when recycling existing connections. + /// Unless you have special needs this is a safe choice. + #[default] + Fast, + /// In addition to checking for open event bus a test query is executed. + /// + /// This is slower, but guarantees that the runtime connection is ready to be used. + Verified, +} + +impl RecyclingMethod { + /// Returns a new [`RecyclingMethod`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +#[cfg(test)] +mod test { + use crate::manager::RuntimeManagerConfig; + use crate::Result; + + #[test] + fn build_default_settings() -> Result<()> { + let _ = RuntimeManagerConfig::new(); + Ok(()) + } +} diff --git a/crates/client/manager/mod.rs b/crates/client/manager/mod.rs new file mode 100644 index 0000000..31890af --- /dev/null +++ b/crates/client/manager/mod.rs @@ -0,0 +1,148 @@ +//!
[`Manager`] of [`RuntimeClient`]s. +//! + +mod instance_client; +mod manager_config; +mod runtime_endpoint; + +use std::collections::HashMap; +use std::fmt; + +use deadpool::managed::{Manager, Metrics, RecycleResult}; +use tokio::sync::Mutex; +use tonic::transport::Channel; +use uuid::Uuid; + +pub use crate::manager::instance_client::{RuntimeClient, RuntimeError, RuntimeResult}; +pub use crate::manager::manager_config::{RecyclingMethod, RuntimeManagerConfig}; +pub use crate::manager::runtime_endpoint::RuntimeEndpoint; + +/// [`Manager`] of [`RuntimeClient`]s. +pub struct RuntimeManager { + inner: Mutex, +} + +struct RuntimeManagerInner { + config: RuntimeManagerConfig, + endpoints: HashMap)>, +} + +impl RuntimeManager { + /// Returns a new [`RuntimeManager`]. + #[inline] + pub fn new(config: RuntimeManagerConfig) -> Self { + let inner = Mutex::new(RuntimeManagerInner { + endpoints: HashMap::new(), + config, + }); + + Self { inner } + } + + /// Adds the runtime endpoint into the pool. + pub(crate) async fn register_endpoint(&self, endpoint: RuntimeEndpoint) -> RuntimeResult<()> { + let mut manager = self.inner.lock().await; + + // Ensures the UUIDv4 is not duplicated. + let mut endpoint_id = Uuid::new_v4(); + while manager.endpoints.contains_key(&endpoint_id) { + endpoint_id = Uuid::new_v4(); + } + + manager.endpoints.insert(endpoint_id, (endpoint, None)); + Ok(()) + } + + /// Removes the runtime endpoint from the pool. + pub(crate) async fn unregister_endpoint(&self, endpoint_id: &Uuid) -> RuntimeResult<()> { + let mut manager = self.inner.lock().await; + // TODO: Don't remove it, but use a *disable* flag. + let _ = manager.endpoints.remove(endpoint_id); + Ok(()) + } + + /// - Returns the least used channel. + /// - Increases the counter of current connections by 1. 
+ async fn next_channel(&self) -> RuntimeResult<(Uuid, Channel)> { + let mut manager = self.inner.lock().await; + if manager.endpoints.is_empty() { + return Err(RuntimeError::NoEndpoints); + } + + // Returns the endpoint with the least of connections out of the pool + // of endpoints with no limits or if their limit was not reached yet. + let endpoint = manager + .endpoints + .iter_mut() + .filter(|(_, (r, _))| r.limit.is_none() || r.limit.is_some_and(|x| x < r.current)) + .min_by(|(_, (l, _)), (_, (r, _))| l.current.cmp(&r.current)); + + let Some((id, (runtime_endpoint, runtime_channel))) = endpoint else { + return Err(RuntimeError::EndpointsLimit); + }; + + let runtime_channel = if let Some(runtime_channel) = runtime_channel { + runtime_channel.clone() + } else { + let channel = runtime_endpoint.endpoint.connect().await?; + *runtime_channel = Some(channel.clone()); + channel + }; + + runtime_endpoint.current += 1; + Ok((*id, runtime_channel)) + } + + /// Reduces the counter of current connections by 1. + async fn drop_channel(&self, endpoint_id: &Uuid) { + let mut manager = self.inner.lock().await; + if let Some((endpoint, _)) = manager.endpoints.get_mut(endpoint_id) { + endpoint.current -= 1; + } + } + + /// Recycles a dropped instance of runtime client. + async fn test_connection(&self, _runtime_client: &mut RuntimeClient) -> RuntimeResult<()> { + let manager = self.inner.lock().await; + + // TODO: Recycle dropped connections. 
+ match manager.config.recycling_method { + RecyclingMethod::Fast => {} + RecyclingMethod::Verified => {} + } + + Ok(()) + } +} + +impl Default for RuntimeManager { + fn default() -> Self { + Self::new(RuntimeManagerConfig::default()) + } +} + +impl fmt::Debug for RuntimeManager { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RuntimeManager").finish_non_exhaustive() + } +} + +impl Manager for RuntimeManager { + type Type = RuntimeClient; + type Error = RuntimeError; + + async fn create(&self) -> Result { + let (id, channel) = self.next_channel().await?; + Ok(RuntimeClient::new(id, channel)) + } + + async fn recycle( + &self, + conn: &mut Self::Type, + _metrics: &Metrics, + ) -> RecycleResult { + self.drop_channel(&conn.endpoint_id).await; + self.test_connection(conn).await?; + Ok(()) + } +} diff --git a/crates/client/manager/runtime_endpoint.rs b/crates/client/manager/runtime_endpoint.rs new file mode 100644 index 0000000..9683769 --- /dev/null +++ b/crates/client/manager/runtime_endpoint.rs @@ -0,0 +1,110 @@ +use std::sync::LazyLock; + +use tonic::codegen::Bytes; +use tonic::transport::{Endpoint, Uri}; + +use crate::{Error, Result}; + +/// Builds and configures `HTTP/2` channels. +/// +/// Includes configuration for the manager. +#[derive(Debug, Clone)] +pub struct RuntimeEndpoint { + // TODO: Allow multiple endpoints. + // TODO: Use [`Channel::balance_list`]. + pub(crate) endpoint: Endpoint, + pub(crate) limit: Option, + pub(crate) current: u32, +} + +impl RuntimeEndpoint { + /// Returns a new [`RuntimeEndpoint`]. + pub fn new(endpoint: Endpoint) -> Self { + Self { + endpoint, + limit: None, + current: 0, + } + } + + /// Returns a new [`RuntimeEndpoint`]. + pub fn from_bytes(endpoint: Bytes) -> Result { + let endpoint = Endpoint::from_shared(endpoint)?; + let endpoint = endpoint.user_agent(USER_AGENT.as_str())?; + Ok(Self::new(endpoint)) + } + + /// Overrides the value of [`RuntimeEndpoint`]`::connection_limit`. 
+ #[inline] + pub fn connection_limit(mut self, limit: Option) -> Self { + self.limit = limit; + self + } + + /// Get the endpoint uri. + #[inline] + pub fn uri(&self) -> &Uri { + self.endpoint.uri() + } +} + +impl From for RuntimeEndpoint { + #[inline] + fn from(value: Endpoint) -> Self { + Self { + endpoint: value, + limit: None, + current: 0, + } + } +} + +impl TryFrom<&str> for RuntimeEndpoint { + type Error = Error; + + #[inline] + fn try_from(value: &str) -> Result { + Self::from_bytes(Bytes::copy_from_slice(value.as_bytes())) + } +} + +impl TryFrom for RuntimeEndpoint { + type Error = Error; + + #[inline] + fn try_from(value: Bytes) -> Result { + Self::from_bytes(value) + } +} + +// TODO: Replace with `static USER_AGENT: String`. +static USER_AGENT: LazyLock String> = LazyLock::new(format_user_agent); +fn format_user_agent() -> String { + format!( + "Axiston/{} (Rust; Ver {})", + env!("CARGO_PKG_VERSION"), + env!("CARGO_PKG_RUST_VERSION") + ) +} + +#[cfg(test)] +mod test { + use tonic::transport::Endpoint; + + use crate::manager::RuntimeEndpoint; + use crate::Result; + + #[test] + fn endpoint_from_bytes() -> Result<()> { + let addr = "https://example.com/".into(); + let _endpoint = RuntimeEndpoint::from_bytes(addr)?; + Ok(()) + } + + #[test] + fn endpoint_from_inner() -> Result<()> { + let endpoint = Endpoint::from_static("https://example.com/"); + let _endpoint = RuntimeEndpoint::new(endpoint); + Ok(()) + } +} diff --git a/crates/client/middleware/future.rs b/crates/client/middleware/future.rs new file mode 100644 index 0000000..78c670a --- /dev/null +++ b/crates/client/middleware/future.rs @@ -0,0 +1,35 @@ +//! [`Future`]s for the runtime [`Channel`]. +//! +//! 
[`Channel`]: crate::middleware::RtChannel + +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use tonic::body::BoxBody; +use tonic::codegen::http::Response; +use tonic::transport::channel::ResponseFuture; +use tonic::transport::Error; + +/// Response [`Future`] for the runtime [`Channel`]. +/// +/// [`Channel`]: crate::middleware::RtChannel +pub struct RtResponseFuture { + inner: ResponseFuture, +} + +impl RtResponseFuture { + /// Returns a new [`RtResponseFuture`]. + #[inline] + pub fn new(inner: ResponseFuture) -> Self { + Self { inner } + } +} + +impl Future for RtResponseFuture { + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::new(&mut self.inner).poll(cx) + } +} diff --git a/crates/client/middleware/mod.rs b/crates/client/middleware/mod.rs new file mode 100644 index 0000000..78a0ccf --- /dev/null +++ b/crates/client/middleware/mod.rs @@ -0,0 +1,41 @@ +//! TODO. + +pub mod future; + +use std::task::{Context, Poll}; + +use tonic::body::BoxBody; +use tonic::transport::{Channel, Error}; +use http::{Request, Response}; +use tower::Service; + +use crate::middleware::future::RtResponseFuture; + +/// TODO. +#[derive(Debug, Clone)] +pub struct RtChannel { + inner: Channel, +} + +impl RtChannel { + /// Returns a new [`RtChannel`]. 
+ #[inline] + pub fn new(inner: Channel) -> Self { + Self { inner } + } +} + +impl Service> for RtChannel { + type Response = Response; + type Error = Error; + type Future = RtResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + Service::poll_ready(&mut self.inner, cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let fut = Service::call(&mut self.inner, req); + RtResponseFuture::new(fut) + } +} diff --git a/crates/jsvm/extension/route/datatype.rs b/crates/jsvm/extension/route/datatype.rs deleted file mode 100644 index 8b13789..0000000 --- a/crates/jsvm/extension/route/datatype.rs +++ /dev/null @@ -1 +0,0 @@ - From 51916c1ff840707f9395f04a189f86889c7f267b Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Wed, 1 Jan 2025 19:00:08 +0100 Subject: [PATCH 08/11] feat(all): task --- crates/task/Cargo.toml | 3 +- crates/task/context/failure.rs | 78 ++++---- crates/task/context/mod.rs | 3 +- crates/task/context/request.rs | 48 +++-- crates/task/context/response.rs | 5 +- crates/task/handler/future.rs | 29 ++- crates/task/{routing => handler}/layers.rs | 27 ++- crates/task/handler/metric.rs | 30 +-- crates/task/handler/mod.rs | 39 ++-- crates/task/lib.rs | 88 ++++----- crates/task/routing/index.rs | 28 +-- crates/task/routing/manifest.rs | 148 ++++++++++++-- crates/task/routing/mod.rs | 214 ++++++++++++++------- crates/task/routing/route.rs | 93 +++++---- 14 files changed, 512 insertions(+), 321 deletions(-) rename crates/task/{routing => handler}/layers.rs (76%) diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 82b45ff..74fe536 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -27,7 +27,8 @@ thiserror = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -jsonschema = { workspace = true } +jsonschema = { version = "0.28", features = [] } +semver = { version = "1.0", features = ["serde"] } derive_more = { workspace = true } tracing = { workspace = true } diff --git 
a/crates/task/context/failure.rs b/crates/task/context/failure.rs index df1cc4f..f0546e3 100644 --- a/crates/task/context/failure.rs +++ b/crates/task/context/failure.rs @@ -1,7 +1,8 @@ use std::error::Error; +use jsonschema::ValidationError; use serde::{Deserialize, Serialize}; -use serde_json::Value; +use serde_json::{json, Value}; /// Unrecoverable failure duration [`TaskHandler`] execution. /// @@ -10,43 +11,33 @@ use serde_json::Value; #[error("internal handler error")] #[must_use = "errors do nothing unless you use them"] pub struct TaskError { - pub(crate) values: Value, + pub(crate) kind: TaskErrorKind, + #[serde(skip)] + pub(crate) error: Option>, + pub(crate) values: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "errors do nothing unless you use them"] +pub enum TaskErrorKind { + /// Task wih a requested identifier was not found. + NotFound, + /// Request or response schema validation failed. + Schema, } impl TaskError { /// Returns a new [`TaskError`]. #[inline] - pub fn new(values: Value) -> Self + pub fn new(kind: TaskErrorKind, error: E) -> Self where - E: Error + 'static, + E: Into>, { - Self { values } - } - - /// Returns a new [`TaskErrorBuilder`]. - #[inline] - pub fn builder() -> TaskErrorBuilder { - TaskErrorBuilder::new() - } -} - -/// Specialized [`Result`] alias for the [`TaskError`] type. -pub type TaskResult = Result; - -/// [`TaskHandler`] service error builder. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Clone)] -#[must_use = "errors do nothing unless you serialize them"] -pub struct TaskErrorBuilder { - values: Option, -} - -impl TaskErrorBuilder { - /// Returns a new [`TaskErrorBuilder`]. - #[inline] - pub fn new() -> Self { - Self { values: None } + Self { + kind, + error: Some(error.into()), + values: None, + } } /// Overrides the default value of [`TaskError`]`::values`. 
@@ -55,24 +46,33 @@ impl TaskErrorBuilder { self.values = Some(values); self } +} - /// Returns a new [`TaskError`]. - pub fn build(self) -> TaskError { - TaskError { - values: self.values.unwrap_or_default(), - } +impl<'a> From> for TaskError { + fn from(value: ValidationError<'a>) -> Self { + Self::new( + TaskErrorKind::Schema, + "request or response schema validation failed", + ) + .with_values(json!({ + "instance": value.instance.into_owned(), + })) } } +/// Specialized [`Result`] alias for the [`TaskError`] type. +pub type TaskResult = Result; + #[cfg(test)] mod test { use serde_json::Value; - use crate::context::TaskError; + use crate::context::{TaskError, TaskErrorKind, TaskResult}; #[test] - fn build_empty_error() -> crate::Result<()> { - let _error = TaskError::builder().with_values(Value::default()).build(); + fn build_empty_error() -> TaskResult<()> { + let _error = TaskError::new(TaskErrorKind::NotFound, "requested entity was not found") + .with_values(Value::default()); Ok(()) } diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs index 116796a..cb99aa0 100644 --- a/crates/task/context/mod.rs +++ b/crates/task/context/mod.rs @@ -6,12 +6,11 @@ pub mod builders { //! [`TaskRequest`]: crate::context::TaskRequest //! 
[`TaskResponse`]: crate::context::TaskResponse - pub use super::failure::TaskErrorBuilder; pub use super::request::TaskRequestBuilder; pub use super::response::TaskResponseBuilder; } -pub use crate::context::failure::{TaskError, TaskResult}; +pub use crate::context::failure::{TaskError, TaskErrorKind, TaskResult}; pub use crate::context::request::TaskRequest; pub use crate::context::response::TaskResponse; diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs index bbf4128..4891f84 100644 --- a/crates/task/context/request.rs +++ b/crates/task/context/request.rs @@ -4,7 +4,7 @@ use derive_more::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::routing::layers::Layers; +use crate::handler::Layers; /// Serializable [`TaskHandler`] service request. /// @@ -16,27 +16,29 @@ pub struct TaskRequest { #[deref_mut] inner: T, + pub(crate) index: String, + pub(crate) layers: Option, pub(crate) inputs: Value, pub(crate) secrets: Value, - pub(crate) layers: Layers, } impl TaskRequest { /// Returns a new [`TaskRequest`]. #[inline] - pub fn new(inner: T) -> Self { + pub fn new(index: &str, inner: T) -> Self { Self { inner, + index: index.to_owned(), + layers: None, inputs: Value::default(), secrets: Value::default(), - layers: Layers::new(), } } /// Returns a new [`TaskRequestBuilder`]. #[inline] - pub fn builder(inner: T) -> TaskRequestBuilder { - TaskRequestBuilder::new(inner) + pub fn builder(index: &str, inner: T) -> TaskRequestBuilder { + TaskRequestBuilder::new(index, inner) } /// Returns the inner data. @@ -44,13 +46,25 @@ impl TaskRequest { pub fn into_inner(self) -> T { self.inner } + + /// Applies the default [`Layers`] if it has none. 
+ pub(crate) fn apply_default_layers(&mut self, layers: Option<&Layers>) { + if self.layers.is_some() { + return; + } + + // self.layers.is_none() && + if let Some(layers) = layers { + self.layers.replace(layers.clone()); + } + } } impl fmt::Debug for TaskRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TaskRequest") .field("inputs", &self.inputs) - .field("secrets", &self.secrets) + .field("secrets", &"*****") .finish_non_exhaustive() } } @@ -62,20 +76,22 @@ impl fmt::Debug for TaskRequest { #[must_use = "requests do nothing unless you serialize them"] pub struct TaskRequestBuilder { inner: T, + index: String, + layers: Option, inputs: Option, secrets: Option, - layers: Option, } impl TaskRequestBuilder { /// Returns a new [`TaskRequestBuilder`]. #[inline] - pub fn new(inner: T) -> Self { + pub fn new(index: &str, inner: T) -> Self { Self { inner, + index: index.to_owned(), + layers: None, inputs: None, secrets: None, - layers: None, } } @@ -104,9 +120,10 @@ impl TaskRequestBuilder { pub fn build(self) -> TaskRequest { TaskRequest { inner: self.inner, + index: self.index, + layers: self.layers, inputs: self.inputs.unwrap_or_default(), secrets: self.secrets.unwrap_or_default(), - layers: self.layers.unwrap_or_default(), } } } @@ -115,13 +132,12 @@ impl TaskRequestBuilder { mod test { use serde_json::Value; - use crate::context::TaskRequest; - use crate::routing::layers::Layers; - use crate::Result; + use crate::context::{TaskRequest, TaskResult}; + use crate::routing::Layers; #[test] - fn build_empty_request() -> Result<()> { - let _request = TaskRequest::builder(5) + fn build_empty_request() -> TaskResult<()> { + let _request = TaskRequest::builder("builtin0", 5) .with_inputs(Value::default()) .with_secrets(Value::default()) .with_layers(Layers::new()) diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs index 84b9360..b7bf86a 100644 --- a/crates/task/context/response.rs +++ 
b/crates/task/context/response.rs @@ -101,11 +101,10 @@ impl TaskResponseBuilder { mod test { use serde_json::Value; - use crate::context::TaskResponse; - use crate::Result; + use crate::context::{TaskResponse, TaskResult}; #[test] - fn build_empty_response() -> Result<()> { + fn build_empty_response() -> TaskResult<()> { let _response = TaskResponse::builder(5) .with_outputs(Value::default()) .with_metrics(Value::default()) diff --git a/crates/task/handler/future.rs b/crates/task/handler/future.rs index e0be0ea..5cbbbcb 100644 --- a/crates/task/handler/future.rs +++ b/crates/task/handler/future.rs @@ -1,4 +1,4 @@ -//! Futures types for [`TaskHandler`]s. +//! [`Future`] types for [`TaskHandler`]s. //! //! [`TaskHandler`]: crate::handler::TaskHandler @@ -10,19 +10,19 @@ use futures::future::BoxFuture; use futures::FutureExt; use pin_project_lite::pin_project; -use crate::context::{TaskError, TaskResponse}; -use crate::handler::metric::LockTaskMetrics; +use crate::context::{TaskResponse, TaskResult}; +use crate::handler::metric::TaskMetricsLock; pin_project! { /// Opaque [`Future`] return type for [`TaskHandler::call`]. /// - /// Opaque `futures::`[`BoxFuture`]. + /// Contains a single `futures::`[`BoxFuture`]. /// /// [`TaskHandler::call`]: crate::context::TaskHandler #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct TaskFuture { - #[pin] fut: BoxFuture<'static, Result, TaskError>>, - metrics: Option, + #[pin] fut: BoxFuture<'static, TaskResult>>, + metrics: Option, } } @@ -31,7 +31,7 @@ impl TaskFuture { #[inline] pub fn new(fut: F) -> Self where - F: Future, TaskError>> + Sized + Send + 'static, + F: Future>> + Sized + Send + 'static, { Self { fut: fut.boxed(), @@ -41,9 +41,9 @@ impl TaskFuture { /// Returns a new [`TaskFuture`]. 
#[inline] - pub fn with_metrics(fut: F, metrics: LockTaskMetrics) -> Self + pub fn with_metrics(fut: F, metrics: TaskMetricsLock) -> Self where - F: Future, TaskError>> + Sized + Send + 'static, + F: Future>> + Sized + Send + 'static, { Self { fut: fut.boxed(), @@ -52,15 +52,15 @@ impl TaskFuture { } } -impl From, TaskError>>> for TaskFuture { +impl From>>> for TaskFuture { #[inline] - fn from(fut: BoxFuture<'static, Result, TaskError>>) -> Self { + fn from(fut: BoxFuture<'static, TaskResult>>) -> Self { Self { fut, metrics: None } } } impl Future for TaskFuture { - type Output = Result, TaskError>; + type Output = TaskResult>; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -71,12 +71,11 @@ impl Future for TaskFuture { #[cfg(test)] mod test { - use crate::context::TaskResponse; + use crate::context::{TaskResponse, TaskResult}; use crate::handler::future::TaskFuture; - use crate::Result; #[test] - fn from_async_block() -> Result<()> { + fn from_async_block() -> TaskResult<()> { let fut = async move { Ok(TaskResponse::new(5)) }; let _fut = TaskFuture::new(fut); diff --git a/crates/task/routing/layers.rs b/crates/task/handler/layers.rs similarity index 76% rename from crates/task/routing/layers.rs rename to crates/task/handler/layers.rs index 0aba0a6..8fe8553 100644 --- a/crates/task/routing/layers.rs +++ b/crates/task/handler/layers.rs @@ -4,14 +4,14 @@ use serde::{Deserialize, Serialize}; /// TODO. #[derive(Debug, Default, Clone)] -pub struct LayerCompose { - layers: Option, +pub struct LayerCompose<'a> { + layers: Option<&'a Layers>, } -impl LayerCompose { +impl<'a> LayerCompose<'a> { /// Returns a new [`LayerCompose`]. 
#[inline] - pub fn new(layers: Layers) -> Self { + pub fn new(layers: &'a Layers) -> Self { Self { layers: Some(layers), } @@ -78,20 +78,17 @@ impl LayersBuilder { #[cfg(test)] mod test { - use crate::routing::layers::{LayerCompose, Layers, LayersBuilder}; - use crate::Result; + use crate::context::TaskResult; + use crate::handler::layers::{LayerCompose, Layers}; #[test] - fn with_default_layers() -> Result<()> { - let config = Layers::new(); - let _compose = LayerCompose::new(config); - Ok(()) - } + fn from_layers_builder() -> TaskResult<()> { + let layers = Layers::builder() + .with_retry_policy(()) + .with_timeout_policy(()) + .build(); - #[test] - fn from_layers_builder() -> Result<()> { - let config = LayersBuilder::new().build(); - let _compose = LayerCompose::new(config); + let _compose = LayerCompose::new(&layers); Ok(()) } } diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs index 7ecc4d9..c8c1b45 100644 --- a/crates/task/handler/metric.rs +++ b/crates/task/handler/metric.rs @@ -1,4 +1,4 @@ -//! [`Load`] metric types for [`TaskHandler`]s. +//! `tower::`[`Load`] metric types for [`TaskHandler`]s. //! //! [`Load`]: tower::load::Load //! [`TaskHandler`]: crate::handler::TaskHandler @@ -7,15 +7,20 @@ use std::sync::{Arc, Mutex}; use serde::{Deserialize, Serialize}; -/// Reference-counting utility for [`TaskMetrics`]. +/// Reference-counting wrapper for [`TaskMetrics`]. +/// +/// Use by [`TaskHandler`]s and [`TaskFuture`]s. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +/// [`TaskFuture`]: crate::handler::future::TaskFuture #[derive(Debug, Default, Clone)] #[must_use = "metrics do nothing unless you serialize them"] -pub struct LockTaskMetrics { +pub struct TaskMetricsLock { inner: Arc>, } -impl LockTaskMetrics { - /// Returns a new [`LockTaskMetrics`]. +impl TaskMetricsLock { + /// Returns a new [`TaskMetricsLock`]. 
#[inline] pub fn new(metrics: TaskMetrics) -> Self { Self { @@ -54,18 +59,13 @@ impl TaskMetrics { #[cfg(test)] mod test { - use crate::handler::metric::{LockTaskMetrics, TaskMetrics}; - use crate::Result; - - #[test] - fn metrics_lock() -> Result<()> { - let _metrics = LockTaskMetrics::default(); - Ok(()) - } + use crate::context::TaskResult; + use crate::handler::metric::TaskMetricsLock; #[test] - fn default_metrics() -> Result<()> { - let _metrics = TaskMetrics::new(); + fn metrics_lock() -> TaskResult<()> { + let metrics_lock = TaskMetricsLock::default(); + let _metrics = metrics_lock.snapshot(); Ok(()) } } diff --git a/crates/task/handler/mod.rs b/crates/task/handler/mod.rs index 8a39bab..32e19b7 100644 --- a/crates/task/handler/mod.rs +++ b/crates/task/handler/mod.rs @@ -10,9 +10,11 @@ use tower::{Layer, Service, ServiceBuilder}; use crate::context::{TaskError, TaskRequest, TaskResponse}; use crate::handler::future::TaskFuture; -use crate::handler::metric::{LockTaskMetrics, TaskMetrics}; +pub use crate::handler::layers::{Layers, LayersBuilder}; +use crate::handler::metric::{TaskMetrics, TaskMetricsLock}; pub mod future; +mod layers; pub mod metric; /// Unified `tower::`[`Service`] for executing tasks. @@ -21,7 +23,7 @@ pub mod metric; #[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] pub struct TaskHandler { inner: BoxCloneSyncService, TaskResponse, TaskError>, - metrics: LockTaskMetrics, + metrics: TaskMetricsLock, } impl TaskHandler { @@ -36,13 +38,13 @@ impl TaskHandler { S::Error: Into + 'static, S::Future: Send + 'static, { - Self::with_metrics(inner, LockTaskMetrics::default()) + Self::with_metrics(inner, TaskMetricsLock::default()) } - /// Returns a new [`TaskHandler`] with provided [`LockTaskMetrics`]. + /// Returns a new [`TaskHandler`] with provided [`TaskMetricsLock`]. /// - /// Allows to share [`LockTaskMetrics`] and the inner [`TaskMetrics`]. 
- pub fn with_metrics(inner: S, metrics: LockTaskMetrics) -> Self + /// Allows to share [`TaskMetricsLock`] and the inner [`TaskMetrics`]. + pub fn with_metrics(inner: S, metrics: TaskMetricsLock) -> Self where T: 'static, U: 'static, @@ -102,8 +104,8 @@ impl fmt::Debug for TaskHandler { impl Service> for TaskHandler where - T: 'static, - U: 'static, + T: 'static + Send, + U: 'static + Send, { type Response = TaskResponse; type Error = TaskError; @@ -116,7 +118,15 @@ where #[inline] fn call(&mut self, req: TaskRequest) -> Self::Future { - TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()) + let Some(layers) = req.layers.as_ref() else { + return TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()); + }; + + // { let compose = LayerCompose::new(layers); } + // TODO: Apply layers here. + let mut inner_svc = self.inner.clone(); + let fut = async move { inner_svc.call(req).await }; + TaskFuture::with_metrics(fut, self.metrics.clone()) } } @@ -131,14 +141,14 @@ impl Load for TaskHandler { /// `tower::`[`Layer`] that produces a [`TaskHandler`] services. pub struct TaskHandlerLayer { - metrics: LockTaskMetrics, + metrics: TaskMetricsLock, inner: PhantomData<(Req, T, U)>, } impl TaskHandlerLayer { /// Returns a new [`TaskHandlerLayer`]. 
#[inline] - pub fn new(metrics: LockTaskMetrics) -> Self { + pub fn new(metrics: TaskMetricsLock) -> Self { Self { metrics, inner: PhantomData, @@ -150,7 +160,7 @@ impl Default for TaskHandlerLayer { #[inline] fn default() -> Self { Self { - metrics: LockTaskMetrics::default(), + metrics: TaskMetricsLock::default(), inner: PhantomData, } } @@ -180,21 +190,20 @@ mod test { use crate::context::{TaskError, TaskRequest, TaskResponse}; use crate::handler::{TaskHandler, TaskHandlerLayer}; - use crate::Result; async fn handle(request: TaskRequest) -> Result, TaskError> { Ok(TaskResponse::new(request.into_inner())) } #[test] - fn service_compose() -> Result<()> { + fn service_compose() -> Result<(), TaskError> { let inner = service_fn(handle); let _service = TaskHandler::new(inner); Ok(()) } #[test] - fn service_builder() -> Result<()> { + fn service_builder() -> Result<(), TaskError> { let _service = ServiceBuilder::new() .layer(TaskHandlerLayer::default()) .service(service_fn(handle)); diff --git a/crates/task/lib.rs b/crates/task/lib.rs index 2e33bbc..f8cfe3e 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -3,70 +3,54 @@ #![doc = include_str!("./README.md")] //! ```rust -//! use axiston_rt_task::routing::Router; -//! use axiston_rt_task::Result; +//! use tower::{ServiceBuilder, service_fn}; +//! +//! use axiston_rt_task::context::{TaskRequest, TaskResponse, TaskResult}; +//! use axiston_rt_task::handler::{TaskHandlerLayer, TaskHandler, Layers}; +//! use axiston_rt_task::routing::manifest::{TaskManifest, ServiceManifest}; +//! use axiston_rt_task::Router; +//! +//! async fn handler(request: TaskRequest) -> TaskResult> { +//! Ok(TaskResponse::new(request.into_inner())) +//! } +//! +//! fn main() -> TaskResult<()> { +//! let service_manifest = ServiceManifest::new("service"); +//! let task_manifest = TaskManifest::new("task"); +//! +//! let task_handler: TaskHandler = ServiceBuilder::new() +//! .layer(TaskHandlerLayer::default()) +//! 
.service(service_fn(handler)); +//! +//! let layers = Layers::builder().build(); +//! let router = Router::default() +//! .with_layers(layers) +//! .with_service(service_manifest) +//! .with_route(task_manifest, task_handler); //! -//! fn main() -> Result<()> { -//! let router: Router = Router::default(); //! Ok(()) //! } //! ``` -use std::borrow::Cow; use std::collections::HashMap; -use derive_more::From; -use jsonschema::ValidationError; - -use crate::routing::index::{RouteIndex, ServiceIndex}; -use crate::routing::manifest::{RouteManifest, ServiceManifest}; +use crate::routing::index::{ServiceIndex, TaskIndex}; +use crate::routing::manifest::{ServiceManifest, TaskManifest}; +pub use crate::routing::Router; pub mod context; pub mod handler; pub mod routing; -/// TODO. -#[derive(Debug)] -pub struct Registry { - services: HashMap, - routes: HashMap, -} - -/// Unrecoverable failure of the [`Router`]. +/// Lists all registered services and tasks. /// -/// Includes all error types that may occur. +/// Also see [`Router::as_registry`]. /// -/// [`Router`]: routing::Router -#[derive(Debug, thiserror::Error, From)] -#[must_use = "errors do nothing unless you use them"] -pub enum Error { - /// Route with the index not found. - #[error("index not found")] - Index(RouteIndex), - /// Task validation failure. - #[from(ignore)] - #[error("task validation failure: {0}")] - Validation(ValidationError<'static>), - /// Task execution failure. 
- #[from(ignore)] - #[error("task execution failure: {0}")] - Execution(#[from] context::TaskError), -} - -impl<'a> From> for Error { - fn from(validation_error: ValidationError<'a>) -> Self { - let validation_error = ValidationError { - instance: Cow::Owned(validation_error.instance.into_owned()), - kind: validation_error.kind, - instance_path: validation_error.instance_path, - schema_path: validation_error.schema_path, - }; - - Self::Validation(validation_error) - } +/// [`Router::as_registry`]: routing::Router::as_registry +#[derive(Debug)] +pub struct Registry { + /// List of all registered services. + pub services: HashMap, + /// List of all registered tasks. + pub tasks: HashMap, } - -/// Specialized [`Result`] alias for the [`Error`] type. -/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs index 639c156..e1ee8e3 100644 --- a/crates/task/routing/index.rs +++ b/crates/task/routing/index.rs @@ -1,4 +1,4 @@ -//! [`RouteIndex`] and [`ServiceIndex`]. +//! [`TaskIndex`] and [`ServiceIndex`]. use derive_more::{Deref, DerefMut}; use ecow::EcoString; @@ -15,7 +15,7 @@ pub struct ServiceIndex { impl ServiceIndex { /// Returns a new [`ServiceIndex`]. #[inline] - pub fn new>(inner: S) -> Self { + pub fn new(inner: impl AsRef) -> Self { let inner = EcoString::from(inner.as_ref()); Self { inner } } @@ -27,19 +27,19 @@ impl ServiceIndex { } } -/// Opaque and unique [`Route`] identifier. +/// Opaque and unique [`TaskHandler`] identifier. /// -/// [`Route`]: crate::routing::Route +/// [`TaskHandler`]: crate::handler::TaskHandler #[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] #[must_use = "indexes do nothing unless you serialize them"] -pub struct RouteIndex { +pub struct TaskIndex { inner: EcoString, } -impl RouteIndex { - /// Returns a new [`RouteIndex`]. +impl TaskIndex { + /// Returns a new [`TaskIndex`]. 
#[inline] - pub fn new>(inner: S) -> Self { + pub fn new(inner: impl AsRef) -> Self { let inner = EcoString::from(inner.as_ref()); Self { inner } } @@ -50,15 +50,3 @@ impl RouteIndex { self.inner.clone() } } - -#[cfg(test)] -mod test { - use crate::routing::RouteIndex; - use crate::Result; - - #[test] - fn index_from_string() -> Result<()> { - let _ = RouteIndex::new("index"); - Ok(()) - } -} diff --git a/crates/task/routing/manifest.rs b/crates/task/routing/manifest.rs index 19ecf65..adf1b93 100644 --- a/crates/task/routing/manifest.rs +++ b/crates/task/routing/manifest.rs @@ -1,40 +1,152 @@ -//! [`RouteManifest`] and [`ServiceManifest`]. +//! [`TaskManifest`] and [`ServiceManifest`]. +use jsonschema::{draft202012, Validator}; +use semver::Version; use serde::{Deserialize, Serialize}; use serde_json::Value; -/// TODO. +use crate::context::TaskResult; + +/// Metadata and properties of a single service. #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] pub struct ServiceManifest { + /// Unique service identifier. + #[serde(rename = "service")] pub service_id: String, + /// Currently used service's version. + #[serde(rename = "version")] + pub version: Option, + + /// Stabilization flag and reason. + #[serde(rename = "stabilized")] + pub stabilized: Option, + /// Deprecation flag and reason. + #[serde(rename = "deprecated")] + pub deprecated: Option, } impl ServiceManifest { - /// Returns a new [`ServiceManifest`]. - pub fn new() -> Self { - todo!() + /// Creates a new [`ServiceManifest`] with the specified service identifier. + pub fn new(id: &str) -> Self { + Self { + service_id: id.to_owned(), + version: None, + stabilized: None, + deprecated: None, + } } } -/// TODO. +/// Metadata and properties of a single task. 
#[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] -pub struct RouteManifest { +pub struct TaskManifest { + /// Unique task identifier. + #[serde(rename = "task")] pub route_id: String, - // version, deprecation notice - pub service_id: String, - pub inputs_schema: Value, - pub outputs_schema: Value, - pub errors_schema: Value, + /// Unique service identifier. + #[serde(rename = "service")] + pub service_id: Option, + /// Currently used task version. + #[serde(rename = "version")] + pub version: Option, + + /// JSON Schema used for i/o validation. + #[serde(rename = "schemas")] + pub schemas: Option, + + /// Stabilization flag and reason. + #[serde(rename = "stabilized")] + pub stabilized: Option, + /// Deprecation flag and reason. + #[serde(rename = "deprecated")] + pub deprecated: Option, +} + +/// Stabilization or deprecation notices with metadata. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct Notice { + /// The version since the notice was applied. + pub since_version: Option, + /// Reason for the change. + pub change_reason: Option, } -impl RouteManifest { - /// Returns a new [`RouteManifest`]. - pub fn new() -> Self { - todo!() +/// Schemas used for input, output, and error validation. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "manifests do nothing unless you serialize them"] +pub struct TaskSchemas { + pub inputs_schema: Option, + pub outputs_schema: Option, + pub errors_schema: Option, +} + +impl TaskManifest { + /// Creates a new [`TaskManifest`] with the specified task identifier. + pub fn new(id: &str) -> Self { + Self { + route_id: id.to_owned(), + service_id: None, + version: None, + schemas: None, + stabilized: None, + deprecated: None, + } + } + + /// Creates schema validators for input, output, and error schemas. 
+ /// + /// Returns a `TaskSchemaValidators` instance containing compiled validators, + /// or an error if any schema compilation fails. + pub(crate) fn create_schema_validators(&self) -> TaskResult { + let Some(schemas) = &self.schemas else { + return Ok(TaskSchemaValidators::default()); + }; + + let inputs_schema = schemas.inputs_schema.as_ref(); + let outputs_schema = schemas.outputs_schema.as_ref(); + let errors_schema = schemas.errors_schema.as_ref(); + + Ok(TaskSchemaValidators { + inputs: inputs_schema.map(draft202012::new).transpose()?, + outputs: outputs_schema.map(draft202012::new).transpose()?, + errors: errors_schema.map(draft202012::new).transpose()?, + }) } } -#[cfg(test)] -mod test {} +/// Validators for task schemas to validate input, output, and error structures. +#[derive(Debug, Default)] +pub(crate) struct TaskSchemaValidators { + pub inputs: Option, + pub outputs: Option, + pub errors: Option, +} + +impl TaskSchemaValidators { + pub fn validate_inputs(&self, values: &Value) -> TaskResult<()> { + let Some(schema) = self.inputs.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } + + pub fn validate_outputs(&self, values: &Value) -> TaskResult<()> { + let Some(schema) = self.outputs.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } + + pub fn validate_errors(&self, values: &Value) -> TaskResult<()> { + let Some(schema) = self.errors.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } +} diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs index c70ae35..a19e416 100644 --- a/crates/task/routing/mod.rs +++ b/crates/task/routing/mod.rs @@ -1,41 +1,41 @@ -//! [`Router`], [`RouteIndex`] and [`compose`] utilities. -//! -//! [`compose`]: Layers +//! [`Router`], [`TaskIndex`] and [`manifest`]s. 
use std::collections::HashMap; use std::fmt; use std::sync::Arc; -use crate::context::{TaskRequest, TaskResponse}; -use crate::routing::index::{RouteIndex, ServiceIndex}; -use crate::routing::layers::{LayerCompose, Layers}; -use crate::routing::manifest::{RouteManifest, ServiceManifest}; -pub use crate::routing::route::Route; -use crate::{Registry, Result}; +use tower::ServiceExt; + +use crate::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse}; +use crate::handler::{Layers, TaskHandler}; +use crate::routing::index::{ServiceIndex, TaskIndex}; +use crate::routing::manifest::{ServiceManifest, TaskManifest}; +use crate::routing::route::Route; +use crate::Registry; pub mod index; -pub mod layers; pub mod manifest; mod route; -/// TODO. +/// Request data alias for a default [`Router`]. pub type RouteRequest = (); -/// TODO. +/// Response data alias for a default [`Router`]. pub type RouteResponse = (); -/// TODO. +/// Provides a mechanism for managing and executing tasks within a system. +/// +/// It allows the registration of services and tasks using manifests, supports middleware [`Layers`] +/// for extensibility, and routes incoming [`TaskRequest`] to the appropriate [`TaskHandler`]s. #[must_use = "routes do nothing unless you use them"] pub struct Router { inner: Arc>, } -// TODO: Should route manifest be inside of route handler? - struct RouterInner { - layer_compose: Option, + layer_compose: Option, service_manifests: HashMap, - routes: HashMap>, + routes: HashMap>, } impl Router { @@ -43,7 +43,7 @@ impl Router { #[inline] pub fn new(layers: Layers) -> Self { let router_inner = RouterInner { - layer_compose: Some(LayerCompose::new(layers)), + layer_compose: Some(layers), service_manifests: HashMap::default(), routes: HashMap::new(), }; @@ -53,81 +53,124 @@ impl Router { } } - /// Overrides the default value of [`Router`]`::layer_compose`. 
- pub fn with_layers(self, layers: Layers) -> Self { + fn inspect_inner_mut(self, f: F) -> Self + where + F: FnOnce(&mut RouterInner), + { let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); - inner.layer_compose = Some(LayerCompose::new(layers)); - + f(&mut inner); Self { inner: Arc::new(inner), } } - /// Registers another [`ServiceManifest`] by its [`ServiceIndex`]. - pub fn with_service( - self, - service_index: ServiceIndex, - service_manifest: ServiceManifest, - ) -> Self { - let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); - let _ = inner - .service_manifests - .insert(service_index, service_manifest); + /// Overrides the default value of [`Router`]`::layer_compose`. + pub fn with_layers(self, layers: Layers) -> Self { + self.inspect_inner_mut(|x| { + x.layer_compose = Some(layers); + }) + } - Self { - inner: Arc::new(inner), - } + /// Registers another [`ServiceManifest`] by its [`ServiceIndex`]. + pub fn with_service(self, service_manifest: impl Into) -> Self { + self.with_service_impl(service_manifest.into()) } - /// Registers another [`Route`] by its [`RouteIndex`]. - pub fn with_route(self, route_index: RouteIndex, route: Route) -> Self { - let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); - let _ = inner.routes.insert(route_index, route); + fn with_service_impl(self, service_manifest: ServiceManifest) -> Self { + self.inspect_inner_mut(|x| { + let service_index = ServiceIndex::new(&service_manifest.service_id); + x.service_manifests.insert(service_index, service_manifest); + }) + } - Self { - inner: Arc::new(inner), - } + /// Registers another [`TaskHandler`] by its [`TaskIndex`]. + pub fn with_route( + self, + task_manifest: impl Into, + task_handler: impl Into>, + ) -> Self { + self.with_route_impl(task_handler.into(), task_manifest.into()) } - // TODO: Method to return the whole registry. - // TODO: Method to execute a single route. 
+ fn with_route_impl(self, task_handler: TaskHandler, task_manifest: TaskManifest) -> Self { + self.inspect_inner_mut(move |x| { + let route_index = TaskIndex::new(&task_manifest.route_id); + let route = Route::new(task_handler, task_manifest) + .expect("should not provide malformed manifests"); + x.routes.insert(route_index, route); + }) + } /// Returns the reference to the [`ServiceManifest`]. - pub fn get_service_manifest(&self, service_index: &ServiceIndex) -> Option<&ServiceManifest> { + pub fn find_service_manifest(&self, service_index: &ServiceIndex) -> Option<&ServiceManifest> { self.inner.service_manifests.get(service_index) } - /// Returns the reference to the [`RouteManifest`]. - pub fn get_route_manifest(&self, route_index: &RouteIndex) -> Option<&RouteManifest> { + /// Returns the reference to the [`TaskManifest`]. + pub fn find_task_manifest(&self, route_index: &TaskIndex) -> Option<&TaskManifest> { let route = self.inner.routes.get(route_index); route.map(|x| &x.route_handler.route_manifest) } - /// Returns all [`ServiceManifest`]s and [`RouteManifest`]s. - pub fn get_registry(&self) -> Registry { + /// Returns a new [`Registry`]. + /// + /// # Notes + /// + /// - Clones every [`ServiceManifest`]s and [`TaskManifest`]s. + pub fn as_registry(&self) -> Registry { let routes = self.inner.routes.iter(); Registry { services: self.inner.service_manifests.clone(), - routes: routes + tasks: routes .map(|(i, r)| (i.clone(), r.route_handler.route_manifest.clone())) .collect(), } } - /// TODO. - pub async fn route( + /// Executes the requested task handler with a given request. + /// + /// # Errors + /// + /// - Returns an error if the task wasn't found in the registry. + /// - Returns an error if the requested handler returns an error. 
+ pub async fn route_task( &self, - route_index: &RouteIndex, - task_request: TaskRequest, - ) -> Result> + mut task_request: TaskRequest, + ) -> Result, TaskError> where - T: 'static, - U: 'static, + T: 'static + Send, + U: 'static + Send, { - let route_handler = self.inner.routes.get(route_index); - let route_handler = route_handler.ok_or_else(|| route_index.clone())?; + let route_index = TaskIndex::new(&task_request.index); + let route_handler = self.inner.routes.get(&route_index); + let route_handler = route_handler.ok_or_else(|| { + TaskError::new( + TaskErrorKind::NotFound, + "requested task identifier was not found", + ) + })?; + let layer_compose = self.inner.layer_compose.as_ref(); - route_handler.route(task_request, layer_compose).await + task_request.apply_default_layers(layer_compose); + route_handler.route(task_request).await + } + + /// Executes the provided task handler with a given request. + /// + /// # Errors + /// + /// - Returns an error if the provided handler returns an error. 
+ pub async fn route_task_with_handler( + &self, + task_request: TaskRequest, + task_handler: TaskHandler, + ) -> Result, TaskError> + where + T: 'static + Send, + U: 'static + Send, + { + let fut = task_handler.oneshot(task_request); + fut.await.map_err(From::from) } } @@ -139,14 +182,18 @@ impl fmt::Debug for Router { impl Default for Router { fn default() -> Self { - let router_handler = RouterInner { + Self { + inner: Arc::new(RouterInner::default()), + } + } +} + +impl Default for RouterInner { + fn default() -> Self { + Self { layer_compose: None, service_manifests: HashMap::default(), routes: HashMap::default(), - }; - - Self { - inner: Arc::new(router_handler), } } } @@ -171,12 +218,41 @@ impl Clone for RouterInner { #[cfg(test)] mod test { - use crate::routing::{Layers, Router}; - use crate::Result; + use tower::{service_fn, ServiceBuilder}; + + use crate::context::{TaskError, TaskRequest, TaskResponse}; + use crate::handler::{TaskHandler, TaskHandlerLayer}; + use crate::routing::manifest::ServiceManifest; + use crate::routing::{Layers, Router, TaskManifest}; + + async fn handle_builtin0(request: TaskRequest) -> Result, TaskError> { + Ok(TaskResponse::new(request.into_inner())) + } + + async fn handle_builtin1(request: TaskRequest) -> Result, TaskError> { + Ok(TaskResponse::new(request.into_inner())) + } #[test] - fn build_default_router() -> Result<()> { - let _router: Router = Router::new(Layers::new()); + fn build_default_router() -> Result<(), TaskError> { + let service0_manifest = ServiceManifest::new("service0"); + + let builtin0_manifest = TaskManifest::new("builtin0"); + let builtin0_service: TaskHandler = ServiceBuilder::new() + .layer(TaskHandlerLayer::default()) + .service(service_fn(handle_builtin0)); + + let builtin1_manifest = TaskManifest::new("builtin1"); + let builtin1_service: TaskHandler = ServiceBuilder::new() + .layer(TaskHandlerLayer::default()) + .service(service_fn(handle_builtin1)); + + let router = Router::default() + 
.with_layers(Layers::new()) + .with_service(service0_manifest) + .with_route(builtin0_manifest, builtin0_service) + .with_route(builtin1_manifest, builtin1_service); + Ok(()) } } diff --git a/crates/task/routing/route.rs b/crates/task/routing/route.rs index 3a9e510..b1187f4 100644 --- a/crates/task/routing/route.rs +++ b/crates/task/routing/route.rs @@ -1,15 +1,15 @@ use std::sync::Arc; +use std::task::{Context, Poll}; -use jsonschema::{draft202012, Validator}; use tower::load::Load; use tower::Service; -use crate::context::{TaskRequest, TaskResponse}; +use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::future::TaskFuture; use crate::handler::metric::TaskMetrics; use crate::handler::TaskHandler; -use crate::routing::layers::LayerCompose; -use crate::routing::RouteManifest; -use crate::Result; +use crate::routing::manifest::TaskSchemaValidators; +use crate::routing::TaskManifest; /// TODO. #[must_use = "routes do nothing unless you use them"] @@ -17,36 +17,32 @@ pub struct Route { pub(crate) route_handler: Arc>, } -pub(crate) struct RouteHandler { +#[must_use = "routes do nothing unless you use them"] +pub struct RouteHandler { pub(crate) route_task_handler: TaskHandler, - pub(crate) route_manifest: RouteManifest, - pub(crate) inputs_schema_validator: Validator, - pub(crate) outputs_schema_validator: Validator, - pub(crate) errors_schema_validator: Validator, + pub(crate) route_manifest: TaskManifest, + pub(crate) schema_validators: TaskSchemaValidators, } impl Route { /// Returns a new [`Route`]. 
pub fn new( route_task_handler: TaskHandler, - route_manifest: RouteManifest, - ) -> Result { - let route_handler = RouteHandler { - inputs_schema_validator: draft202012::new(&route_manifest.inputs_schema)?, - outputs_schema_validator: draft202012::new(&route_manifest.outputs_schema)?, - errors_schema_validator: draft202012::new(&route_manifest.errors_schema)?, + route_manifest: TaskManifest, + ) -> Result { + let schema_validators = route_manifest.create_schema_validators()?; + let route_handler = Arc::new(RouteHandler { route_task_handler, route_manifest, - }; + schema_validators, + }); - Ok(Self { - route_handler: Arc::new(route_handler), - }) + Ok(Self { route_handler }) } /// Returns the underlying `tower::`[`Service`]. #[inline] - fn task_handler(&self) -> TaskHandler { + pub fn task_handler(&self) -> TaskHandler { self.route_handler.route_task_handler.clone() } @@ -57,35 +53,32 @@ impl Route { } /// Processes the request and returns the response asynchronously. - pub async fn route( - &self, - task_request: TaskRequest, - layer_compose: Option<&LayerCompose>, - ) -> Result> + pub async fn route(&self, task_request: TaskRequest) -> Result, TaskError> where - T: 'static, - U: 'static, + T: 'static + Send, + U: 'static + Send, { - // TODO: Apply layers. 
- // let _ = &task_request.layers; - self.route_handler - .inputs_schema_validator - .validate(&task_request.inputs)?; + .schema_validators + .validate_inputs(&task_request.inputs)?; let mut task_handler = self.route_handler.route_task_handler.clone(); match task_handler.call(task_request).await { Ok(task_response) => { self.route_handler - .outputs_schema_validator - .validate(&task_response.outputs)?; + .schema_validators + .validate_outputs(&task_response.outputs)?; + Ok(task_response) } Err(task_error) => { - self.route_handler - .errors_schema_validator - .validate(&task_error.values)?; - Err(task_error.into()) + if let Some(values) = &task_error.values { + self.route_handler + .schema_validators + .validate_errors(values)?; + } + + Err(task_error) } } } @@ -99,5 +92,23 @@ impl Clone for Route { } } -#[cfg(test)] -mod test {} +impl Service> for Route +where + T: 'static + Send, + U: 'static + Send, +{ + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + let mut handler = self.route_handler.route_task_handler.clone(); + handler.poll_ready(cx) + } + + fn call(&mut self, req: TaskRequest) -> Self::Future { + let mut handler = self.route_handler.route_task_handler.clone(); + // TODO: Validation + handler.call(req) + } +} From 736d537dd55a3d73313908c6f175c628b76f9a9b Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Wed, 1 Jan 2025 19:15:35 +0100 Subject: [PATCH 09/11] feat(all): task 2/n --- crates/task/Cargo.toml | 13 +- crates/task/context/{failure.rs => error.rs} | 16 +- crates/task/context/layers.rs | 86 +++++++++ crates/task/context/mod.rs | 6 +- crates/task/context/policies.rs | 130 ++++++++++++++ crates/task/context/request.rs | 78 +++++---- crates/task/context/response.rs | 24 +-- crates/task/handler/compose.rs | 174 +++++++++++++++++++ crates/task/handler/layers.rs | 94 ---------- crates/task/handler/metric.rs | 17 +- crates/task/handler/mod.rs | 17 +- 
crates/task/handler/retry.rs | 72 ++++++++ crates/task/lib.rs | 28 ++- crates/task/routing/index.rs | 7 +- crates/task/routing/manifest.rs | 6 +- crates/task/routing/mod.rs | 108 ++++++++---- crates/task/routing/route.rs | 115 ++++++------ 17 files changed, 734 insertions(+), 257 deletions(-) rename crates/task/context/{failure.rs => error.rs} (86%) create mode 100644 crates/task/context/layers.rs create mode 100644 crates/task/context/policies.rs create mode 100644 crates/task/handler/compose.rs delete mode 100644 crates/task/handler/layers.rs create mode 100644 crates/task/handler/retry.rs diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 74fe536..996a977 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -17,21 +17,28 @@ documentation = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] +[features] +default = ["hashbrown"] +# Enables high-performance SwissTable hash map. +hashbrown = ["dep:hashbrown"] + [lib] path = "lib.rs" [dependencies] futures = { workspace = true } pin-project-lite = { workspace = true } +tracing = { workspace = true } thiserror = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } jsonschema = { version = "0.28", features = [] } +hashbrown = { version = "0.15", optional = true, features = ["serde"] } semver = { version = "1.0", features = ["serde"] } +replace_with = { version = "0.1", features = [] } derive_more = { workspace = true } -tracing = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } ecow = { workspace = true } tower = { workspace = true } diff --git a/crates/task/context/failure.rs b/crates/task/context/error.rs similarity index 86% rename from crates/task/context/failure.rs rename to crates/task/context/error.rs index f0546e3..f106ea2 100644 --- a/crates/task/context/failure.rs +++ b/crates/task/context/error.rs @@ -4,19 +4,23 @@ use jsonschema::ValidationError; use serde::{Deserialize, Serialize}; 
use serde_json::{json, Value}; -/// Unrecoverable failure duration [`TaskHandler`] execution. +/// Unrecoverable failure during the [`TaskHandler`] execution. /// /// [`TaskHandler`]: crate::handler::TaskHandler #[derive(Debug, thiserror::Error, Serialize, Deserialize)] #[error("internal handler error")] #[must_use = "errors do nothing unless you use them"] pub struct TaskError { - pub(crate) kind: TaskErrorKind, #[serde(skip)] pub(crate) error: Option>, + + #[serde(rename = "kind")] + pub(crate) kind: TaskErrorKind, + #[serde(rename = "values")] pub(crate) values: Option, } +/// A list specifying general categories of [`TaskError`]s. #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "errors do nothing unless you use them"] pub enum TaskErrorKind { @@ -24,6 +28,14 @@ pub enum TaskErrorKind { NotFound, /// Request or response schema validation failed. Schema, + + /// TODO. + TimeoutPolicy, + /// TODO. + RetryPolicy, + + /// Unknown (type-erased) error occurred. + Unknown, } impl TaskError { diff --git a/crates/task/context/layers.rs b/crates/task/context/layers.rs new file mode 100644 index 0000000..edf818f --- /dev/null +++ b/crates/task/context/layers.rs @@ -0,0 +1,86 @@ +//! [`TaskLayers`] and its [`TaskLayersBuilder`] utility. + +use serde::{Deserialize, Serialize}; + +use crate::context::policies::{RetryPolicy, TimeoutPolicy}; +use crate::context::TaskResult; + +/// Declarative `tower::`[`Layer`]s configuration. +/// +/// [`Layer`]: tower::Layer +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[must_use = "layers do nothing unless you use them"] +pub struct TaskLayers { + #[serde(rename = "timeout")] + pub(crate) timeout_policy: Option, + #[serde(rename = "retry")] + pub(crate) retry_policy: Option, +} + +impl TaskLayers { + /// Returns a new [`TaskLayers`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns a new [`TaskLayersBuilder`]. 
+ #[inline] + pub fn builder() -> TaskLayersBuilder { + TaskLayersBuilder::new() + } +} + +/// [`TaskLayers`] builder. +#[derive(Debug, Default, Clone)] +pub struct TaskLayersBuilder { + pub(crate) timeout_policy: Option, + pub(crate) retry_policy: Option, +} + +impl TaskLayersBuilder { + /// Returns a new [`TaskLayersBuilder`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Overrides the default value of [`TaskLayersBuilder`]`::timeout_policy`. + pub fn with_timeout_policy(mut self, timeout_policy: TimeoutPolicy) -> Self { + self.timeout_policy = Some(timeout_policy); + self + } + + /// Overrides the default value of [`TaskLayersBuilder`]`::retry_policy`. + pub fn with_retry_policy(mut self, retry_policy: RetryPolicy) -> Self { + self.retry_policy = Some(retry_policy); + self + } + + /// Returns a new [`TaskLayers`]. + pub fn build(self) -> TaskResult { + Ok(TaskLayers { + timeout_policy: self.timeout_policy, + retry_policy: self.retry_policy, + }) + } +} + +#[cfg(test)] +mod test { + use std::time::Duration; + + use crate::context::layers::TaskLayers; + use crate::context::policies::{RetryPolicy, TimeoutPolicy}; + use crate::context::TaskResult; + + #[test] + fn from_layers_builder() -> TaskResult<()> { + let _layers = TaskLayers::builder() + .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) + .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) + .build()?; + + Ok(()) + } +} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs index cb99aa0..a1f1e59 100644 --- a/crates/task/context/mod.rs +++ b/crates/task/context/mod.rs @@ -10,10 +10,12 @@ pub mod builders { pub use super::response::TaskResponseBuilder; } -pub use crate::context::failure::{TaskError, TaskErrorKind, TaskResult}; +pub use crate::context::error::{TaskError, TaskErrorKind, TaskResult}; pub use crate::context::request::TaskRequest; pub use crate::context::response::TaskResponse; -mod failure; +mod error; +pub mod layers; +pub mod 
policies; mod request; mod response; diff --git a/crates/task/context/policies.rs b/crates/task/context/policies.rs new file mode 100644 index 0000000..e4a049c --- /dev/null +++ b/crates/task/context/policies.rs @@ -0,0 +1,130 @@ +//! [`RetryPolicy`] and [`TimeoutPolicy`]. + +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +/// Defines a policy for handling timeouts. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "policies do nothing unless you use them"] +pub struct TimeoutPolicy { + /// The duration after which a timeout will occur. + pub duration: Duration, + /// The action to take when a timeout occurs. + pub action: TimeoutAction, +} + +/// Specifies actions to take when a timeout occurs. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[must_use = "policies do nothing unless you use them"] +pub enum TimeoutAction { + /// Retry the operation after a timeout. + Retry, + /// Terminate the operation after a timeout (default behavior). + #[default] + Terminate, +} + +impl TimeoutPolicy { + /// Returns a new retry [`TimeoutPolicy`] with the specified timeout duration. + pub fn retry(timeout: Duration) -> Self { + Self { + duration: timeout, + action: TimeoutAction::Retry, + } + } + + /// Returns a new terminate [`TimeoutPolicy`] with the specified timeout duration. + pub fn terminate(timeout: Duration) -> Self { + Self { + duration: timeout, + action: TimeoutAction::Terminate, + } + } +} + +/// Defines a policy for handling retries. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "policies do nothing unless you use them"] +pub struct RetryPolicy { + /// The maximum number of retry attempts. + pub retries: u32, + /// The strategy to use for determining retry intervals. + pub strategy: RetryStrategy, +} + +/// Specifies strategies for calculating retry intervals. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "policies do nothing unless you use them"] +pub enum RetryStrategy { + /// Linear backoff strategy with optional jitter and max backoff duration. + Linear { + step_backoff: Duration, + max_backoff: Option, + jitter_perc: Option, + }, + /// Exponential backoff strategy with optional jitter and max backoff duration. + Exponential { + base_backoff: Duration, + max_backoff: Option, + jitter_perc: Option, + }, +} + +impl RetryPolicy { + /// Returns a new linear [`RetryPolicy`] with the specified retries and base backoff duration. + pub fn linear(retries: u32, base_backoff: Duration) -> Self { + Self { + retries, + strategy: RetryStrategy::Linear { + step_backoff: base_backoff, + max_backoff: None, + jitter_perc: None, + }, + } + } + + /// Returns a new exponential [`RetryPolicy`] with the specified retries, base backoff. + pub fn exponential(retries: u32, base_backoff: Duration) -> Self { + Self { + retries, + strategy: RetryStrategy::Exponential { + base_backoff, + max_backoff: None, + jitter_perc: None, + }, + } + } + + /// Sets the maximum backoff duration and returns the modified policy. + pub fn with_max_backoff(mut self, new_max_backoff: Duration) -> Self { + match self.strategy { + RetryStrategy::Linear { + ref mut max_backoff, + .. + } => *max_backoff = Some(new_max_backoff), + RetryStrategy::Exponential { + ref mut max_backoff, + .. + } => *max_backoff = Some(new_max_backoff), + }; + + self + } + + /// Sets the jitter percentage and returns the modified policy. + pub fn with_jitter_perc(mut self, new_jitter_perc: f64) -> Self { + match self.strategy { + RetryStrategy::Linear { + ref mut jitter_perc, + .. + } => *jitter_perc = Some(new_jitter_perc), + RetryStrategy::Exponential { + ref mut jitter_perc, + .. 
+ } => *jitter_perc = Some(new_jitter_perc), + }; + + self + } +} diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs index 4891f84..7becc5b 100644 --- a/crates/task/context/request.rs +++ b/crates/task/context/request.rs @@ -2,9 +2,10 @@ use std::fmt; use derive_more::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; -use serde_json::Value; +use serde_json::{Map, Value}; -use crate::handler::Layers; +use crate::context::layers::TaskLayers; +use crate::context::TaskResult; /// Serializable [`TaskHandler`] service request. /// @@ -16,9 +17,14 @@ pub struct TaskRequest { #[deref_mut] inner: T, - pub(crate) index: String, - pub(crate) layers: Option, + #[serde(rename = "task")] + pub(crate) task_id: String, + #[serde(skip)] + pub(crate) layers: Option, + + #[serde(rename = "inputs")] pub(crate) inputs: Value, + #[serde(rename = "secrets")] pub(crate) secrets: Value, } @@ -28,10 +34,10 @@ impl TaskRequest { pub fn new(index: &str, inner: T) -> Self { Self { inner, - index: index.to_owned(), + task_id: index.to_owned(), layers: None, - inputs: Value::default(), - secrets: Value::default(), + inputs: Value::Object(Map::new()), + secrets: Value::Object(Map::new()), } } @@ -47,8 +53,8 @@ impl TaskRequest { self.inner } - /// Applies the default [`Layers`] if it has none. - pub(crate) fn apply_default_layers(&mut self, layers: Option<&Layers>) { + /// Applies the default [`TaskLayers`] if it has none. + pub(crate) fn apply_default_layers(&mut self, layers: Option<&TaskLayers>) { if self.layers.is_some() { return; } @@ -77,7 +83,7 @@ impl fmt::Debug for TaskRequest { pub struct TaskRequestBuilder { inner: T, index: String, - layers: Option, + layers: Option, inputs: Option, secrets: Option, } @@ -95,53 +101,63 @@ impl TaskRequestBuilder { } } - /// Overrides the default value of [`TaskRequest`]`::inputs`. 
- #[inline] - pub fn with_inputs(mut self, json: Value) -> Self { - self.inputs = Some(json); + /// Adds other key/value pair into the [`TaskRequest`]`::inputs` object. + pub fn with_inputs(mut self, key: &str, value: impl Into) -> Self { + let inputs = self + .inputs + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = inputs else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); self } - /// Overrides the default value of [`TaskRequest`]`::secrets`. - #[inline] - pub fn with_secrets(mut self, json: Value) -> Self { - self.secrets = Some(json); + /// Adds other key/value pair into the [`TaskRequest`]`::secrets` object. + pub fn with_secrets(mut self, key: &str, value: impl Into) -> Self { + let inputs = self + .secrets + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = inputs else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); self } /// Overrides the default value of [`TaskRequest`]`::layers`. - #[inline] - pub fn with_layers(mut self, layers: Layers) -> Self { + pub fn with_layers(mut self, layers: TaskLayers) -> Self { self.layers = Some(layers); self } /// Returns a new [`TaskRequest`]. 
- pub fn build(self) -> TaskRequest { - TaskRequest { + pub fn build(self) -> TaskResult> { + Ok(TaskRequest { inner: self.inner, - index: self.index, + task_id: self.index, layers: self.layers, inputs: self.inputs.unwrap_or_default(), secrets: self.secrets.unwrap_or_default(), - } + }) } } #[cfg(test)] mod test { - use serde_json::Value; - + use crate::context::layers::TaskLayers; use crate::context::{TaskRequest, TaskResult}; - use crate::routing::Layers; #[test] fn build_empty_request() -> TaskResult<()> { let _request = TaskRequest::builder("builtin0", 5) - .with_inputs(Value::default()) - .with_secrets(Value::default()) - .with_layers(Layers::new()) - .build(); + .with_inputs("input0", 5) + .with_secrets("secret0", "qwerty") + .with_layers(TaskLayers::new()) + .build()?; + Ok(()) } } diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs index b7bf86a..1214a61 100644 --- a/crates/task/context/response.rs +++ b/crates/task/context/response.rs @@ -2,8 +2,11 @@ use std::fmt; use derive_more::{Deref, DerefMut}; use serde::{Deserialize, Serialize}; +use serde_json::map::Map; use serde_json::Value; +use crate::context::TaskResult; + /// Deserializable [`TaskHandler`] service response. /// /// [`TaskHandler`]: crate::handler::TaskHandler @@ -14,7 +17,9 @@ pub struct TaskResponse { #[deref_mut] inner: T, + #[serde(rename = "outputs")] pub(crate) outputs: Value, + #[serde(rename = "metrics")] pub(crate) metrics: Value, } @@ -24,8 +29,8 @@ impl TaskResponse { pub fn new(inner: T) -> Self { Self { inner, - outputs: Value::default(), - metrics: Value::default(), + outputs: Value::Object(Map::new()), + metrics: Value::Object(Map::new()), } } @@ -73,27 +78,25 @@ impl TaskResponseBuilder { } } - /// Overrides the default value of [`TaskResponse`]`::outputs`. - #[inline] + /// Adds other key/value pair into the [`TaskResponse`]`::outputs` object. 
pub fn with_outputs(mut self, values: Value) -> Self { self.outputs = Some(values); self } - /// Overrides the default value of [`TaskResponse`]`::metrics`. - #[inline] + /// Adds other key/value pair into the [`TaskResponse`]`::metrics` object. pub fn with_metrics(mut self, values: Value) -> Self { self.metrics = Some(values); self } /// Returns a new [`TaskResponse`]. - pub fn build(self) -> TaskResponse { - TaskResponse { + pub fn build(self) -> TaskResult> { + Ok(TaskResponse { inner: self.inner, outputs: self.outputs.unwrap_or_default(), metrics: self.metrics.unwrap_or_default(), - } + }) } } @@ -108,7 +111,8 @@ mod test { let _response = TaskResponse::builder(5) .with_outputs(Value::default()) .with_metrics(Value::default()) - .build(); + .build()?; + Ok(()) } } diff --git a/crates/task/handler/compose.rs b/crates/task/handler/compose.rs new file mode 100644 index 0000000..8161bad --- /dev/null +++ b/crates/task/handler/compose.rs @@ -0,0 +1,174 @@ +//! [`LayerCompose`]. + +use std::borrow::Cow; +use std::time::Duration; + +use tower::retry::backoff::{ExponentialBackoff, ExponentialBackoffMaker, MakeBackoff}; +use tower::retry::RetryLayer; +use tower::timeout::error::Elapsed; +use tower::timeout::TimeoutLayer; +use tower::util::rng::HasherRng; +use tower::util::{BoxCloneSyncService, Either, MapErrLayer}; +use tower::{BoxError, ServiceBuilder}; + +use crate::context::layers::TaskLayers; +use crate::context::policies::{RetryStrategy, TimeoutAction}; +use crate::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse}; +use crate::handler::retry::{BackoffError, BackoffPolicy}; + +/// TODO. 
+#[derive(Debug, Default, Clone)] +pub struct LayerCompose<'a> { + layers: Cow<'a, TaskLayers>, +} + +type BoxErrorHandler0 = fn(BoxError) -> TaskError; +type BoxErrorHandler1 = fn(TaskError) -> BoxError; + +type LinearRetryLayer = RetryLayer>; +type ExponentialRetryLayer = RetryLayer>; +type EitherRetryLayer = Either; + +impl<'a> LayerCompose<'a> { + /// Returns a new [`LayerCompose`]. + #[inline] + pub fn new(layers: &'a TaskLayers) -> Self { + Self { + layers: Cow::Borrowed(layers), + } + } + + fn error_handle_pre_layer(&self) -> MapErrLayer { + let handler = |x: TaskError| -> BoxError { Box::new(x) }; + MapErrLayer::new(handler) + } + + fn error_handle_post_layer(&self) -> MapErrLayer { + let handler = |x: BoxError| -> TaskError { + let box_error = match x.downcast::() { + Ok(backoff_error) => backoff_error.into_inner(), + Err(box_error) => box_error, + }; + + let box_error = match box_error.downcast::() { + Ok(_) => { + return TaskError::new( + TaskErrorKind::TimeoutPolicy, + "timeout policy error occurred", + ) + } + Err(box_error) => box_error, + }; + + match box_error.downcast::() { + Ok(task_error) => *task_error, + Err(_) => TaskError::new( + TaskErrorKind::Unknown, + "unknown (type-erased) error occurred", + ), + } + }; + + MapErrLayer::new(handler) + } + + /// Returns the optional `tower::`[`RetryLayer`]. + fn optional_retry_layer(&self) -> Option { + let Some(retry_policy) = &self.layers.retry_policy else { + return None; + }; + + // TODO: Make RetryStrategy::Linear actually linear. 
+ let maker = match retry_policy.strategy { + RetryStrategy::Linear { + step_backoff, + max_backoff, + jitter_perc, + } => ExponentialBackoffMaker::new( + step_backoff, + max_backoff.unwrap_or_else(|| Duration::from_secs(u64::MAX)), + jitter_perc.unwrap_or_default(), + HasherRng::new(), + ), + RetryStrategy::Exponential { + base_backoff, + max_backoff, + jitter_perc, + } => ExponentialBackoffMaker::new( + base_backoff, + max_backoff.unwrap_or_else(|| Duration::from_secs(u64::MAX)), + jitter_perc.unwrap_or_default(), + HasherRng::new(), + ), + }; + + let backoff = maker.unwrap().make_backoff(); + let policy = BackoffPolicy::new(retry_policy.retries, backoff); + Some(Either::Right(RetryLayer::new(policy))) + } + + /// Returns the optional `tower::`[`TimeoutLayer`]. + fn optional_timeout_layer(&self) -> Option { + let Some(timeout_policy) = &self.layers.timeout_policy else { + return None; + }; + + Some(TimeoutLayer::new(timeout_policy.duration)) + } + + /// TODO. + pub fn apply_layers( + self, + task_handler: BoxCloneSyncService, TaskResponse, TaskError>, + ) -> BoxCloneSyncService, TaskResponse, TaskError> + where + T: 'static + Send + Clone, + U: 'static + Send, + { + let timeout_policy = self.layers.timeout_policy.as_ref(); + let is_retry = timeout_policy + .map(|x| matches!(x.action, TimeoutAction::Retry)) + .unwrap_or_default(); + + if is_retry { + let service = ServiceBuilder::new() + .layer(self.error_handle_post_layer()) + .option_layer(self.optional_retry_layer()) + .option_layer(self.optional_timeout_layer()) + .layer(self.error_handle_pre_layer()) + .service(task_handler); + + BoxCloneSyncService::new(service) + } else { + let service = ServiceBuilder::new() + .layer(self.error_handle_post_layer()) + .option_layer(self.optional_timeout_layer()) + .option_layer(self.optional_retry_layer()) + .layer(self.error_handle_pre_layer()) + .service(task_handler); + + BoxCloneSyncService::new(service) + } + } +} + +#[cfg(test)] +mod test { + use std::time::Duration; 
+ + use crate::context::layers::TaskLayers; + use crate::context::policies::{RetryPolicy, TimeoutPolicy}; + use crate::context::TaskResult; + use crate::handler::compose::LayerCompose; + + #[test] + fn compose() -> TaskResult<()> { + let layers = TaskLayers::builder() + .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) + .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) + .build()?; + + let _compose = LayerCompose::new(&layers); + Ok(()) + } +} diff --git a/crates/task/handler/layers.rs b/crates/task/handler/layers.rs deleted file mode 100644 index 8fe8553..0000000 --- a/crates/task/handler/layers.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! [`LayerCompose`], [`Layers`] and its [`LayersBuilder`]. - -use serde::{Deserialize, Serialize}; - -/// TODO. -#[derive(Debug, Default, Clone)] -pub struct LayerCompose<'a> { - layers: Option<&'a Layers>, -} - -impl<'a> LayerCompose<'a> { - /// Returns a new [`LayerCompose`]. - #[inline] - pub fn new(layers: &'a Layers) -> Self { - Self { - layers: Some(layers), - } - } -} - -/// Declarative `tower::`[`Layer`]s configuration. -/// -/// [`Layer`]: tower::Layer -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub struct Layers { - timeout_policy: Option<()>, - retry_policy: Option<()>, -} - -impl Layers { - /// Returns a new [`Layers`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Returns a new [`LayersBuilder`]. - #[inline] - pub fn builder() -> LayersBuilder { - LayersBuilder::new() - } -} - -/// [`Layers`] builder. -#[derive(Debug, Default, Clone)] -pub struct LayersBuilder { - timeout_policy: Option<()>, - retry_policy: Option<()>, -} - -impl LayersBuilder { - /// Returns a new [`LayersBuilder`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Overrides the default value of [`LayersBuilder`]`::timeout_policy`. 
- pub fn with_timeout_policy(mut self, timeout_policy: ()) -> Self { - self.timeout_policy = Some(timeout_policy); - self - } - - /// Overrides the default value of [`LayersBuilder`]`::retry_policy`. - pub fn with_retry_policy(mut self, retry_policy: ()) -> Self { - self.retry_policy = Some(retry_policy); - self - } - - /// Returns a new [`Layers`]. - pub fn build(self) -> Layers { - Layers { - timeout_policy: self.timeout_policy, - retry_policy: self.retry_policy, - } - } -} - -#[cfg(test)] -mod test { - use crate::context::TaskResult; - use crate::handler::layers::{LayerCompose, Layers}; - - #[test] - fn from_layers_builder() -> TaskResult<()> { - let layers = Layers::builder() - .with_retry_policy(()) - .with_timeout_policy(()) - .build(); - - let _compose = LayerCompose::new(&layers); - Ok(()) - } -} diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs index c8c1b45..f5f8a4a 100644 --- a/crates/task/handler/metric.rs +++ b/crates/task/handler/metric.rs @@ -30,7 +30,8 @@ impl TaskMetricsLock { /// Returns a new [`TaskMetrics`]. pub fn snapshot(&self) -> TaskMetrics { - TaskMetrics {} + let guard = self.inner.lock().expect("should not be locked"); + guard.clone() } } @@ -41,12 +42,14 @@ impl TaskMetricsLock { #[derive(Debug, Default, Clone, PartialOrd, PartialEq, Serialize, Deserialize)] #[must_use = "metrics do nothing unless you serialize them"] pub struct TaskMetrics { - // TODO: pub average_waiting_time: Duration, - // TODO: pub average_recent_waiting_time: Duration, - // TODO: pub average_running_time: Duration, - // TODO: pub average_recent_running_time: Duration, - // TODO: pub total_success_runs: u32, - // TODO: pub total_failure_runs: u32, + // TODO: Implement all metrics. 
+ + // pub average_waiting_time: Duration, + // pub average_recent_waiting_time: Duration, + // pub average_running_time: Duration, + // pub average_recent_running_time: Duration, + // pub total_success_runs: u32, + // pub total_failure_runs: u32, } impl TaskMetrics { diff --git a/crates/task/handler/mod.rs b/crates/task/handler/mod.rs index 32e19b7..8e0e42c 100644 --- a/crates/task/handler/mod.rs +++ b/crates/task/handler/mod.rs @@ -9,13 +9,14 @@ use tower::util::BoxCloneSyncService; use tower::{Layer, Service, ServiceBuilder}; use crate::context::{TaskError, TaskRequest, TaskResponse}; +use crate::handler::compose::LayerCompose; use crate::handler::future::TaskFuture; -pub use crate::handler::layers::{Layers, LayersBuilder}; use crate::handler::metric::{TaskMetrics, TaskMetricsLock}; +mod compose; pub mod future; -mod layers; pub mod metric; +mod retry; /// Unified `tower::`[`Service`] for executing tasks. /// @@ -104,7 +105,7 @@ impl fmt::Debug for TaskHandler { impl Service> for TaskHandler where - T: 'static + Send, + T: 'static + Send + Clone, U: 'static + Send, { type Response = TaskResponse; @@ -118,15 +119,13 @@ where #[inline] fn call(&mut self, req: TaskRequest) -> Self::Future { - let Some(layers) = req.layers.as_ref() else { + let Some(layers) = &req.layers else { return TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()); }; - // { let compose = LayerCompose::new(layers); } - // TODO: Apply layers here. 
- let mut inner_svc = self.inner.clone(); - let fut = async move { inner_svc.call(req).await }; - TaskFuture::with_metrics(fut, self.metrics.clone()) + let compose = LayerCompose::new(layers); + let mut svc = compose.apply_layers(self.inner.clone()); + TaskFuture::with_metrics(svc.call(req), self.metrics.clone()) } } diff --git a/crates/task/handler/retry.rs b/crates/task/handler/retry.rs new file mode 100644 index 0000000..643b76a --- /dev/null +++ b/crates/task/handler/retry.rs @@ -0,0 +1,72 @@ +use replace_with::replace_with_or_abort; +use tower::retry::backoff::{Backoff, ExponentialBackoff, ExponentialBackoffMaker, MakeBackoff}; +use tower::retry::Policy; +use tower::BoxError; + +/// TODO. +#[derive(Debug, Clone)] +pub struct BackoffPolicy { + retries: u32, + backoff: B, +} + +impl BackoffPolicy { + /// Returns a new [`BackoffPolicy`]. + pub fn new(retries: u32, backoff: B) -> Self { + Self { retries, backoff } + } +} + +impl Default for BackoffPolicy { + fn default() -> Self { + let mut maker = ExponentialBackoffMaker::default(); + Self::new(3, maker.make_backoff()) + } +} + +impl Policy for BackoffPolicy +where + Req: 'static + Clone, + Resp: 'static, + B: Backoff, +{ + type Future = B::Future; + + fn retry(&mut self, _req: &mut Req, resp: &mut Result) -> Option { + match resp.as_mut() { + Ok(_) => return None, + Err(e) if self.retries == 0 => { + replace_with_or_abort(e, |e| Box::new(BackoffError::new(e))) + } + Err(_) => {} + } + + self.retries -= 1; + Some(self.backoff.next_backoff()) + } + + #[inline] + fn clone_request(&mut self, req: &Req) -> Option { + Some(req.clone()) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("retry layer has failed: {inner}")] +#[must_use = "errors do nothing unless you use them"] +pub struct BackoffError { + inner: BoxError, +} + +impl BackoffError { + /// Returns a new [`BackoffError`]. + pub fn new(inner: BoxError) -> Self { + Self { inner } + } + + /// Returns the underlying boxed error. 
+ #[inline] + pub fn into_inner(self) -> BoxError { + self.inner + } +} diff --git a/crates/task/lib.rs b/crates/task/lib.rs index f8cfe3e..23786d7 100644 --- a/crates/task/lib.rs +++ b/crates/task/lib.rs @@ -3,10 +3,13 @@ #![doc = include_str!("./README.md")] //! ```rust +//! use std::time::Duration; //! use tower::{ServiceBuilder, service_fn}; //! //! use axiston_rt_task::context::{TaskRequest, TaskResponse, TaskResult}; -//! use axiston_rt_task::handler::{TaskHandlerLayer, TaskHandler, Layers}; +//! use axiston_rt_task::context::layers::TaskLayers; +//! use axiston_rt_task::context::policies::{RetryPolicy, TimeoutPolicy}; +//! use axiston_rt_task::handler::{TaskHandlerLayer, TaskHandler}; //! use axiston_rt_task::routing::manifest::{TaskManifest, ServiceManifest}; //! use axiston_rt_task::Router; //! @@ -14,7 +17,8 @@ //! Ok(TaskResponse::new(request.into_inner())) //! } //! -//! fn main() -> TaskResult<()> { +//! #[tokio::main] +//! async fn main() -> TaskResult<()> { //! let service_manifest = ServiceManifest::new("service"); //! let task_manifest = TaskManifest::new("task"); //! @@ -22,12 +26,20 @@ //! .layer(TaskHandlerLayer::default()) //! .service(service_fn(handler)); //! -//! let layers = Layers::builder().build(); +//! let layers = TaskLayers::builder() +//! .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) +//! .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) +//! .build()?; +//! //! let router = Router::default() //! .with_layers(layers) //! .with_service(service_manifest) //! .with_route(task_manifest, task_handler); //! +//! let request = TaskRequest::builder("task", 5).build()?; +//! let response = router.route_task(request).await?; +//! assert_eq!(response.into_inner(), 5); +//! //! Ok(()) //! } //! ``` @@ -47,10 +59,18 @@ pub mod routing; /// Also see [`Router::as_registry`]. 
/// /// [`Router::as_registry`]: routing::Router::as_registry -#[derive(Debug)] +#[derive(Debug, Default)] pub struct Registry { /// List of all registered services. pub services: HashMap, /// List of all registered tasks. pub tasks: HashMap, } + +impl Registry { + /// Returns an empty [`Registry`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs index e1ee8e3..4bd5d55 100644 --- a/crates/task/routing/index.rs +++ b/crates/task/routing/index.rs @@ -1,6 +1,8 @@ //! [`TaskIndex`] and [`ServiceIndex`]. -use derive_more::{Deref, DerefMut}; +use std::borrow::Cow; + +use derive_more::{Deref, DerefMut, From}; use ecow::EcoString; /// Opaque and unique [`Service`] identifier. @@ -30,7 +32,8 @@ impl ServiceIndex { /// Opaque and unique [`TaskHandler`] identifier. /// /// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From)] +#[from(Cow<'static, str>, String, &'static str)] #[must_use = "indexes do nothing unless you serialize them"] pub struct TaskIndex { inner: EcoString, diff --git a/crates/task/routing/manifest.rs b/crates/task/routing/manifest.rs index adf1b93..12944fb 100644 --- a/crates/task/routing/manifest.rs +++ b/crates/task/routing/manifest.rs @@ -142,7 +142,11 @@ impl TaskSchemaValidators { schema.validate(values).map_err(From::from) } - pub fn validate_errors(&self, values: &Value) -> TaskResult<()> { + pub fn validate_errors(&self, values: Option<&Value>) -> TaskResult<()> { + let Some(values) = values else { + return Ok(()); + }; + let Some(schema) = self.errors.as_ref() else { return Ok(()); }; diff --git a/crates/task/routing/mod.rs b/crates/task/routing/mod.rs index a19e416..991dd6a 100644 --- a/crates/task/routing/mod.rs +++ b/crates/task/routing/mod.rs @@ -1,13 +1,18 @@ //! [`Router`], [`TaskIndex`] and [`manifest`]s. 
+#[cfg(not(feature = "hashbrown"))] use std::collections::HashMap; use std::fmt; use std::sync::Arc; +#[cfg(feature = "hashbrown")] +use hashbrown::HashMap; use tower::ServiceExt; +use crate::context::layers::TaskLayers; use crate::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse}; -use crate::handler::{Layers, TaskHandler}; +use crate::handler::metric::TaskMetrics; +use crate::handler::TaskHandler; use crate::routing::index::{ServiceIndex, TaskIndex}; use crate::routing::manifest::{ServiceManifest, TaskManifest}; use crate::routing::route::Route; @@ -25,7 +30,7 @@ pub type RouteResponse = (); /// Provides a mechanism for managing and executing tasks within a system. /// -/// It allows the registration of services and tasks using manifests, supports middleware [`Layers`] +/// It allows the registration of services and tasks using manifests, supports middleware [`TaskLayers`] /// for extensibility, and routes incoming [`TaskRequest`] to the appropriate [`TaskHandler`]s. #[must_use = "routes do nothing unless you use them"] pub struct Router { @@ -33,7 +38,7 @@ pub struct Router { } struct RouterInner { - layer_compose: Option, + layer_compose: Option, service_manifests: HashMap, routes: HashMap>, } @@ -41,7 +46,7 @@ struct RouterInner { impl Router { /// Returns an empty [`Router`]. #[inline] - pub fn new(layers: Layers) -> Self { + pub fn new(layers: TaskLayers) -> Self { let router_inner = RouterInner { layer_compose: Some(layers), service_manifests: HashMap::default(), @@ -57,7 +62,8 @@ impl Router { where F: FnOnce(&mut RouterInner), { - let mut inner = Arc::try_unwrap(self.inner).unwrap_or_else(|x| (*x).clone()); + let inner = Arc::try_unwrap(self.inner); + let mut inner = inner.unwrap_or_else(|x| (*x).clone()); f(&mut inner); Self { inner: Arc::new(inner), @@ -65,7 +71,7 @@ impl Router { } /// Overrides the default value of [`Router`]`::layer_compose`. 
- pub fn with_layers(self, layers: Layers) -> Self { + pub fn with_layers(self, layers: TaskLayers) -> Self { self.inspect_inner_mut(|x| { x.layer_compose = Some(layers); }) @@ -94,22 +100,44 @@ impl Router { fn with_route_impl(self, task_handler: TaskHandler, task_manifest: TaskManifest) -> Self { self.inspect_inner_mut(move |x| { - let route_index = TaskIndex::new(&task_manifest.route_id); + let task_index = TaskIndex::new(&task_manifest.route_id); let route = Route::new(task_handler, task_manifest) .expect("should not provide malformed manifests"); - x.routes.insert(route_index, route); + x.routes.insert(task_index, route); }) } /// Returns the reference to the [`ServiceManifest`]. - pub fn find_service_manifest(&self, service_index: &ServiceIndex) -> Option<&ServiceManifest> { - self.inner.service_manifests.get(service_index) + pub fn find_service_manifest( + &self, + service_index: impl Into, + ) -> Option<&ServiceManifest> { + self.inner.service_manifests.get(&service_index.into()) } /// Returns the reference to the [`TaskManifest`]. - pub fn find_task_manifest(&self, route_index: &TaskIndex) -> Option<&TaskManifest> { - let route = self.inner.routes.get(route_index); - route.map(|x| &x.route_handler.route_manifest) + pub fn find_task_manifest(&self, task_index: impl Into) -> Option<&TaskManifest> { + self.inner + .routes + .get(&task_index.into()) + .map(|r| r.manifest()) + } + + /// Returns the [`TaskHandler`] of the given task. + pub fn find_task_handler(&self, task_index: impl Into) -> Option> { + self.inner + .routes + .get(&task_index.into()) + .map(|r| r.task_handler()) + .cloned() + } + + /// Returns the [`TaskMetrics`] of the given task. + pub fn find_task_metrics(&self, task_index: impl Into) -> Option { + self.inner + .routes + .get(&task_index.into()) + .map(|r| r.task_handler_metrics()) } /// Returns a new [`Registry`]. 
@@ -120,9 +148,12 @@ impl Router { pub fn as_registry(&self) -> Registry { let routes = self.inner.routes.iter(); Registry { + #[cfg(not(feature = "hashbrown"))] services: self.inner.service_manifests.clone(), + #[cfg(feature = "hashbrown")] + services: self.inner.service_manifests.clone().into_iter().collect(), tasks: routes - .map(|(i, r)| (i.clone(), r.route_handler.route_manifest.clone())) + .map(|(i, r)| (i.clone(), r.manifest().clone())) .collect(), } } @@ -135,24 +166,22 @@ impl Router { /// - Returns an error if the requested handler returns an error. pub async fn route_task( &self, - mut task_request: TaskRequest, + task_request: TaskRequest, ) -> Result, TaskError> where - T: 'static + Send, + T: 'static + Send + Clone, U: 'static + Send, { - let route_index = TaskIndex::new(&task_request.index); - let route_handler = self.inner.routes.get(&route_index); - let route_handler = route_handler.ok_or_else(|| { + let task_index = TaskIndex::new(&task_request.task_id); + let task_handler = self.find_task_handler(task_index).ok_or_else(|| { TaskError::new( TaskErrorKind::NotFound, "requested task identifier was not found", ) })?; - let layer_compose = self.inner.layer_compose.as_ref(); - task_request.apply_default_layers(layer_compose); - route_handler.route(task_request).await + self.route_task_with_handler(task_request, task_handler) + .await } /// Executes the provided task handler with a given request. @@ -162,15 +191,16 @@ impl Router { /// - Returns an error if the provided handler returns an error. 
pub async fn route_task_with_handler( &self, - task_request: TaskRequest, + mut task_request: TaskRequest, task_handler: TaskHandler, ) -> Result, TaskError> where - T: 'static + Send, + T: 'static + Send + Clone, U: 'static + Send, { - let fut = task_handler.oneshot(task_request); - fut.await.map_err(From::from) + let layer_compose = self.inner.layer_compose.as_ref(); + task_request.apply_default_layers(layer_compose); + task_handler.oneshot(task_request).await } } @@ -220,21 +250,21 @@ impl Clone for RouterInner { mod test { use tower::{service_fn, ServiceBuilder}; - use crate::context::{TaskError, TaskRequest, TaskResponse}; + use crate::context::layers::TaskLayers; + use crate::context::{TaskError, TaskRequest, TaskResponse, TaskResult}; use crate::handler::{TaskHandler, TaskHandlerLayer}; use crate::routing::manifest::ServiceManifest; - use crate::routing::{Layers, Router, TaskManifest}; + use crate::routing::{Router, TaskManifest}; - async fn handle_builtin0(request: TaskRequest) -> Result, TaskError> { + async fn handle_builtin0(request: TaskRequest) -> TaskResult> { Ok(TaskResponse::new(request.into_inner())) } - async fn handle_builtin1(request: TaskRequest) -> Result, TaskError> { + async fn handle_builtin1(request: TaskRequest) -> TaskResult> { Ok(TaskResponse::new(request.into_inner())) } - #[test] - fn build_default_router() -> Result<(), TaskError> { + fn create_testing_router() -> Router { let service0_manifest = ServiceManifest::new("service0"); let builtin0_manifest = TaskManifest::new("builtin0"); @@ -247,11 +277,19 @@ mod test { .layer(TaskHandlerLayer::default()) .service(service_fn(handle_builtin1)); - let router = Router::default() - .with_layers(Layers::new()) + Router::default() + .with_layers(TaskLayers::new()) .with_service(service0_manifest) .with_route(builtin0_manifest, builtin0_service) - .with_route(builtin1_manifest, builtin1_service); + .with_route(builtin1_manifest, builtin1_service) + } + + #[tokio::test] + async fn 
simple_routing() -> Result<(), TaskError> { + let router = create_testing_router(); + let request = TaskRequest::builder("builtin0", 5).build()?; + let response = router.route_task(request).await?; + assert_eq!(response.into_inner(), 5); Ok(()) } diff --git a/crates/task/routing/route.rs b/crates/task/routing/route.rs index b1187f4..ae35678 100644 --- a/crates/task/routing/route.rs +++ b/crates/task/routing/route.rs @@ -11,90 +11,65 @@ use crate::handler::TaskHandler; use crate::routing::manifest::TaskSchemaValidators; use crate::routing::TaskManifest; -/// TODO. +/// Routing structure that wraps [`TaskHandler`] and req/resp validation. #[must_use = "routes do nothing unless you use them"] pub struct Route { - pub(crate) route_handler: Arc>, + route_handler: Arc>, } #[must_use = "routes do nothing unless you use them"] -pub struct RouteHandler { - pub(crate) route_task_handler: TaskHandler, - pub(crate) route_manifest: TaskManifest, - pub(crate) schema_validators: TaskSchemaValidators, +struct RouteHandler { + task_handler: TaskHandler, + schema_validators: TaskSchemaValidators, + manifest: TaskManifest, } impl Route { - /// Returns a new [`Route`]. + /// Creates a new [`Route`]. pub fn new( - route_task_handler: TaskHandler, - route_manifest: TaskManifest, + task_handler: TaskHandler, + task_manifest: TaskManifest, ) -> Result { - let schema_validators = route_manifest.create_schema_validators()?; - let route_handler = Arc::new(RouteHandler { - route_task_handler, - route_manifest, - schema_validators, - }); - - Ok(Self { route_handler }) + let schema_validators = task_manifest.create_schema_validators()?; + Ok(Self { + route_handler: Arc::new(RouteHandler { + task_handler, + schema_validators, + manifest: task_manifest, + }), + }) } - /// Returns the underlying `tower::`[`Service`]. + /// Returns the reference to the inner [`TaskHandler`]. 
#[inline] - pub fn task_handler(&self) -> TaskHandler { - self.route_handler.route_task_handler.clone() + pub fn task_handler(&self) -> &TaskHandler { + &self.route_handler.task_handler } - /// Returns the underlying `tower::`[`Service`]'s metrics. + /// Returns [`TaskMetrics`] of the inner [`TaskHandler`]. #[inline] pub fn task_handler_metrics(&self) -> TaskMetrics { - self.route_handler.route_task_handler.load() + self.route_handler.task_handler.load() } - /// Processes the request and returns the response asynchronously. - pub async fn route(&self, task_request: TaskRequest) -> Result, TaskError> - where - T: 'static + Send, - U: 'static + Send, - { - self.route_handler - .schema_validators - .validate_inputs(&task_request.inputs)?; - - let mut task_handler = self.route_handler.route_task_handler.clone(); - match task_handler.call(task_request).await { - Ok(task_response) => { - self.route_handler - .schema_validators - .validate_outputs(&task_response.outputs)?; - - Ok(task_response) - } - Err(task_error) => { - if let Some(values) = &task_error.values { - self.route_handler - .schema_validators - .validate_errors(values)?; - } - - Err(task_error) - } - } + /// Returns the reference to the inner [`TaskManifest`]. 
+ #[inline] + pub fn manifest(&self) -> &TaskManifest { + &self.route_handler.manifest } } impl Clone for Route { fn clone(&self) -> Self { Self { - route_handler: self.route_handler.clone(), + route_handler: Arc::clone(&self.route_handler), } } } impl Service> for Route where - T: 'static + Send, + T: 'static + Send + Clone, U: 'static + Send, { type Response = TaskResponse; @@ -102,13 +77,39 @@ where type Future = TaskFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - let mut handler = self.route_handler.route_task_handler.clone(); + let mut handler = self.route_handler.task_handler.clone(); handler.poll_ready(cx) } fn call(&mut self, req: TaskRequest) -> Self::Future { - let mut handler = self.route_handler.route_task_handler.clone(); - // TODO: Validation - handler.call(req) + let this = self.clone(); + let fut = async move { + this.route_handler + .schema_validators + .validate_inputs(&req.inputs)?; + + let mut handler = this.route_handler.task_handler.clone(); + let response = handler.call(req).await; + + response + .and_then(|response| { + this.route_handler + .schema_validators + .validate_outputs(&response.outputs) + .and(Ok(response)) + }) + .map_err(|error| { + match this + .route_handler + .schema_validators + .validate_errors(error.values.as_ref()) + { + Ok(_) => error, + Err(v_error) => v_error, + } + }) + }; + + TaskFuture::new(fut) } } From 3828d68739911a33d97a604ec1355fcda86553c1 Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Tue, 7 Jan 2025 16:28:05 +0100 Subject: [PATCH 10/11] feat(all): source --- crates/source/Cargo.toml | 49 +++++++++++ crates/source/README.md | 20 +++++ crates/source/lib.rs | 33 ++++++++ crates/source/loader/from_git.rs | 43 ++++++++++ crates/source/loader/from_tar.rs | 52 ++++++++++++ crates/source/loader/from_zip.rs | 46 ++++++++++ crates/source/loader/mod.rs | 119 ++++++++++++++++++++++++++ crates/source/script/builder.rs | 40 +++++++++ crates/source/script/container.rs | 54 ++++++++++++ 
crates/source/script/metadata.rs | 54 ++++++++++++ crates/source/script/mod.rs | 80 ++++++++++++++++++ crates/source/utils/hashing.rs | 134 ++++++++++++++++++++++++++++++ crates/source/utils/mod.rs | 13 +++ 13 files changed, 737 insertions(+) create mode 100644 crates/source/Cargo.toml create mode 100644 crates/source/README.md create mode 100644 crates/source/lib.rs create mode 100644 crates/source/loader/from_git.rs create mode 100644 crates/source/loader/from_tar.rs create mode 100644 crates/source/loader/from_zip.rs create mode 100644 crates/source/loader/mod.rs create mode 100644 crates/source/script/builder.rs create mode 100644 crates/source/script/container.rs create mode 100644 crates/source/script/metadata.rs create mode 100644 crates/source/script/mod.rs create mode 100644 crates/source/utils/hashing.rs create mode 100644 crates/source/utils/mod.rs diff --git a/crates/source/Cargo.toml b/crates/source/Cargo.toml new file mode 100644 index 0000000..4c9be73 --- /dev/null +++ b/crates/source/Cargo.toml @@ -0,0 +1,49 @@ +# https://doc.rust-lang.org/cargo/reference/manifest.html + +[package] +name = "axiston-rt-source" +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +readme = "./README.md" + +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +documentation = { workspace = true } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lib] +path = "lib.rs" + +[features] +default = [] +# Enables source loading from git links. +source-git = ["dep:git2"] +# Enables source loading from zip archives. +source-zip = ["dep:zip"] +# Enables source loading from tar archives. 
+source-tar = ["dep:tar"]
+
+[dependencies]
+tracing = { workspace = true }
+thiserror = { workspace = true }
+
+serde = { workspace = true }
+serde_json = { workspace = true }
+bytes = { workspace = true }
+
+tempfile = { version = "3.15", features = [] }
+sha2 = { version = "0.10", features = [] }
+walkdir = { version = "2.5", features = [] }
+
+git2 = { version = "0.20", optional = true, features = [] }
+zip = { version = "2.2", optional = true, features = [] }
+tar = { version = "0.4", optional = true, features = [] }
+
+[dev-dependencies]
+hex = { version = "0.4", features = [] }
diff --git a/crates/source/README.md b/crates/source/README.md
new file mode 100644
index 0000000..408e732
--- /dev/null
+++ b/crates/source/README.md
@@ -0,0 +1,20 @@
+### runtime/source
+
+[![Build Status][action-badge]][action-url]
+[![Crate Docs][docs-badge]][docs-url]
+[![Crate Version][crates-badge]][crates-url]
+
+[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square
+[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml
+[crates-badge]: https://img.shields.io/crates/v/axiston-rt-source.svg?logo=rust&style=flat-square
+[crates-url]: https://crates.io/crates/axiston-rt-source
+[docs-badge]: https://img.shields.io/docsrs/axiston-rt-source?logo=Docs.rs&style=flat-square
+[docs-url]: http://docs.rs/axiston-rt-source
+
+Lorem Ipsum. Lorem Ipsum. Lorem Ipsum.
+
+#### Notes
+
+- Lorem Ipsum.
+- Lorem Ipsum.
+- Lorem Ipsum.
diff --git a/crates/source/lib.rs b/crates/source/lib.rs
new file mode 100644
index 0000000..7525265
--- /dev/null
+++ b/crates/source/lib.rs
@@ -0,0 +1,33 @@
+#![forbid(unsafe_code)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![doc = include_str!("./README.md")]
+
+//! ### Examples
+//!
+//! ```rust
+//! fn main() {}
+//!
``` + +pub mod loader; +mod script; +mod utils; + +pub use crate::script::{Source, SourceBuilder, SourceContainer, SourceMetadata}; + +/// Unrecoverable failure of the [`SourceLoader`]. +/// +/// Includes all error types that may occur. +/// +/// [`SourceLoader`]: loader::SourceLoader +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum Error { + /// Underlying I/O error. + #[error("underlying i/o error: {0}")] + Io(#[from] std::io::Error), +} + +/// Specialized [`Result`] alias for the [`Error`] type. +/// +/// [`Result`]: std::result::Result +pub type Result = std::result::Result; diff --git a/crates/source/loader/from_git.rs b/crates/source/loader/from_git.rs new file mode 100644 index 0000000..c5eff04 --- /dev/null +++ b/crates/source/loader/from_git.rs @@ -0,0 +1,43 @@ +use std::path::Path; + +use crate::loader::SourceLoader; +use crate::{Result, Source, SourceContainer, SourceMetadata}; + +/// TODO. +/// +/// # Notes +/// +/// - Uses [`git2`] crate. +pub struct GitSourceLoader {} + +impl GitSourceLoader { + /// Returns a new [`GitSourceLoader`]. 
+ pub fn new() -> Self { + Self {} + } +} + +impl SourceLoader for GitSourceLoader { + async fn load_source_script( + self, + output_dir_path: &Path, + needs_metadata: bool, + ) -> Result { + // let module_main_path = output_dir_path.join("./main.ts"); + // let metadata_path = output_dir_path.join("./meta.toml"); + + todo!() + } +} + +#[cfg(test)] +mod test { + use crate::loader::GitSourceLoader; + use crate::Result; + + #[test] + fn build_source_loader() -> Result<()> { + let _ = GitSourceLoader::new(); + Ok(()) + } +} diff --git a/crates/source/loader/from_tar.rs b/crates/source/loader/from_tar.rs new file mode 100644 index 0000000..7e88207 --- /dev/null +++ b/crates/source/loader/from_tar.rs @@ -0,0 +1,52 @@ +use std::future::Future; +use std::io::Cursor; +use std::path::Path; + +use crate::loader::{SourceBuffer, SourceLoader}; +use crate::script::{SourceContainer, SourceMetadata}; +use crate::{Result, Source}; + +/// TODO. +/// +/// # Notes +/// +/// - Uses [`tar`] crate. +pub struct TarSourceLoader<'a> { + buf: SourceBuffer<'a>, +} + +impl<'a> TarSourceLoader<'a> { + /// Returns a new [`TarSourceLoader`]. 
+ #[inline] + pub fn new(buf: SourceBuffer<'a>) -> Self { + Self { buf } + } + + pub fn into_reader(self) -> Result>> { + let buf = self.buf.into_bytes()?; + Ok(Cursor::new(buf)) + } +} + +impl SourceLoader for TarSourceLoader<'_> { + async fn load_source_script( + self, + output_dir_path: &Path, + needs_metadata: bool, + ) -> Result { + todo!() + } +} + +#[cfg(test)] +mod test { + use crate::loader::{SourceBuffer, TarSourceLoader}; + use crate::Result; + + #[test] + fn build_source_loader() -> Result<()> { + let buf = SourceBuffer::default(); + let _ = TarSourceLoader::new(buf); + Ok(()) + } +} diff --git a/crates/source/loader/from_zip.rs b/crates/source/loader/from_zip.rs new file mode 100644 index 0000000..96480e6 --- /dev/null +++ b/crates/source/loader/from_zip.rs @@ -0,0 +1,46 @@ +use std::future::Future; +use std::path::Path; + +use crate::loader::{SourceBuffer, SourceLoader}; +use crate::script::{SourceContainer, SourceMetadata}; +use crate::{Result, Source}; + +/// TODO. +/// +/// # Notes +/// +/// - Uses [`zip`] crate. +pub struct ZipSourceLoader<'a> { + buf: SourceBuffer<'a>, +} + +impl<'a> ZipSourceLoader<'a> { + /// Returns a new [`ZipSourceLoader`]. + #[inline] + pub fn new(buf: SourceBuffer<'a>) -> Self { + Self { buf } + } +} + +impl SourceLoader for ZipSourceLoader<'_> { + async fn load_source_script( + self, + output_dir_path: &Path, + needs_metadata: bool, + ) -> Result { + todo!() + } +} + +#[cfg(test)] +mod test { + use crate::loader::{SourceBuffer, ZipSourceLoader}; + use crate::Result; + + #[test] + fn build_source_loader() -> Result<()> { + let buf = SourceBuffer::default(); + let _ = ZipSourceLoader::new(buf); + Ok(()) + } +} diff --git a/crates/source/loader/mod.rs b/crates/source/loader/mod.rs new file mode 100644 index 0000000..e04a70a --- /dev/null +++ b/crates/source/loader/mod.rs @@ -0,0 +1,119 @@ +//! All available [`SourceLoader`] implementations. 
+
+#[cfg(feature = "source-git")]
+mod from_git;
+#[cfg(feature = "source-tar")]
+mod from_tar;
+#[cfg(feature = "source-zip")]
+mod from_zip;
+
+use std::borrow::Cow;
+use std::future::Future;
+use std::io::{Error, ErrorKind, Read};
+use std::path::Path;
+
+#[cfg(feature = "source-git")]
+#[cfg_attr(docsrs, doc(cfg(feature = "source-git")))]
+pub use crate::loader::from_git::GitSourceLoader;
+#[cfg(feature = "source-tar")]
+#[cfg_attr(docsrs, doc(cfg(feature = "source-tar")))]
+pub use crate::loader::from_tar::TarSourceLoader;
+#[cfg(feature = "source-zip")]
+#[cfg_attr(docsrs, doc(cfg(feature = "source-zip")))]
+pub use crate::loader::from_zip::ZipSourceLoader;
+use crate::{Result, Source};
+
+/// TODO.
+pub trait SourceLoader {
+    /// TODO.
+    fn load_source_script(
+        self,
+        output_dir_path: &Path,
+        loading_metadata: bool,
+    ) -> impl Future>;
+}
+
+/// TODO.
+pub struct SourceBuffer<'a> {
+    buf: ResolveBuffer<'a>,
+}
+
+enum ResolveBuffer<'a> {
+    Bytes { buf: Cow<'a, [u8]> },
+    String { buf: Cow<'a, str> },
+}
+
+impl SourceBuffer<'static> {
+    /// Clones the provided byte sequence into the new [`SourceBuffer`].
+    pub fn from_bytes(buf: impl AsRef<[u8]>) -> Self {
+        let buf = ResolveBuffer::Bytes {
+            buf: Cow::Owned(buf.as_ref().to_owned()),
+        };
+
+        Self { buf }
+    }
+
+    /// Clones the provided string into the new [`SourceBuffer`].
+    pub fn from_string(buf: impl AsRef) -> Self {
+        let buf = ResolveBuffer::String {
+            buf: Cow::Owned(buf.as_ref().to_owned()),
+        };
+
+        Self { buf }
+    }
+
+    /// Reads all bytes from the provided reader into the new [`SourceBuffer`].
+    ///
+    /// # Errors
+    ///
+    /// If reading from the provided reader fails then the I/O error is returned.
+    /// See [`Read::read_to_end`] for the semantics.
+ pub fn from_reader(mut reader: impl Read) -> Result { + let mut buf = Vec::default(); + let _ = reader.read_to_end(&mut buf)?; + let buf = ResolveBuffer::Bytes { + buf: Cow::Owned(buf), + }; + + Ok(Self { buf }) + } +} + +impl SourceBuffer<'_> { + /// Returns the underlying byte sequence. + fn into_bytes(self) -> Result> { + match self.buf { + ResolveBuffer::Bytes { buf } => Ok(buf.into_owned()), + ResolveBuffer::String { buf } => Ok(buf.into_owned().into_bytes()), + } + } + + /// Returns the underlying byte sequence as a string, validates UTF-8 if needed. + /// + /// # Errors + /// + /// If the data in the buffer is not valid UTF-8 then an error is returned. + /// See [`Read::read_to_string`] and [`String::from_utf8`] for the semantics. + fn into_string(self) -> Result { + let buf = match self.buf { + ResolveBuffer::Bytes { buf } => buf, + ResolveBuffer::String { buf } => return Ok(buf.into_owned()), + }; + + let buf = String::from_utf8(buf.into_owned()).map_err(|_| { + Error::new(ErrorKind::InvalidData, "stream did not contain valid UTF-8") + })?; + + Ok(buf) + } +} + +impl Default for SourceBuffer<'static> { + fn default() -> Self { + let buf = ResolveBuffer::Bytes { + buf: Cow::Owned(Vec::new()), + }; + + Self { buf } + } +} diff --git a/crates/source/script/builder.rs b/crates/source/script/builder.rs new file mode 100644 index 0000000..4b01824 --- /dev/null +++ b/crates/source/script/builder.rs @@ -0,0 +1,40 @@ +use tempfile::TempDir; + +use crate::loader::SourceLoader; +use crate::{Result, Source}; + +/// TODO. +pub struct SourceBuilder { + loader: L, + loading_metadata: bool, +} + +impl SourceBuilder { + /// Returns a new [`SourceBuilder`]. + #[inline] + pub fn new(loader: L) -> Self { + Self { + loader, + loading_metadata: true, + } + } + + /// TODO. + pub fn without_metadata(mut self) -> Self { + self.loading_metadata = false; + self + } +} + +impl SourceBuilder +where + L: SourceLoader, +{ + /// TODO. 
+ pub async fn build(self, temp_dir: TempDir) -> Result { + let output_dir = temp_dir.path(); + self.loader + .load_source_script(output_dir, self.loading_metadata) + .await + } +} diff --git a/crates/source/script/container.rs b/crates/source/script/container.rs new file mode 100644 index 0000000..2696717 --- /dev/null +++ b/crates/source/script/container.rs @@ -0,0 +1,54 @@ +use std::fmt; +use std::hash::Hash; +use std::path::PathBuf; + +use bytes::Bytes; +use tempfile::TempDir; + +use crate::utils::io::hash_directory_with_filter; +use crate::Result; + +/// Represents all downloaded and unarchived module files. +#[must_use = "metadata does nothing unless you use it"] +pub struct SourceContainer { + temp_module_dir: TempDir, + module_main_path: PathBuf, + metadata_path: Option, +} + +impl SourceContainer { + /// Returns a new [`SourceContainer`]. + pub(crate) fn new( + temp_module_dir: TempDir, + module_main_path: PathBuf, + metadata_path: Option, + ) -> Self { + Self { + temp_module_dir, + module_main_path, + metadata_path, + } + } + + pub(crate) fn sha256(&self) -> Result { + let module_dir_path = self.temp_module_dir.path(); + hash_directory_with_filter(module_dir_path, |path| { + path.extension() + .map_or(false, |ext| ext == "ts" || ext == "js") + }) + .map(From::from) + .map_err(From::from) + } + + /// Closes and removes the temporary directory. + pub(crate) fn close(self) -> Result<()> { + self.temp_module_dir.close()?; + Ok(()) + } +} + +impl fmt::Debug for SourceContainer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SourceContainer").finish_non_exhaustive() + } +} diff --git a/crates/source/script/metadata.rs b/crates/source/script/metadata.rs new file mode 100644 index 0000000..3fe6d48 --- /dev/null +++ b/crates/source/script/metadata.rs @@ -0,0 +1,54 @@ +use std::path::Path; + +use serde::{Deserialize, Serialize}; + +use crate::Result; + +/// TODO. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "metadata does nothing unless you use it"] +pub struct SourceMetadata { + pub name: String, + pub version: String, + + pub created_at: (), + pub accessed_at: (), +} + +impl SourceMetadata { + /// TODO. + pub fn from_dir(dir_path: impl AsRef) -> Result> { + Self::from_dir_impl(dir_path.as_ref()) + } + + /// TODO. + pub fn from_file(file_path: impl AsRef) -> Result> { + Self::from_file_impl(file_path.as_ref()) + } + + /// TODO. + fn from_dir_impl(dir_path: &Path) -> Result> { + todo!() + } + + /// TODO. + fn from_file_impl(file_path: &Path) -> Result> { + todo!() + } + + /// TODO. + fn from_json(buf: &[u8]) -> Result { + todo!() + } + + /// TODO. + fn into_json(self) -> Result> { + todo!() + } +} + +impl Default for SourceMetadata { + fn default() -> Self { + todo!() + } +} diff --git a/crates/source/script/mod.rs b/crates/source/script/mod.rs new file mode 100644 index 0000000..526643b --- /dev/null +++ b/crates/source/script/mod.rs @@ -0,0 +1,80 @@ +//! TODO. + +use tempfile::tempdir_in; + +use crate::loader::{SourceBuffer, SourceLoader}; +pub use crate::script::builder::SourceBuilder; +pub use crate::script::container::SourceContainer; +pub use crate::script::metadata::SourceMetadata; +use crate::Result; + +mod builder; +mod container; +mod metadata; + +/// TODO. +#[derive(Debug)] +#[must_use = "metadata does nothing unless you use it"] +pub struct Source { + pub(crate) source_container: SourceContainer, + pub(crate) source_metadata: Option, +} + +impl Source { + /// Returns a new [`Source`]. + #[inline] + pub fn new(container: SourceContainer, metadata: SourceMetadata) -> Self { + Self { + source_container: container, + source_metadata: Some(metadata), + } + } + + /// Loads a new [`Source`] using a [`GitSourceLoader`].
+ /// + /// [`GitSourceLoader`]: crate::loader::GitSourceLoader + #[cfg(feature = "source-git")] + #[cfg_attr(docsrs, doc(cfg(feature = "source-git")))] + pub async fn from_git() -> Result { + use crate::loader::GitSourceLoader; + let loader = GitSourceLoader::new(); + Self::from_loader(loader).await + } + + /// Loads a new [`Source`] using a [`TarSourceLoader`]. + /// + /// [`TarSourceLoader`]: crate::loader::TarSourceLoader + #[cfg(feature = "source-tar")] + #[cfg_attr(docsrs, doc(cfg(feature = "source-tar")))] + pub async fn from_tar(buf: SourceBuffer<'_>) -> Result { + use crate::loader::TarSourceLoader; + let loader = TarSourceLoader::new(buf); + Self::from_loader(loader).await + } + + /// Loads a new [`Source`] using a [`ZipSourceLoader`]. + /// + /// [`ZipSourceLoader`]: crate::loader::ZipSourceLoader + #[cfg(feature = "source-zip")] + #[cfg_attr(docsrs, doc(cfg(feature = "source-zip")))] + pub async fn from_zip(buf: SourceBuffer<'_>) -> Result { + use crate::loader::ZipSourceLoader; + let loader = ZipSourceLoader::new(buf); + Self::from_loader(loader).await + } + + /// Loads a new [`Source`] using a provided [`SourceLoader`]. + pub async fn from_loader(loader: L) -> Result + where + L: SourceLoader, + { + let temp_dir = tempdir_in("./")?; + SourceBuilder::new(loader).build(temp_dir).await + } + + /// Closes and removes the temporary directory. + pub fn close(self) -> Result<()> { + self.source_container.close()?; + Ok(()) + } +} diff --git a/crates/source/utils/hashing.rs b/crates/source/utils/hashing.rs new file mode 100644 index 0000000..d219f90 --- /dev/null +++ b/crates/source/utils/hashing.rs @@ -0,0 +1,134 @@ +//! A utility for computing the SHA-256 hash of a directory. +//! +//! This module recursively traverses a directory, calculates hashes of all files, +//! and combines them to generate a single hash for the directory structure and contents. 
+ +use std::fs::File; +use std::io::{self, Read}; +use std::path::Path; + +use sha2::{Digest, Sha256}; +use walkdir::WalkDir; + +/// Computes the `SHA-256` hash of a single file. +/// +/// # Arguments +/// * `path` - A reference to the path of the file to be hashed. +/// +/// # Returns +/// * `Ok(Vec)` - The computed hash as a byte vector if successful. +/// * `Err(io::Error)` - An error if the file could not be read. +pub fn hash_file(path: impl AsRef) -> io::Result> { + _hash_file_impl(path.as_ref()) +} + +/// Computes the `SHA-256` hash of a directory by traversing its contents. +/// +/// # Arguments +/// * `path` - A reference to the path of the directory to be hashed. +/// +/// # Returns +/// * `Ok(Vec)` - The computed hash as a byte vector if successful. +/// * `Err(io::Error)` - An error if a file cannot be read or traversal fails. +pub fn hash_directory(path: impl AsRef) -> io::Result> { + _hash_directory_impl(path.as_ref(), None) +} + +/// Computes the `SHA-256` hash of a directory with a custom file filter. +/// +/// # Arguments +/// * `path` - A reference to the path of the directory to be hashed. +/// * `filter` - A function that determines whether a file should be included. +/// +/// # Returns +/// * `Ok(Vec)` - The computed hash as a byte vector if successful. +/// * `Err(io::Error)` - An error if a file cannot be read or traversal fails. +pub fn hash_directory_with_filter(path: impl AsRef, filter: F) -> io::Result> +where + F: Fn(&Path) -> bool, +{ + _hash_directory_impl(path.as_ref(), Some(&filter)) +} + +/// Computes the `SHA-256` hash of a file. +fn _hash_file_impl(path: &Path) -> io::Result> { + let mut file = File::open(path)?; + let mut hasher = Sha256::new(); + let mut buffer = [0; 1024]; + + loop { + // Propagate I/O errors instead of silently hashing a truncated file. + let n = file.read(&mut buffer)?; + if n == 0 { break; } + hasher.update(&buffer[..n]); + } + + Ok(hasher.finalize().to_vec()) +} + +/// Computes the `SHA-256` hash of a directory with an optional filter.
+fn _hash_directory_impl( + path: &Path, + filter: Option<&dyn Fn(&Path) -> bool>, +) -> io::Result> { + let mut hasher = Sha256::new(); + + for entry in WalkDir::new(path).sort_by_file_name() { + let entry = entry?; + let path = entry.path(); + + if !path.is_file() { + continue; + } + + if let Some(filter_fn) = filter { + if !filter_fn(path) { + continue; + } + } + + hasher.update(path.as_os_str().as_encoded_bytes()); + let file_hash = _hash_file_impl(path)?; + hasher.update(&file_hash); + } + + Ok(hasher.finalize().to_vec()) +} + +// Tests for the hashing functions. +#[cfg(test)] +mod tests { + use std::fs::File; + use std::io::Write; + + use super::*; + + #[test] + fn test_hash_file() -> io::Result<()> { + let temp_dir = tempfile::tempdir()?; + let file_path = temp_dir.path().join("test.txt"); + let mut file = File::create(&file_path)?; + file.write_all(b"hello world")?; + + let hash = hash_file(&file_path)?; + assert_eq!( + hex::encode(hash), + "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + ); + + Ok(()) + } + + #[test] + fn test_hash_directory() -> io::Result<()> { + let temp_dir = tempfile::tempdir()?; + let file_path = temp_dir.path().join("test.txt"); + let mut file = File::create(&file_path)?; + file.write_all(b"hello world")?; + + let hash = hash_directory(temp_dir.path())?; + assert!(!hash.is_empty()); + + Ok(()) + } +} diff --git a/crates/source/utils/mod.rs b/crates/source/utils/mod.rs new file mode 100644 index 0000000..71aa581 --- /dev/null +++ b/crates/source/utils/mod.rs @@ -0,0 +1,13 @@ +//! Additional utilities. + +mod hashing; + +pub mod io { + //! `std::io` utilities. + + pub use crate::utils::hashing::{hash_directory, hash_directory_with_filter, hash_file}; +} + +pub mod fs { + //! `std::fs` utilities. 
+} From f903d406c4939bcdd13306dbfc82d03e0d610032 Mon Sep 17 00:00:00 2001 From: Oleh Martsokha Date: Sun, 2 Feb 2025 16:29:00 +0100 Subject: [PATCH 11/11] feat(all): restructure 1/n --- .github/workflows/build.yaml | 6 +- Cargo.toml | 33 +- Dockerfile | 52 ++- Makefile | 2 +- README.md | 8 - crates/cli/Cargo.toml | 33 +- crates/cli/README.md | 10 +- crates/cli/config/from_json.rs | 14 + crates/cli/config/from_toml.rs | 14 + crates/cli/config/mod.rs | 52 ++- crates/cli/main.rs | 26 +- crates/cli/middleware/mod.rs | 45 +- crates/cli/middleware/observability.rs | 40 -- crates/cli/server/config.rs | 34 ++ crates/cli/server/mod.rs | 66 +++ crates/cli/server/signal.rs | 71 +++ crates/client/Cargo.toml | 13 +- crates/client/README.md | 8 +- crates/client/config/custom_hooks.rs | 8 +- crates/client/config/mod.rs | 86 ++-- crates/client/config/pool_config.rs | 4 +- crates/client/lib.rs | 64 +-- crates/client/manager/client.rs | 65 +++ .../manager/{manager_config.rs => config.rs} | 10 +- crates/client/manager/instance_client.rs | 89 ---- crates/client/manager/mod.rs | 189 ++++---- crates/client/manager/runtime_endpoint.rs | 110 ----- crates/client/middleware/future.rs | 20 +- crates/client/middleware/mod.rs | 37 +- crates/jsvm/Cargo.toml | 35 -- crates/jsvm/README.md | 3 - crates/jsvm/extension/custom_serde.rs | 62 --- crates/jsvm/extension/mod.rs | 7 - crates/jsvm/extension/route/mod.rs | 30 -- crates/jsvm/extension/route/ops.rs | 18 - crates/jsvm/extension/trace/datatype.rs | 24 -- crates/jsvm/extension/trace/internal.rs | 24 -- crates/jsvm/extension/trace/mod.rs | 39 -- crates/jsvm/extension/trace/ops.js | 17 - crates/jsvm/extension/trace/ops.rs | 71 --- crates/jsvm/lib.rs | 24 +- crates/jsvm/runtime/cache/mod.rs | 2 - crates/jsvm/runtime/cert_provider.rs | 23 - crates/jsvm/runtime/deno_runtime.rs | 175 -------- crates/jsvm/runtime/filesystem/compile_fs.rs | 270 ------------ crates/jsvm/runtime/filesystem/mod.rs | 9 - crates/jsvm/runtime/filesystem/static_fs.rs | 240 
----------- crates/jsvm/runtime/filesystem/virtual_fs.rs | 330 -------------- crates/jsvm/runtime/mod.rs | 46 -- crates/jsvm/runtime/module_loader.rs | 34 -- crates/jsvm/runtime/permissions.rs | 135 ------ crates/jsvm/runtime/transpile/disk_cache.rs | 134 ------ crates/jsvm/runtime/transpile/emit_cache.rs | 104 ----- crates/jsvm/runtime/transpile/mod.rs | 15 - crates/jsvm/runtime/util/mod.rs | 2 - .../{source/utils => jsvm/utility}/hashing.rs | 92 +++- crates/{source/utils => jsvm/utility}/mod.rs | 2 +- crates/schema/Cargo.toml | 10 +- crates/schema/README.md | 2 +- crates/schema/build.rs | 2 + crates/schema/lib.rs | 66 +-- crates/schema/protobuf/instance.proto | 88 ++-- crates/schema/protobuf/internal/json.proto | 38 -- crates/schema/protobuf/message/entity.proto | 51 --- crates/schema/protobuf/message/event.proto | 103 +++++ crates/schema/protobuf/message/graph.proto | 41 ++ crates/schema/protobuf/message/request.proto | 66 --- crates/schema/protobuf/message/response.proto | 129 ------ crates/schema/protobuf/message/status.proto | 55 +++ crates/schema/protobuf/policy/resource.proto | 53 +++ crates/schema/protobuf/policy/retry.proto | 27 +- crates/schema/protobuf/policy/timeout.proto | 11 +- crates/schema/protobuf/registry.proto | 73 +++- crates/server/Cargo.toml | 40 +- crates/server/handler/instance.rs | 117 +---- crates/server/handler/mod.rs | 5 +- crates/server/handler/registry.rs | 11 +- crates/server/lib.rs | 1 + crates/server/middleware/mod.rs | 2 +- crates/server/routing/context.rs | 405 ++++++++++++++++++ crates/server/routing/handler.rs | 371 ++++++++++++++++ crates/{task => server}/routing/manifest.rs | 61 --- crates/{task => server}/routing/mod.rs | 177 +++++--- crates/server/routing/route.rs | 238 ++++++++++ crates/server/service/app_config.rs | 56 --- crates/server/service/config.rs | 42 ++ .../utility.rs => server/service/graph.rs} | 0 crates/server/service/mod.rs | 74 +++- .../internal.rs => server/service/source.rs} | 0 
crates/server/service/task_metrics.rs | 1 - crates/server/service/task_queue.rs | 171 -------- crates/source/Cargo.toml | 49 --- crates/source/README.md | 20 - crates/source/lib.rs | 33 -- crates/source/loader/from_git.rs | 43 -- crates/source/loader/from_tar.rs | 52 --- crates/source/loader/from_zip.rs | 46 -- crates/source/loader/mod.rs | 119 ----- crates/source/script/builder.rs | 40 -- crates/source/script/container.rs | 54 --- crates/source/script/metadata.rs | 54 --- crates/source/script/mod.rs | 80 ---- crates/task/Cargo.toml | 46 -- crates/task/README.md | 20 - crates/task/context/error.rs | 91 ---- crates/task/context/layers.rs | 86 ---- crates/task/context/mod.rs | 21 - crates/task/context/policies.rs | 130 ------ crates/task/context/request.rs | 163 ------- crates/task/context/response.rs | 118 ----- crates/task/handler/compose.rs | 174 -------- crates/task/handler/future.rs | 84 ---- crates/task/handler/metric.rs | 74 ---- crates/task/handler/mod.rs | 211 --------- crates/task/handler/retry.rs | 72 ---- crates/task/lib.rs | 76 ---- crates/task/routing/index.rs | 55 --- crates/task/routing/route.rs | 115 ----- .../route/ops.js => docs/INSTALLATION.md | 0 docs/REQUIREMENTS.md | 0 modules/assert/README.md | 8 + modules/assert/deno.jsonc | 22 +- modules/runtime/README.md | 8 + modules/runtime/deno.jsonc | 26 +- modules/runtime/lifecycle.ts | 0 modules/runtime/lifecycle_test.ts | 0 modules/testing/README.md | 8 + modules/testing/deno.jsonc | 20 +- 128 files changed, 2467 insertions(+), 5514 deletions(-) create mode 100644 crates/cli/config/from_json.rs create mode 100644 crates/cli/config/from_toml.rs delete mode 100644 crates/cli/middleware/observability.rs create mode 100644 crates/cli/server/config.rs create mode 100644 crates/cli/server/signal.rs create mode 100644 crates/client/manager/client.rs rename crates/client/manager/{manager_config.rs => config.rs} (88%) delete mode 100644 crates/client/manager/instance_client.rs delete mode 100644 
crates/client/manager/runtime_endpoint.rs delete mode 100644 crates/jsvm/extension/custom_serde.rs delete mode 100644 crates/jsvm/extension/route/mod.rs delete mode 100644 crates/jsvm/extension/route/ops.rs delete mode 100644 crates/jsvm/extension/trace/datatype.rs delete mode 100644 crates/jsvm/extension/trace/internal.rs delete mode 100644 crates/jsvm/extension/trace/mod.rs delete mode 100644 crates/jsvm/extension/trace/ops.js delete mode 100644 crates/jsvm/extension/trace/ops.rs delete mode 100644 crates/jsvm/runtime/cache/mod.rs delete mode 100644 crates/jsvm/runtime/cert_provider.rs delete mode 100644 crates/jsvm/runtime/deno_runtime.rs delete mode 100644 crates/jsvm/runtime/filesystem/compile_fs.rs delete mode 100644 crates/jsvm/runtime/filesystem/mod.rs delete mode 100644 crates/jsvm/runtime/filesystem/static_fs.rs delete mode 100644 crates/jsvm/runtime/filesystem/virtual_fs.rs delete mode 100644 crates/jsvm/runtime/module_loader.rs delete mode 100644 crates/jsvm/runtime/permissions.rs delete mode 100644 crates/jsvm/runtime/transpile/disk_cache.rs delete mode 100644 crates/jsvm/runtime/transpile/emit_cache.rs delete mode 100644 crates/jsvm/runtime/transpile/mod.rs delete mode 100644 crates/jsvm/runtime/util/mod.rs rename crates/{source/utils => jsvm/utility}/hashing.rs (56%) rename crates/{source/utils => jsvm/utility}/mod.rs (77%) delete mode 100644 crates/schema/protobuf/internal/json.proto delete mode 100644 crates/schema/protobuf/message/entity.proto create mode 100644 crates/schema/protobuf/message/event.proto create mode 100644 crates/schema/protobuf/message/graph.proto delete mode 100644 crates/schema/protobuf/message/request.proto delete mode 100644 crates/schema/protobuf/message/response.proto create mode 100644 crates/schema/protobuf/message/status.proto create mode 100644 crates/schema/protobuf/policy/resource.proto create mode 100644 crates/server/routing/context.rs create mode 100644 crates/server/routing/handler.rs rename crates/{task => 
server}/routing/manifest.rs (59%) rename crates/{task => server}/routing/mod.rs (72%) create mode 100644 crates/server/routing/route.rs delete mode 100644 crates/server/service/app_config.rs create mode 100644 crates/server/service/config.rs rename crates/{cli/middleware/utility.rs => server/service/graph.rs} (100%) rename crates/{jsvm/extension/route/internal.rs => server/service/source.rs} (100%) delete mode 100644 crates/server/service/task_metrics.rs delete mode 100644 crates/server/service/task_queue.rs delete mode 100644 crates/source/Cargo.toml delete mode 100644 crates/source/README.md delete mode 100644 crates/source/lib.rs delete mode 100644 crates/source/loader/from_git.rs delete mode 100644 crates/source/loader/from_tar.rs delete mode 100644 crates/source/loader/from_zip.rs delete mode 100644 crates/source/loader/mod.rs delete mode 100644 crates/source/script/builder.rs delete mode 100644 crates/source/script/container.rs delete mode 100644 crates/source/script/metadata.rs delete mode 100644 crates/source/script/mod.rs delete mode 100644 crates/task/Cargo.toml delete mode 100644 crates/task/README.md delete mode 100644 crates/task/context/error.rs delete mode 100644 crates/task/context/layers.rs delete mode 100644 crates/task/context/mod.rs delete mode 100644 crates/task/context/policies.rs delete mode 100644 crates/task/context/request.rs delete mode 100644 crates/task/context/response.rs delete mode 100644 crates/task/handler/compose.rs delete mode 100644 crates/task/handler/future.rs delete mode 100644 crates/task/handler/metric.rs delete mode 100644 crates/task/handler/mod.rs delete mode 100644 crates/task/handler/retry.rs delete mode 100644 crates/task/lib.rs delete mode 100644 crates/task/routing/index.rs delete mode 100644 crates/task/routing/route.rs rename crates/jsvm/extension/route/ops.js => docs/INSTALLATION.md (100%) create mode 100644 docs/REQUIREMENTS.md create mode 100644 modules/runtime/lifecycle.ts create mode 100644 
modules/runtime/lifecycle_test.ts diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8cf358c..9aafe34 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -3,10 +3,10 @@ name: ci & cd on: push: branches: - - "main" # Trigger on main branch. + - "main" tags: - - "v*.*.*" # Trigger on semantic version tags. - pull_request: # Validation only (without pushing). + - "v*.*.*" + pull_request: jobs: build: diff --git a/Cargo.toml b/Cargo.toml index 5d29424..746dbcb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,16 +4,16 @@ resolver = "2" members = [ "./crates/cli", + "./crates/client", "./crates/jsvm", "./crates/schema", "./crates/server", - "./crates/task", ] [workspace.package] version = "0.1.0" edition = "2021" -license = "Axiston License 1.0" +license = "Apache-2.0" publish = true authors = ["Axiston "] @@ -25,24 +25,36 @@ documentation = "https://docs.rs/axiston" axiston-rt-jsvm = { path = "./crates/jsvm", version = "0.1.0" } axiston-rt-schema = { path = "./crates/schema", version = "0.1.0" } axiston-rt-server = { path = "./crates/server", version = "0.1.0" } -axiston-rt-task = { path = "./crates/task", version = "0.1.0" } -tokio = { version = "1.36", features = ["macros", "rt-multi-thread"] } +clap = { version = "4.5", features = ["derive"] } +tokio = { version = "1.43", features = ["macros", "rt-multi-thread", "signal"] } +deadpool = { version = "0.12", features = ["managed", "rt_tokio_1"] } tokio-stream = { version = "0.1", features = [] } -pin-project-lite = { version = "0.2", features = [] } futures = { version = "0.3", features = [] } +http = { version = "1.2", features = [] } +pin-project-lite = { version = "0.2", features = [] } +tracing = { version = "0.1", features = [] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tower = { version = "0.5", features = ["full"] } +tower-http = { version = "0.6", features = ["full"] } thiserror = { version = "2.0", features = [] } anyhow = { 
version = "1.0", features = ["backtrace"] } +derive_more = { version = "2.0", features = ["full"] } serde = { version = "1.0", features = ["derive"] } +serde_with = { version = "3.12", features = [] } +serde_toml = { package = "toml", version = "0.8", features = [] } serde_json = { version = "1.0", features = [] } -jsonschema = { version = "0.26", features = [] } -tracing = { version = "0.1", features = [] } -derive_more = { version = "1.0", features = ["full"] } ecow = { version = "0.2", features = ["serde"] } time = { version = "0.3", features = ["serde"] } -uuid = { version = "1.11", features = ["serde", "v4", "v7"] } +uuid = { version = "1.12", features = ["serde", "v4", "v7"] } +bytes = { version = "1.9", features = ["serde"] } +hashbrown = { version = "0.15", features = ["serde"] } +petgraph = { version = "0.7", features = [] } +cron = { version = "0.15", features = ["serde"] } +semver = { version = "1.0", features = ["serde"] } +jsonschema = { version = "0.28", features = [] } tonic = { version = "0.12", features = [] } prost = { version = "0.13", features = [] } @@ -50,6 +62,3 @@ tonic-types = { version = "0.12", features = [] } prost-types = { version = "0.13", features = [] } tonic-build = { version = "0.12", features = [] } prost-build = { version = "0.13", features = [] } - -tower = { version = "0.5", features = ["full"] } -tower-http = { version = "0.6", features = ["full"] } diff --git a/Dockerfile b/Dockerfile index 417d8ce..c7b1d2b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,47 @@ -# Compile & build the application. -FROM rust:latest AS build -WORKDIR /usr/src/runtime/ +# Stage 1: Build. +FROM rust:1.84 AS build +WORKDIR /usr/src/app/ -# Configurate & run the application. -FROM debian:buster-slim AS run -WORKDIR /usr/bin/runtime/ +RUN apt-get update +RUN apt-get install -y curl build-essential + +# Cache dependencies by copying only Cargo.* files. +COPY Cargo.toml Cargo.lock? 
./ +COPY crates/cli/Cargo.toml crates/cli/ +COPY crates/jsvm/Cargo.toml crates/jsvm/ +COPY crates/schema/Cargo.toml crates/schema/ +COPY crates/client/Cargo.toml crates/client/ +COPY crates/server/Cargo.toml crates/server/ + +# Create a dummy files to allow dependency resolution. +RUN mkdir -p crates/cli crates/jsvm crates/schema +RUN mkdir -p crates/client crates/server +RUN echo "fn main() {}" > crates/cli/main.rs +RUN echo "" > crates/cli/lib.rs +RUN echo "" > crates/jsvm/lib.rs +RUN echo "" > crates/schema/lib.rs +RUN echo "" > crates/client/lib.rs +RUN echo "" > crates/server/lib.rs + +# Pre-build dependencies to cache them. +RUN cargo build --release --workspace + +# Copy the source code and build the final binaries. +COPY crates ./crates +RUN cargo build --release --workspace + +# Stage 2: Runtime. +FROM debian:bookworm-slim AS runtime +WORKDIR /usr/src/bin/ + +# Copy the built binary from the previous stage. +COPY --from=build /usr/src/app/target/release/cli /usr/src/bin/cli + +# Ensure the binary is an executable. +RUN chmod +x /usr/src/bin/cli + +# Expose the port the server runs on. +EXPOSE 8080 + +# Set the default command to run the server. +CMD ["/usr/src/bin/cli", "--port", "8080"] diff --git a/Makefile b/Makefile index b8e0a56..019e95c 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# Makefile for client & server GRPC Generation +# Makefile for client & server GRPC Generation. 
# https://github.com/hyperium/tonic # Environment Variables diff --git a/README.md b/README.md index cedab91..0735eea 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,3 @@ A server application based on `Deno` runtime, capable of running `JavaScript`, ```cmd runtime --port 8080 ``` - -#### Nodes - -- May be of following types: trigger (normal or reaction), action - -#### Edges - -- May attach transformations diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index d1fd2be..1074484 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -13,24 +13,35 @@ repository = { workspace = true } homepage = { workspace = true } documentation = { workspace = true } +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + [[bin]] name = "axiston" path = "main.rs" +[features] +default = [] + +# - Enables the global tracer provider. +support-otel = [] + [dependencies] -axiston-rt-server = { workspace = true } +axiston-rt-schema = { workspace = true, features = ["server"] } +axiston-rt-server = { workspace = true, features = [] } -clap = { version = "4.5", features = ["derive"] } +clap = { workspace = true } tokio = { workspace = true } -tokio-stream = { workspace = true } -futures = { workspace = true } -anyhow = { workspace = true } - tracing = { workspace = true } -tracing-subscriber = { version = "0.3", features = ["env-filter", "time"] } -tracing-opentelemetry = { version = "0.26", features = [] } -opentelemetry = { version = "0.25", features = [] } +tracing-subscriber = { workspace = true } +anyhow = { workspace = true } tonic = { workspace = true } -tower = { version = "0.4", features = ["full"] } -tower-http = { version = "0.5", features = ["full"] } +prost = { workspace = true } +tonic-types = { workspace = true } +prost-types = { workspace = true } + +serde = { workspace = true } +serde_toml = { workspace = true } +serde_json = { workspace = true } diff --git a/crates/cli/README.md b/crates/cli/README.md index 
fac5fbb..da1673b 100644 --- a/crates/cli/README.md +++ b/crates/cli/README.md @@ -1,4 +1,4 @@ -### runtime/cli +### runtime/client [![Build Status][action-badge]][action-url] [![Crate Docs][docs-badge]][docs-url] @@ -6,10 +6,10 @@ [action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square [action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml -[crates-badge]: https://img.shields.io/crates/v/axiston-rt-cli.svg?logo=rust&style=flat-square -[crates-url]: https://crates.io/crates/axiston-rt-cli -[docs-badge]: https://img.shields.io/docsrs/axiston-rt-cli?logo=Docs.rs&style=flat-square -[docs-url]: http://docs.rs/axiston-rt-cli +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-client.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-client +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-client?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-client Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. diff --git a/crates/cli/config/from_json.rs b/crates/cli/config/from_json.rs new file mode 100644 index 0000000..4203f50 --- /dev/null +++ b/crates/cli/config/from_json.rs @@ -0,0 +1,14 @@ +use std::path::Path; + +use crate::config::Args; + +/// - Reads the entire contents of a file and deserializes an instance of [`Args`]. +/// +/// # Errors +/// +/// - See [std::fs::read] and [`serde_json::from_slice`] documentation for details. 
+pub fn load_json(path: impl AsRef) -> anyhow::Result { + let file_content = std::fs::read(path)?; + let parsed_args = serde_json::from_slice(&file_content)?; + Ok(parsed_args) +} diff --git a/crates/cli/config/from_toml.rs b/crates/cli/config/from_toml.rs new file mode 100644 index 0000000..5ee27f1 --- /dev/null +++ b/crates/cli/config/from_toml.rs @@ -0,0 +1,14 @@ +use std::path::Path; + +use crate::config::Args; + +/// - Reads the entire contents of a file and deserializes an instance of [`Args`]. +/// +/// # Errors +/// +/// - See [std::fs::read_to_string] and [`serde_toml::from_str`] documentation for details. +pub fn load_toml(path: impl AsRef) -> anyhow::Result { + let file_content = std::fs::read_to_string(path)?; + let parsed_args = serde_toml::from_str(&file_content)?; + Ok(parsed_args) +} diff --git a/crates/cli/config/mod.rs b/crates/cli/config/mod.rs index df4e4e4..e7e9e84 100644 --- a/crates/cli/config/mod.rs +++ b/crates/cli/config/mod.rs @@ -1,17 +1,53 @@ +//! Loads and parses configuration files. + +mod from_json; +mod from_toml; + +use std::ffi::OsStr; +use std::path::PathBuf; + +use axiston_rt_server::service::ServiceConfig; use clap::Parser; +use serde::{Deserialize, Serialize}; + +use crate::config::from_json::load_json; +use crate::config::from_toml::load_toml; +use crate::server::ServerConfig; /// Command-line arguments. -#[derive(Debug, Parser)] +#[derive(Debug, Clone, Default, Serialize, Deserialize, Parser)] +#[must_use = "config does nothing unless you use it"] pub struct Args { - /// Bound server port. - #[arg(short, long, default_value_t = 3000)] - pub port: u16, + #[command(flatten)] + pub server: ServerConfig, + #[command(flatten)] + pub service: ServiceConfig, +} + +/// Commands for the CLI. +#[derive(Debug, Clone, Parser)] +#[must_use = "configs do nothing unless you use them"] +pub struct Cli { + /// Provide configuration via command-line flags. 
+ #[command(flatten)] + pub args: Args, + + /// Provide configuration via a configuration file. + #[arg(short, long, value_name = "FILE")] + pub config: Option, } impl Args { - /// Returns a new [`Args`]. - #[inline] - pub fn new() -> Self { - Self::parse() + /// Parses the provided configuration via command-line flags or a configuration file. + pub fn try_parse_with_files() -> anyhow::Result { + let cli = Cli::parse(); + match cli.config { + None => Ok(cli.args), + Some(path) => match path.extension() { + Some(ext) if OsStr::new("toml") == ext => load_toml(path), + Some(ext) if OsStr::new("json") == ext => load_json(path), + _ => Err(anyhow::anyhow!("should specify a supported file extension")), + }, + } } } diff --git a/crates/cli/main.rs b/crates/cli/main.rs index 79f37d1..4a3e96f 100644 --- a/crates/cli/main.rs +++ b/crates/cli/main.rs @@ -1,12 +1,11 @@ #![forbid(unsafe_code)] -use std::net::{Ipv4Addr, SocketAddr}; - use axiston_rt_server::handler::{InstanceService, RegistryService}; -use axiston_rt_server::service::{AppConfig, AppState}; -use tonic::transport::Server; +use axiston_rt_server::service::{RouterExt, ServiceState}; use crate::config::Args; +use crate::middleware::initialize_tracing; +use crate::server::run_supported_server; mod config; mod middleware; @@ -14,12 +13,11 @@ mod server; #[tokio::main] async fn main() -> anyhow::Result<()> { - let args = Args::new(); - middleware::initialize_tracing().await?; + let args = Args::try_parse_with_files()?; + initialize_tracing().await?; // Service. - let config = AppConfig::builder().build(); - let state = AppState::new(config); + let state = ServiceState::new(args.service); let instance = InstanceService::new(state.clone()); let instance = instance.into_server(); @@ -28,17 +26,7 @@ async fn main() -> anyhow::Result<()> { let registry = registry.into_server(); // Listen. 
- let server_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, args.port)); - tracing::debug!( - target: "server:setup", port = args.port, - "server is listening on {}", server_addr, - ); - - Server::builder() - .add_service(instance) - .add_service(registry) - .serve(server_addr) - .await?; + run_supported_server(args.server, instance, registry).await?; Ok(()) } diff --git a/crates/cli/middleware/mod.rs b/crates/cli/middleware/mod.rs index c665ba1..84c34a1 100644 --- a/crates/cli/middleware/mod.rs +++ b/crates/cli/middleware/mod.rs @@ -1,13 +1,40 @@ -//! TODO. -//! +#[must_use] +fn build_env_filter() -> tracing_subscriber::EnvFilter { + let current = std::env::var("RUST_LOG") + .or_else(|_| std::env::var("OTEL_LOG_LEVEL")) + .unwrap_or_else(|_| "info".to_string()); -use tower::ServiceBuilder; + let env = format!("{},server=trace,otel=debug,tower_http=debug", current); + std::env::set_var("RUST_LOG", env); + tracing_subscriber::EnvFilter::from_default_env() +} -pub use crate::middleware::observability::initialize_tracing; -mod observability; -mod utility; +pub async fn initialize_tracing() -> anyhow::Result<()> { + use tracing_subscriber::fmt::layer; + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; -/// Extension trait for `tower::`[`ServiceBuilder`] for layering middleware. -pub trait ServiceBuilderExt {} + // Setups a temporary subscriber to log output during setup. + let env_filter = build_env_filter(); + let fmt_layer = layer().pretty(); + let subscriber = tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer); -impl ServiceBuilderExt for ServiceBuilder {} + let _guard = tracing::subscriber::set_default(subscriber); + tracing::trace!(target: "server:otel", "initialized temporary subscriber"); + + // TODO: Enable OpenTelemetry. + // https://github.com/davidB/tracing-opentelemetry-instrumentation-sdk + + // Setups an actual subscriber. 
+ let env_filter = build_env_filter(); + let fmt_layer = layer().pretty(); + tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer) + .init(); + + tracing::trace!(target: "server:otel", "initialized subscriber"); + Ok(()) +} diff --git a/crates/cli/middleware/observability.rs b/crates/cli/middleware/observability.rs deleted file mode 100644 index 84c34a1..0000000 --- a/crates/cli/middleware/observability.rs +++ /dev/null @@ -1,40 +0,0 @@ -#[must_use] -fn build_env_filter() -> tracing_subscriber::EnvFilter { - let current = std::env::var("RUST_LOG") - .or_else(|_| std::env::var("OTEL_LOG_LEVEL")) - .unwrap_or_else(|_| "info".to_string()); - - let env = format!("{},server=trace,otel=debug,tower_http=debug", current); - std::env::set_var("RUST_LOG", env); - tracing_subscriber::EnvFilter::from_default_env() -} - -pub async fn initialize_tracing() -> anyhow::Result<()> { - use tracing_subscriber::fmt::layer; - use tracing_subscriber::layer::SubscriberExt; - use tracing_subscriber::util::SubscriberInitExt; - - // Setups a temporary subscriber to log output during setup. - let env_filter = build_env_filter(); - let fmt_layer = layer().pretty(); - let subscriber = tracing_subscriber::registry() - .with(env_filter) - .with(fmt_layer); - - let _guard = tracing::subscriber::set_default(subscriber); - tracing::trace!(target: "server:otel", "initialized temporary subscriber"); - - // TODO: Enable OpenTelemetry. - // https://github.com/davidB/tracing-opentelemetry-instrumentation-sdk - - // Setups an actual subscriber. 
- let env_filter = build_env_filter(); - let fmt_layer = layer().pretty(); - tracing_subscriber::registry() - .with(env_filter) - .with(fmt_layer) - .init(); - - tracing::trace!(target: "server:otel", "initialized subscriber"); - Ok(()) -} diff --git a/crates/cli/server/config.rs b/crates/cli/server/config.rs new file mode 100644 index 0000000..2e00deb --- /dev/null +++ b/crates/cli/server/config.rs @@ -0,0 +1,34 @@ +use clap::Args; +use serde::{Deserialize, Serialize}; + +/// App [`server`] configuration. +/// +/// [`server`]: crate::server +#[derive(Debug, Clone, Serialize, Deserialize, Args)] +#[must_use = "config does nothing unless you use it"] +pub struct ServerConfig { + /// Port exposed by the server. + #[arg(short, long, default_value_t = 3000)] + pub port: u16, + + /// Server shutdown timeout (in seconds). + #[arg(short, long, default_value_t = 8)] + pub shutdown_timeout: u64, +} + +impl ServerConfig { + /// Returns a new [`ServerConfig`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +impl Default for ServerConfig { + fn default() -> Self { + Self { + port: 3000, + shutdown_timeout: 8, + } + } +} diff --git a/crates/cli/server/mod.rs b/crates/cli/server/mod.rs index 8b13789..463b66c 100644 --- a/crates/cli/server/mod.rs +++ b/crates/cli/server/mod.rs @@ -1 +1,67 @@ +//! Contains a GRPC server and its utilities. +mod config; +mod signal; + +use std::net::{Ipv4Addr, SocketAddr}; +use std::time::Duration; + +use axiston_rt_schema::instance::instance_server::{Instance, InstanceServer}; +use axiston_rt_schema::registry::registry_server::{Registry, RegistryServer}; +use tonic::transport::Server; + +pub use crate::server::config::ServerConfig; +use crate::server::signal::shutdown_signal; + +/// Runs the supported server. 
+pub async fn run_supported_server( + server_config: ServerConfig, + instance_server: InstanceServer, + registry_server: RegistryServer, +) -> anyhow::Result<()> +where + T1: Instance, + T2: Registry, +{ + let shutdown_timeout = Duration::from_secs(server_config.shutdown_timeout); + let fut = shutdown_signal(shutdown_timeout); + + let server_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, server_config.port)); + + tracing::debug!( + target: "server:setup", port = server_config.port, + "runtime server is listening on {}", server_addr, + ); + + Server::builder() + .add_service(instance_server) + .add_service(registry_server) + .serve_with_shutdown(server_addr, fut) + .await?; + + Ok(()) +} + +#[cfg(test)] +mod test { + use axiston_rt_server::handler::{InstanceService, RegistryService, Result}; + use axiston_rt_server::service::ServiceState; + + use crate::config::Args; + use crate::server::run_supported_server; + + #[test] + fn run_server() -> Result<()> { + let args = Args::default(); + let state = ServiceState::new(args.service); + + let instance = InstanceService::new(state.clone()); + let instance = instance.into_server(); + + let registry = RegistryService::new(state); + let registry = registry.into_server(); + + let _ = run_supported_server(args.server, instance, registry); + Ok(()) + } +} diff --git a/crates/cli/server/signal.rs b/crates/cli/server/signal.rs new file mode 100644 index 0000000..b03ecdf --- /dev/null +++ b/crates/cli/server/signal.rs @@ -0,0 +1,71 @@ +use std::time::{Duration, Instant}; + +use tokio::signal::ctrl_c; +#[cfg(unix)] +use tokio::signal::unix; + +/// Completes once the terminate signal is received. +/// +/// See [`ctrl_c`] and [`unix::SignalKind::terminate`]. 
+pub async fn shutdown_signal(timeout: Duration) { + let ctrl_c = async { + ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + unix::signal(unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {}, + } + + let t0 = Instant::now(); + + tracing::warn!( + target: "server:otel", timeout = timeout.as_millis(), + "global tracer provider is closing" + ); + + #[cfg(feature = "support-otel")] + let (tx, rx) = std::sync::mpsc::channel(); + #[cfg(feature = "support-otel")] + let _ = std::thread::spawn(move || { + // TODO: Setup opentelemetry. + // opentelemetry::global::shutdown_tracer_provider(); + tx.send(()).ok() + }); + + #[cfg(feature = "support-otel")] + if rx.recv_timeout(timeout).is_err() { + tracing::error!(target: "server:otel", timeout = timeout.as_millis(), + "global tracer provider failed to close" + ); + } + + let t1 = Instant::now().duration_since(t0); + tracing::warn!( + target: "server", timeout = timeout.as_millis(), + waiting = t1.as_millis(), "server is terminating" + ); +} + +#[cfg(test)] +mod test { + use std::time::Duration; + + use crate::server::signal::shutdown_signal; + + #[test] + fn create_shutdown_signal() { + let _ = shutdown_signal(Duration::default()); + } +} diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 9530f29..2075363 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -22,17 +22,20 @@ path = "lib.rs" [dependencies] axiston-rt-schema = { workspace = true, features = ["client"] } -deadpool = { version = "0.12", features = ["managed", "rt_tokio_1"] } tokio = { workspace = true } -http = { version = "1.2", features = [] } -tracing = { workspace = true } +deadpool = { workspace = true } thiserror = { workspace = true } +tracing = { workspace = true } + tonic = { 
workspace = true } prost = { workspace = true } -tower = { workspace = true } -tower-http = { workspace = true } +tonic-types = { workspace = true } +prost-types = { workspace = true } +tower = { workspace = true } +http = { workspace = true } derive_more = { workspace = true } serde = { workspace = true } uuid = { workspace = true } +time = { workspace = true } diff --git a/crates/client/README.md b/crates/client/README.md index f819023..da1673b 100644 --- a/crates/client/README.md +++ b/crates/client/README.md @@ -6,10 +6,10 @@ [action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square [action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml -[crates-badge]: https://img.shields.io/crates/v/axiston-rt-schema.svg?logo=rust&style=flat-square -[crates-url]: https://crates.io/crates/axiston-rt-schema -[docs-badge]: https://img.shields.io/docsrs/axiston-rt-schema?logo=Docs.rs&style=flat-square -[docs-url]: http://docs.rs/axiston-rt-schema +[crates-badge]: https://img.shields.io/crates/v/axiston-rt-client.svg?logo=rust&style=flat-square +[crates-url]: https://crates.io/crates/axiston-rt-client +[docs-badge]: https://img.shields.io/docsrs/axiston-rt-client?logo=Docs.rs&style=flat-square +[docs-url]: http://docs.rs/axiston-rt-client Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. diff --git a/crates/client/config/custom_hooks.rs b/crates/client/config/custom_hooks.rs index 157a4b9..c3b1c97 100644 --- a/crates/client/config/custom_hooks.rs +++ b/crates/client/config/custom_hooks.rs @@ -1,13 +1,13 @@ use deadpool::managed::{HookResult, Metrics}; -use crate::manager::{RuntimeClient, RuntimeError}; +use crate::manager::{RuntimeClient, RuntimePoolError}; /// Custom hook called after a new connection has been established. /// /// See [`PoolBuilder`] for more details. 
/// /// [`PoolBuilder`]: deadpool::managed::PoolBuilder -pub fn post_create(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { +pub fn post_create(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { tracing::trace!(target: "runtime", "post_create"); // Note: should never return an error. @@ -19,7 +19,7 @@ pub fn post_create(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult< /// See [`PoolBuilder`] for more details. /// /// [`PoolBuilder`]: deadpool::managed::PoolBuilder -pub fn pre_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { +pub fn pre_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { tracing::trace!(target: "runtime", "pre_recycle"); // Note: should never return an error. @@ -31,7 +31,7 @@ pub fn pre_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult< /// See [`PoolBuilder`] for more details. /// /// [`PoolBuilder`]: deadpool::managed::PoolBuilder -pub fn post_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { +pub fn post_recycle(_conn: &mut RuntimeClient, _metrics: &Metrics) -> HookResult { tracing::trace!(target: "runtime", "post_recycle"); // Note: should never return an error. diff --git a/crates/client/config/mod.rs b/crates/client/config/mod.rs index b29752c..7868fa8 100644 --- a/crates/client/config/mod.rs +++ b/crates/client/config/mod.rs @@ -1,13 +1,12 @@ use std::fmt; use deadpool::managed::{Hook, Object, Pool}; -use derive_more::{Deref, DerefMut, From}; -use uuid::Uuid; +use tonic::transport::Endpoint; use crate::config::custom_hooks::{post_create, post_recycle, pre_recycle}; pub use crate::config::pool_config::RuntimeConfig; -use crate::manager::{RuntimeEndpoint, RuntimeManager, RuntimeManagerConfig}; -use crate::Result; +use crate::manager::{RuntimeManager, RuntimeManagerConfig}; +use crate::RuntimeResult; mod custom_hooks; mod pool_config; @@ -20,39 +19,15 @@ mod pool_config; /// - Uses [`RuntimeConfig`] for configuration. 
#[derive(Clone)] pub struct Runtime { - inner: Pool, -} - -/// `RuntimeConnection` wrapper. -/// -/// Hides connection pool manager types. -#[derive(Debug, From, Deref, DerefMut)] -pub struct RuntimeObject { - inner_object: Object, -} - -impl RuntimeObject { - /// Removes this runtime endpoint from the pool. - pub async fn unregister_self(&self) -> Result<()> { - let Some(runtime_pool) = Object::pool(&self.inner_object) else { - return Ok(()); - }; - - let runtime_manager = runtime_pool.manager(); - runtime_manager - .unregister_endpoint(self.as_endpoint_id()) - .await?; - - Ok(()) - } + conn: Pool, } impl Runtime { /// Returns a new [`Runtime`]. - pub fn new(config: RuntimeConfig) -> Self { - let manager_config = - RuntimeManagerConfig::new().with_recycling_method(config.recycling_method); - let manager = RuntimeManager::new(manager_config); + pub fn new(endpoints: impl Iterator, config: RuntimeConfig) -> Self { + let manager_config = RuntimeManagerConfig::new().recycling_method(config.recycling_method); + + let manager = RuntimeManager::new(endpoints, manager_config); let pool = Pool::builder(manager) .max_size(config.max_conn.unwrap_or(64)) .create_timeout(config.create_timeout) @@ -64,52 +39,45 @@ impl Runtime { .runtime(deadpool::Runtime::Tokio1); let pool = pool.build().expect("should not require runtime"); - Self { inner: pool } - } - - /// Adds the runtime endpoint into the pool. - pub async fn register_endpoint>(&self, rt: E) -> Result<()> { - self.inner - .manager() - .register_endpoint(rt.into()) - .await - .map_err(Into::into) - } - - /// Removes the runtime endpoint from the pool. - pub async fn unregister_endpoint>(&self, rt: E) -> Result<()> { - self.inner - .manager() - .unregister_endpoint(&rt.into()) - .await - .map_err(Into::into) + Self { conn: pool } } - pub async fn get_connection(&self) -> Result { - self.inner.get().await.map(Into::into).map_err(Into::into) + /// Retrieves a connection from this pool or waits for one to become available. 
+ pub async fn get_connection(&self) -> RuntimeResult> { + self.conn.get().await.map_err(Into::into) } } impl Default for Runtime { fn default() -> Self { - Self::new(RuntimeConfig::default()) + let endpoints = Vec::new().into_iter(); + Self::new(endpoints, RuntimeConfig::new()) } } impl fmt::Debug for Runtime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Runtime").finish_non_exhaustive() + let status = self.conn.status(); + let is_closed = self.conn.is_closed(); + f.debug_struct("Runtime") + .field("size", &status.size) + .field("max_size", &status.max_size) + .field("available", &status.available) + .field("waiting", &status.waiting) + .field("is_closed", &is_closed) + .finish() } } #[cfg(test)] mod test { - use crate::{Result, Runtime, RuntimeConfig}; + use crate::{Runtime, RuntimeConfig, RuntimeResult}; #[test] - fn build_default_runtime() -> Result<()> { + fn build_default_runtime() -> RuntimeResult<()> { + let endpoints = Vec::new().into_iter(); let config = RuntimeConfig::new(); - let _runtime = Runtime::new(config); + let _runtime = Runtime::new(endpoints, config); Ok(()) } } diff --git a/crates/client/config/pool_config.rs b/crates/client/config/pool_config.rs index 7cbc6c8..6d9df0a 100644 --- a/crates/client/config/pool_config.rs +++ b/crates/client/config/pool_config.rs @@ -57,10 +57,10 @@ impl RuntimeConfig { #[cfg(test)] mod test { - use crate::{Result, RuntimeConfig}; + use crate::{RuntimeConfig, RuntimeResult}; #[test] - fn build_default_settings() -> Result<()> { + fn build_default_settings() -> RuntimeResult<()> { let _config = RuntimeConfig::new(); Ok(()) } diff --git a/crates/client/lib.rs b/crates/client/lib.rs index d21e8af..bd6b5b4 100644 --- a/crates/client/lib.rs +++ b/crates/client/lib.rs @@ -5,15 +5,16 @@ //! ### Examples //! //! ```rust -//! use axiston_rt_client::{Runtime, Result, RuntimeEndpoint}; +//! use axiston_rt_client::{Runtime, RuntimeResult, RuntimeConfig}; //! //! //! #[tokio::main] -//! 
async fn main() -> Result<()> { +//! async fn main() -> RuntimeResult<()> { //! let addr = "https://example.com/"; //! let endpoint = RuntimeEndpoint::from_bytes(addr.into())?; //! -//! let runtime = Runtime::default(); +//! let config = RuntimeConfig::new(); +//! let runtime = Runtime::new(config); //! runtime.register_endpoint(endpoint).await?; //! let _conn = runtime.get_connection().await?; //! @@ -21,53 +22,58 @@ //! } //! ``` +use deadpool::managed::PoolError; + +pub use crate::config::{Runtime, RuntimeConfig}; +pub use crate::manager::RecyclingMethod; +use crate::manager::RuntimePoolError; + mod config; mod manager; mod middleware; -use deadpool::managed::PoolError; -use derive_more::From; - -pub use crate::config::{Runtime, RuntimeConfig, RuntimeObject}; -pub use crate::manager::RuntimeEndpoint; -use crate::manager::RuntimeError; - /// Unrecoverable failure of the [`Runtime`]. /// /// Includes all error types that may occur. #[non_exhaustive] -#[derive(Debug, From, thiserror::Error)] +#[derive(Debug, thiserror::Error)] #[must_use = "errors do nothing unless you use them"] -pub enum Error { - /// Timeout happened. +pub enum RuntimeError { + /// [`deadpool::managed::PoolError::Timeout`]. #[error("timeout happened")] Timout(deadpool::managed::TimeoutType), - /// Runtime: All endpoints have reached the limit. - #[error("runtime: all endpoints have reached the limit")] - EndpointsLimit, - /// Runtime: Connection pool has no endpoints. #[error("runtime: connection pool has no endpoints")] NoEndpoints, - /// Runtime: Transport failure (from the client or server). + ///[`tonic::transport::Error`]. #[error("runtime: transport failure: {0}")] - Transport(tonic::transport::Error), + Transport(#[from] tonic::transport::Error), + + /// GRPC server failure. + #[error("transport failure: {0}")] + Status(#[from] tonic::Status), + + // TODO: Do i even need EndpointsLimit + /// Runtime: All endpoints have reached the limit. 
+ #[error("runtime: all endpoints have reached the limit")] + EndpointsLimit, } -impl From for Error { - fn from(runtime_connection_error: RuntimeError) -> Self { +impl From for RuntimeError { + fn from(runtime_connection_error: RuntimePoolError) -> Self { match runtime_connection_error { - RuntimeError::Transport(transport_failure) => Self::Transport(transport_failure), - RuntimeError::EndpointsLimit => Self::EndpointsLimit, - RuntimeError::NoEndpoints => Self::NoEndpoints, + RuntimePoolError::Transport(transport_failure) => Self::Transport(transport_failure), + RuntimePoolError::Status(server_status) => Self::Status(server_status), + RuntimePoolError::EndpointsLimit => Self::EndpointsLimit, + RuntimePoolError::NoEndpoints => Self::NoEndpoints, } } } -impl From> for Error { - fn from(value: PoolError) -> Self { +impl From> for RuntimeError { + fn from(value: PoolError) -> Self { match value { PoolError::Timeout(timeout_type) => Self::Timout(timeout_type), PoolError::Backend(backend_error) => backend_error.into(), @@ -78,7 +84,5 @@ impl From> for Error { } } -/// Specialized [`Result`] alias for the [`Error`] type. -/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; +/// Specialized [`Result`] alias for the [`RuntimeError`] type. +pub type RuntimeResult = Result; diff --git a/crates/client/manager/client.rs b/crates/client/manager/client.rs new file mode 100644 index 0000000..58e9d4a --- /dev/null +++ b/crates/client/manager/client.rs @@ -0,0 +1,65 @@ +use std::fmt; + +use axiston_rt_schema::instance::instance_client::InstanceClient; +use axiston_rt_schema::registry::registry_client::RegistryClient; +use tonic::transport::Endpoint; +use uuid::Uuid; + +use crate::manager::RuntimePoolResult; +use crate::middleware::RuntimeChannel; + +/// Represents a client for interacting with runtime services. 
+/// +/// The `RuntimeClient` is responsible for managing communication with instance +/// and registry services, identified by a unique endpoint ID. It wraps generated +/// gRPC clients for both instance and registry operations, providing a cohesive +/// interface for runtime service interactions. +#[derive(Clone)] +pub struct RuntimeClient { + pub(crate) endpoint_id: Uuid, + pub(crate) instance_client: InstanceClient, + pub(crate) registry_client: RegistryClient, +} + +impl RuntimeClient { + /// Returns a new [`RuntimeClient`]. + #[inline] + pub fn new(id: Uuid, channel: RuntimeChannel) -> Self { + Self { + endpoint_id: id, + instance_client: InstanceClient::new(channel.clone()), + registry_client: RegistryClient::new(channel), + } + } + + /// Returns a new [`RuntimeClient`]. + pub async fn connect(id: Uuid, endpoint: Endpoint) -> RuntimePoolResult { + let channel = endpoint.connect().await?; + let channel = RuntimeChannel::new(channel); + Ok(Self::new(id, channel)) + } + + /// Returns the reference to the underlying unique endpoint identifier. + #[inline] + pub(crate) fn as_endpoint_id(&mut self) -> &Uuid { + &mut self.endpoint_id + } + + /// Returns the reference to the underlying (generated) instance client. + #[inline] + pub(crate) fn as_instance_client(&mut self) -> &mut InstanceClient { + &mut self.instance_client + } + + /// Returns the reference to the underlying (generated) registry client. 
+ #[inline] + pub(crate) fn as_registry_client(&self) -> &RegistryClient { + &self.registry_client + } +} + +impl fmt::Debug for RuntimeClient { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RuntimeClient").finish_non_exhaustive() + } +} diff --git a/crates/client/manager/manager_config.rs b/crates/client/manager/config.rs similarity index 88% rename from crates/client/manager/manager_config.rs rename to crates/client/manager/config.rs index 3ae2863..fe4f342 100644 --- a/crates/client/manager/manager_config.rs +++ b/crates/client/manager/config.rs @@ -18,7 +18,7 @@ impl RuntimeManagerConfig { } /// Overrides the value of [`RuntimeManagerConfig`]`::recycling_method`. - pub fn with_recycling_method(mut self, recycling_method: RecyclingMethod) -> Self { + pub fn recycling_method(mut self, recycling_method: RecyclingMethod) -> Self { self.recycling_method = recycling_method; self } @@ -27,10 +27,12 @@ impl RuntimeManagerConfig { /// Possible methods of how a connection is recycled. #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub enum RecyclingMethod { - /// Only check for open event bus when recycling existing connections + /// Only check for open event bus when recycling existing connections. + /// /// Unless you have special needs this is a safe choice. #[default] Fast, + /// In addition to checking for open event bus a test query is executed. /// /// This is slower, but guarantees that the database connection is ready to be used. 
@@ -48,10 +50,10 @@ impl RecyclingMethod { #[cfg(test)] mod test { use crate::manager::RuntimeManagerConfig; - use crate::Result; + use crate::RuntimeResult; #[test] - fn build_default_settings() -> Result<()> { + fn build_default_settings() -> RuntimeResult<()> { let _ = RuntimeManagerConfig::new(); Ok(()) } diff --git a/crates/client/manager/instance_client.rs b/crates/client/manager/instance_client.rs deleted file mode 100644 index 1ac60cf..0000000 --- a/crates/client/manager/instance_client.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::fmt; - -use axiston_rt_schema::instance::instance_client::InstanceClient; -use axiston_rt_schema::registry::registry_client::RegistryClient; -use derive_more::From; -use tonic::transport::{Channel, Endpoint}; -use uuid::Uuid; -use crate::middleware::RtChannel; - -/// Represents a client for interacting with runtime services. -/// -/// The `RuntimeClient` is responsible for managing communication with instance -/// and registry services, identified by a unique endpoint ID. It wraps generated -/// gRPC clients for both instance and registry operations, providing a cohesive -/// interface for runtime service interactions. -pub struct RuntimeClient { - pub(crate) endpoint_id: Uuid, - pub(crate) instance_client: InstanceClient, - pub(crate) registry_client: RegistryClient, -} - -impl RuntimeClient { - /// Returns a new [`RuntimeClient`]. - #[inline] - pub fn new(id: Uuid, channel: Channel) -> Self { - let rt_channel = RtChannel::new(channel); - - Self { - endpoint_id: id, - instance_client: InstanceClient::new(rt_channel.clone()), - registry_client: RegistryClient::new(rt_channel), - } - } - - /// Returns a new [`RuntimeClient`]. - pub async fn connect(id: Uuid, endpoint: Endpoint) -> RuntimeResult { - let channel = endpoint.connect().await?; - Ok(Self::new(id, channel)) - } - - /// Returns the reference to the underlying unique endpoint identifier. 
- #[inline] - pub(crate) fn as_endpoint_id(&self) -> &Uuid { - &self.endpoint_id - } - - /// Returns the reference to the underlying (generated) instance client. - #[inline] - pub(crate) fn as_instance_client(&self) -> &InstanceClient { - &self.instance_client - } - - /// Returns the reference to the underlying (generated) instance client. - #[inline] - pub(crate) fn as_registry_client(&self) -> &RegistryClient { - &self.registry_client - } -} - -impl fmt::Debug for RuntimeClient { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RuntimeClient").finish_non_exhaustive() - } -} - -/// Unrecoverable failure of the [`RuntimeClient`]. -/// -/// Includes all error types that may occur. -/// Used to remap from [`PoolError`]. -/// -/// [`PoolError`]: deadpool::managed::PoolError -#[derive(Debug, From, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -pub enum RuntimeError { - /// All endpoints have reached the limit. - #[error("all endpoints have reached the limit")] - EndpointsLimit, - - /// Connection pool has no endpoints. - #[error("connection pool has no endpoints")] - NoEndpoints, - - /// Transport failure (from the client or server). - #[error("transport failure: {0}")] - Transport(tonic::transport::Error), -} - -/// Specialized [`Result`] alias for the [`RuntimeError`] type. -pub type RuntimeResult = Result; diff --git a/crates/client/manager/mod.rs b/crates/client/manager/mod.rs index 31890af..2f3c278 100644 --- a/crates/client/manager/mod.rs +++ b/crates/client/manager/mod.rs @@ -1,124 +1,39 @@ //! [`Manager`] of [`RuntimeClient`]s. -//! 
-mod instance_client; -mod manager_config; -mod runtime_endpoint; +mod client; +mod config; -use std::collections::HashMap; use std::fmt; -use deadpool::managed::{Manager, Metrics, RecycleResult}; -use tokio::sync::Mutex; -use tonic::transport::Channel; -use uuid::Uuid; +use axiston_rt_schema::instance::GetStatusRequest; +use deadpool::managed::{Manager, Metrics, RecycleError, RecycleResult}; +use time::OffsetDateTime; +use tonic::transport::{Channel, Endpoint}; +use uuid::{NoContext, Timestamp, Uuid}; -pub use crate::manager::instance_client::{RuntimeClient, RuntimeError, RuntimeResult}; -pub use crate::manager::manager_config::{RecyclingMethod, RuntimeManagerConfig}; -pub use crate::manager::runtime_endpoint::RuntimeEndpoint; +pub use crate::manager::client::RuntimeClient; +pub use crate::manager::config::{RecyclingMethod, RuntimeManagerConfig}; +use crate::middleware::RuntimeChannel; /// [`Manager`] of [`RuntimeClient`]s. pub struct RuntimeManager { - inner: Mutex, -} - -struct RuntimeManagerInner { - config: RuntimeManagerConfig, - endpoints: HashMap)>, + recycling_method: RecyclingMethod, + runtime_channel: RuntimeChannel, } impl RuntimeManager { /// Returns a new [`RuntimeManager`]. - #[inline] - pub fn new(config: RuntimeManagerConfig) -> Self { - let inner = Mutex::new(RuntimeManagerInner { - endpoints: HashMap::new(), - config, - }); - - Self { inner } - } - - /// Adds the runtime endpoint into the pool. - pub(crate) async fn register_endpoint(&self, endpoint: RuntimeEndpoint) -> RuntimeResult<()> { - let mut manager = self.inner.lock().await; - - // Ensures the UUIDv4 is not duplicated. - let mut endpoint_id = Uuid::new_v4(); - while manager.endpoints.contains_key(&endpoint_id) { - endpoint_id = Uuid::new_v4(); - } - - manager.endpoints.insert(endpoint_id, (endpoint, None)); - Ok(()) - } - - /// Removes the runtime endpoint from the pool. 
- pub(crate) async fn unregister_endpoint(&self, endpoint_id: &Uuid) -> RuntimeResult<()> { - let mut manager = self.inner.lock().await; - // TODO: Don't remove it, but use a *disable* flag. - let _ = manager.endpoints.remove(endpoint_id); - Ok(()) - } - - /// - Returns the least used channel. - /// - Increases the counter of current connections by 1. - async fn next_channel(&self) -> RuntimeResult<(Uuid, Channel)> { - let mut manager = self.inner.lock().await; - if manager.endpoints.is_empty() { - return Err(RuntimeError::NoEndpoints); - } - - // Returns the endpoint with the least of connections out of the pool - // of endpoints with no limits or if their limit was not reached yet. - let endpoint = manager - .endpoints - .iter_mut() - .filter(|(_, (r, _))| r.limit.is_none() || r.limit.is_some_and(|x| x < r.current)) - .min_by(|(_, (l, _)), (_, (r, _))| l.current.cmp(&r.current)); - - let Some((id, (runtime_endpoint, runtime_channel))) = endpoint else { - return Err(RuntimeError::EndpointsLimit); - }; - - let runtime_channel = if let Some(runtime_channel) = runtime_channel { - runtime_channel.clone() - } else { - let channel = runtime_endpoint.endpoint.connect().await?; - *runtime_channel = Some(channel.clone()); - channel - }; - - runtime_endpoint.current += 1; - Ok((*id, runtime_channel)) - } - - /// Reduces the counter of current connections by 1. - async fn drop_channel(&self, endpoint_id: &Uuid) { - let mut manager = self.inner.lock().await; - if let Some((endpoint, _)) = manager.endpoints.get_mut(endpoint_id) { - endpoint.current -= 1; + pub fn new(endpoints: impl Iterator, config: RuntimeManagerConfig) -> Self { + // TODO: Use Channel::balance_channel instead. + // TODO: Add methods to add/delete Endpoints. + let channel = Channel::balance_list(endpoints); + let channel = RuntimeChannel::new(channel); + + Self { + recycling_method: config.recycling_method, + runtime_channel: channel, } } - - /// Recycles a dropped instance of runtime client. 
- async fn test_connection(&self, _runtime_client: &mut RuntimeClient) -> RuntimeResult<()> { - let manager = self.inner.lock().await; - - // TODO: Recycle dropped connections. - match manager.config.recycling_method { - RecyclingMethod::Fast => {} - RecyclingMethod::Verified => {} - } - - Ok(()) - } -} - -impl Default for RuntimeManager { - fn default() -> Self { - Self::new(RuntimeManagerConfig::default()) - } } impl fmt::Debug for RuntimeManager { @@ -129,11 +44,19 @@ impl fmt::Debug for RuntimeManager { impl Manager for RuntimeManager { type Type = RuntimeClient; - type Error = RuntimeError; + type Error = RuntimePoolError; async fn create(&self) -> Result { - let (id, channel) = self.next_channel().await?; - Ok(RuntimeClient::new(id, channel)) + let utc_datetime = OffsetDateTime::now_utc(); + let uuid_timestamp = Timestamp::from_unix( + NoContext, + utc_datetime.unix_timestamp() as u64, + utc_datetime.nanosecond(), + ); + + let channel = self.runtime_channel.clone(); + let client_id = Uuid::new_v7(uuid_timestamp); + Ok(RuntimeClient::new(client_id, channel)) } async fn recycle( @@ -141,8 +64,52 @@ impl Manager for RuntimeManager { conn: &mut Self::Type, _metrics: &Metrics, ) -> RecycleResult { - self.drop_channel(&conn.endpoint_id).await; - self.test_connection(conn).await?; + match self.recycling_method { + RecyclingMethod::Fast => return Ok(()), + RecyclingMethod::Verified => {} + } + + // TODO: Delete if it fails to verify. + let conn = conn.as_instance_client(); + let request = conn.get_status(GetStatusRequest { + verbose_metrics: Some(false), + force_latest: Some(false), + sliding_window: Some(0), + }); + + let _response = request + .await + .map_err(|status| RecycleError::Backend(status.into()))?; + Ok(()) } } + +/// Unrecoverable failure of the [`RuntimeClient`]. +/// +/// Includes all error types that may occur. +/// Used to remap from [`PoolError`]. 
+/// +/// [`PoolError`]: deadpool::managed::PoolError +#[derive(Debug, thiserror::Error)] +#[must_use = "errors do nothing unless you use them"] +pub enum RuntimePoolError { + /// All endpoints have reached the limit. + #[error("all endpoints have reached the limit")] + EndpointsLimit, + + /// Connection pool has no endpoints. + #[error("connection pool has no endpoints")] + NoEndpoints, + + /// Transport failure (from the client or server). + #[error("transport failure: {0}")] + Transport(#[from] tonic::transport::Error), + + /// GRPC server failure. + #[error("transport failure: {0}")] + Status(#[from] tonic::Status), +} + +/// Specialized [`Result`] alias for the [`RuntimePoolError`] type. +pub type RuntimePoolResult = Result; diff --git a/crates/client/manager/runtime_endpoint.rs b/crates/client/manager/runtime_endpoint.rs deleted file mode 100644 index 9683769..0000000 --- a/crates/client/manager/runtime_endpoint.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::sync::LazyLock; - -use tonic::codegen::Bytes; -use tonic::transport::{Endpoint, Uri}; - -use crate::{Error, Result}; - -/// Builds and configures `HTTP/2` channels. -/// -/// Includes configuration for the manager. -#[derive(Debug, Clone)] -pub struct RuntimeEndpoint { - // TODO: Allow multiple endpoints. - // TODO: Use [`Channel::balance_list`]. - pub(crate) endpoint: Endpoint, - pub(crate) limit: Option, - pub(crate) current: u32, -} - -impl RuntimeEndpoint { - /// Returns a new [`RuntimeEndpoint`]. - pub fn new(endpoint: Endpoint) -> Self { - Self { - endpoint, - limit: None, - current: 0, - } - } - - /// Returns a new [`RuntimeEndpoint`]. - pub fn from_bytes(endpoint: Bytes) -> Result { - let endpoint = Endpoint::from_shared(endpoint)?; - let endpoint = endpoint.user_agent(USER_AGENT.as_str())?; - Ok(Self::new(endpoint)) - } - - /// Overrides the value of [`RuntimeEndpoint`]`::connection_limit`. 
- #[inline] - pub fn connection_limit(mut self, limit: Option) -> Self { - self.limit = limit; - self - } - - /// Get the endpoint uri. - #[inline] - pub fn uri(&self) -> &Uri { - self.endpoint.uri() - } -} - -impl From for RuntimeEndpoint { - #[inline] - fn from(value: Endpoint) -> Self { - Self { - endpoint: value, - limit: None, - current: 0, - } - } -} - -impl TryFrom<&str> for RuntimeEndpoint { - type Error = Error; - - #[inline] - fn try_from(value: &str) -> Result { - Self::from_bytes(Bytes::copy_from_slice(value.as_bytes())) - } -} - -impl TryFrom for RuntimeEndpoint { - type Error = Error; - - #[inline] - fn try_from(value: Bytes) -> Result { - Self::from_bytes(value) - } -} - -// TODO: Replace with `static USER_AGENT: String`. -static USER_AGENT: LazyLock String> = LazyLock::new(format_user_agent); -fn format_user_agent() -> String { - format!( - "Axiston/{} (Rust; Ver {})", - env!("CARGO_PKG_VERSION"), - env!("CARGO_PKG_RUST_VERSION") - ) -} - -#[cfg(test)] -mod test { - use tonic::transport::Endpoint; - - use crate::manager::RuntimeEndpoint; - use crate::Result; - - #[test] - fn endpoint_from_bytes() -> Result<()> { - let addr = "https://example.com/".into(); - let _endpoint = RuntimeEndpoint::from_bytes(addr)?; - Ok(()) - } - - #[test] - fn endpoint_from_inner() -> Result<()> { - let endpoint = Endpoint::from_static("https://example.com/"); - let _endpoint = RuntimeEndpoint::new(endpoint); - Ok(()) - } -} diff --git a/crates/client/middleware/future.rs b/crates/client/middleware/future.rs index 78c670a..bee282b 100644 --- a/crates/client/middleware/future.rs +++ b/crates/client/middleware/future.rs @@ -1,7 +1,8 @@ //! [`Future`]s for the runtime [`Channel`]. //! -//! [`Channel`]: crate::middleware::RtChannel +//! [`Channel`]: crate::middleware::RuntimeChannel +use std::fmt; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; @@ -13,23 +14,30 @@ use tonic::transport::Error; /// Response [`Future`] for the runtime [`Channel`]. 
/// -/// [`Channel`]: crate::middleware::RtChannel -pub struct RtResponseFuture { +/// [`Channel`]: crate::middleware::RuntimeChannel +pub struct RuntimeResponseFuture { inner: ResponseFuture, } -impl RtResponseFuture { - /// Returns a new [`RtResponseFuture`]. +impl RuntimeResponseFuture { + /// Returns a new [`RuntimeResponseFuture`]. #[inline] pub fn new(inner: ResponseFuture) -> Self { Self { inner } } } -impl Future for RtResponseFuture { +impl Future for RuntimeResponseFuture { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Pin::new(&mut self.inner).poll(cx) } } + +impl fmt::Debug for RuntimeResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RuntimeResponseFuture") + .finish_non_exhaustive() + } +} diff --git a/crates/client/middleware/mod.rs b/crates/client/middleware/mod.rs index 78a0ccf..1017ce4 100644 --- a/crates/client/middleware/mod.rs +++ b/crates/client/middleware/mod.rs @@ -1,41 +1,52 @@ -//! TODO. +//! [`Channel`] with [`tower`] middlewares. pub mod future; +use std::fmt; use std::task::{Context, Poll}; +use derive_more::{Deref, DerefMut}; +use http::{Request, Response}; use tonic::body::BoxBody; use tonic::transport::{Channel, Error}; -use http::{Request, Response}; use tower::Service; -use crate::middleware::future::RtResponseFuture; +use crate::middleware::future::RuntimeResponseFuture; -/// TODO. -#[derive(Debug, Clone)] -pub struct RtChannel { +/// [`Channel`] with [`tower`] middlewares. +#[derive(Clone, Deref, DerefMut)] +pub struct RuntimeChannel { inner: Channel, } -impl RtChannel { - /// Returns a new [`RtChannel`]. - #[inline] +impl RuntimeChannel { + /// Returns a new [`RuntimeChannel`]. + #[inline] pub fn new(inner: Channel) -> Self { + // TODO: Apply middlewares. 
Self { inner } } } -impl Service> for RtChannel { +impl Service> for RuntimeChannel { type Response = Response; type Error = Error; - type Future = RtResponseFuture; + type Future = RuntimeResponseFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - Service::poll_ready(&mut self.inner, cx) + Service::poll_ready(&mut self.inner, cx) } fn call(&mut self, req: Request) -> Self::Future { let fut = Service::call(&mut self.inner, req); - RtResponseFuture::new(fut) + RuntimeResponseFuture::new(fut) + } +} + +impl fmt::Debug for RuntimeChannel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RuntimeChannel") + .field("channel", &self.inner) + .finish_non_exhaustive() } } diff --git a/crates/jsvm/Cargo.toml b/crates/jsvm/Cargo.toml index af162ec..a2e359d 100644 --- a/crates/jsvm/Cargo.toml +++ b/crates/jsvm/Cargo.toml @@ -21,38 +21,3 @@ rustdoc-args = ["--cfg", "docsrs"] path = "lib.rs" [dependencies] -# Make sure tokio, async_trait and deno_* version are the same. 
-# https://github.com/denoland/deno/blob/main/Cargo.toml - -axiston-rt-task = { workspace = true } -tokio = { version = "1.36", features = [] } -tracing = { version = "0.1", features = [] } -async-trait = { version = "0.1", features = [] } -ctor = { version = "0.2", features = [] } - -thiserror = { workspace = true } -serde = { workspace = true } -rand = { version = "0.8", features = [] } - -deno_core = { version = "0.324.0", features = [] } -deno_ast = { version = "0.44.0", features = ["transpiling"] } -deno_permissions = { version = "0.42.0", features = [] } -deno_cache_dir = { version = "0.14.0", features = [] } - -deno_console = { version = "0.182.0", features = [] } -deno_crypto = { version = "0.196.0", features = [] } -deno_webidl = { version = "0.182.0", features = [] } -deno_url = { version = "0.182.0", features = [] } - -deno_fs = { version = "0.92.0", features = [] } -deno_io = { version = "0.92.0", features = [] } -deno_fetch = { version = "0.206.0", features = [] } -deno_net = { version = "0.174.0", features = [] } -deno_web = { version = "0.213.0", features = [] } - -deno_http = { version = "0.180.0", features = [] } -deno_tls = { version = "0.169.0", features = [] } -deno_websocket = { version = "0.187.0", features = [] } -deno_webstorage = { version = "0.177.0", features = [] } -deno_canvas = { version = "0.51.0", features = [] } -deno_webgpu = { version = "0.149.0", features = [] } diff --git a/crates/jsvm/README.md b/crates/jsvm/README.md index 6c867e7..28d0e46 100644 --- a/crates/jsvm/README.md +++ b/crates/jsvm/README.md @@ -13,9 +13,6 @@ Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. -Implementation is based on supabase/edge-runtime and deno/deno crates. Mention -rustyscript. - #### Notes - Lorem Ipsum. 
diff --git a/crates/jsvm/extension/custom_serde.rs b/crates/jsvm/extension/custom_serde.rs deleted file mode 100644 index fd8bdc8..0000000 --- a/crates/jsvm/extension/custom_serde.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub mod serde_option_duration_ms { - //! Serializing and deserializing `Option` as milliseconds. - //! - //! # Example: - //! - //! - `{"duration": 1500}` for `Some(Duration::from_millis(1500))` - //! - `{"duration": null}` for `None` - - use std::time::Duration; - - use serde::{Deserialize, Deserializer, Serializer}; - - /// Serializes an `Option` into milliseconds. - pub fn serialize(option: &Option, serializer: S) -> Result - where - S: Serializer, - { - match option { - Some(duration) => serializer.serialize_some(&(duration.as_millis() as u64)), - None => serializer.serialize_none(), - } - } - - /// Deserializes an `Option` from milliseconds. - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let millis: Option = Option::deserialize(deserializer)?; - Ok(millis.map(Duration::from_millis)) - } -} - -pub mod serde_duration_ms { - //! Serializing and deserializing `Duration` as milliseconds. - //! - //! # Example: - //! - //! - `{"duration": 1500}` for `Duration::from_millis(1500)` - - use std::time::Duration; - - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - /// Serializes a `Duration` into milliseconds. - pub fn serialize(duration: &Duration, serializer: S) -> Result - where - S: Serializer, - { - let millis = duration.as_millis() as u64; - millis.serialize(serializer) - } - - /// Deserializes a `Duration` from milliseconds. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let millis = u64::deserialize(deserializer)?; - Ok(Duration::from_millis(millis)) - } -} diff --git a/crates/jsvm/extension/mod.rs b/crates/jsvm/extension/mod.rs index 9633c9c..8b13789 100644 --- a/crates/jsvm/extension/mod.rs +++ b/crates/jsvm/extension/mod.rs @@ -1,8 +1 @@ -//! Runtime `deno_core::`[`extension`]s. -//! -pub use crate::extension::route::axis_routing; -pub use crate::extension::trace::axis_tracing; - -mod route; -mod trace; diff --git a/crates/jsvm/extension/route/mod.rs b/crates/jsvm/extension/route/mod.rs deleted file mode 100644 index bf6bbc8..0000000 --- a/crates/jsvm/extension/route/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Runtime [`extension`] for the [`routing`] events. -//! -//! [`routing`]: axiston_runtime_task::registry::Registry - -mod datatype; -mod internal; -mod ops; - -use deno_core::extension; - -use crate::extension::route::ops::{op_register_action, op_register_service, op_register_trigger}; - -extension!( - axis_routing, - ops = [op_register_service, op_register_trigger, op_register_action], - esm_entry_point = "ext:extension/route/ops.js", - esm = [ dir "extension/route", "ops.js" ], -); - -/// Unrecoverable failure during routing ops. -/// -/// Includes all error types that may occur. -#[derive(Debug, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -pub enum Error {} - -/// Specialized [`Result`] alias for [`Error`]. 
-/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; diff --git a/crates/jsvm/extension/route/ops.rs b/crates/jsvm/extension/route/ops.rs deleted file mode 100644 index 225b3d9..0000000 --- a/crates/jsvm/extension/route/ops.rs +++ /dev/null @@ -1,18 +0,0 @@ -use deno_core::op2; - -use crate::extension::route::Result; - -#[op2(fast)] -pub fn op_register_service() -> Result<()> { - Ok(()) -} - -#[op2(fast)] -pub fn op_register_trigger() -> Result<()> { - Ok(()) -} - -#[op2(fast)] -pub fn op_register_action() -> Result<()> { - Ok(()) -} diff --git a/crates/jsvm/extension/trace/datatype.rs b/crates/jsvm/extension/trace/datatype.rs deleted file mode 100644 index 8271ce5..0000000 --- a/crates/jsvm/extension/trace/datatype.rs +++ /dev/null @@ -1,24 +0,0 @@ -use serde::Deserialize; - -/// Deserializable options for a [`tracing_*`] op. -/// -/// [`tracing_*`]: crate::ext_tracing -#[derive(Debug, Default, Deserialize)] -#[must_use = "datatypes do nothing unless you deserialize them"] -pub struct TracingOptions { - pub target: Option, -} - -impl TracingOptions {} - -#[cfg(test)] -mod test { - use crate::extension::trace::datatype::TracingOptions; - use crate::extension::trace::Result; - - #[test] - fn instance() -> Result<()> { - let _ = TracingOptions::default(); - Ok(()) - } -} diff --git a/crates/jsvm/extension/trace/internal.rs b/crates/jsvm/extension/trace/internal.rs deleted file mode 100644 index ef9c6fc..0000000 --- a/crates/jsvm/extension/trace/internal.rs +++ /dev/null @@ -1,24 +0,0 @@ -use tracing::{debug, error, info, trace, warn, Level}; - -use crate::extension::trace::datatype::TracingOptions; -use crate::extension::trace::Result; - -/// TODO. 
-pub fn emit_op_tracing_event( - message: &str, - level: Level, - options: Option, -) -> Result<()> { - let options = options.unwrap_or_default(); - let target = options.target.unwrap_or_default(); - - match level { - Level::TRACE => trace!(message), - Level::DEBUG => debug!(message), - Level::INFO => info!(message), - Level::WARN => warn!(message), - Level::ERROR => error!(message), - }; - - Ok(()) -} diff --git a/crates/jsvm/extension/trace/mod.rs b/crates/jsvm/extension/trace/mod.rs deleted file mode 100644 index 151a5a6..0000000 --- a/crates/jsvm/extension/trace/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -//! Runtime [`extension`] for the [`tracing`] events. -//! - -use deno_core::extension; - -use crate::extension::trace::ops::{ - op_tracing_debug, op_tracing_debug_fast, op_tracing_error, op_tracing_error_fast, - op_tracing_info, op_tracing_info_fast, op_tracing_trace, op_tracing_trace_fast, - op_tracing_warn, op_tracing_warn_fast, -}; - -mod datatype; -mod internal; -mod ops; - -extension!( - axis_tracing, - ops = [ - op_tracing_trace_fast, op_tracing_trace, op_tracing_debug_fast, op_tracing_debug, - op_tracing_info_fast, op_tracing_info, op_tracing_warn_fast, op_tracing_warn, - op_tracing_error_fast, op_tracing_error], - esm_entry_point = "ext:extension/trace/ops.js", - esm = [ dir "extension/trace", "ops.js" ], -); - -/// Unrecoverable failure during tracing ops. -/// -/// Includes all error types that may occur. -#[derive(Debug, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -pub enum Error { - #[error("tracing target reused")] - ReuseTarget, -} - -/// Specialized [`Result`] alias for [`Error`]. 
-/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; diff --git a/crates/jsvm/extension/trace/ops.js b/crates/jsvm/extension/trace/ops.js deleted file mode 100644 index a897080..0000000 --- a/crates/jsvm/extension/trace/ops.js +++ /dev/null @@ -1,17 +0,0 @@ -globalThis.tracing = { - "trace": (args) => { - return Deno.core.ops.op_tracing_trace(args); - }, - "debug": (args) => { - return Deno.core.ops.op_tracing_debug(args); - }, - "info": (args) => { - return Deno.core.ops.op_tracing_info(args); - }, - "warn": (args) => { - return Deno.core.ops.op_tracing_warn(args); - }, - "error": (args) => { - return Deno.core.ops.op_tracing_error(args); - }, -}; diff --git a/crates/jsvm/extension/trace/ops.rs b/crates/jsvm/extension/trace/ops.rs deleted file mode 100644 index 91d70db..0000000 --- a/crates/jsvm/extension/trace/ops.rs +++ /dev/null @@ -1,71 +0,0 @@ -use deno_core::op2; -use tracing::Level; - -use crate::extension::trace::datatype::TracingOptions; -use crate::extension::trace::internal::emit_op_tracing_event; -use crate::extension::trace::Result; - -#[op2(fast)] -pub fn op_tracing_trace_fast(#[string] message: &str) -> Result<()> { - emit_op_tracing_event(message, Level::TRACE, None) -} - -#[op2(fast(op_tracing_trace_fast))] -pub fn op_tracing_trace( - #[string] message: &str, - #[serde] options: Option, -) -> Result<()> { - emit_op_tracing_event(message, Level::TRACE, options) -} - -#[op2(fast)] -pub fn op_tracing_debug_fast(#[string] message: &str) -> Result<()> { - emit_op_tracing_event(message, Level::DEBUG, None) -} - -#[op2(fast(op_tracing_debug_fast))] -pub fn op_tracing_debug( - #[string] message: &str, - #[serde] options: Option, -) -> Result<()> { - emit_op_tracing_event(message, Level::DEBUG, options) -} - -#[op2(fast)] -pub fn op_tracing_info_fast(#[string] message: &str) -> Result<()> { - emit_op_tracing_event(message, Level::INFO, None) -} - -#[op2(fast(op_tracing_info_fast))] -pub fn op_tracing_info( - #[string] message: 
&str, - #[serde] options: Option, -) -> Result<()> { - emit_op_tracing_event(message, Level::INFO, options) -} - -#[op2(fast)] -pub fn op_tracing_warn_fast(#[string] message: &str) -> Result<()> { - emit_op_tracing_event(message, Level::WARN, None) -} - -#[op2(fast(op_tracing_warn_fast))] -pub fn op_tracing_warn( - #[string] message: &str, - #[serde] options: Option, -) -> Result<()> { - emit_op_tracing_event(message, Level::WARN, options) -} - -#[op2(fast)] -pub fn op_tracing_error_fast(#[string] message: &str) -> Result<()> { - emit_op_tracing_event(message, Level::ERROR, None) -} - -#[op2(fast(op_tracing_error_fast))] -pub fn op_tracing_error( - #[string] message: &str, - #[serde] options: Option, -) -> Result<()> { - emit_op_tracing_event(message, Level::ERROR, options) -} diff --git a/crates/jsvm/lib.rs b/crates/jsvm/lib.rs index 07548ab..206a3a6 100644 --- a/crates/jsvm/lib.rs +++ b/crates/jsvm/lib.rs @@ -2,28 +2,6 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("./README.md")] -//! ### Examples -//! -//! ```rust -//! fn main() {} -//! ``` - mod extension; mod runtime; - -/// Unrecoverable failure of the [`Jsvm`]. -/// -/// Includes all error types that may occur. -/// -/// [`Jsvm`]: runtime::Jsvm -#[derive(Debug, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -pub enum Error { - // #[error("called task failure: {0}")] - // Task(#[from] context::TaskError), -} - -/// Specialized [`Result`] alias for the [`Error`] type. -/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; +mod utility; diff --git a/crates/jsvm/runtime/cache/mod.rs b/crates/jsvm/runtime/cache/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/runtime/cache/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! 
diff --git a/crates/jsvm/runtime/cert_provider.rs b/crates/jsvm/runtime/cert_provider.rs deleted file mode 100644 index c152e40..0000000 --- a/crates/jsvm/runtime/cert_provider.rs +++ /dev/null @@ -1,23 +0,0 @@ -use deno_core::error::AnyError; -use deno_tls::rustls::RootCertStore; -use deno_tls::RootCertStoreProvider; - -#[derive(Debug, Clone)] -pub struct MyRootCertStoreProvider { - root_cert_store: RootCertStore, -} - -impl MyRootCertStoreProvider { - /// Returns a new [`MyRootCertStoreProvider`]. - #[inline] - pub fn new(root_cert_store: RootCertStore) -> Self { - Self { root_cert_store } - } -} - -impl RootCertStoreProvider for MyRootCertStoreProvider { - #[inline] - fn get_or_try_init(&self) -> Result<&RootCertStore, AnyError> { - Ok(&self.root_cert_store) - } -} diff --git a/crates/jsvm/runtime/deno_runtime.rs b/crates/jsvm/runtime/deno_runtime.rs deleted file mode 100644 index 33b8a03..0000000 --- a/crates/jsvm/runtime/deno_runtime.rs +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::rc::Rc; -use std::sync::{Arc, LazyLock}; - -use deno_core::anyhow::{self, anyhow}; -use deno_core::{JsRuntime, ModuleCodeString, ModuleLoader, RuntimeOptions}; -use deno_http::DefaultHttpPropertyExtractor; -use deno_io::Stdio; -use deno_tls::rustls::RootCertStore; -use deno_tls::RootCertStoreProvider; - -use crate::runtime::cert_provider::MyRootCertStoreProvider; -use crate::runtime::filesystem::{CompileFs, FileBackedVfs, StaticFs}; -use crate::runtime::module_loader::MyModuleLoader; -use crate::runtime::{axis_permissions, MyPermission}; - -/// Header value of the `User-Agent` key. -static AXISTON_UA: LazyLock = LazyLock::new(|| { - format!( - "Deno/{} (Variant; Axiston/{})", - option_env!("CARGO_PKG_VERSION").unwrap_or("1.0"), - env!("CARGO_PKG_VERSION"), - ) -}); - -#[ctor::ctor] -fn init_v8_platform() { - // Initialize V8 flags. 
- let v8_flags = std::env::var("V8_FLAGS").unwrap_or_default(); - - if !v8_flags.is_empty() { - let flags: Vec<_> = v8_flags.split(' ').map(|x| x.to_owned()).collect(); - let flags = deno_core::v8_set_flags(flags).iter(); - for flag in flags.filter(|flag| flag.is_empty()) { - // TODO: Setup console tracing_subscriber. - tracing::error!(target: "v8:init", flag = flag, "flag unrecognized"); - } - } - - // NOTE(denoland/deno/20495): Due to new PKU feature introduced in V8 11.6 we need - // to initialize the V8 platform on a parent thread of all threads that will spawn - // V8 isolates. - JsRuntime::init_platform(None, false); -} - -pub struct DenoRuntime { - js_runtime: JsRuntime, -} - -impl DenoRuntime { - async fn new(maybe_seed: Option) -> Self { - let mut root_cert_store = RootCertStore::empty(); - let root_cert_store_provider: Arc = - Arc::new(MyRootCertStoreProvider::new(root_cert_store.clone())); - let mut stdio = Some(Stdio::default()); - - // let op_fs = { - // if is_user_worker { - // Arc::new(StaticFs::new( - // static_files, - // base_dir_path, - // vfs_path, - // vfs, - // npm_snapshot, - // )) as Arc - // } else { - // Arc::new(CompileFs::from_rc(vfs)) as Arc - // } - // }; - - let file_backed_vfs = FileBackedVfs::new(); - let op_fs = Arc::new(CompileFs::new(file_backed_vfs)) as Arc; - - let extensions = vec![ - axis_permissions::init_ops(true, None), - deno_webidl::deno_webidl::init_ops(), - deno_console::deno_console::init_ops(), - deno_url::deno_url::init_ops(), - deno_web::deno_web::init_ops::( - Arc::new(deno_web::BlobStore::default()), - None, - ), - deno_webgpu::deno_webgpu::init_ops(), - deno_canvas::deno_canvas::init_ops(), - deno_fetch::deno_fetch::init_ops::(deno_fetch::Options { - user_agent: AXISTON_UA.clone(), - root_cert_store_provider: Some(root_cert_store_provider.clone()), - ..Default::default() - }), - deno_websocket::deno_websocket::init_ops::( - AXISTON_UA.clone(), - Some(root_cert_store_provider.clone()), - None, - ), - 
deno_crypto::deno_crypto::init_ops(maybe_seed), - deno_net::deno_net::init_ops::(Some(root_cert_store_provider), None), - deno_tls::deno_tls::init_ops(), - deno_http::deno_http::init_ops::(), - deno_io::deno_io::init_ops(stdio), - deno_fs::deno_fs::init_ops::(op_fs), - // deno_node::init_ops::(Some(node_resolver), Some(npm_resolver), op_fs), - ]; - - let module_loader = Rc::new(MyModuleLoader::new()) as Rc; - - let runtime_options = RuntimeOptions { - extensions, - is_main: true, - module_loader: Some(module_loader), - ..RuntimeOptions::default() - }; - - let mut js_runtime = JsRuntime::new(runtime_options); - - let bootstrap_script = ""; - let bootstrap_module = ModuleCodeString::from(bootstrap_script); - - js_runtime - .execute_script(deno_core::located_script_name!(), bootstrap_module) - .expect("Failed to execute bootstrap script"); - - Self { js_runtime } - } - - pub async fn run(&mut self) -> anyhow::Result<()> { - todo!() - } - - pub async fn inspector(&self) { - todo!() - } - - /// Returns a new [`DenoRuntimeBuilder`]. - #[inline] - pub fn builder() -> DenoRuntimeBuilder { - DenoRuntimeBuilder::new() - } -} - -/// [`DenoRuntime`] builder. -#[must_use = "runtime does nothing unless you use itt"] -#[derive(Debug, Default)] -pub struct DenoRuntimeBuilder { - maybe_seed: Option, -} - -impl DenoRuntimeBuilder { - /// Returns a new [`DenoRuntimeBuilder`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Adds or overrides the initial seed for the crypto extension. 
- pub fn with_crypto_seed(mut self, seed: u64) -> Self { - self.maybe_seed = Some(seed); - self - } - - pub fn build(self) -> DenoRuntime { - let extensions = vec![]; - - let options = RuntimeOptions { - extensions, - ..RuntimeOptions::default() - }; - - todo!() - } -} - -#[cfg(test)] -mod test {} diff --git a/crates/jsvm/runtime/filesystem/compile_fs.rs b/crates/jsvm/runtime/filesystem/compile_fs.rs deleted file mode 100644 index 2abd698..0000000 --- a/crates/jsvm/runtime/filesystem/compile_fs.rs +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::path::{Path, PathBuf}; -use std::rc::Rc; -use std::sync::Arc; - -use deno_fs::{AccessCheckCb, FileSystem, FsDirEntry, FsFileType, OpenOptions, RealFs}; -use deno_io::fs::{File, FsError, FsResult, FsStat}; - -use crate::runtime::filesystem::FileBackedVfs; - -#[derive(Debug, Clone)] -pub struct CompileFs { - vfs: Arc, -} - -impl CompileFs { - /// Returns a new [`CompileFs`]. - #[inline] - pub fn new(vfs: FileBackedVfs) -> Self { - Self::from_rc(Arc::new(vfs)) - } - - /// Returns a new [`CompileFs`]. 
- #[inline] - pub fn from_rc(vfs: Arc) -> Self { - Self { vfs } - } - - #[inline] - pub fn file_backed_vfs(&self) -> Arc { - self.vfs.clone() - } - - fn error_if_in_vfs(&self, path: &Path) -> FsResult<()> { - if self.vfs.is_path_within(path) { - Err(FsError::NotSupported) - } else { - Ok(()) - } - } -} - -#[async_trait::async_trait(?Send)] -impl FileSystem for CompileFs { - fn cwd(&self) -> FsResult { - todo!() - } - - fn tmp_dir(&self) -> FsResult { - todo!() - } - - fn chdir(&self, path: &Path) -> FsResult<()> { - todo!() - } - - fn umask(&self, mask: Option) -> FsResult { - todo!() - } - - fn open_sync( - &self, - path: &Path, - options: OpenOptions, - access_check: Option, - ) -> FsResult> { - todo!() - } - - async fn open_async<'a>( - &'a self, - path: PathBuf, - options: OpenOptions, - access_check: Option>, - ) -> FsResult> { - todo!() - } - - fn mkdir_sync(&self, path: &Path, recursive: bool, mode: u32) -> FsResult<()> { - todo!() - } - - async fn mkdir_async(&self, path: PathBuf, recursive: bool, mode: u32) -> FsResult<()> { - todo!() - } - - fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> { - todo!() - } - - async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> { - todo!() - } - - fn chown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - async fn chown_async(&self, path: PathBuf, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - fn lchown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - async fn lchown_async( - &self, - path: PathBuf, - uid: Option, - gid: Option, - ) -> FsResult<()> { - todo!() - } - - fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> { - todo!() - } - - async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> { - todo!() - } - - fn copy_file_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn copy_file_async(&self, oldpath: PathBuf, newpath: 
PathBuf) -> FsResult<()> { - todo!() - } - - fn cp_sync(&self, path: &Path, new_path: &Path) -> FsResult<()> { - todo!() - } - - async fn cp_async(&self, path: PathBuf, new_path: PathBuf) -> FsResult<()> { - todo!() - } - - fn stat_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn stat_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn lstat_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn lstat_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn realpath_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn realpath_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn read_dir_sync(&self, path: &Path) -> FsResult> { - todo!() - } - - async fn read_dir_async(&self, path: PathBuf) -> FsResult> { - todo!() - } - - fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn rename_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { - todo!() - } - - fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn link_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { - todo!() - } - - fn symlink_sync( - &self, - oldpath: &Path, - newpath: &Path, - file_type: Option, - ) -> FsResult<()> { - todo!() - } - - async fn symlink_async( - &self, - old_path: PathBuf, - new_path: PathBuf, - file_type: Option, - ) -> FsResult<()> { - todo!() - } - - fn read_link_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn read_link_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> { - todo!() - } - - async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> { - todo!() - } - - fn utime_sync( - &self, - path: &Path, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - async fn utime_async( - &self, - path: PathBuf, - atime_secs: i64, - atime_nanos: 
u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - fn lutime_sync( - &self, - path: &Path, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - async fn lutime_async( - &self, - path: PathBuf, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - self.error_if_in_vfs(&path)?; - RealFs - .lutime_async(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos) - .await - } -} diff --git a/crates/jsvm/runtime/filesystem/mod.rs b/crates/jsvm/runtime/filesystem/mod.rs deleted file mode 100644 index e0af754..0000000 --- a/crates/jsvm/runtime/filesystem/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! TODO. - -pub use crate::runtime::filesystem::compile_fs::CompileFs; -pub use crate::runtime::filesystem::static_fs::StaticFs; -pub use crate::runtime::filesystem::virtual_fs::{FileBackedVfs, FileBackedVfsFile}; - -mod compile_fs; -mod static_fs; -mod virtual_fs; diff --git a/crates/jsvm/runtime/filesystem/static_fs.rs b/crates/jsvm/runtime/filesystem/static_fs.rs deleted file mode 100644 index aa51f31..0000000 --- a/crates/jsvm/runtime/filesystem/static_fs.rs +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. 
- -use std::path::{Path, PathBuf}; -use std::rc::Rc; -use std::sync::Arc; - -use deno_fs::{AccessCheckCb, FileSystem, FsDirEntry, FsFileType, OpenOptions}; -use deno_io::fs::{File, FsResult, FsStat}; - -use crate::runtime::filesystem::FileBackedVfs; - -#[derive(Debug, Clone)] -pub struct StaticFs { - inner: Arc, -} - -#[async_trait::async_trait(?Send)] -impl FileSystem for StaticFs { - fn cwd(&self) -> FsResult { - todo!() - } - - fn tmp_dir(&self) -> FsResult { - todo!() - } - - fn chdir(&self, path: &Path) -> FsResult<()> { - todo!() - } - - fn umask(&self, mask: Option) -> FsResult { - todo!() - } - - fn open_sync( - &self, - path: &Path, - options: OpenOptions, - access_check: Option, - ) -> FsResult> { - todo!() - } - - async fn open_async<'a>( - &'a self, - path: PathBuf, - options: OpenOptions, - access_check: Option>, - ) -> FsResult> { - todo!() - } - - fn mkdir_sync(&self, path: &Path, recursive: bool, mode: u32) -> FsResult<()> { - todo!() - } - - async fn mkdir_async(&self, path: PathBuf, recursive: bool, mode: u32) -> FsResult<()> { - todo!() - } - - fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> { - todo!() - } - - async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> { - todo!() - } - - fn chown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - async fn chown_async(&self, path: PathBuf, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - fn lchown_sync(&self, path: &Path, uid: Option, gid: Option) -> FsResult<()> { - todo!() - } - - async fn lchown_async( - &self, - path: PathBuf, - uid: Option, - gid: Option, - ) -> FsResult<()> { - todo!() - } - - fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> { - todo!() - } - - async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> { - todo!() - } - - fn copy_file_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn copy_file_async(&self, oldpath: PathBuf, 
newpath: PathBuf) -> FsResult<()> { - todo!() - } - - fn cp_sync(&self, path: &Path, new_path: &Path) -> FsResult<()> { - todo!() - } - - async fn cp_async(&self, path: PathBuf, new_path: PathBuf) -> FsResult<()> { - todo!() - } - - fn stat_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn stat_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn lstat_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn lstat_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn realpath_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn realpath_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn read_dir_sync(&self, path: &Path) -> FsResult> { - todo!() - } - - async fn read_dir_async(&self, path: PathBuf) -> FsResult> { - todo!() - } - - fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn rename_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { - todo!() - } - - fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> { - todo!() - } - - async fn link_async(&self, oldpath: PathBuf, newpath: PathBuf) -> FsResult<()> { - todo!() - } - - fn symlink_sync( - &self, - oldpath: &Path, - newpath: &Path, - file_type: Option, - ) -> FsResult<()> { - todo!() - } - - async fn symlink_async( - &self, - oldpath: PathBuf, - newpath: PathBuf, - file_type: Option, - ) -> FsResult<()> { - todo!() - } - - fn read_link_sync(&self, path: &Path) -> FsResult { - todo!() - } - - async fn read_link_async(&self, path: PathBuf) -> FsResult { - todo!() - } - - fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> { - todo!() - } - - async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> { - todo!() - } - - fn utime_sync( - &self, - path: &Path, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - async fn utime_async( - &self, - path: PathBuf, - atime_secs: i64, - 
atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - fn lutime_sync( - &self, - path: &Path, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } - - async fn lutime_async( - &self, - path: PathBuf, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - todo!() - } -} diff --git a/crates/jsvm/runtime/filesystem/virtual_fs.rs b/crates/jsvm/runtime/filesystem/virtual_fs.rs deleted file mode 100644 index 820cf28..0000000 --- a/crates/jsvm/runtime/filesystem/virtual_fs.rs +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::io::SeekFrom; -use std::path::{Path, PathBuf}; -use std::process::Stdio; -use std::rc::Rc; - -use deno_core::{BufMutView, BufView, ResourceHandleFd, WriteOutcome}; -use deno_io::fs::{File, FsError, FsResult, FsStat}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug)] -pub struct FileBackedVfsRoot { - pub directory: VirtualDirectory, - pub root_path: PathBuf, -} - -#[derive(Debug)] -pub struct FileBackedVfs { - fs_root: FileBackedVfsRoot, -} - -impl FileBackedVfs { - /// Returns a new [`FileBackedVfs`]. 
- pub fn new() -> Self { - todo!() - } - - #[inline] - pub fn is_path_within(&self, path: &Path) -> bool { - path.starts_with(&self.fs_root.root_path) - } -} - -#[derive(Debug, Clone)] -pub struct FileBackedVfsFile {} - -impl FileBackedVfsFile {} - -#[async_trait::async_trait(?Send)] -impl File for FileBackedVfsFile { - fn read_sync(self: Rc, buf: &mut [u8]) -> FsResult { - Err(FsError::NotSupported) - } - - async fn read_byob(self: Rc, buf: BufMutView) -> FsResult<(usize, BufMutView)> { - Err(FsError::NotSupported) - } - - fn write_sync(self: Rc, buf: &[u8]) -> FsResult { - Err(FsError::NotSupported) - } - - async fn write(self: Rc, buf: BufView) -> FsResult { - Err(FsError::NotSupported) - } - - fn write_all_sync(self: Rc, buf: &[u8]) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn write_all(self: Rc, buf: BufView) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn read_all_sync(self: Rc) -> FsResult> { - Err(FsError::NotSupported) - } - - async fn read_all_async(self: Rc) -> FsResult> { - Err(FsError::NotSupported) - } - - fn chmod_sync(self: Rc, path_mode: u32) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn chmod_async(self: Rc, mode: u32) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn seek_sync(self: Rc, pos: SeekFrom) -> FsResult { - Err(FsError::NotSupported) - } - - async fn seek_async(self: Rc, pos: SeekFrom) -> FsResult { - Err(FsError::NotSupported) - } - - fn datasync_sync(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn datasync_async(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn sync_sync(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn sync_async(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn stat_sync(self: Rc) -> FsResult { - Err(FsError::NotSupported) - } - - async fn stat_async(self: Rc) -> FsResult { - Err(FsError::NotSupported) - } - - fn lock_sync(self: Rc, exclusive: bool) -> FsResult<()> { - 
Err(FsError::NotSupported) - } - - async fn lock_async(self: Rc, exclusive: bool) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn unlock_sync(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn unlock_async(self: Rc) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn truncate_sync(self: Rc, len: u64) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn truncate_async(self: Rc, len: u64) -> FsResult<()> { - Err(FsError::NotSupported) - } - - fn utime_sync( - self: Rc, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - Err(FsError::NotSupported) - } - - async fn utime_async( - self: Rc, - atime_secs: i64, - atime_nanos: u32, - mtime_secs: i64, - mtime_nanos: u32, - ) -> FsResult<()> { - Err(FsError::NotSupported) - } - - #[inline] - fn as_stdio(self: Rc) -> FsResult { - Err(FsError::NotSupported) - } - - #[inline] - fn backing_fd(self: Rc) -> Option { - None - } - - #[inline] - fn try_clone_inner(self: Rc) -> FsResult> { - Ok(self) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub enum VfsEntry { - Directory(VirtualDirectory), - File(VirtualFile), - Symlink(VirtualSymlink), -} - -#[derive(Debug)] -enum VfsEntryRef<'a> { - Directory(&'a VirtualDirectory), - File(&'a VirtualFile), - Symlink(&'a VirtualSymlink), -} - -impl<'a> VfsEntryRef<'a> { - /// Returns a new [`FsStat`]. - pub fn as_fs_stat(&self) -> FsStat { - match self { - VfsEntryRef::Directory(x) => x.as_fs_stat(), - VfsEntryRef::File(x) => x.as_fs_stat(), - VfsEntryRef::Symlink(x) => x.as_fs_stat(), - } - } -} - -impl VfsEntry { - /// Returns a reference to the entries name. - pub fn name(&self) -> &str { - match self { - VfsEntry::Directory(dir) => &dir.name, - VfsEntry::File(file) => &file.name, - VfsEntry::Symlink(sm) => &sm.name, - } - } - - /// Returns a new [`VfsEntryRef`] from this entry. 
- pub fn as_entry_ref(&self) -> VfsEntryRef<'_> { - match self { - VfsEntry::Directory(dir) => VfsEntryRef::Directory(dir), - VfsEntry::File(file) => VfsEntryRef::File(file), - VfsEntry::Symlink(sm) => VfsEntryRef::Symlink(sm), - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct VirtualDirectory { - pub name: String, - // Should be sorted by name. - pub entries: Vec, -} - -impl VirtualDirectory { - /// Returns a new [`FsStat`]. - pub fn as_fs_stat(&self) -> FsStat { - FsStat { - is_file: false, - is_directory: true, - is_symlink: false, - size: 0, - mtime: None, - atime: None, - birthtime: None, - dev: 0, - ino: 0, - mode: 0, - nlink: 0, - uid: 0, - gid: 0, - rdev: 0, - blksize: 0, - blocks: 0, - is_block_device: false, - is_char_device: false, - is_fifo: false, - is_socket: false, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct VirtualFile { - pub name: String, - pub offset: u64, - pub len: u64, -} - -impl VirtualFile { - /// Returns a new [`FsStat`]. - pub fn as_fs_stat(&self) -> FsStat { - FsStat { - is_file: true, - is_directory: false, - is_symlink: false, - size: 0, - mtime: None, - atime: None, - birthtime: None, - dev: 0, - ino: 0, - mode: 0, - nlink: 0, - uid: 0, - gid: 0, - rdev: 0, - blksize: 0, - blocks: 0, - is_block_device: false, - is_char_device: false, - is_fifo: false, - is_socket: false, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct VirtualSymlink { - pub name: String, - pub dest_parts: Vec, -} - -impl VirtualSymlink { - /// Returns a new [`FsStat`]. 
- pub fn as_fs_stat(&self) -> FsStat { - FsStat { - is_file: false, - is_directory: false, - is_symlink: true, - size: 0, - mtime: None, - atime: None, - birthtime: None, - dev: 0, - ino: 0, - mode: 0, - nlink: 0, - uid: 0, - gid: 0, - rdev: 0, - blksize: 0, - blocks: 0, - is_block_device: false, - is_char_device: false, - is_fifo: false, - is_socket: false, - } - } - - pub fn resolve_dest_from_root(&self, root: &Path) -> PathBuf { - let mut dest = root.to_path_buf(); - for part in &self.dest_parts { - dest.push(part); - } - dest - } -} diff --git a/crates/jsvm/runtime/mod.rs b/crates/jsvm/runtime/mod.rs index 57aa338..8b13789 100644 --- a/crates/jsvm/runtime/mod.rs +++ b/crates/jsvm/runtime/mod.rs @@ -1,47 +1 @@ -//! TODO. -//! -mod cert_provider; -mod deno_runtime; -mod filesystem; -mod module_loader; -mod permissions; -mod transpile; - -use std::fmt; -use std::rc::Rc; - -use tokio::runtime::Runtime as TokioRuntime; - -use crate::runtime::deno_runtime::DenoRuntime; -pub use crate::runtime::permissions::{axis_permissions, MyPermission}; - -pub struct Jsvm { - deno_runtime: DenoRuntime, - tokio_runtime: TokioRuntime, -} - -impl Jsvm { - /// Returns a new [`Jsvm`]. - pub fn new(tokio_runtime: Rc) -> Self { - todo!() - } - - // TODO: load modules -} - -impl fmt::Debug for Jsvm { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Jsvm").finish_non_exhaustive() - } -} - -#[cfg(test)] -mod test { - use crate::Result; - - #[test] - fn build() -> Result<()> { - Ok(()) - } -} diff --git a/crates/jsvm/runtime/module_loader.rs b/crates/jsvm/runtime/module_loader.rs deleted file mode 100644 index 2a848df..0000000 --- a/crates/jsvm/runtime/module_loader.rs +++ /dev/null @@ -1,34 +0,0 @@ -use deno_core::anyhow::Error; -use deno_core::{ - ModuleLoadResponse, ModuleLoader, ModuleSpecifier, RequestedModuleType, ResolutionKind, -}; - -pub struct MyModuleLoader {} - -impl MyModuleLoader { - /// Returns a new [`MyModuleLoader`]. 
- pub fn new() -> Self { - Self {} - } -} - -impl ModuleLoader for MyModuleLoader { - fn resolve( - &self, - specifier: &str, - referrer: &str, - kind: ResolutionKind, - ) -> Result { - todo!() - } - - fn load( - &self, - module_specifier: &ModuleSpecifier, - maybe_referrer: Option<&ModuleSpecifier>, - is_dyn_import: bool, - requested_module_type: RequestedModuleType, - ) -> ModuleLoadResponse { - todo!() - } -} diff --git a/crates/jsvm/runtime/permissions.rs b/crates/jsvm/runtime/permissions.rs deleted file mode 100644 index 29f7f0e..0000000 --- a/crates/jsvm/runtime/permissions.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::borrow::Cow; -use std::path::Path; - -use deno_core::error::AnyError; -use deno_core::url::Url; -use deno_fetch::FetchPermissions; -use deno_fs::FsPermissions; -use deno_io::fs::FsError; -use deno_net::NetPermissions; -use deno_permissions::NetDescriptor; -use deno_web::TimersPermission; -use deno_websocket::WebSocketPermissions; - -/// TODO. -#[derive(Debug, Default, Clone)] -pub struct MyPermission { - allow_net: bool, - filter_net: Option>, -} - -deno_core::extension!( - axis_permissions, - options = { allow_net: bool, filter_net: Option> }, - state = |state, options| { - state.put::( - MyPermission::new(options.allow_net, options.filter_net) - ); - } -); - -impl MyPermission { - /// Returns a new [`MyPermission`]. 
- #[inline] - pub fn new(allow_net: bool, filter_net: Option>) -> Self { - Self { - allow_net, - filter_net, - } - } -} - -impl TimersPermission for MyPermission { - #[inline] - fn allow_hrtime(&mut self) -> bool { - false - } -} - -impl FetchPermissions for MyPermission { - fn check_net_url(&mut self, _url: &Url, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_read(&mut self, _p: &Path, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } -} - -impl NetPermissions for MyPermission { - fn check_net>( - &mut self, - _host: &(T, Option), - _api_name: &str, - ) -> Result<(), AnyError> { - Ok(()) - } - - fn check_read(&mut self, _p: &Path, _api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_write(&mut self, _p: &Path, _api_name: &str) -> Result<(), AnyError> { - Ok(()) - } -} - -impl WebSocketPermissions for MyPermission { - fn check_net_url(&mut self, _url: &Url, _api_name: &str) -> Result<(), AnyError> { - Ok(()) - } -} - -impl FsPermissions for MyPermission { - fn check_open<'a>( - &mut self, - resolved: bool, - read: bool, - write: bool, - path: &'a Path, - api_name: &str, - ) -> Result, FsError> { - Ok(Cow::Borrowed(path)) - } - - fn check_read(&mut self, path: &Path, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_read_blind( - &mut self, - p: &Path, - display: &str, - api_name: &str, - ) -> Result<(), AnyError> { - Ok(()) - } - - fn check_write(&mut self, path: &Path, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_write_partial(&mut self, path: &Path, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError> { - Ok(()) - } - - fn check_write_blind( - &mut self, - p: &Path, - display: &str, - api_name: &str, - ) -> Result<(), AnyError> { - Ok(()) - } -} diff --git a/crates/jsvm/runtime/transpile/disk_cache.rs 
b/crates/jsvm/runtime/transpile/disk_cache.rs deleted file mode 100644 index 1ef54d1..0000000 --- a/crates/jsvm/runtime/transpile/disk_cache.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::ffi::OsStr; -use std::fs::File; -use std::io::{Read, Result, Write}; -#[cfg(target_os = "windows")] -use std::path::{Component, Prefix}; -use std::path::{Path, PathBuf}; - -use deno_cache_dir::url_to_filename; -#[cfg(target_os = "windows")] -use deno_core::url::Host; -use deno_core::url::Url; -#[cfg(target_os = "windows")] -use serde::{Deserialize, Serialize}; - -/// On-disk storage for previously emitted files. -pub struct DiskCache { - root_path: PathBuf, -} - -impl DiskCache { - /// Returns a new [`DiskCache`]. - pub fn new(path: impl AsRef) -> Self { - let path = path.as_ref().to_owned(); - assert!(path.is_absolute()); - Self { root_path: path } - } - - /// Returns the reference to the root path. - #[inline] - pub fn root_path(&self) -> &Path { - self.root_path.as_path() - } - - fn get_filename(&self, url: &Url) -> Option { - let mut out = PathBuf::new(); - let scheme = url.scheme(); - out.push(scheme); - - match scheme { - "wasm" => { - let host = url.host_str()?; - // Windows doesn't support ":" in filenames, so we - // represent port using a special string. 
- let host_port = url - .port() - .map(|port| format!("{host}_PORT{port}")) - .unwrap_or_else(|| host.to_string()); - out.push(host_port); - out.extend(url.path_segments()?); - } - "http" | "https" | "data" | "blob" => return url_to_filename(url).ok(), - "file" => { - let path = url.to_file_path().ok()?; - let mut components = path.components(); - - // Windows doesn't support ":" in filenames, so we need to extract disk - // prefix, e.g.: file:///C:/deno/js/unit_test_runner.ts should produce: - // file\c\deno\js\unit_test_runner.ts - #[cfg(target_os = "windows")] - if let Some(Component::Prefix(prefix)) = components.next() { - match prefix.kind() { - Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => { - let disk = (disk_byte as char).to_string(); - out.push(disk); - } - Prefix::UNC(server, share) | Prefix::VerbatimUNC(server, share) => { - out.push("UNC"); - let host = Host::parse(server.to_str()?).ok()?; - let host = host.to_string().replace(':', "_"); - out.push(host); - out.push(share); - } - _ => unreachable!(), - } - } - - // Must be relative, so strip forward slash. - let without_forward_slash = components.as_path().strip_prefix("/"); - out = out.join(without_forward_slash.unwrap_or(components.as_path())); - } - _ => return None, - }; - - Some(out) - } - - pub fn get_filename_with_extension(&self, url: &Url, extension: &str) -> Option { - let base = self.get_filename(url)?; - - match base.extension() { - None => Some(base.with_extension(extension)), - Some(ext) => { - let original_extension = OsStr::to_str(ext).unwrap_or("tmp"); - let final_extension = format!("{original_extension}.{extension}"); - Some(base.with_extension(final_extension)) - } - } - } - - /// Reads the entire contents of a file into a bytes vector. - pub fn read(&self, path: impl AsRef) -> Result> { - let buf = std::fs::read(path)?; - Ok(buf) - } - - /// Writes an entire buffer into the temporary file and then rename the file. 
- pub fn write(&self, path: impl AsRef, buf: impl AsRef<[u8]>) -> Result<()> { - let path = path.as_ref().to_owned(); - std::fs::create_dir_all(&path)?; - - let temp_path = self.gen_temp_path(&path); - let mut file = File::open(&temp_path)?; - file.write_all(buf.as_ref())?; - - let file_path = self.root_path.join(path); - std::fs::rename(&temp_path, &file_path)?; - Ok(()) - } - - /// Returns the temporary file path. - fn gen_temp_path(&self, path: &Path) -> PathBuf { - let seq: String = (0..4) - .map(|_| format!("{:02x}", rand::random::())) - .collect(); - - self.root_path - .join(path) - .with_file_name(seq) - .with_extension("tmp") - } -} diff --git a/crates/jsvm/runtime/transpile/emit_cache.rs b/crates/jsvm/runtime/transpile/emit_cache.rs deleted file mode 100644 index 9572f0d..0000000 --- a/crates/jsvm/runtime/transpile/emit_cache.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright 2023-2024 the Supabase authors. All rights reserved. MIT license. - -use std::path::{Path, PathBuf}; - -use deno_ast::ModuleSpecifier; -use deno_core::anyhow::anyhow; -use deno_core::error::AnyError; -use deno_core::serde_json; -use serde::{Deserialize, Serialize}; - -use crate::runtime::transpile::DiskCache; - -#[derive(Debug, Deserialize, Serialize)] -struct EmitMetadata { - pub source_hash: u64, - pub target_hash: u64, -} - -/// Cache for previously emitted files. -pub struct EmitCache { - disk_cache: DiskCache, - cli_version: &'static str, -} - -impl EmitCache { - /// Returns a new [`EmitCache`]. - pub fn new(path: impl AsRef) -> Self { - Self { - disk_cache: DiskCache::new(path), - cli_version: "", - } - } - - pub fn read_emit(&self, specifier: &ModuleSpecifier, source_hash: u64) -> Option { - let meta_filename = self.get_meta_filename(specifier)?; - let emit_filename = self.get_emit_filename(specifier)?; - - // Load and verify the metadata file is for this source and CLI version. 
- let bytes = self.disk_cache.read(&meta_filename).ok()?; - let meta: EmitMetadata = serde_json::from_slice(&bytes).ok()?; - if meta.source_hash != source_hash { - return None; - } - - // Load and verify the compilation result is for the metadata. - let emit_bytes = self.disk_cache.read(&emit_filename).ok()?; - if meta.target_hash != compute_emit_hash(&emit_bytes, self.cli_version) { - return None; - } - - String::from_utf8(emit_bytes).ok() - } - - pub fn write_emit(&self, specifier: &ModuleSpecifier, source_hash: u64, target_code: &str) { - if let Err(err) = self.write_emit_inner(specifier, source_hash, target_code) { - // Should never error here, but if it ever does don't fail. - if cfg!(debug_assertions) { - panic!("Error saving emit data ({specifier}): {err}"); - } else { - // log::debug!("Error saving emit data({}): {}", specifier, err); - } - } - } - - fn write_emit_inner( - &self, - specifier: &ModuleSpecifier, - source_hash: u64, - code: &str, - ) -> Result<(), AnyError> { - let meta_filename = self - .get_meta_filename(specifier) - .ok_or_else(|| anyhow!("Could not get meta filename."))?; - let emit_filename = self - .get_emit_filename(specifier) - .ok_or_else(|| anyhow!("Could not get emit filename."))?; - - let target_hash = compute_emit_hash(code.as_bytes(), self.cli_version); - let metadata = EmitMetadata { - source_hash, - target_hash, - }; - - let metadata = serde_json::to_vec(&metadata)?; - self.disk_cache.write(&meta_filename, &metadata)?; - self.disk_cache.write(&emit_filename, code.as_bytes())?; - - Ok(()) - } - - fn get_meta_filename(&self, specifier: &ModuleSpecifier) -> Option { - self.disk_cache - .get_filename_with_extension(specifier, "meta") - } - - fn get_emit_filename(&self, specifier: &ModuleSpecifier) -> Option { - self.disk_cache.get_filename_with_extension(specifier, "js") - } -} - -fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> u64 { - todo!() -} diff --git a/crates/jsvm/runtime/transpile/mod.rs 
b/crates/jsvm/runtime/transpile/mod.rs deleted file mode 100644 index 12bf7eb..0000000 --- a/crates/jsvm/runtime/transpile/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! TODO. - -pub use crate::runtime::transpile::disk_cache::DiskCache; -pub use crate::runtime::transpile::emit_cache::EmitCache; - -mod disk_cache; -mod emit_cache; - -pub struct Transpiler {} - -impl Transpiler {} - -pub struct Emitter {} - -impl Emitter {} diff --git a/crates/jsvm/runtime/util/mod.rs b/crates/jsvm/runtime/util/mod.rs deleted file mode 100644 index 8e7716c..0000000 --- a/crates/jsvm/runtime/util/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! TODO. -//! diff --git a/crates/source/utils/hashing.rs b/crates/jsvm/utility/hashing.rs similarity index 56% rename from crates/source/utils/hashing.rs rename to crates/jsvm/utility/hashing.rs index d219f90..48a160f 100644 --- a/crates/source/utils/hashing.rs +++ b/crates/jsvm/utility/hashing.rs @@ -10,39 +10,84 @@ use std::path::Path; use sha2::{Digest, Sha256}; use walkdir::WalkDir; -/// Computes the `SHA-256` hash of a single file. +/// Computes the [SHA-256] hash of a single file. /// -/// # Arguments -/// * `path` - A reference to the path of the file to be hashed. +/// See the [`OpenOptions::open`] method for more details. /// -/// # Returns -/// * `Ok(Vec)` - The computed hash as a byte vector if successful. -/// * `Err(io::Error)` - An error if the file could not be read. +/// # Errors +/// +/// This function will return an error if `path` does not already exist. +/// Other errors may also be returned according to [`OpenOptions::open`]. 
+/// +/// # Examples +/// +/// ```rust,no_run +/// use axiston_rt_source::utils::io::hash_file; +/// +/// fn main() -> std::io::Result<()> { +/// let _ = hash_file("foo.txt")?; +/// Ok(()) +/// } +/// ``` +/// +/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 +/// [`OpenOptions::open`]: std::fs::OpenOptions pub fn hash_file(path: impl AsRef) -> io::Result> { _hash_file_impl(path.as_ref()) } -/// Computes the `SHA-256` hash of a directory by traversing its contents. +/// Computes the [SHA-256] hash of a directory by traversing its contents. +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * The provided `path` doesn't exist. +/// * The process lacks permissions to view the contents. +/// * The `path` points at a non-directory file. +/// +/// # Examples /// -/// # Arguments -/// * `path` - A reference to the path of the directory to be hashed. +/// ```rust,no_run +/// use axiston_rt_source::utils::io::hash_directory; /// -/// # Returns -/// * `Ok(Vec)` - The computed hash as a byte vector if successful. -/// * `Err(io::Error)` - An error if a file cannot be read or traversal fails. +/// fn main() -> std::io::Result<()> { +/// let _ = hash_directory("./foo")?; +/// Ok(()) +/// } +/// ``` pub fn hash_directory(path: impl AsRef) -> io::Result> { _hash_directory_impl(path.as_ref(), None) } -/// Computes the `SHA-256` hash of a directory with a custom file filter. +/// Computes the [SHA-256] hash of a directory with a custom file filter. /// -/// # Arguments -/// * `path` - A reference to the path of the directory to be hashed. -/// * `filter` - A function that determines whether a file should be included. +/// # Errors /// -/// # Returns -/// * `Ok(Vec)` - The computed hash as a byte vector if successful. -/// * `Err(io::Error)` - An error if a file cannot be read or traversal fails. 
+/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * The provided `path` doesn't exist. +/// * The process lacks permissions to view the contents. +/// * The `path` points at a non-directory file. +/// +/// # Examples +/// +/// ```rust,no_run +/// use axiston_rt_source::utils::io::hash_directory_with_filter; +/// +/// fn main() -> std::io::Result<()> { +/// let _ = hash_directory_with_filter("./foo", |path| { +/// // Path is guaranteed to point to the file. +/// path.extension().map_or(false, |ext| ext == "txt") +/// })?; +/// +/// Ok(()) +/// } +/// ``` +/// +/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 pub fn hash_directory_with_filter(path: impl AsRef, filter: F) -> io::Result> where F: Fn(&Path) -> bool, @@ -50,7 +95,9 @@ where _hash_directory_impl(path.as_ref(), Some(&filter)) } -/// Computes the `SHA-256` hash of a file. +/// Computes the [SHA-256] hash of a file. +/// +/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 fn _hash_file_impl(path: &Path) -> io::Result> { let mut file = File::open(path)?; let mut hasher = Sha256::new(); @@ -60,13 +107,16 @@ fn _hash_file_impl(path: &Path) -> io::Result> { if n == 0 { break; } + hasher.update(&buffer[..n]); } Ok(hasher.finalize().to_vec()) } -/// Computes the `SHA-256` hash of a directory with an optional filter. +/// Computes the [SHA-256] hash of a directory with an optional filter. +/// +/// [SHA-256]: https://en.wikipedia.org/wiki/SHA-2 fn _hash_directory_impl( path: &Path, filter: Option<&dyn Fn(&Path) -> bool>, diff --git a/crates/source/utils/mod.rs b/crates/jsvm/utility/mod.rs similarity index 77% rename from crates/source/utils/mod.rs rename to crates/jsvm/utility/mod.rs index 71aa581..fedcacc 100644 --- a/crates/source/utils/mod.rs +++ b/crates/jsvm/utility/mod.rs @@ -1,4 +1,4 @@ -//! Additional utilities. +//! Additional utilities for [`std::io`] and [`std::fs`]. 
mod hashing; diff --git a/crates/schema/Cargo.toml b/crates/schema/Cargo.toml index 35c1a70..880c5e4 100644 --- a/crates/schema/Cargo.toml +++ b/crates/schema/Cargo.toml @@ -23,22 +23,18 @@ path = "lib.rs" [features] default = ["client", "server"] -# Enables or disables gRPC client code generation. +# Enables gRPC client code generation. client = [] -# Enables or disables gRPC server code generation. +# Enables gRPC server code generation. server = [] [dependencies] -serde = { workspace = true } -serde_json = { workspace = true } -jsonschema = { workspace = true } - tonic = { workspace = true } prost = { workspace = true } tonic-types = { workspace = true } prost-types = { workspace = true } [build-dependencies] -anyhow = { workspace = true } tonic-build = { workspace = true } prost-build = { workspace = true } +anyhow = { workspace = true } diff --git a/crates/schema/README.md b/crates/schema/README.md index 2fc8313..8356fc0 100644 --- a/crates/schema/README.md +++ b/crates/schema/README.md @@ -1,4 +1,4 @@ -### runtime/task +### runtime/schema [![Build Status][action-badge]][action-url] [![Crate Docs][docs-badge]][docs-url] diff --git a/crates/schema/build.rs b/crates/schema/build.rs index 8264a2b..4b0bedb 100644 --- a/crates/schema/build.rs +++ b/crates/schema/build.rs @@ -3,6 +3,8 @@ use std::path::PathBuf; fn main() -> anyhow::Result<()> { + println!("cargo:rerun-if-changed=./protobuf"); + let generate_client = cfg!(feature = "client"); let generate_server = cfg!(feature = "server"); diff --git a/crates/schema/lib.rs b/crates/schema/lib.rs index 2052c04..10f70c1 100644 --- a/crates/schema/lib.rs +++ b/crates/schema/lib.rs @@ -2,32 +2,55 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("./README.md")] -pub mod policy { +pub mod message { //! Includes files generated by `prost`. + //! Built from `message/*.proto`. - pub mod retry { + pub mod event { //! Includes files generated by `prost`. - //! Built from `policy/retry.proto`. + //! 
Built from `message/event.proto`. - tonic::include_proto!("rt.policy.retry"); + tonic::include_proto!("rt.message.event"); } - pub mod timeout { + pub mod graph { //! Includes files generated by `prost`. - //! Built from `policy/timeout.proto`. + //! Built from `message/graph.proto`. - tonic::include_proto!("rt.policy.timeout"); + tonic::include_proto!("rt.message.graph"); + } + + pub mod status { + //! Includes files generated by `prost`. + //! Built from `message/status.proto`. + + tonic::include_proto!("rt.message.status"); } } -pub mod internal { +pub mod policy { //! Includes files generated by `prost`. + //! Built from `policy/*.proto`. + + pub mod resource { + //! Includes files generated by `prost`. + //! Built from `policy/resource.proto`. + + tonic::include_proto!("rt.policy.resource"); + } + + pub mod retry { + //! Includes files generated by `prost`. + //! Built from `policy/retry.proto`. - pub mod json { + tonic::include_proto!("rt.policy.retry"); + } + + pub mod timeout { //! Includes files generated by `prost`. - //! Built from `internal/json.proto`. + //! Built from `policy/timeout.proto`. - tonic::include_proto!("rt.internal.json"); + tonic::include_proto!("rt.policy.timeout"); } } @@ -44,24 +67,3 @@ pub mod registry { tonic::include_proto!("rt.registry"); } - -pub mod entity { - //! Includes files generated by `prost`. - //! Built from `entity.proto`. - - tonic::include_proto!("rt.entity"); -} - -pub mod request { - //! Includes files generated by `prost`. - //! Built from `request.proto`. - - tonic::include_proto!("rt.request"); -} - -pub mod response { - //! Includes files generated by `prost`. - //! Built from `response.proto`. 
- - tonic::include_proto!("rt.response"); -} diff --git a/crates/schema/protobuf/instance.proto b/crates/schema/protobuf/instance.proto index 668d372..861a6cb 100644 --- a/crates/schema/protobuf/instance.proto +++ b/crates/schema/protobuf/instance.proto @@ -2,17 +2,50 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; import "google/protobuf/duration.proto"; -import "message/request.proto"; -import "message/response.proto"; +import "message/event.proto"; package rt.instance; +// Requests service status and metrics. +message GetStatusRequest { + // Includes detailed metrics in the response. + optional bool verbose_metrics = 1; + // Forces retrieval of the latest metrics. + optional bool force_latest = 2; + // Sliding window length (used by metrics). + optional uint32 sliding_window = 3; +} + +// Contains service status and performance metrics. +message GetStatusResponse { + // Task-related metrics: + + // Number of tasks waiting in the queue to be processed. + uint32 tasks_waiting = 11; + // Number of tasks currently being processed. + uint32 tasks_running = 12; + // Number of tasks already completed. + uint32 tasks_completed = 13; + + // Time-related metrics: + + // Average waiting time for tasks in the most recent window. + google.protobuf.Duration recent_waiting_time = 21; + // Average running time for tasks in the most recent window. + google.protobuf.Duration recent_running_time = 22; + // Overall average waiting time since the service started. + google.protobuf.Duration average_waiting_time = 23; + // Overall average running time since the service started. + google.protobuf.Duration average_running_time = 24; +} + // Describes the message format for sending events. message EventRequest { // The unique ID of the request message. uint32 request_id = 1; // The unique ID of the message group. - uint32 group_id = 2; + // Initial value is usually set by the Runtime. + optional uint32 group_id = 2; // When the event was recv by the gateway. 
google.protobuf.Timestamp recv = 3; @@ -22,13 +55,11 @@ message EventRequest { // The content of the message. oneof payload { // Step 1.1: Gateway requests to open a connection. - rt.request.OpenRequest open_request = 11; - // Step 1.3: Gateway request to update policy. - rt.request.PolicyRequest policy_request = 12; + rt.message.event.OpenRequest open_request = 11; // Step 2.1: Gateway submits a task for the execution. - rt.request.ExecuteRequest execute_request = 13; + rt.message.event.ExecuteRequest execute_request = 12; // Step 3.1: Gateway requests to close the connection. - rt.request.CloseRequest close_request = 14; + rt.message.event.CloseRequest close_request = 13; } } @@ -49,47 +80,16 @@ message EventResponse { // The content of the message. oneof payload { // Step 1.2: Runtime acknowledges that the connection is open. - rt.response.OpenResponse open_response = 11; + rt.message.event.OpenResponse open_response = 11; // Step 2.2: Runtime notifies the Gateway about the task's status change. - rt.response.NotifyResponse notify_response = 12; + rt.message.event.NotifyResponse notify_response = 12; // Step 2.3: Runtime responds with the result of executing the task. - rt.response.ExecuteResponse execute_response = 13; + rt.message.event.ExecuteResponse execute_response = 13; // Step 3.2: Runtime confirms the connection is closed. - rt.response.CloseResponse close_response = 14; + rt.message.event.CloseResponse close_response = 14; } } -// Requests service status and metrics. -message GetStatusRequest { - // Includes detailed metrics in the response. - optional bool verbose_metrics = 1; - // Forces retrieval of the latest metrics. - optional bool force_latest = 2; - // Sliding window length (used by metrics). - optional uint32 sliding_window = 3; -} - -// Contains service status and performance metrics. -message GetStatusResponse { - // Task-related metrics: - - // Number of tasks waiting in the queue to be processed. 
- uint64 tasks_waiting = 11; - // Number of tasks currently being processed. - uint64 tasks_running = 12; - - // Time-related metrics: - - // Average waiting time for tasks in the most recent window. - google.protobuf.Duration recent_waiting_time = 21; - // Average running time for tasks in the most recent window. - google.protobuf.Duration recent_running_time = 22; - // Overall average waiting time since the service started. - google.protobuf.Duration average_waiting_time = 23; - // Overall average running time since the service started. - google.protobuf.Duration average_running_time = 24; -} - // Provides runtime instance management. service Instance { // Retrieves detailed service health and performance metrics. @@ -97,5 +97,5 @@ service Instance { // Provides a bidirectional event streaming RPC for continuous communication // between the gateway (as a client) and the runtime (as a server). - rpc ConnectWorker(stream EventRequest) returns (stream EventResponse); + rpc EventBus(stream EventRequest) returns (stream EventResponse); } diff --git a/crates/schema/protobuf/internal/json.proto b/crates/schema/protobuf/internal/json.proto deleted file mode 100644 index dc62b7b..0000000 --- a/crates/schema/protobuf/internal/json.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/empty.proto"; - -package rt.internal.json; - -// Represents different types of `JSON` values. -message JsonData { - // Specifies that the value field can hold one of several possible types. - oneof value { - // Represents a string type, e.g. `"hello"` or `"world"`. - string string_value = 1; - // Represents an integer type e.g. `4` or `138253`. - int32 integer_value = 2; - // Represents a float type e.g. `2.6` or `6.0004`. - float float_value = 3; - // Represents a boolean type i.e. `true`/`false`. - bool boolean_value = 4; - // Represents a null value i.e. `null`. - google.protobuf.Empty null_value = 5; - // Represents an object type e.g. `{ "key": "value" }`. 
- JsonObject object_value = 6; - // Represents an array type e.g. `["key", "value"]`. - JsonArray array_value = 7; - } -} - -// Contains the values of a map of `JSON` values. -message JsonObject { - // Contains the map of the field names and their values. - map fields = 1; -} - -// Contains the values of an array of `JSON` values. -message JsonArray { - // Contains the values of the elements in the array. - repeated JsonData items = 1; -} diff --git a/crates/schema/protobuf/message/entity.proto b/crates/schema/protobuf/message/entity.proto deleted file mode 100644 index 1f0f06b..0000000 --- a/crates/schema/protobuf/message/entity.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/empty.proto"; -import "internal/json.proto"; - -package rt.entity; - -// Describes service details. -message Service { - // Unique identifier for the service. - string service_id = 1; - // Name of the service (e.g., Google Email). - string name = 21; - // Unique identifier for the service's icon. - string icon = 22; - // Brief description of the service. - string description = 4; - - string version = 5; -} - -// Describes action or trigger details. -message Entity { - // Unique identifier for the entity. - string entity_id = 1; - // Unique identifier for the service. - string service_id = 2; - - // Name of the entity (e.g., Send via Google Email). - string name = 21; - // Unique identifier for the entity's icon. - string icon = 22; - - // Input `JSON` Schema for this entity. - repeated rt.internal.json.JsonData inputs = 7; - // Output `JSON` Schema for this entity. - repeated rt.internal.json.JsonData outputs = 8; - // Error `JSON` Schema for this entity. - repeated rt.internal.json.JsonData errors = 9; -} - -// Describes secrets required by a entity. -message Secret { - // Unique identifier for the secret. - string secret_id = 1; - // Name of the secret (e.g., "API Key"). 
- string name = 2; - // Description of the secret. - string description = 3; -} diff --git a/crates/schema/protobuf/message/event.proto b/crates/schema/protobuf/message/event.proto new file mode 100644 index 0000000..02de0a3 --- /dev/null +++ b/crates/schema/protobuf/message/event.proto @@ -0,0 +1,103 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "message/graph.proto"; +import "message/status.proto"; +import "policy/resource.proto"; +import "policy/retry.proto"; +import "policy/timeout.proto"; + +package rt.message.event; + +// Indicates the gateway is ready accept events. +message OpenRequest { + // Token for authenticating the gateway. + string authentication_token = 3; + // Required runtime capabilities. + repeated string runtime_capabilities = 4; + // Preferred communication protocols. + repeated string preferred_protocols = 5; + + // Constraints on resources for the task. + optional rt.policy.resource.ResourceLimits resource_limits = 21; + // Policy for retrying failed tasks. + optional rt.policy.retry.RetryPolicy retry_policy = 31; + // Policy for handling task timeouts. + optional rt.policy.timeout.TimeoutPolicy timeout_policy = 32; +} + +// Request to submit a graph/task for execution by the Runtime. +message ExecuteRequest { + // Includes either a single task of an entire graph. + oneof execute_model { + // Includes an entire graph, returns the output of all tasks. + rt.message.graph.TaskGraph task_graph = 1; + // Includes a single task, returns the output of it. + rt.message.graph.TaskNode task_node = 2; + } + + // Priority level of the graph/task (higher is more important). + optional int32 priority = 21; + // Deadline for the graph/task completion. + optional google.protobuf.Timestamp deadline = 22; + // Whether graph/task dependencies are cached. + optional bool cache_deps = 23; +} + +// Request to close the connection, blocking the Runtime queue. 
+message CloseRequest { + // Forces immediate closure without waiting. + optional bool force_close = 2; + // Reason for closing the connection. + optional string reason = 3; + // Require acknowledgment before closing. + optional bool ack_required = 4; +} + +// Start execution response message. +message OpenResponse { + // Allocated runtime resources. + optional rt.policy.resource.ResourceAllocated resource = 21; + // Policy for retrying failed tasks. + optional rt.policy.retry.RetryPolicy retry_policy = 31; + // Policy for handling task timeouts. + optional rt.policy.timeout.TimeoutPolicy timeout_policy = 32; +} + +// Intermediate graph status notification sent by the runtime. +message NotifyResponse { + oneof status_info { + // Details for the "waiting" status. + rt.message.status.WaitingStatus waiting = 21; + // Details for the "pre-running" status. + rt.message.status.PreRunningStatus pre_running = 22; + // Details for the "running" status. + rt.message.status.RunningStatus running = 23; + // Details for the "post-running" status. + rt.message.status.PostRunningStatus post_running = 24; + } +} + +// Response message containing the result of graph execution. +message ExecuteResponse { + // Unique identifier for the graph. + string graph_id = 1; + // Graph's return code indicating success or failure. + uint32 return_code = 2; + + // When the graph started. + google.protobuf.Timestamp start_time = 4; + // When the graph completed. + google.protobuf.Timestamp end_time = 5; + // Total time taken for execution. + google.protobuf.Duration execution_time = 6; +} + +// Response to acknowledge that no new graphs will be accepted. +message CloseResponse { + // Indicates if it's safe to terminate the connection. + bool is_safe_to_close = 1; + // Number of graphs still in the queue or running. 
+ int32 remaining_graphs = 2; +} diff --git a/crates/schema/protobuf/message/graph.proto b/crates/schema/protobuf/message/graph.proto new file mode 100644 index 0000000..cb3f80e --- /dev/null +++ b/crates/schema/protobuf/message/graph.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package rt.message.graph; + +message TaskGraph { + map nodes = 1; + map edges = 2; +} + +message TaskNode { + oneof task_source { + GitRepository git_repo = 1; + BuiltinModule module = 2; + // TarArchive tar_archive = 5; + // ZipArchive zip_archive = 6; + } + + // Custom task parameters as key-value pairs. + map task_fields = 12; + // Sensitive task-specific data (e.g., API keys). + map task_secrets = 13; +} + +message TaskEdge { + // Unique identifier of the source node. + string head_node = 1; + // Unique identifier of the target node. + string tail_node = 2; +} + +message GitRepository { + string url = 11; +} + +// message TarArchive {} +// message ZipArchive {} + +message BuiltinModule { + // Unique identifier for the task. + string task_id = 11; +} diff --git a/crates/schema/protobuf/message/request.proto b/crates/schema/protobuf/message/request.proto deleted file mode 100644 index 1efaf03..0000000 --- a/crates/schema/protobuf/message/request.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/duration.proto"; -import "policy/retry.proto"; -import "policy/timeout.proto"; - -package rt.request; - -// Indicates the gateway is ready accept events. -message OpenRequest { - // Token for authenticating the gateway. - string authentication_token = 3; - // Required runtime capabilities. - repeated string runtime_capabilities = 4; - // Preferred communication protocols. - repeated string preferred_protocols = 5; - - // Constraints on resources for the task. - optional ResourceLimits resource_limits = 26; -} - -// Request to update the middleware policy by the Runtime. 
-message PolicyRequest { - // Policy for retrying failed tasks. - optional rt.policy.retry.RetryPolicy retry_policy = 31; - // Policy for handling task timeouts. - optional rt.policy.timeout.TimeoutPolicy timeout_policy = 32; -} - -// Request to submit a task for execution by the Runtime. -message ExecuteRequest { - // Unique identifier for the task. - string task_id = 1; - // Custom task parameters as key-value pairs. - map task_fields = 2; - // Sensitive task-specific data (e.g., API keys). - map task_secrets = 3; - - // Priority level of the task (higher is more important). - optional int32 priority = 23; - // Deadline for the task completion. - optional google.protobuf.Timestamp deadline = 24; - // Whether task dependencies are cached. - optional bool cache_deps = 25; -} - -// Request to close the connection, blocking the Runtime queue. -message CloseRequest { - // Forces immediate closure without waiting. - optional bool force_close = 2; - // Reason for closing the connection. - optional string reason = 3; - // Require acknowledgment before closing. - optional bool ack_required = 4; -} - -// Limits runtime resources. -message ResourceLimits { - // Maximum used CPU percentage. - uint32 max_cpu_percent = 1; - // Maximum used RAM in MB. - uint32 max_ram_mb = 2; - // Maximum used disk in MB. - uint64 max_disk_mb = 3; -} diff --git a/crates/schema/protobuf/message/response.proto b/crates/schema/protobuf/message/response.proto deleted file mode 100644 index 7454675..0000000 --- a/crates/schema/protobuf/message/response.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/duration.proto"; - -package rt.response; - -// Start execution response message. -message OpenResponse { -} - -// Response message containing the result of task execution. -message ExecuteResponse { - // Unique identifier for the task. - string task_id = 1; - // Task's return code indicating success or failure. 
- uint32 return_code = 2; - - // When the task started. - google.protobuf.Timestamp start_time = 4; - // When the task completed. - google.protobuf.Timestamp end_time = 5; - // Total time taken for execution. - google.protobuf.Duration execution_time = 6; -} - -// Response to acknowledge that no new tasks will be accepted. -message CloseResponse { - // Indicates if it's safe to terminate the connection. - bool is_safe_to_close = 1; - // Number of tasks still in the queue or running. - int32 remaining_tasks = 3; -} - -// Intermediate task status notification sent by the runtime. -message NotifyResponse { - oneof status_info { - // Details for the "waiting" status. - WaitingStatus waiting = 21; - // Details for the "pre-running" status. - PreRunningStatus pre_running = 22; - // Details for the "running" status. - RunningStatus running = 23; - // Details for the "post-running" status. - PostRunningStatus post_running = 24; - } -} - -// Waiting status information. -message WaitingStatus { - // Number of tasks ahead in the queue. - uint32 tasks_before = 1; - // Total number of tasks in the queue. - uint32 queue_size = 2; - // Maximum capacity of the queue. - uint32 queue_capacity = 3; - // Estimated wait time before task starts. - google.protobuf.Duration wait_time = 4; -} - -// Pre-running status information. -message PreRunningStatus { - // Size (in bytes) of data to serialize. - uint64 input_bytes = 1; - // Version of the Runtime. - uint64 runtime_version = 2; - // File system type used ("virtual" or "physical"). - FileSystemType fs_type = 3; - // Whether task dependencies are cached. - bool cache_deps = 4; - // Allocated runtime resources. - ResourceAllocated resource = 5; -} - -// Running status information. -message RunningStatus { - // Identifier of the thread running this task. - int32 thread_id = 8; - // Estimated remaining run time. - google.protobuf.Duration run_time = 1; - // Current progress checkpoint. 
- int32 checkpoint = 6; -} - -// Post-running status information. -message PostRunningStatus { - // Task's return code indicating success or failure. - uint32 return_code = 1; - // Total bytes read during task execution. - uint64 read_bytes = 2; - // Total bytes written during task execution. - uint64 written_bytes = 3; - // Size (in bytes) of data to deserialize. - uint64 output_bytes = 4; - // Peak or maximum recorded resource usage. - ResourceUsage resource_usage = 5; -} - -// Represents the type of filesystem used. -enum FileSystemType { - // Default value, should not be used. - FILESYSTEM_UNSPECIFIED = 0; - // Virtual filesystem (e.g., in-memory, network-based). - FILESYSTEM_VIRTUAL = 1; - // Physical filesystem (e.g., SSD, HDD). - FILESYSTEM_PHYSICAL = 2; -} - -// Allocated runtime resources. -message ResourceAllocated { - // Allocated CPU cores. - uint32 allocated_cpu_cores = 21; - // Allocated RAM in megabytes. - uint32 allocated_ram_mb = 22; - // Allocated HDD in megabytes. - uint32 allocated_disk_mb = 23; -} - -// Peak or maximum recorded resource usage. -message ResourceUsage { - // Peak CPU usage as a percentage. - uint32 peak_cpu_percent = 21; - // Peak RAM usage in megabytes. - uint32 peak_ram_mb = 22; - // Peak disk usage in megabytes. - uint32 peak_disk_mb = 23; - // Peak GPU usage as a percentage. - uint32 peak_gpu_percent = 24; -} diff --git a/crates/schema/protobuf/message/status.proto b/crates/schema/protobuf/message/status.proto new file mode 100644 index 0000000..3ee758b --- /dev/null +++ b/crates/schema/protobuf/message/status.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "policy/resource.proto"; + +package rt.message.status; + +// Waiting status information. +message WaitingStatus { + // Number of graphs ahead in the queue. + uint32 graphs_before = 1; + // Total number of graphs in the queue. 
+ uint32 queue_size = 2; + // Maximum capacity of the queue. + uint32 queue_capacity = 3; + // Estimated wait time before the graph starts. + google.protobuf.Duration wait_time = 4; +} + +// Pre-running status information. +message PreRunningStatus { + // Size (in bytes) of data to serialize. + uint64 input_bytes = 1; + // Version of the Runtime. + uint64 runtime_version = 2; + + // Whether task dependencies are cached. + bool cache_deps = 4; +} + +// Running status information. +message RunningStatus { + // Identifier of the thread running the graph. + int32 thread_id = 8; + // Estimated remaining run time. + google.protobuf.Duration run_time = 1; + // Current progress checkpoint. + int32 checkpoint = 6; +} + +// Post-running status information. +message PostRunningStatus { + // Graph's return code indicating success or failure. + uint32 return_code = 1; + // Total bytes read during graph execution. + uint64 read_bytes = 2; + // Total bytes written during graph execution. + uint64 written_bytes = 3; + // Size (in bytes) of data to deserialize. + uint64 output_bytes = 4; + // Peak or maximum recorded resource usage. + rt.policy.resource.ResourceUsage resource_usage = 5; +} + diff --git a/crates/schema/protobuf/policy/resource.proto b/crates/schema/protobuf/policy/resource.proto new file mode 100644 index 0000000..31cc3f4 --- /dev/null +++ b/crates/schema/protobuf/policy/resource.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package rt.policy.resource; + +// Represents the type of filesystem used. +enum FileSystemType { + // Default value, should not be used. + FILESYSTEM_UNSPECIFIED = 0; + // Virtual filesystem only (e.g., in-memory, network-based). + FILESYSTEM_VIRTUAL = 1; + // Physical filesystem only (e.g., SSD, HDD). + FILESYSTEM_PHYSICAL = 2; + // Virtual and physical filesystems. + FILESYSTEM_BOTH = 3; +} + +// Limits runtime resources. +message ResourceLimits { + // Maximum used CPU percentage. 
+ optional uint32 max_cpu_cores = 1; + // Maximum used RAM in MB. + optional uint32 max_ram_mib = 2; + // Maximum used disk in MB. + optional uint64 max_disk_mib = 3; + + // File system type used ("virtual" or "physical"). + optional FileSystemType fs_type = 11; +} + +// Allocated runtime resources. +message ResourceAllocated { + // Allocated CPU cores. + optional uint32 allocated_cpu_cores = 1; + // Allocated RAM in megabytes. + optional uint32 allocated_ram_mib = 2; + // Allocated HDD in megabytes. + optional uint32 allocated_disk_mib = 3; + + // File system type used ("virtual" or "physical"). + FileSystemType fs_type = 11; +} + +// Peak (or maximum) recorded resource usage. +message ResourceUsage { + // Peak CPU usage as a percentage. + uint32 peak_cpu_percent = 1; + // Peak RAM usage in megabytes. + uint32 peak_ram_mb = 2; + // Peak disk usage in megabytes. + uint32 peak_disk_mb = 3; + // Peak GPU usage as a percentage. + uint32 peak_gpu_percent = 4; +} diff --git a/crates/schema/protobuf/policy/retry.proto b/crates/schema/protobuf/policy/retry.proto index 4d9b4f6..6e200e4 100644 --- a/crates/schema/protobuf/policy/retry.proto +++ b/crates/schema/protobuf/policy/retry.proto @@ -1,6 +1,5 @@ syntax = "proto3"; -import "google/protobuf/timestamp.proto"; import "google/protobuf/duration.proto"; package rt.policy.retry; @@ -16,3 +15,29 @@ message RetryPolicy { // Maximum delay between consecutive retries. google.protobuf.Duration max_backoff = 4; } + +// Backoff strategy for retries. +message RetryStrategy { + // Options for different backoff strategies. + oneof strategy { + LinearBackoff linear = 1; + ExponentialBackoff exponential = 2; + } + + // Maximum delay between consecutive retries. + optional google.protobuf.Duration max_backoff = 3; + // Optional jitter percentage to randomize delays (0.0 - 1.0). + // No value applies the default jitter, a value of 0 disables jitter. + optional double jitter_percent = 4; +} + +// Linear backoff configuration. 
+message LinearBackoff { + optional google.protobuf.Duration step_backoff = 1; +} + +// Exponential backoff configuration. +message ExponentialBackoff { + optional google.protobuf.Duration base_backoff = 1; + optional double exponential_multiplier = 2; +} diff --git a/crates/schema/protobuf/policy/timeout.proto b/crates/schema/protobuf/policy/timeout.proto index 64601e1..c438a7c 100644 --- a/crates/schema/protobuf/policy/timeout.proto +++ b/crates/schema/protobuf/policy/timeout.proto @@ -1,6 +1,5 @@ syntax = "proto3"; -import "google/protobuf/timestamp.proto"; import "google/protobuf/duration.proto"; package rt.policy.timeout; @@ -9,14 +8,12 @@ package rt.policy.timeout; message TimeoutPolicy { // Maximum execution time allowed for the task. google.protobuf.Duration execution_timeout = 1; - // Whether to forcibly terminate the task on timeout. - bool terminate_on_timeout = 2; // Action to take on timeout (e.g., "retry", "terminate"). - TimeoutAction timeout_action = 3; + optional TimeoutAction timeout_action = 2; // Extra time given before final termination after timeout. - optional google.protobuf.Duration grace_period = 4; + optional google.protobuf.Duration grace_period = 3; // Frequency of checking for timeout conditions. - optional google.protobuf.Duration monitor_interval = 5; + optional google.protobuf.Duration monitor_interval = 4; } // Lists all of possible timeout actions. @@ -24,7 +21,9 @@ enum TimeoutAction { // Default value, action unspecified. TIMEOUT_ACTION_UNSPECIFIED = 0; // Task is considered to be failed. Retry the task. + // Timeout is the total time spent for all the retries. TIMEOUT_ACTION_RETRY = 1; // Task is considered to be failed. Do not retry the task. + // Timeout is the total time spent for a single attempt. 
TIMEOUT_ACTION_TERMINATE = 2; } diff --git a/crates/schema/protobuf/registry.proto b/crates/schema/protobuf/registry.proto index c2877ec..69d1d33 100644 --- a/crates/schema/protobuf/registry.proto +++ b/crates/schema/protobuf/registry.proto @@ -2,21 +2,64 @@ syntax = "proto3"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; -import "message/entity.proto"; package rt.registry; +// Describes service's details. +message Service { + // Unique identifier for the service. + string service_id = 1; + // Current version of the service implementation. + string version = 2; + + // Display name of the service (e.g., Google Email). + string display_name = 21; + // Display unique identifier for the service's icon. + string display_icon = 22; + // Brief description of the service. + string description = 23; +} + +// Describes task's details. +message Task { + // Unique identifier for the task. + string task_id = 1; + // Unique identifier for the service. + string service_id = 2; + + // Display name of the task (e.g., "Send via Google Email"). + string display_name = 21; + // Unique identifier for the task's icon (e.g., "gmail_send"). + string display_icon = 22; + + // Contains a JSON schemas for inputs, outputs and errors. + string json_schema = 31; +} + +// Describes secrets required by a task. +message Secret { + // Unique identifier for the secret. + string secret_id = 1; + + // Display name of the secret (e.g., "Google Email API Key"). + string display_name = 21; + // Unique identifier for the secret's icon (e.g., "gmail_auth"). + string display_icon = 22; + // Brief description of the secret. + string description = 3; +} + // Contains the registry details. message RegistryContentResponse { // Total number of registered services. uint32 total_services = 1; - // Total number of registered entities. - uint32 total_entities = 2; + // Total number of registered tasks. + uint32 total_tasks = 2; // List of registered services. 
- repeated rt.entity.Service services = 11; - // List of registered entities. - repeated rt.entity.Entity entities = 12; + repeated rt.registry.Service services = 11; + // List of registered tasks. + repeated rt.registry.Task tasks = 12; // Registry registration startup timestamp. google.protobuf.Timestamp first_updated_at = 21; @@ -28,15 +71,17 @@ message RegistryContentResponse { message FindServicesRequest { // Unique identifier for the query. string query_id = 1; - // Searches the query to match services or entities. + // Searches the query to match services or tasks. string query = 2; // Filters by associated tags (if any). repeated string tags = 3; // Limits on the number of search results. uint32 max_results = 11; - // Includes deprecated entities in search results. + // Includes deprecated tasks in search results. bool include_deprecated = 12; + // Includes code-related tasks in search results. + bool include_code = 13; } // Defines the format of the search results. @@ -45,9 +90,9 @@ message FindServicesResponse { string query_id = 1; // Services matching the search criteria. - repeated rt.entity.Service matching_services = 2; - // Entities matching the search criteria. - repeated rt.entity.Entity matching_entities = 3; + repeated rt.registry.Service matching_services = 2; + // tasks matching the search criteria. + repeated rt.registry.Task matching_tasks = 3; // Total number of matches found. uint32 total_matches = 11; @@ -57,8 +102,8 @@ message FindServicesResponse { service Registry { // Retrieves all available tasks and their metadata. - rpc GetRegistryContent(google.protobuf.Empty) returns (RegistryContentResponse); + rpc GetRegistryServices(google.protobuf.Empty) returns (RegistryContentResponse); - // Searches for specific services and entities in the registry. - rpc FindServices(FindServicesRequest) returns (FindServicesResponse); + // Searches for specific services and tasks in the registry. 
+ rpc GetRegistryTasks(FindServicesRequest) returns (FindServicesResponse); } diff --git a/crates/server/Cargo.toml b/crates/server/Cargo.toml index be8ac02..5513616 100644 --- a/crates/server/Cargo.toml +++ b/crates/server/Cargo.toml @@ -17,27 +17,22 @@ documentation = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] +[features] +default = ["hashbrown"] + +# Enables high-performance SwissTable hash map. +hashbrown = ["dep:hashbrown"] + [lib] path = "lib.rs" [dependencies] -# axiston-rt-jsvm = { workspace = true } -axiston-rt-schema = { workspace = true } -axiston-rt-task = { workspace = true } +axiston-rt-schema = { workspace = true, features = ["server"] } +axiston-rt-jsvm = { workspace = true, features = [] } -tokio = { workspace = true } -tokio-stream = { workspace = true } +clap = { workspace = true } futures = { workspace = true } -anyhow = { workspace = true } -tracing = { workspace = true } - -serde = { workspace = true } -serde_json = { workspace = true } -jsonschema = { workspace = true } - -derive_more = { workspace = true } -time = { workspace = true } -uuid = { workspace = true } +pin-project-lite = { workspace = true } tonic = { workspace = true } prost = { workspace = true } @@ -46,3 +41,18 @@ prost-types = { workspace = true } tower = { workspace = true } tower-http = { workspace = true } +tracing = { workspace = true } + +serde = { workspace = true } +serde_json = { workspace = true } +derive_more = { workspace = true } +petgraph = { workspace = true } +thiserror = { workspace = true } + +semver = { workspace = true } +jsonschema = { workspace = true } +hashbrown = { workspace = true, optional = true } +ecow = { workspace = true } + +[dev-dependencies] +tokio = { workspace = true } diff --git a/crates/server/handler/instance.rs b/crates/server/handler/instance.rs index bcaf09b..c729dcb 100644 --- a/crates/server/handler/instance.rs +++ b/crates/server/handler/instance.rs @@ -1,33 +1,22 @@ -use std::num::{NonZero, 
NonZeroU32}; - -use axiston_rt_schema::instance::event_request::Payload as RequestPayload; -use axiston_rt_schema::instance::event_response::Payload as ResponsePayload; use axiston_rt_schema::instance::instance_server::{Instance, InstanceServer}; use axiston_rt_schema::instance::{ EventRequest, EventResponse, GetStatusRequest, GetStatusResponse, }; -use axiston_rt_schema::response::OpenResponse; use futures::stream::BoxStream; -use futures::{Stream, StreamExt}; -use prost_types::Timestamp; -use time::OffsetDateTime; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status, Streaming}; -use crate::handler::ErrorKind; -use crate::service::AppState; +use crate::service::ServiceState; /// Implements [`Instance`] service for the [`InstanceService`]. #[derive(Clone)] pub struct InstanceService { - state: AppState, + state: ServiceState, } impl InstanceService { /// Returns a new [`InstanceService`]. #[inline] - pub fn new(state: AppState) -> Self { + pub fn new(state: ServiceState) -> Self { Self { state } } @@ -36,24 +25,6 @@ impl InstanceService { pub fn into_server(self) -> InstanceServer { InstanceServer::new(self) } - - /// TODO. - async fn process_event_payload( - &self, - payload: RequestPayload, - ) -> Result { - match payload { - RequestPayload::OpenRequest(_) => { - // let _guard = task_counter.guard_running_tasks(); - // task_counter. 
- } - RequestPayload::PolicyRequest(_) => {} - RequestPayload::ExecuteRequest(_) => {} - RequestPayload::CloseRequest(_) => {} - } - - todo!() - } } #[tonic::async_trait] @@ -62,89 +33,15 @@ impl Instance for InstanceService { &self, request: Request, ) -> Result, Status> { - let (metadata, extension, request) = request.into_parts(); - - let sliding_window = request.sliding_window.and_then(NonZeroU32::new); - // let snapshot = self.state.task_counter.get_snapshot(sliding_window); - - let message = GetStatusResponse { - tasks_waiting: 0, - tasks_running: 0, - recent_waiting_time: None, - recent_running_time: None, - average_waiting_time: None, - average_running_time: None, - }; - - // Ok(Response::new(message)) - todo!() } - type ConnectWorkerStream = BoxStream<'static, Result>; + type EventBusStream = BoxStream<'static, Result>; - async fn connect_worker( + async fn event_bus( &self, request: Request>, - ) -> Result, Status> { - let mut request = request.into_inner(); - let (tx, rx) = mpsc::channel(128); - - // TODO: Create a new queue. 
- // let task_queue = self.state.task_queue.clone(); - // let task_counter = self.state.task_counter.clone(); - - let this = self.clone(); - let fut = async move { - while let Some(request) = request.next().await { - let recv_data_time = OffsetDateTime::now_utc(); - let recv_timestamp = Timestamp { - seconds: recv_data_time.unix_timestamp(), - nanos: recv_data_time.nanosecond() as i32, - }; - - let event_request = match request { - Ok(event_request) => event_request, - Err(event_status) => todo!(), - }; - - let Some(request_payload) = event_request.payload else { - tx.send(Err(ErrorKind::Unknown.into_status())).await; - continue; - }; - - let response_payload = match request_payload { - RequestPayload::OpenRequest(_) => { - ResponsePayload::OpenResponse(OpenResponse {}) - } - RequestPayload::PolicyRequest(_) => todo!(), - RequestPayload::ExecuteRequest(_) => todo!(), - RequestPayload::CloseRequest(_) => todo!(), - }; - - let send_data_time = OffsetDateTime::now_utc(); - let send_timestamp = Timestamp { - seconds: send_data_time.unix_timestamp(), - nanos: send_data_time.nanosecond() as i32, - }; - - let event_response = EventResponse { - request_id: event_request.request_id, - group_id: event_request.group_id, - response_id: 0, - recv: Some(recv_timestamp), - send: Some(send_timestamp), - payload: Some(response_payload), - }; - - tx.send(Ok(event_response)).await; - } - - // Client closed connection. - }; - - let _handle = tokio::spawn(fut); - let rx = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(rx))) + ) -> Result, Status> { + todo!() } } diff --git a/crates/server/handler/mod.rs b/crates/server/handler/mod.rs index b0372b9..a509fd1 100644 --- a/crates/server/handler/mod.rs +++ b/crates/server/handler/mod.rs @@ -29,8 +29,9 @@ impl Error { } /// Returns a new [`Status`]. + #[inline] pub fn into_status(self) -> Status { - todo!() + self.kind.into_status() } } @@ -58,7 +59,7 @@ impl ErrorKind { } /// Returns a new [`ErrorRepr`]. 
- pub fn into_repr(self) -> ErrorRepr<'static> { + fn into_repr(self) -> ErrorRepr<'static> { match self { ErrorKind::Unknown => ErrorRepr::INTERNAL_SERVICE_ERROR, ErrorKind::Aborted => ErrorRepr::SERVICE_WAS_ABORTED, diff --git a/crates/server/handler/registry.rs b/crates/server/handler/registry.rs index 7ff7ace..9d94b08 100644 --- a/crates/server/handler/registry.rs +++ b/crates/server/handler/registry.rs @@ -2,20 +2,19 @@ use axiston_rt_schema::registry::registry_server::{Registry, RegistryServer}; use axiston_rt_schema::registry::{ FindServicesRequest, FindServicesResponse, RegistryContentResponse, }; -// use axiston_rt_schema::registry::{CheckRequest, CheckResponse, RegistryRequest, RegistryResponse}; use tonic::{Request, Response, Status}; -use crate::service::AppState; +use crate::service::ServiceState; /// Implements [`Registry`] service for the [`RegistryServer`]. pub struct RegistryService { - state: AppState, + state: ServiceState, } impl RegistryService { /// Returns a new [`RegistryService`]. #[inline] - pub fn new(state: AppState) -> Self { + pub fn new(state: ServiceState) -> Self { Self { state } } @@ -28,14 +27,14 @@ impl RegistryService { #[tonic::async_trait] impl Registry for RegistryService { - async fn get_registry_content( + async fn get_registry_services( &self, request: Request<()>, ) -> Result, Status> { todo!() } - async fn find_services( + async fn get_registry_tasks( &self, request: Request, ) -> Result, Status> { diff --git a/crates/server/lib.rs b/crates/server/lib.rs index 51c5295..4835da1 100644 --- a/crates/server/lib.rs +++ b/crates/server/lib.rs @@ -10,4 +10,5 @@ pub mod handler; pub mod middleware; +pub mod routing; pub mod service; diff --git a/crates/server/middleware/mod.rs b/crates/server/middleware/mod.rs index 8b13789..874de47 100644 --- a/crates/server/middleware/mod.rs +++ b/crates/server/middleware/mod.rs @@ -1 +1 @@ - +//! TODO. 
diff --git a/crates/server/routing/context.rs b/crates/server/routing/context.rs new file mode 100644 index 0000000..505e788 --- /dev/null +++ b/crates/server/routing/context.rs @@ -0,0 +1,405 @@ +use std::error::Error; +use std::fmt; + +use derive_more::{Deref, DerefMut}; +use jsonschema::ValidationError; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Map, Value}; + +use crate::routing::context::layers::TaskLayers; + +/// Serializable [`TaskHandler`] service request. +/// +/// [`TaskHandler`]: crate::routing::handler::TaskHandler +#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] +#[must_use = "requests do nothing unless you serialize them"] +pub struct TaskRequest { + #[deref] + #[deref_mut] + inner: T, + + #[serde(rename = "task")] + pub(crate) task_id: String, + #[serde(skip)] + pub(crate) layers: Option, + + #[serde(rename = "inputs")] + pub(crate) inputs: Option, + #[serde(rename = "secrets")] + pub(crate) secrets: Option, +} + +impl TaskRequest { + /// Returns a new [`TaskRequest`]. + #[inline] + pub fn new(task_id: &str, inner: T) -> Self { + Self { + inner, + task_id: task_id.to_owned(), + layers: None, + inputs: None, + secrets: None, + } + } + + /// Merges the provided [`TaskLayers`] with the defaults. + pub fn with_layers(mut self, layers: TaskLayers) -> Self { + let _ = self.layers.get_or_insert(layers); + self + } + + /// Adds other key/value pair into the [`TaskRequest`]`::inputs` object. + pub fn with_inputs(mut self, key: &str, value: impl Into) -> Self { + let inputs = self + .inputs + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = inputs else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); + self + } + + /// Adds other key/value pair into the [`TaskRequest`]`::secrets` object. 
+ pub fn with_secrets(mut self, key: &str, value: impl Into) -> Self { + let inputs = self + .secrets + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = inputs else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); + self + } + + /// Returns the inner data. + #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl fmt::Debug for TaskRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskRequest") + .field("inputs", &self.inputs) + .field("secrets", &"*****") + .finish_non_exhaustive() + } +} + +/// Deserializable [`TaskHandler`] service response. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] +#[must_use = "responses do nothing unless you deserialize them"] +pub struct TaskResponse { + #[deref] + #[deref_mut] + inner: T, + + #[serde(rename = "outputs")] + pub(crate) outputs: Option, + #[serde(rename = "metrics")] + pub(crate) metrics: Option, +} + +impl TaskResponse { + /// Returns a new [`TaskResponse`]. + #[inline] + pub fn new(inner: T) -> Self { + Self { + inner, + outputs: None, + metrics: None, + } + } + + /// Adds other key/value pair into the [`TaskResponse`]`::outputs` object. + pub fn with_outputs(mut self, key: &str, value: impl Into) -> Self { + let outputs = self + .outputs + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = outputs else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); + self + } + + /// Adds other key/value pair into the [`TaskResponse`]`::metrics` object. + pub fn with_metrics(mut self, key: &str, value: impl Into) -> Self { + let metrics = self + .metrics + .get_or_insert_with(|| Value::Object(Map::default())); + let Value::Object(object) = metrics else { + unreachable!(); + }; + + object.insert(key.to_owned(), value.into()); + self + } + + /// Returns the inner data. 
+ #[inline] + pub fn into_inner(self) -> T { + self.inner + } +} + +impl fmt::Debug for TaskResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskResponse") + .field("outputs", &self.outputs) + .field("metrics", &self.metrics) + .finish_non_exhaustive() + } +} + +/// Unrecoverable failure during the [`TaskHandler`] execution. +/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, thiserror::Error, Serialize, Deserialize)] +#[error("internal handler error")] +#[must_use = "errors do nothing unless you use them"] +pub struct TaskError { + #[serde(skip)] + pub(crate) error: Option>, + + #[serde(rename = "kind")] + pub(crate) kind: TaskErrorKind, + #[serde(rename = "values")] + pub(crate) values: Option, +} +/// Specifies the general categories of [`TaskError`]s. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[must_use = "errors do nothing unless you use them"] +pub enum TaskErrorKind { + /// Task wih a requested identifier was not found. + NotFound, + /// Request or response schema validation failed. + Schema, + + /// Error caused by the timeout policy. + TimeoutPolicy, + /// Error caused by the retry policy. + RetryPolicy, + + /// Unknown (type-erased) error occurred. + Unknown, +} + +impl TaskError { + /// Returns a new [`TaskError`]. + #[inline] + pub fn new(kind: TaskErrorKind, error: E) -> Self + where + E: Into>, + { + Self { + kind, + error: Some(error.into()), + values: None, + } + } + + /// Overrides the default value of [`TaskError`]`::values`. + #[inline] + pub fn with_values(mut self, values: Value) -> Self { + self.values = Some(values); + self + } +} + +impl<'a> From> for TaskError { + fn from(value: ValidationError<'a>) -> Self { + Self::new( + TaskErrorKind::Schema, + "request or response schema validation failed", + ) + .with_values(json!({ + "instance": value.instance.into_owned(), + })) + } +} + +/// Specialized [`Result`] alias for the [`TaskError`] type. 
+pub type TaskResult = Result; + +pub mod layers { + //! Declarative `tower::`[`Layer`]s configuration. + //! + //! [`Layer`]: tower::Layer + + use serde::{Deserialize, Serialize}; + + use crate::routing::context::policies::{RetryPolicy, TimeoutPolicy}; + + /// Declarative `tower::`[`Layer`]s configuration. + /// + /// [`Layer`]: tower::Layer + #[derive(Debug, Default, Clone, Serialize, Deserialize)] + #[must_use = "layers do nothing unless you use them"] + pub struct TaskLayers { + #[serde(rename = "timeout")] + pub(crate) timeout_policy: Option, + #[serde(rename = "retry")] + pub(crate) retry_policy: Option, + } + + impl TaskLayers { + /// Returns a new [`TaskLayers`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Overrides the default value of [`TaskLayers`]`::timeout_policy`. + pub fn with_timeout_policy(mut self, timeout_policy: TimeoutPolicy) -> Self { + self.timeout_policy = Some(timeout_policy); + self + } + + /// Overrides the default value of [`TaskLayers`]`::retry_policy`. + pub fn with_retry_policy(mut self, retry_policy: RetryPolicy) -> Self { + self.retry_policy = Some(retry_policy); + self + } + } +} + +pub mod policies { + //! [`RetryPolicy`] and [`TimeoutPolicy`]. + + use std::time::Duration; + + use serde::{Deserialize, Serialize}; + + /// Defines a policy for handling timeouts. + #[derive(Debug, Clone, Serialize, Deserialize)] + #[must_use = "policies do nothing unless you use them"] + pub struct TimeoutPolicy { + /// The duration after which a timeout will occur. + pub duration: Duration, + /// The action to take when a timeout occurs. + pub action: TimeoutAction, + } + + /// Specifies actions to take when a timeout occurs. + #[derive(Debug, Default, Clone, Serialize, Deserialize)] + #[must_use = "policies do nothing unless you use them"] + pub enum TimeoutAction { + /// Retry the operation after a timeout. + Retry, + /// Terminate the operation after a timeout (default behavior). 
+ #[default] + Terminate, + } + + impl TimeoutPolicy { + /// Returns a new retry [`TimeoutPolicy`] with the specified timeout duration. + pub fn retry(timeout: Duration) -> Self { + Self { + duration: timeout, + action: TimeoutAction::Retry, + } + } + + /// Returns a new terminate [`TimeoutPolicy`] with the specified timeout duration. + pub fn terminate(timeout: Duration) -> Self { + Self { + duration: timeout, + action: TimeoutAction::Terminate, + } + } + } + + /// Defines a policy for handling retries. + #[derive(Debug, Clone, Serialize, Deserialize)] + #[must_use = "policies do nothing unless you use them"] + pub struct RetryPolicy { + /// The maximum number of retry attempts. + pub retries: u32, + /// The strategy to use for determining retry intervals. + pub strategy: RetryStrategy, + } + + /// Specifies strategies for calculating retry intervals. + #[derive(Debug, Clone, Serialize, Deserialize)] + #[must_use = "policies do nothing unless you use them"] + pub enum RetryStrategy { + /// Linear backoff strategy with optional jitter and max backoff duration. + Linear { + step_backoff: Duration, + max_backoff: Option, + jitter_perc: Option, + }, + /// Exponential backoff strategy with optional jitter and max backoff duration. + Exponential { + base_backoff: Duration, + max_backoff: Option, + jitter_perc: Option, + }, + } + + impl RetryPolicy { + /// Returns a new linear [`RetryPolicy`] with the specified retries and base backoff duration. + pub fn linear(retries: u32, base_backoff: Duration) -> Self { + Self { + retries, + strategy: RetryStrategy::Linear { + step_backoff: base_backoff, + max_backoff: None, + jitter_perc: None, + }, + } + } + + /// Returns a new exponential [`RetryPolicy`] with the specified retries, base backoff. 
+ pub fn exponential(retries: u32, base_backoff: Duration) -> Self { + Self { + retries, + strategy: RetryStrategy::Exponential { + base_backoff, + max_backoff: None, + jitter_perc: None, + }, + } + } + + /// Sets the maximum backoff duration and returns the modified policy. + pub fn with_max_backoff(mut self, new_max_backoff: Duration) -> Self { + match self.strategy { + RetryStrategy::Linear { + ref mut max_backoff, + .. + } => *max_backoff = Some(new_max_backoff), + RetryStrategy::Exponential { + ref mut max_backoff, + .. + } => *max_backoff = Some(new_max_backoff), + }; + + self + } + + /// Sets the jitter percentage and returns the modified policy. + pub fn with_jitter_perc(mut self, new_jitter_perc: f64) -> Self { + match self.strategy { + RetryStrategy::Linear { + ref mut jitter_perc, + .. + } => *jitter_perc = Some(new_jitter_perc), + RetryStrategy::Exponential { + ref mut jitter_perc, + .. + } => *jitter_perc = Some(new_jitter_perc), + }; + + self + } + } +} diff --git a/crates/server/routing/handler.rs b/crates/server/routing/handler.rs new file mode 100644 index 0000000..aac1a04 --- /dev/null +++ b/crates/server/routing/handler.rs @@ -0,0 +1,371 @@ +//! [`TaskHandler`], [`TaskHandlerLayer`], its future and metrics. + +use std::fmt; +use std::marker::PhantomData; +use std::task::{Context, Poll}; + +use tower::load::Load; +use tower::util::BoxCloneSyncService; +use tower::{Layer, Service, ServiceBuilder}; + +use crate::routing::context::{TaskError, TaskRequest, TaskResponse}; +use crate::routing::handler::future::TaskFuture; +use crate::routing::handler::metric::{TaskMetrics, TaskMetricsLock}; + +/// Unified `tower::`[`Service`] for executing tasks. +/// +/// Opaque [`BoxCloneSyncService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. 
+#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] +pub struct TaskHandler { + inner: BoxCloneSyncService, TaskResponse, TaskError>, + metrics: TaskMetricsLock, +} + +impl TaskHandler { + /// Returns a new [`TaskHandler`]. + pub fn new(inner: S) -> Self + where + T: 'static, + U: 'static, + S: Service + Clone + Send + Sync + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, + { + Self::with_metrics(inner, TaskMetricsLock::default()) + } + + /// Returns a new [`TaskHandler`] with provided [`TaskMetricsLock`]. + /// + /// Allows to share [`TaskMetricsLock`] and the inner [`TaskMetrics`]. + pub fn with_metrics(inner: S, metrics: TaskMetricsLock) -> Self + where + T: 'static, + U: 'static, + S: Service + Clone + Send + Sync + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, + { + let inner = ServiceBuilder::new() + .map_request(From::from) + .map_response(Into::into) + .map_err(Into::into) + .service(inner); + + Self { + inner: BoxCloneSyncService::new(inner), + metrics, + } + } + + /// Maps a `TaskHandler` to `TaskHandler` by applying a function to a contained service. + pub fn map(self, f: F) -> TaskHandler + where + F: FnOnce( + BoxCloneSyncService, TaskResponse, TaskError>, + ) -> BoxCloneSyncService, TaskResponse, TaskError>, + { + TaskHandler { + inner: f(self.inner), + metrics: self.metrics, + } + } + + /// Returns a new [`TaskMetrics`]. 
+ #[inline] + pub fn snapshot(&self) -> TaskMetrics { + self.metrics.snapshot() + } +} + +impl Clone for TaskHandler { + #[inline] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + metrics: self.metrics.clone(), + } + } +} + +impl fmt::Debug for TaskHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskHandler").finish_non_exhaustive() + } +} + +impl Service> for TaskHandler +where + T: 'static + Send + Clone, + U: 'static + Send, +{ + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + #[inline] + fn call(&mut self, req: TaskRequest) -> Self::Future { + let Some(layers) = &req.layers else { + return TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()); + }; + + // let compose = LayerCompose::new(layers); + // let mut svc = compose.apply_layers(self.inner.clone()); + // TaskFuture::with_metrics(svc.call(req), self.metrics.clone()) + + todo!() + } +} + +impl Load for TaskHandler { + type Metric = TaskMetrics; + + #[inline] + fn load(&self) -> Self::Metric { + self.metrics.snapshot() + } +} + +/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. +pub struct TaskHandlerLayer { + metrics: TaskMetricsLock, + inner: PhantomData<(Req, T, U)>, +} + +impl TaskHandlerLayer { + /// Returns a new [`TaskHandlerLayer`]. 
+ #[inline] + pub fn new(metrics: TaskMetricsLock) -> Self { + Self { + metrics, + inner: PhantomData, + } + } +} + +impl Default for TaskHandlerLayer { + #[inline] + fn default() -> Self { + Self { + metrics: TaskMetricsLock::default(), + inner: PhantomData, + } + } +} + +impl Layer for TaskHandlerLayer +where + T: 'static, + U: 'static, + S: Service + Clone + Send + Sync + 'static, + Req: From> + 'static, + S::Response: Into> + 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Service = TaskHandler; + + #[inline] + fn layer(&self, inner: S) -> Self::Service { + TaskHandler::with_metrics(inner, self.metrics.clone()) + } +} + +pub mod future { + //! [`Future`] types for [`TaskHandler`]s. + //! + //! [`TaskHandler`]: crate::routing::handler::TaskHandler + + use std::future::Future; + use std::pin::Pin; + use std::task::{Context, Poll}; + + use futures::future::BoxFuture; + use futures::FutureExt; + use pin_project_lite::pin_project; + + use crate::routing::context::{TaskResponse, TaskResult}; + use crate::routing::handler::metric::TaskMetricsLock; + + pin_project! { + /// Opaque [`Future`] return type for [`TaskHandler::call`]. + /// + /// Contains a single `futures::`[`BoxFuture`]. + /// + /// [`TaskHandler::call`]: crate::routing::handler::TaskHandler + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct TaskFuture { + #[pin] fut: BoxFuture<'static, TaskResult>>, + metrics: Option, + } + } + + impl TaskFuture { + /// Returns a new [`TaskFuture`]. + #[inline] + pub fn new(fut: F) -> Self + where + F: Future>> + Sized + Send + 'static, + { + Self { + fut: fut.boxed(), + metrics: None, + } + } + + /// Returns a new [`TaskFuture`]. 
+ #[inline] + pub fn with_metrics(fut: F, metrics: TaskMetricsLock) -> Self + where + F: Future>> + Sized + Send + 'static, + { + Self { + fut: fut.boxed(), + metrics: Some(metrics), + } + } + } + + impl From>>> for TaskFuture { + #[inline] + fn from(fut: BoxFuture<'static, TaskResult>>) -> Self { + Self { fut, metrics: None } + } + } + + impl Future for TaskFuture { + type Output = TaskResult>; + + #[inline] + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.fut.poll(cx) + } + } + + #[cfg(test)] + mod test { + use crate::routing::context::{TaskResponse, TaskResult}; + use crate::routing::handler::future::TaskFuture; + + #[test] + fn from_async_block() -> TaskResult<()> { + let fut = async move { Ok(TaskResponse::new(5)) }; + let _fut = TaskFuture::new(fut); + + Ok(()) + } + } +} + +pub mod metric { + //! `tower::`[`Load`] metric types for [`TaskHandler`]s. + //! + //! [`Load`]: tower::load::Load + //! [`TaskHandler`]: crate::routing::handler::TaskHandler + + use std::sync::{Arc, Mutex}; + + use serde::{Deserialize, Serialize}; + + /// Reference-counting wrapper for [`TaskMetrics`]. + /// + /// Use by [`TaskHandler`]s and [`TaskFuture`]s. + /// + /// [`TaskHandler`]: crate::routing::handler::TaskHandler + /// [`TaskFuture`]: crate::routing::handler::future::TaskFuture + #[derive(Debug, Default, Clone)] + #[must_use = "metrics do nothing unless you serialize them"] + pub struct TaskMetricsLock { + inner: Arc>, + } + + impl TaskMetricsLock { + /// Returns a new [`TaskMetricsLock`]. + #[inline] + pub fn new(metrics: TaskMetrics) -> Self { + Self { + inner: Arc::new(Mutex::new(metrics)), + } + } + + /// Returns a new [`TaskMetrics`]. + pub fn snapshot(&self) -> TaskMetrics { + let guard = self.inner.lock().expect("should not be locked"); + guard.clone() + } + } + + /// `tower::load::`[`Load`] metrics for [`TaskHandler`]s. 
+ /// + /// [`Load`]: tower::load::Load + /// [`TaskHandler`]: crate::routing::handler::TaskHandler + #[derive(Debug, Default, Clone, PartialOrd, PartialEq, Serialize, Deserialize)] + #[must_use = "metrics do nothing unless you serialize them"] + pub struct TaskMetrics { + // TODO: Implement all metrics. + + // pub average_waiting_time: Duration, + // pub average_recent_waiting_time: Duration, + // pub average_running_time: Duration, + // pub average_recent_running_time: Duration, + // pub total_success_runs: u32, + // pub total_failure_runs: u32, + } + + impl TaskMetrics { + /// Returns a new [`TaskMetrics`]. + #[inline] + pub fn new() -> Self { + Self::default() + } + } + + #[cfg(test)] + mod test { + use crate::routing::context::TaskResult; + use crate::routing::handler::metric::{TaskMetrics, TaskMetricsLock}; + + #[test] + fn metrics_lock() -> TaskResult<()> { + let metrics_lock = TaskMetricsLock::default(); + assert_eq!(TaskMetrics::new(), metrics_lock.snapshot()); + Ok(()) + } + } +} + +#[cfg(test)] +mod test { + use tower::{service_fn, ServiceBuilder}; + + use crate::routing::context::{TaskError, TaskRequest, TaskResponse}; + use crate::routing::handler::{TaskHandler, TaskHandlerLayer}; + + async fn handle(request: TaskRequest) -> Result, TaskError> { + Ok(TaskResponse::new(request.into_inner())) + } + + #[test] + fn service_compose() -> Result<(), TaskError> { + let inner = service_fn(handle); + let _service = TaskHandler::new(inner); + Ok(()) + } + + #[test] + fn service_builder() -> Result<(), TaskError> { + let _service = ServiceBuilder::new() + .layer(TaskHandlerLayer::default()) + .service(service_fn(handle)); + Ok(()) + } +} diff --git a/crates/task/routing/manifest.rs b/crates/server/routing/manifest.rs similarity index 59% rename from crates/task/routing/manifest.rs rename to crates/server/routing/manifest.rs index 12944fb..e4e88b9 100644 --- a/crates/task/routing/manifest.rs +++ b/crates/server/routing/manifest.rs @@ -1,12 +1,9 @@ //! 
[`TaskManifest`] and [`ServiceManifest`]. -use jsonschema::{draft202012, Validator}; use semver::Version; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::context::TaskResult; - /// Metadata and properties of a single service. #[derive(Debug, Clone, Serialize, Deserialize)] #[must_use = "manifests do nothing unless you serialize them"] @@ -95,62 +92,4 @@ impl TaskManifest { deprecated: None, } } - - /// Creates schema validators for input, output, and error schemas. - /// - /// Returns a `TaskSchemaValidators` instance containing compiled validators, - /// or an error if any schema compilation fails. - pub(crate) fn create_schema_validators(&self) -> TaskResult { - let Some(schemas) = &self.schemas else { - return Ok(TaskSchemaValidators::default()); - }; - - let inputs_schema = schemas.inputs_schema.as_ref(); - let outputs_schema = schemas.outputs_schema.as_ref(); - let errors_schema = schemas.errors_schema.as_ref(); - - Ok(TaskSchemaValidators { - inputs: inputs_schema.map(draft202012::new).transpose()?, - outputs: outputs_schema.map(draft202012::new).transpose()?, - errors: errors_schema.map(draft202012::new).transpose()?, - }) - } -} - -/// Validators for task schemas to validate input, output, and error structures. 
-#[derive(Debug, Default)] -pub(crate) struct TaskSchemaValidators { - pub inputs: Option, - pub outputs: Option, - pub errors: Option, -} - -impl TaskSchemaValidators { - pub fn validate_inputs(&self, values: &Value) -> TaskResult<()> { - let Some(schema) = self.inputs.as_ref() else { - return Ok(()); - }; - - schema.validate(values).map_err(From::from) - } - - pub fn validate_outputs(&self, values: &Value) -> TaskResult<()> { - let Some(schema) = self.outputs.as_ref() else { - return Ok(()); - }; - - schema.validate(values).map_err(From::from) - } - - pub fn validate_errors(&self, values: Option<&Value>) -> TaskResult<()> { - let Some(values) = values else { - return Ok(()); - }; - - let Some(schema) = self.errors.as_ref() else { - return Ok(()); - }; - - schema.validate(values).map_err(From::from) - } } diff --git a/crates/task/routing/mod.rs b/crates/server/routing/mod.rs similarity index 72% rename from crates/task/routing/mod.rs rename to crates/server/routing/mod.rs index 991dd6a..b610c68 100644 --- a/crates/task/routing/mod.rs +++ b/crates/server/routing/mod.rs @@ -1,4 +1,4 @@ -//! [`Router`], [`TaskIndex`] and [`manifest`]s. +//! Task routing with [`Router`]<[`TaskRequest`], [`TaskResponse`]>. 
#[cfg(not(feature = "hashbrown"))] use std::collections::HashMap; @@ -9,18 +9,17 @@ use std::sync::Arc; use hashbrown::HashMap; use tower::ServiceExt; -use crate::context::layers::TaskLayers; -use crate::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse}; -use crate::handler::metric::TaskMetrics; -use crate::handler::TaskHandler; -use crate::routing::index::{ServiceIndex, TaskIndex}; +use crate::routing::context::layers::TaskLayers; +use crate::routing::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse, TaskResult}; +use crate::routing::handler::metric::TaskMetrics; +use crate::routing::handler::TaskHandler; use crate::routing::manifest::{ServiceManifest, TaskManifest}; -use crate::routing::route::Route; -use crate::Registry; +use crate::routing::route::{Route, ServiceIndex, TaskIndex}; -pub mod index; +pub mod context; +pub mod handler; pub mod manifest; -mod route; +pub mod route; /// Request data alias for a default [`Router`]. pub type RouteRequest = (); @@ -38,7 +37,7 @@ pub struct Router { } struct RouterInner { - layer_compose: Option, + layer_compose: TaskLayers, service_manifests: HashMap, routes: HashMap>, } @@ -48,7 +47,7 @@ impl Router { #[inline] pub fn new(layers: TaskLayers) -> Self { let router_inner = RouterInner { - layer_compose: Some(layers), + layer_compose: layers, service_manifests: HashMap::default(), routes: HashMap::new(), }; @@ -73,7 +72,17 @@ impl Router { /// Overrides the default value of [`Router`]`::layer_compose`. pub fn with_layers(self, layers: TaskLayers) -> Self { self.inspect_inner_mut(|x| { - x.layer_compose = Some(layers); + x.layer_compose = layers; + }) + } + + /// Registers multiple [`ServiceManifest`]s by their [`ServiceIndex`]es. 
+ pub fn with_services(self, services: Vec) -> Self { + self.inspect_inner_mut(|x| { + for service_manifest in services { + let service_index = ServiceIndex::new(&service_manifest.service_id); + x.service_manifests.insert(service_index, service_manifest); + } }) } @@ -89,6 +98,18 @@ impl Router { }) } + /// Registers multiple [`TaskHandler`]s by their [`TaskIndex`]es. + pub fn with_routes(self, routes: Vec<(TaskManifest, TaskHandler)>) -> Self { + self.inspect_inner_mut(|x| { + for (task_manifest, task_handler) in routes { + let task_index = TaskIndex::new(&task_manifest.route_id); + let route = Route::new(task_handler, task_manifest) + .expect("should not provide malformed manifests"); + x.routes.insert(task_index, route); + } + }) + } + /// Registers another [`TaskHandler`] by its [`TaskIndex`]. pub fn with_route( self, @@ -107,6 +128,49 @@ impl Router { }) } + /// Executes the requested task handler with a given request. + /// + /// # Errors + /// + /// - Returns an error if the task wasn't found in the registry. + /// - Returns an error if the requested handler returns an error. + pub async fn route_task(&self, task_request: TaskRequest) -> TaskResult> + where + T: 'static + Send + Clone, + U: 'static + Send, + { + let task_index = TaskIndex::new(&task_request.task_id); + let task_handler = self.find_task_handler(task_index).ok_or_else(|| { + TaskError::new( + TaskErrorKind::NotFound, + "requested task identifier was not found", + ) + })?; + + self.route_task_with_handler(task_request, task_handler) + .await + } + + /// Executes the provided task handler with a given request. + /// + /// # Errors + /// + /// - Returns an error if the provided handler returns an error. 
+ pub async fn route_task_with_handler( + &self, + mut task_request: TaskRequest, + task_handler: TaskHandler, + ) -> TaskResult> + where + T: 'static + Send + Clone, + U: 'static + Send, + { + let layer_compose = self.inner.layer_compose.clone(); + task_handler + .oneshot(task_request.with_layers(layer_compose)) + .await + } + /// Returns the reference to the [`ServiceManifest`]. pub fn find_service_manifest( &self, @@ -157,51 +221,6 @@ impl Router { .collect(), } } - - /// Executes the requested task handler with a given request. - /// - /// # Errors - /// - /// - Returns an error if the task wasn't found in the registry. - /// - Returns an error if the requested handler returns an error. - pub async fn route_task( - &self, - task_request: TaskRequest, - ) -> Result, TaskError> - where - T: 'static + Send + Clone, - U: 'static + Send, - { - let task_index = TaskIndex::new(&task_request.task_id); - let task_handler = self.find_task_handler(task_index).ok_or_else(|| { - TaskError::new( - TaskErrorKind::NotFound, - "requested task identifier was not found", - ) - })?; - - self.route_task_with_handler(task_request, task_handler) - .await - } - - /// Executes the provided task handler with a given request. - /// - /// # Errors - /// - /// - Returns an error if the provided handler returns an error. 
- pub async fn route_task_with_handler( - &self, - mut task_request: TaskRequest, - task_handler: TaskHandler, - ) -> Result, TaskError> - where - T: 'static + Send + Clone, - U: 'static + Send, - { - let layer_compose = self.inner.layer_compose.as_ref(); - task_request.apply_default_layers(layer_compose); - task_handler.oneshot(task_request).await - } } impl fmt::Debug for Router { @@ -212,16 +231,14 @@ impl fmt::Debug for Router { impl Default for Router { fn default() -> Self { - Self { - inner: Arc::new(RouterInner::default()), - } + Self::new(TaskLayers::new()) } } impl Default for RouterInner { fn default() -> Self { Self { - layer_compose: None, + layer_compose: TaskLayers::default(), service_manifests: HashMap::default(), routes: HashMap::default(), } @@ -246,15 +263,31 @@ impl Clone for RouterInner { } } +/// Lists all registered services and tasks. +/// +/// Also see [`Router::as_registry`]. +/// +/// [`Router::as_registry`]: routing::Router::as_registry +#[derive(Debug, Default)] +pub struct Registry { + /// List of all registered services. + pub services: std::collections::HashMap, + /// List of all registered tasks. 
+ pub tasks: std::collections::HashMap, +} + #[cfg(test)] mod test { + use std::time::Duration; + use tower::{service_fn, ServiceBuilder}; - use crate::context::layers::TaskLayers; - use crate::context::{TaskError, TaskRequest, TaskResponse, TaskResult}; - use crate::handler::{TaskHandler, TaskHandlerLayer}; - use crate::routing::manifest::ServiceManifest; - use crate::routing::{Router, TaskManifest}; + use crate::routing::context::layers::TaskLayers; + use crate::routing::context::policies::{RetryPolicy, TimeoutPolicy}; + use crate::routing::context::{TaskRequest, TaskResponse, TaskResult}; + use crate::routing::handler::{TaskHandler, TaskHandlerLayer}; + use crate::routing::manifest::{ServiceManifest, TaskManifest}; + use crate::routing::Router; async fn handle_builtin0(request: TaskRequest) -> TaskResult> { Ok(TaskResponse::new(request.into_inner())) @@ -277,17 +310,21 @@ mod test { .layer(TaskHandlerLayer::default()) .service(service_fn(handle_builtin1)); + let default_layers = TaskLayers::new() + .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) + .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))); + Router::default() - .with_layers(TaskLayers::new()) + .with_layers(default_layers) .with_service(service0_manifest) .with_route(builtin0_manifest, builtin0_service) .with_route(builtin1_manifest, builtin1_service) } #[tokio::test] - async fn simple_routing() -> Result<(), TaskError> { + async fn simple_routing() -> TaskResult<()> { let router = create_testing_router(); - let request = TaskRequest::builder("builtin0", 5).build()?; + let request = TaskRequest::new("builtin0", 5); let response = router.route_task(request).await?; assert_eq!(response.into_inner(), 5); diff --git a/crates/server/routing/route.rs b/crates/server/routing/route.rs new file mode 100644 index 0000000..a93f165 --- /dev/null +++ b/crates/server/routing/route.rs @@ -0,0 +1,238 @@ +//! [`Route`], [`TaskIndex`] and [`ServiceIndex`]. 
+ +use std::borrow::Cow; +use std::fmt; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use derive_more::{Deref, DerefMut, From}; +use ecow::EcoString; +use jsonschema::{draft202012, Validator}; +use serde_json::Value; +use tower::load::Load; +use tower::Service; + +use crate::routing::context::{TaskError, TaskRequest, TaskResponse, TaskResult}; +use crate::routing::handler::future::TaskFuture; +use crate::routing::handler::metric::TaskMetrics; +use crate::routing::handler::TaskHandler; +use crate::routing::manifest::{TaskManifest, TaskSchemas}; + +/// Routing structure that wraps [`TaskHandler`] with req/resp validation. +#[must_use = "routes do nothing unless you use them"] +pub struct Route { + inner: Arc>, +} + +#[must_use = "routes do nothing unless you use them"] +struct RouteHandler { + task_handler: TaskHandler, + schema_validators: TaskSchemaValidators, + manifest: TaskManifest, +} + +impl Route { + /// Creates a new [`Route`]. + pub fn new(task_handler: TaskHandler, task_manifest: TaskManifest) -> TaskResult { + let schema_validators = TaskSchemaValidators::new(task_manifest.schemas.as_ref())?; + + Ok(Self { + inner: Arc::new(RouteHandler { + task_handler, + schema_validators, + manifest: task_manifest, + }), + }) + } + + /// Returns the reference to the inner [`TaskHandler`]. + #[inline] + pub fn task_handler(&self) -> &TaskHandler { + &self.inner.task_handler + } + + /// Returns [`TaskMetrics`] of the inner [`TaskHandler`]. + #[inline] + pub fn task_handler_metrics(&self) -> TaskMetrics { + self.inner.task_handler.load() + } + + /// Returns the reference to the inner [`TaskManifest`]. 
+ #[inline] + pub fn manifest(&self) -> &TaskManifest { + &self.inner.manifest + } +} + +impl fmt::Debug for Route { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Route").finish_non_exhaustive() + } +} + +impl Clone for Route { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl Service> for Route +where + T: 'static + Send + Clone, + U: 'static + Send, +{ + type Response = TaskResponse; + type Error = TaskError; + type Future = TaskFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + let mut handler = self.inner.task_handler.clone(); + handler.poll_ready(cx) + } + + fn call(&mut self, req: TaskRequest) -> Self::Future { + let this = self.clone(); + let fut = async move { + this.inner + .schema_validators + .validate_inputs(req.inputs.as_ref())?; + + let mut handler = this.inner.task_handler.clone(); + let response = handler.call(req).await; + + response + .and_then(|response| { + this.inner + .schema_validators + .validate_outputs(response.outputs.as_ref()) + .and(Ok(response)) + }) + .map_err(|error| { + match this + .inner + .schema_validators + .validate_errors(error.values.as_ref()) + { + Ok(_) => error, + Err(v_error) => v_error, + } + }) + }; + + TaskFuture::new(fut) + } +} + +/// Validators for task schemas to validate input, output, and error structures. +#[derive(Debug, Default)] +pub(crate) struct TaskSchemaValidators { + pub inputs: Option, + pub outputs: Option, + pub errors: Option, +} + +impl TaskSchemaValidators { + /// Returns a new `TaskSchemaValidators` or an error if any schema compilation fails. 
+ pub(crate) fn new(schemas: Option<&TaskSchemas>) -> TaskResult { + let Some(schemas) = schemas else { + return Ok(TaskSchemaValidators::default()); + }; + + let inputs_schema = schemas.inputs_schema.as_ref(); + let outputs_schema = schemas.outputs_schema.as_ref(); + let errors_schema = schemas.errors_schema.as_ref(); + + Ok(TaskSchemaValidators { + inputs: inputs_schema.map(draft202012::new).transpose()?, + outputs: outputs_schema.map(draft202012::new).transpose()?, + errors: errors_schema.map(draft202012::new).transpose()?, + }) + } + + pub fn validate_inputs(&self, values: Option<&Value>) -> TaskResult<()> { + let Some(values) = values else { + return Ok(()); + }; + + let Some(schema) = self.inputs.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } + + pub fn validate_outputs(&self, values: Option<&Value>) -> TaskResult<()> { + let Some(values) = values else { + return Ok(()); + }; + + let Some(schema) = self.outputs.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } + + pub fn validate_errors(&self, values: Option<&Value>) -> TaskResult<()> { + let Some(values) = values else { + return Ok(()); + }; + + let Some(schema) = self.errors.as_ref() else { + return Ok(()); + }; + + schema.validate(values).map_err(From::from) + } +} + +/// Opaque and unique [`Service`] identifier. +/// +/// [`Service`]: crate::routing::manifest::ServiceManifest +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] +#[must_use = "indexes do nothing unless you serialize them"] +pub struct ServiceIndex { + inner: EcoString, +} + +impl ServiceIndex { + /// Returns a new [`ServiceIndex`]. + #[inline] + pub fn new(inner: impl AsRef) -> Self { + let inner = EcoString::from(inner.as_ref()); + Self { inner } + } + + /// Returns the underlying index. + #[inline] + pub fn into_inner(self) -> EcoString { + self.inner.clone() + } +} + +/// Opaque and unique [`TaskHandler`] identifier. 
+/// +/// [`TaskHandler`]: crate::handler::TaskHandler +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From)] +#[from(Cow<'static, str>, String, &'static str)] +#[must_use = "indexes do nothing unless you serialize them"] +pub struct TaskIndex { + inner: EcoString, +} + +impl TaskIndex { + /// Returns a new [`TaskIndex`]. + #[inline] + pub fn new(inner: impl AsRef) -> Self { + let inner = EcoString::from(inner.as_ref()); + Self { inner } + } + + /// Returns the underlying index. + #[inline] + pub fn into_inner(self) -> EcoString { + self.inner.clone() + } +} diff --git a/crates/server/service/app_config.rs b/crates/server/service/app_config.rs deleted file mode 100644 index 971bbaa..0000000 --- a/crates/server/service/app_config.rs +++ /dev/null @@ -1,56 +0,0 @@ -/// App [`state`] configuration. -/// -/// [`state`]: crate::service::AppState -#[derive(Debug, Clone)] -#[must_use = "configs do nothing unless you use them"] -pub struct AppConfig {} - -impl AppConfig { - /// Returns a new [`AppBuilder`]. - #[inline] - pub fn builder() -> AppBuilder { - AppBuilder::new() - } -} - -impl Default for AppConfig { - #[inline] - fn default() -> Self { - Self::builder().build() - } -} - -/// [`AppConfig`] builder. -#[derive(Debug, Default, Clone)] -#[must_use = "configs do nothing unless you use them"] -pub struct AppBuilder {} - -impl AppBuilder { - /// Returns a new [`AppBuilder`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Returns a new [`AppConfig`]. 
- pub fn build(self) -> AppConfig { - AppConfig {} - } -} - -#[cfg(test)] -mod test { - use crate::service::{AppBuilder, AppConfig}; - - #[test] - fn config_from_default() -> anyhow::Result<()> { - let _ = AppConfig::default(); - Ok(()) - } - - #[test] - fn config_from_builder() -> anyhow::Result<()> { - let _ = AppBuilder::new().build(); - Ok(()) - } -} diff --git a/crates/server/service/config.rs b/crates/server/service/config.rs new file mode 100644 index 0000000..3f1ba0b --- /dev/null +++ b/crates/server/service/config.rs @@ -0,0 +1,42 @@ +/// App [`state`] configuration. +/// +/// [`state`]: crate::service::ServiceState +use clap::Args; +use serde::{Deserialize, Serialize}; + +/// App [`service`] configuration. +/// +/// [`service`]: crate::service +#[derive(Debug, Clone, Serialize, Deserialize, Args)] +#[must_use = "config does nothing unless you use it"] +pub struct ServiceConfig { + /// Task execution timeout (in seconds). + #[arg(long, default_value_t = 120)] + pub running_timeout: u64, + + /// Task retry attempts. + #[arg(long, default_value_t = 3)] + pub retrying_attempts: u32, + + /// Task retry timeout (in seconds). + #[arg(long, default_value_t = 10)] + pub retrying_timeout: u64, +} + +impl ServiceConfig { + /// Returns a new [`ServiceConfig`]. + #[inline] + pub fn new() -> Self { + Self::default() + } +} + +impl Default for ServiceConfig { + fn default() -> Self { + Self { + running_timeout: 120, + retrying_attempts: 3, + retrying_timeout: 10, + } + } +} diff --git a/crates/cli/middleware/utility.rs b/crates/server/service/graph.rs similarity index 100% rename from crates/cli/middleware/utility.rs rename to crates/server/service/graph.rs diff --git a/crates/server/service/mod.rs b/crates/server/service/mod.rs index a1f7d63..3e6d691 100644 --- a/crates/server/service/mod.rs +++ b/crates/server/service/mod.rs @@ -1,14 +1,16 @@ //! Application state and dependency injection. 
-use axiston_rt_task::routing::layers::Layers; -use axiston_rt_task::routing::Router; +mod config; +mod graph; +mod source; -pub use crate::service::app_config::{AppBuilder, AppConfig}; -pub use crate::service::task_queue::TaskQueue; +use std::time::Duration; -mod app_config; -mod task_metrics; -mod task_queue; +use crate::routing::context::layers::TaskLayers; +use crate::routing::context::policies::{RetryPolicy, TimeoutPolicy}; +use crate::routing::context::{TaskRequest, TaskResponse, TaskResult}; +use crate::routing::{Registry, RouteRequest, RouteResponse, Router}; +pub use crate::service::config::ServiceConfig; /// Application state. /// @@ -17,22 +19,56 @@ mod task_queue; /// [`handlers`]: crate::handler #[derive(Debug, Clone)] #[must_use = "state does nothing unless you use it"] -pub struct AppState { - pub task_router: Router, - pub task_queue: TaskQueue, - // pub task_counter: TaskStatus, - // pub runtime: Rc, +pub struct ServiceState { + router: Router, } -impl AppState { - /// Creates a new [`AppState`]. - #[inline] - pub fn new(config: AppConfig) -> Self { - let layers = Layers::builder().build(); +impl ServiceState { + /// Returns a new [`ServiceState`]. + pub fn new(config: ServiceConfig) -> Self { + let layers = TaskLayers::new() + .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs( + config.running_timeout, + ))) + .with_retry_policy(RetryPolicy::exponential( + config.retrying_attempts, + Duration::from_secs(config.retrying_timeout), + )); + let router = Router::new(layers); + Self { router } + } +} + +impl Default for ServiceState { + fn default() -> Self { Self { - task_router: Router::new(layers), - task_queue: TaskQueue::new(), + router: Router::default(), } } } + +/// Provides a mechanism for managing and executing tasks within a system. +pub trait RouterExt { + /// Executes the requested task handler with a given request. + fn route_task_request( + &self, + request: TaskRequest, + ) -> TaskResult>; + + /// Returns a new [`Registry`]. 
+ fn get_task_registry(&self) -> TaskResult; +} + +impl RouterExt for ServiceState { + fn route_task_request( + &self, + request: TaskRequest, + ) -> TaskResult> { + self.router.route_task(request) + } + + fn get_task_registry(&self) -> TaskResult { + Ok(self.router.as_registry()) + } +} diff --git a/crates/jsvm/extension/route/internal.rs b/crates/server/service/source.rs similarity index 100% rename from crates/jsvm/extension/route/internal.rs rename to crates/server/service/source.rs diff --git a/crates/server/service/task_metrics.rs b/crates/server/service/task_metrics.rs deleted file mode 100644 index 8b13789..0000000 --- a/crates/server/service/task_metrics.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/crates/server/service/task_queue.rs b/crates/server/service/task_queue.rs deleted file mode 100644 index 67c7c6e..0000000 --- a/crates/server/service/task_queue.rs +++ /dev/null @@ -1,171 +0,0 @@ -use std::cmp::{Ordering, Reverse}; -use std::collections::{BinaryHeap, HashMap, HashSet}; -use std::fmt; -use std::sync::{Arc, Mutex}; - -use derive_more::{Deref, DerefMut}; -use time::OffsetDateTime; -use tokio::sync::mpsc::{channel, Receiver, Sender}; -use uuid::{NoContext, Timestamp, Uuid}; - -/// TODO. -#[derive(Default, Clone)] -pub struct TaskQueue { - inner: Arc>, -} - -#[derive(Default, Deref, DerefMut)] -struct TaskQueueInner { - tasks: BinaryHeap, -} - -/// Represents a single change in task execution status. -#[derive(Debug, Clone)] -pub enum TaskQueueEvent { - Waiting, - PreRunning, - Running, - PostRunning, -} - -impl TaskQueue { - /// Returns a new [`TaskQueue`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Returns a new [`TaskQueueHandler`]. - #[inline] - pub fn handler(&self) -> TaskQueueHandler { - TaskQueueHandler::new(self.clone(), 128) - } - - /// Adds the task into the task queue. 
- fn add_task(&self, tx: Sender) -> Uuid { - let utc_datetime = OffsetDateTime::now_utc(); - let uuid_timestamp = Timestamp::from_unix( - NoContext, - utc_datetime.unix_timestamp() as u64, - utc_datetime.nanosecond(), - ); - - let mut guard = self.inner.lock().expect("should not be held"); - - // Makes sure that UUIDv7 is not duplicated. - let mut task_id = Uuid::new_v7(uuid_timestamp); - while guard.iter().any(|task| task.id.0 == task_id) { - task_id = Uuid::new_v7(uuid_timestamp); - } - - guard.push(TaskData::new(task_id, tx)); - task_id - } - - /// Removes the task from the task queue. - pub fn remove_task(&self, id: Uuid) { - todo!() - } -} - -impl fmt::Debug for TaskQueue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskQueue").finish_non_exhaustive() - } -} - -/// Contains all the data required to execute a single task. -struct TaskData { - /// - [`BinaryHeap`] is a max-heap by default, so `cmp::`[`Reverse`] is used. - /// - UUID `v7` should be used to remain sortable by a timestamp. - id: Reverse, - tx: Sender, -} - -impl TaskData { - /// Returns a new [`TaskData`]. - #[inline] - pub fn new(id: Uuid, tx: Sender) -> Self { - Self { - id: Reverse(id), - tx, - } - } -} - -impl PartialEq for TaskData { - #[inline] - fn eq(&self, other: &Self) -> bool { - PartialEq::eq(&self.id, &other.id) - } -} - -impl Eq for TaskData {} - -impl PartialOrd for TaskData { - #[inline] - fn partial_cmp(&self, other: &Self) -> Option { - PartialOrd::partial_cmp(&self.id, &other.id) - } -} - -impl Ord for TaskData { - #[inline] - fn cmp(&self, other: &Self) -> Ordering { - Ord::cmp(&self.id, &other.id) - } -} - -/// TODO. -#[derive(Deref, DerefMut)] -pub struct TaskQueueHandler { - task_queue: TaskQueue, - send_event: Sender, - task_ids: HashSet, - - #[deref] - #[deref_mut] - recv_event: Receiver, -} - -impl TaskQueueHandler { - /// Returns a new [`TaskQueueHandler`]. 
- fn new(task_queue: TaskQueue, channel_cap: usize) -> Self { - let (tx, rx) = channel::(channel_cap); - Self { - task_queue, - send_event: tx.clone(), - task_ids: HashSet::new(), - recv_event: rx, - } - } - - /// Adds the task into the task queue. - #[inline] - pub fn add_task(&self) -> Uuid { - self.task_queue.add_task(self.send_event.clone()) - } - - /// Removes the task from the task queue. - #[inline] - pub fn remove_task(&self, id: Uuid) { - self.task_queue.remove_task(id) - } -} - -impl fmt::Debug for TaskQueueHandler { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NotifyGuard").finish_non_exhaustive() - } -} - -impl Drop for TaskQueueHandler { - fn drop(&mut self) { - let inner = &self.task_queue.inner; - let mut guard = inner.lock().expect("should not be held"); - guard.tasks.retain(|data| { - let Reverse(id) = data.id; - !self.task_ids.contains(&id) - }); - } -} diff --git a/crates/source/Cargo.toml b/crates/source/Cargo.toml deleted file mode 100644 index 4c9be73..0000000 --- a/crates/source/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[package] -name = "axiston-rt-source" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -readme = "./README.md" - -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -documentation = { workspace = true } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lib] -path = "lib.rs" - -[features] -default = [] -# Enables source loading from git links. -source-git = ["dep:git2"] -# Enables source loading from zip archives. -source-zip = ["dep:zip"] -# Enables source loading from tar archives. 
-source-tar = ["dep:tar"] - -[dependencies] -tracing = { workspace = true } -thiserror = { workspace = true } - -serde = { workspace = true } -serde_json = { workspace = true } -bytes = { workspace = true } - -tempfile = { version = "3.15", features = [] } -sha2 = { version = "0.10", features = [] } -walkdir = { version = "2.5", features = [] } - -git2 = { version = "0.20", optional = true, features = [] } -zip = { version = "2.2", optional = true, features = [] } -tar = { version = "0.4", optional = true, features = [] } - -[dev-dependencies] -hex = { version = "0.4", features = [] } diff --git a/crates/source/README.md b/crates/source/README.md deleted file mode 100644 index 408e732..0000000 --- a/crates/source/README.md +++ /dev/null @@ -1,20 +0,0 @@ -### runtime/source - -[![Build Status][action-badge]][action-url] -[![Crate Docs][docs-badge]][docs-url] -[![Crate Version][crates-badge]][crates-url] - -[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square -[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml -[crates-badge]: https://img.shields.io/crates/v/axiston-rt-server.svg?logo=rust&style=flat-square -[crates-url]: https://crates.io/crates/axiston-rt-server -[docs-badge]: https://img.shields.io/docsrs/axiston-rt-server?logo=Docs.rs&style=flat-square -[docs-url]: http://docs.rs/axiston-rt-server - -Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - -#### Notes - -- Lorem Ipsum. -- Lorem Ipsum. -- Lorem Ipsum. diff --git a/crates/source/lib.rs b/crates/source/lib.rs deleted file mode 100644 index 7525265..0000000 --- a/crates/source/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -#![forbid(unsafe_code)] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc = include_str!("./README.md")] - -//! ### Examples -//! -//! ```rust -//! fn main() {} -//! 
``` - -pub mod loader; -mod script; -mod utils; - -pub use crate::script::{Source, SourceBuilder, SourceContainer, SourceMetadata}; - -/// Unrecoverable failure of the [`SourceLoader`]. -/// -/// Includes all error types that may occur. -/// -/// [`SourceLoader`]: loader::SourceLoader -#[derive(Debug, thiserror::Error)] -#[must_use = "errors do nothing unless you use them"] -pub enum Error { - /// Underlying I/O error. - #[error("underlying i/o error: {0}")] - Io(#[from] std::io::Error), -} - -/// Specialized [`Result`] alias for the [`Error`] type. -/// -/// [`Result`]: std::result::Result -pub type Result = std::result::Result; diff --git a/crates/source/loader/from_git.rs b/crates/source/loader/from_git.rs deleted file mode 100644 index c5eff04..0000000 --- a/crates/source/loader/from_git.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::path::Path; - -use crate::loader::SourceLoader; -use crate::{Result, Source, SourceContainer, SourceMetadata}; - -/// TODO. -/// -/// # Notes -/// -/// - Uses [`git2`] crate. -pub struct GitSourceLoader {} - -impl GitSourceLoader { - /// Returns a new [`GitSourceLoader`]. 
- pub fn new() -> Self { - Self {} - } -} - -impl SourceLoader for GitSourceLoader { - async fn load_source_script( - self, - output_dir_path: &Path, - needs_metadata: bool, - ) -> Result { - // let module_main_path = output_dir_path.join("./main.ts"); - // let metadata_path = output_dir_path.join("./meta.toml"); - - todo!() - } -} - -#[cfg(test)] -mod test { - use crate::loader::GitSourceLoader; - use crate::Result; - - #[test] - fn build_source_loader() -> Result<()> { - let _ = GitSourceLoader::new(); - Ok(()) - } -} diff --git a/crates/source/loader/from_tar.rs b/crates/source/loader/from_tar.rs deleted file mode 100644 index 7e88207..0000000 --- a/crates/source/loader/from_tar.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::future::Future; -use std::io::Cursor; -use std::path::Path; - -use crate::loader::{SourceBuffer, SourceLoader}; -use crate::script::{SourceContainer, SourceMetadata}; -use crate::{Result, Source}; - -/// TODO. -/// -/// # Notes -/// -/// - Uses [`tar`] crate. -pub struct TarSourceLoader<'a> { - buf: SourceBuffer<'a>, -} - -impl<'a> TarSourceLoader<'a> { - /// Returns a new [`TarSourceLoader`]. 
- #[inline] - pub fn new(buf: SourceBuffer<'a>) -> Self { - Self { buf } - } - - pub fn into_reader(self) -> Result>> { - let buf = self.buf.into_bytes()?; - Ok(Cursor::new(buf)) - } -} - -impl SourceLoader for TarSourceLoader<'_> { - async fn load_source_script( - self, - output_dir_path: &Path, - needs_metadata: bool, - ) -> Result { - todo!() - } -} - -#[cfg(test)] -mod test { - use crate::loader::{SourceBuffer, TarSourceLoader}; - use crate::Result; - - #[test] - fn build_source_loader() -> Result<()> { - let buf = SourceBuffer::default(); - let _ = TarSourceLoader::new(buf); - Ok(()) - } -} diff --git a/crates/source/loader/from_zip.rs b/crates/source/loader/from_zip.rs deleted file mode 100644 index 96480e6..0000000 --- a/crates/source/loader/from_zip.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::future::Future; -use std::path::Path; - -use crate::loader::{SourceBuffer, SourceLoader}; -use crate::script::{SourceContainer, SourceMetadata}; -use crate::{Result, Source}; - -/// TODO. -/// -/// # Notes -/// -/// - Uses [`zip`] crate. -pub struct ZipSourceLoader<'a> { - buf: SourceBuffer<'a>, -} - -impl<'a> ZipSourceLoader<'a> { - /// Returns a new [`ZipSourceLoader`]. - #[inline] - pub fn new(buf: SourceBuffer<'a>) -> Self { - Self { buf } - } -} - -impl SourceLoader for ZipSourceLoader<'_> { - async fn load_source_script( - self, - output_dir_path: &Path, - needs_metadata: bool, - ) -> Result { - todo!() - } -} - -#[cfg(test)] -mod test { - use crate::loader::{SourceBuffer, ZipSourceLoader}; - use crate::Result; - - #[test] - fn build_source_loader() -> Result<()> { - let buf = SourceBuffer::default(); - let _ = ZipSourceLoader::new(buf); - Ok(()) - } -} diff --git a/crates/source/loader/mod.rs b/crates/source/loader/mod.rs deleted file mode 100644 index e04a70a..0000000 --- a/crates/source/loader/mod.rs +++ /dev/null @@ -1,119 +0,0 @@ -//! All available [`SourceLoader`] implementations. 
- -#[cfg(feature = "source-git")] -mod from_git; -#[cfg(feature = "source-tar")] -mod from_tar; -#[cfg(feature = "source-zip")] -mod from_zip; - -use std::borrow::Cow; -use std::future::Future; -use std::io::{Error, ErrorKind, Read}; -use std::path::Path; - -#[cfg(feature = "source-git")] -#[cfg_attr(docsrs, doc(cfg(feature = "source-git")))] -pub use crate::loader::from_git::GitSourceLoader; -#[cfg(feature = "source-tar")] -#[cfg_attr(docsrs, doc(cfg(feature = "source-tar")))] -pub use crate::loader::from_tar::TarSourceLoader; -#[cfg(feature = "source-zip")] -#[cfg_attr(docsrs, doc(cfg(feature = "source-zip")))] -pub use crate::loader::from_zip::ZipSourceLoader; -use crate::{Result, Source}; - -/// TODO. -pub trait SourceLoader { - /// TODO. - fn load_source_script( - self, - output_dir_path: &Path, - loading_metadata: bool, - ) -> impl Future>; -} - -/// TODO. -pub struct SourceBuffer<'a> { - buf: ResolveBuffer<'a>, -} - -enum ResolveBuffer<'a> { - Bytes { buf: Cow<'a, [u8]> }, - String { buf: Cow<'a, str> }, -} - -impl SourceBuffer<'static> { - /// Clones the provided byte sequence into the new [`SourceBuffer`]. - pub fn from_bytes(buf: impl AsRef<[u8]>) -> Self { - let buf = ResolveBuffer::Bytes { - buf: Cow::Owned(buf.as_ref().to_owned()), - }; - - Self { buf } - } - - /// Clones the provided string into the new [`SourceBuffer`]. - pub fn from_string(buf: impl AsRef) -> Self { - let buf = ResolveBuffer::String { - buf: Cow::Owned(buf.as_ref().to_owned()), - }; - - Self { buf } - } - - /// Reads all bytes from the provided reader. - /// - /// # Errors - /// - /// If the data in the buffer is not valid UTF-8 then an error is returned. - /// See [`Read::read_to_end`] for the semantics. 
- pub fn from_reader(mut reader: impl Read) -> Result { - let mut buf = Vec::default(); - let _ = reader.read_to_end(&mut buf)?; - let buf = ResolveBuffer::Bytes { - buf: Cow::Owned(buf), - }; - - Ok(Self { buf }) - } -} - -impl SourceBuffer<'_> { - /// Returns the underlying byte sequence. - fn into_bytes(self) -> Result> { - match self.buf { - ResolveBuffer::Bytes { buf } => Ok(buf.into_owned()), - ResolveBuffer::String { buf } => Ok(buf.into_owned().into_bytes()), - } - } - - /// Returns the underlying byte sequence as a string, validates UTF-8 if needed. - /// - /// # Errors - /// - /// If the data in the buffer is not valid UTF-8 then an error is returned. - /// See [`Read::read_to_string`] and [`String::from_utf8`] for the semantics. - fn into_string(self) -> Result { - let buf = match self.buf { - ResolveBuffer::Bytes { buf } => buf, - ResolveBuffer::String { buf } => return Ok(buf.into_owned()), - }; - - let buf = String::from_utf8(buf.into_owned()).map_err(|_| { - Error::new(ErrorKind::InvalidData, "stream did not contain valid UTF-8") - })?; - - Ok(buf) - } -} - -impl Default for SourceBuffer<'static> { - fn default() -> Self { - let buf = ResolveBuffer::Bytes { - buf: Cow::Owned(Vec::new()), - }; - - Self { buf } - } -} diff --git a/crates/source/script/builder.rs b/crates/source/script/builder.rs deleted file mode 100644 index 4b01824..0000000 --- a/crates/source/script/builder.rs +++ /dev/null @@ -1,40 +0,0 @@ -use tempfile::TempDir; - -use crate::loader::SourceLoader; -use crate::{Result, Source}; - -/// TODO. -pub struct SourceBuilder { - loader: L, - loading_metadata: bool, -} - -impl SourceBuilder { - /// Returns a new [`SourceBuilder`]. - #[inline] - pub fn new(loader: L) -> Self { - Self { - loader, - loading_metadata: true, - } - } - - /// TODO. - pub fn without_metadata(mut self) -> Self { - self.loading_metadata = false; - self - } -} - -impl SourceBuilder -where - L: SourceLoader, -{ - /// TODO. 
- pub async fn build(self, temp_dir: TempDir) -> Result { - let output_dir = temp_dir.path(); - self.loader - .load_source_script(output_dir, self.loading_metadata) - .await - } -} diff --git a/crates/source/script/container.rs b/crates/source/script/container.rs deleted file mode 100644 index 2696717..0000000 --- a/crates/source/script/container.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::fmt; -use std::hash::Hash; -use std::path::PathBuf; - -use bytes::Bytes; -use tempfile::TempDir; - -use crate::utils::io::hash_directory_with_filter; -use crate::Result; - -/// Represents all downloaded and unarchived module files. -#[must_use = "metadata does nothing unless you use it"] -pub struct SourceContainer { - temp_module_dir: TempDir, - module_main_path: PathBuf, - metadata_path: Option, -} - -impl SourceContainer { - /// Returns a new [`SourceContainer`]. - pub(crate) fn new( - temp_module_dir: TempDir, - module_main_path: PathBuf, - metadata_path: Option, - ) -> Self { - Self { - temp_module_dir, - module_main_path, - metadata_path, - } - } - - pub(crate) fn sha256(&self) -> Result { - let module_dir_path = self.temp_module_dir.path(); - hash_directory_with_filter(module_dir_path, |path| { - path.extension() - .map_or(false, |ext| ext == "ts" || ext == "js") - }) - .map(From::from) - .map_err(From::from) - } - - /// Closes and removes the temporary directory. - pub(crate) fn close(self) -> Result<()> { - self.temp_module_dir.close()?; - Ok(()) - } -} - -impl fmt::Debug for SourceContainer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SourceContainer").finish_non_exhaustive() - } -} diff --git a/crates/source/script/metadata.rs b/crates/source/script/metadata.rs deleted file mode 100644 index 3fe6d48..0000000 --- a/crates/source/script/metadata.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::path::Path; - -use serde::{Deserialize, Serialize}; - -use crate::Result; - -/// TODO. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "metadata does nothing unless you use it"] -pub struct SourceMetadata { - pub name: String, - pub version: String, - - pub created_at: (), - pub accessed_at: (), -} - -impl SourceMetadata { - /// TODO. - pub fn from_dir(dir_path: impl AsRef) -> Result> { - Self::from_dir_impl(dir_path.as_ref()) - } - - /// TODO. - pub fn from_file(file_path: impl AsRef) -> Result> { - Self::from_file(file_path.as_ref()) - } - - /// TODO. - fn from_dir_impl(dir_path: &Path) -> Result> { - todo!() - } - - /// TODO. - fn from_file_impl(file_path: &Path) -> Result> { - todo!() - } - - /// TODO. - fn from_json(buf: &[u8]) -> Result { - todo!() - } - - /// TODO. - fn into_json(self) -> Result> { - todo!() - } -} - -impl Default for SourceMetadata { - fn default() -> Self { - todo!() - } -} diff --git a/crates/source/script/mod.rs b/crates/source/script/mod.rs deleted file mode 100644 index 526643b..0000000 --- a/crates/source/script/mod.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! TODO. - -use tempfile::tempdir_in; - -use crate::loader::{SourceBuffer, SourceLoader}; -pub use crate::script::builder::SourceBuilder; -pub use crate::script::container::SourceContainer; -pub use crate::script::metadata::SourceMetadata; -use crate::Result; - -mod builder; -mod container; -mod metadata; - -/// TODO. -#[derive(Debug)] -#[must_use = "metadata does nothing unless you use it"] -pub struct Source { - pub(crate) source_container: SourceContainer, - pub(crate) source_metadata: Option, -} - -impl Source { - /// Returns a new [`Source`]. - #[inline] - pub fn new(container: SourceContainer, metadata: SourceMetadata) -> Self { - Self { - source_container: container, - source_metadata: Some(metadata), - } - } - - /// Loads a new [`Source`] using a [`GitSourceLoader`]. 
- /// - /// [`GitSourceLoader`]: crate::loader::GitSourceLoader - #[cfg(feature = "source-git")] - #[cfg_attr(docsrs, doc(cfg(feature = "source-git")))] - pub async fn from_git() -> Result { - use crate::loader::GitSourceLoader; - let loader = GitSourceLoader::new(); - Self::from_loader(loader).await - } - - /// Loads a new [`Source`] using a [`TarSourceLoader`]. - /// - /// [`TarSourceLoader`]: crate::loader::TarSourceLoader - #[cfg(feature = "source-tar")] - #[cfg_attr(docsrs, doc(cfg(feature = "source-tar")))] - pub async fn from_tar(buf: SourceBuffer<'_>) -> Result { - use crate::loader::TarSourceLoader; - let loader = TarSourceLoader::new(buf); - Self::from_loader(loader).await - } - - /// Loads a new [`Source`] using a [`ZipSourceLoader`]. - /// - /// [`ZipSourceLoader`]: crate::loader::ZipSourceLoader - #[cfg(feature = "source-zip")] - #[cfg_attr(docsrs, doc(cfg(feature = "source-zip")))] - pub async fn from_zip(buf: SourceBuffer<'_>) -> Result { - use crate::loader::ZipSourceLoader; - let loader = ZipSourceLoader::new(buf); - Self::from_loader(loader).await - } - - /// Loads a new [`Source`] using a provided [`SourceLoader`]. - pub async fn from_loader(loader: L) -> Result - where - L: SourceLoader, - { - let temp_dir = tempdir_in("./")?; - SourceBuilder::new(loader).build(temp_dir).await - } - - /// Closes and removes the temporary directory. 
- pub fn close(self) -> Result<()> { - self.source_container.close()?; - Ok(()) - } -} diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml deleted file mode 100644 index 996a977..0000000 --- a/crates/task/Cargo.toml +++ /dev/null @@ -1,46 +0,0 @@ -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[package] -name = "axiston-rt-task" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -readme = "./README.md" - -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -documentation = { workspace = true } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[features] -default = ["hashbrown"] -# Enables high-performance SwissTable hash map. -hashbrown = ["dep:hashbrown"] - -[lib] -path = "lib.rs" - -[dependencies] -futures = { workspace = true } -pin-project-lite = { workspace = true } -tracing = { workspace = true } -thiserror = { workspace = true } - -jsonschema = { version = "0.28", features = [] } -hashbrown = { version = "0.15", optional = true, features = ["serde"] } -semver = { version = "1.0", features = ["serde"] } -replace_with = { version = "0.1", features = [] } - -derive_more = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -ecow = { workspace = true } -tower = { workspace = true } - -[dev-dependencies] -tokio = { workspace = true } diff --git a/crates/task/README.md b/crates/task/README.md deleted file mode 100644 index 8734298..0000000 --- a/crates/task/README.md +++ /dev/null @@ -1,20 +0,0 @@ -### runtime/task - -[![Build Status][action-badge]][action-url] -[![Crate Docs][docs-badge]][docs-url] -[![Crate Version][crates-badge]][crates-url] - -[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml?branch=main&label=build&logo=github&style=flat-square -[action-url]: 
https://github.com/axiston/runtime/actions/workflows/build.yaml -[crates-badge]: https://img.shields.io/crates/v/axiston-rt-task.svg?logo=rust&style=flat-square -[crates-url]: https://crates.io/crates/axiston-rt-task -[docs-badge]: https://img.shields.io/docsrs/axiston-rt-task?logo=Docs.rs&style=flat-square -[docs-url]: http://docs.rs/axiston-rt-task - -Lorem Ipsum. Lorem Ipsum. Lorem Ipsum. - -#### Notes - -- Lorem Ipsum. -- Lorem Ipsum. -- Lorem Ipsum. diff --git a/crates/task/context/error.rs b/crates/task/context/error.rs deleted file mode 100644 index f106ea2..0000000 --- a/crates/task/context/error.rs +++ /dev/null @@ -1,91 +0,0 @@ -use std::error::Error; - -use jsonschema::ValidationError; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; - -/// Unrecoverable failure during the [`TaskHandler`] execution. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, thiserror::Error, Serialize, Deserialize)] -#[error("internal handler error")] -#[must_use = "errors do nothing unless you use them"] -pub struct TaskError { - #[serde(skip)] - pub(crate) error: Option>, - - #[serde(rename = "kind")] - pub(crate) kind: TaskErrorKind, - #[serde(rename = "values")] - pub(crate) values: Option, -} - -/// A list specifying general categories of [`TaskError`]s. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "errors do nothing unless you use them"] -pub enum TaskErrorKind { - /// Task wih a requested identifier was not found. - NotFound, - /// Request or response schema validation failed. - Schema, - - /// TODO. - TimeoutPolicy, - /// TODO. - RetryPolicy, - - /// Unknown (type-erased) error occurred. - Unknown, -} - -impl TaskError { - /// Returns a new [`TaskError`]. - #[inline] - pub fn new(kind: TaskErrorKind, error: E) -> Self - where - E: Into>, - { - Self { - kind, - error: Some(error.into()), - values: None, - } - } - - /// Overrides the default value of [`TaskError`]`::values`. 
- #[inline] - pub fn with_values(mut self, values: Value) -> Self { - self.values = Some(values); - self - } -} - -impl<'a> From> for TaskError { - fn from(value: ValidationError<'a>) -> Self { - Self::new( - TaskErrorKind::Schema, - "request or response schema validation failed", - ) - .with_values(json!({ - "instance": value.instance.into_owned(), - })) - } -} - -/// Specialized [`Result`] alias for the [`TaskError`] type. -pub type TaskResult = Result; - -#[cfg(test)] -mod test { - use serde_json::Value; - - use crate::context::{TaskError, TaskErrorKind, TaskResult}; - - #[test] - fn build_empty_error() -> TaskResult<()> { - let _error = TaskError::new(TaskErrorKind::NotFound, "requested entity was not found") - .with_values(Value::default()); - - Ok(()) - } -} diff --git a/crates/task/context/layers.rs b/crates/task/context/layers.rs deleted file mode 100644 index edf818f..0000000 --- a/crates/task/context/layers.rs +++ /dev/null @@ -1,86 +0,0 @@ -//! [`TaskLayers`] and its [`TaskLayersBuilder`] utility. - -use serde::{Deserialize, Serialize}; - -use crate::context::policies::{RetryPolicy, TimeoutPolicy}; -use crate::context::TaskResult; - -/// Declarative `tower::`[`Layer`]s configuration. -/// -/// [`Layer`]: tower::Layer -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -#[must_use = "layers do nothing unless you use them"] -pub struct TaskLayers { - #[serde(rename = "timeout")] - pub(crate) timeout_policy: Option, - #[serde(rename = "retry")] - pub(crate) retry_policy: Option, -} - -impl TaskLayers { - /// Returns a new [`TaskLayers`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Returns a new [`TaskLayersBuilder`]. - #[inline] - pub fn builder() -> TaskLayersBuilder { - TaskLayersBuilder::new() - } -} - -/// [`TaskLayers`] builder. 
-#[derive(Debug, Default, Clone)] -pub struct TaskLayersBuilder { - pub(crate) timeout_policy: Option, - pub(crate) retry_policy: Option, -} - -impl TaskLayersBuilder { - /// Returns a new [`TaskLayersBuilder`]. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Overrides the default value of [`TaskLayersBuilder`]`::timeout_policy`. - pub fn with_timeout_policy(mut self, timeout_policy: TimeoutPolicy) -> Self { - self.timeout_policy = Some(timeout_policy); - self - } - - /// Overrides the default value of [`TaskLayersBuilder`]`::retry_policy`. - pub fn with_retry_policy(mut self, retry_policy: RetryPolicy) -> Self { - self.retry_policy = Some(retry_policy); - self - } - - /// Returns a new [`TaskLayers`]. - pub fn build(self) -> TaskResult { - Ok(TaskLayers { - timeout_policy: self.timeout_policy, - retry_policy: self.retry_policy, - }) - } -} - -#[cfg(test)] -mod test { - use std::time::Duration; - - use crate::context::layers::TaskLayers; - use crate::context::policies::{RetryPolicy, TimeoutPolicy}; - use crate::context::TaskResult; - - #[test] - fn from_layers_builder() -> TaskResult<()> { - let _layers = TaskLayers::builder() - .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) - .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) - .build()?; - - Ok(()) - } -} diff --git a/crates/task/context/mod.rs b/crates/task/context/mod.rs deleted file mode 100644 index a1f1e59..0000000 --- a/crates/task/context/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! [`TaskRequest`], [`TaskResponse`] and [`TaskError`]. - -pub mod builders { - //! [`TaskRequest`] and [`TaskResponse`] builders. - //! - //! [`TaskRequest`]: crate::context::TaskRequest - //! 
[`TaskResponse`]: crate::context::TaskResponse - - pub use super::request::TaskRequestBuilder; - pub use super::response::TaskResponseBuilder; -} - -pub use crate::context::error::{TaskError, TaskErrorKind, TaskResult}; -pub use crate::context::request::TaskRequest; -pub use crate::context::response::TaskResponse; - -mod error; -pub mod layers; -pub mod policies; -mod request; -mod response; diff --git a/crates/task/context/policies.rs b/crates/task/context/policies.rs deleted file mode 100644 index e4a049c..0000000 --- a/crates/task/context/policies.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! [`RetryPolicy`] and [`TimeoutPolicy`]. - -use std::time::Duration; - -use serde::{Deserialize, Serialize}; - -/// Defines a policy for handling timeouts. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "policies do nothing unless you use them"] -pub struct TimeoutPolicy { - /// The duration after which a timeout will occur. - pub duration: Duration, - /// The action to take when a timeout occurs. - pub action: TimeoutAction, -} - -/// Specifies actions to take when a timeout occurs. -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -#[must_use = "policies do nothing unless you use them"] -pub enum TimeoutAction { - /// Retry the operation after a timeout. - Retry, - /// Terminate the operation after a timeout (default behavior). - #[default] - Terminate, -} - -impl TimeoutPolicy { - /// Returns a new retry [`TimeoutPolicy`] with the specified timeout duration. - pub fn retry(timeout: Duration) -> Self { - Self { - duration: timeout, - action: TimeoutAction::Retry, - } - } - - /// Returns a new terminate [`TimeoutPolicy`] with the specified timeout duration. - pub fn terminate(timeout: Duration) -> Self { - Self { - duration: timeout, - action: TimeoutAction::Terminate, - } - } -} - -/// Defines a policy for handling retries. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "policies do nothing unless you use them"] -pub struct RetryPolicy { - /// The maximum number of retry attempts. - pub retries: u32, - /// The strategy to use for determining retry intervals. - pub strategy: RetryStrategy, -} - -/// Specifies strategies for calculating retry intervals. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[must_use = "policies do nothing unless you use them"] -pub enum RetryStrategy { - /// Linear backoff strategy with optional jitter and max backoff duration. - Linear { - step_backoff: Duration, - max_backoff: Option, - jitter_perc: Option, - }, - /// Exponential backoff strategy with optional jitter and max backoff duration. - Exponential { - base_backoff: Duration, - max_backoff: Option, - jitter_perc: Option, - }, -} - -impl RetryPolicy { - /// Returns a new linear [`RetryPolicy`] with the specified retries and base backoff duration. - pub fn linear(retries: u32, base_backoff: Duration) -> Self { - Self { - retries, - strategy: RetryStrategy::Linear { - step_backoff: base_backoff, - max_backoff: None, - jitter_perc: None, - }, - } - } - - /// Returns a new exponential [`RetryPolicy`] with the specified retries, base backoff. - pub fn exponential(retries: u32, base_backoff: Duration) -> Self { - Self { - retries, - strategy: RetryStrategy::Exponential { - base_backoff, - max_backoff: None, - jitter_perc: None, - }, - } - } - - /// Sets the maximum backoff duration and returns the modified policy. - pub fn with_max_backoff(mut self, new_max_backoff: Duration) -> Self { - match self.strategy { - RetryStrategy::Linear { - ref mut max_backoff, - .. - } => *max_backoff = Some(new_max_backoff), - RetryStrategy::Exponential { - ref mut max_backoff, - .. - } => *max_backoff = Some(new_max_backoff), - }; - - self - } - - /// Sets the jitter percentage and returns the modified policy. 
- pub fn with_jitter_perc(mut self, new_jitter_perc: f64) -> Self { - match self.strategy { - RetryStrategy::Linear { - ref mut jitter_perc, - .. - } => *jitter_perc = Some(new_jitter_perc), - RetryStrategy::Exponential { - ref mut jitter_perc, - .. - } => *jitter_perc = Some(new_jitter_perc), - }; - - self - } -} diff --git a/crates/task/context/request.rs b/crates/task/context/request.rs deleted file mode 100644 index 7becc5b..0000000 --- a/crates/task/context/request.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::fmt; - -use derive_more::{Deref, DerefMut}; -use serde::{Deserialize, Serialize}; -use serde_json::{Map, Value}; - -use crate::context::layers::TaskLayers; -use crate::context::TaskResult; - -/// Serializable [`TaskHandler`] service request. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] -#[must_use = "requests do nothing unless you serialize them"] -pub struct TaskRequest { - #[deref] - #[deref_mut] - inner: T, - - #[serde(rename = "task")] - pub(crate) task_id: String, - #[serde(skip)] - pub(crate) layers: Option, - - #[serde(rename = "inputs")] - pub(crate) inputs: Value, - #[serde(rename = "secrets")] - pub(crate) secrets: Value, -} - -impl TaskRequest { - /// Returns a new [`TaskRequest`]. - #[inline] - pub fn new(index: &str, inner: T) -> Self { - Self { - inner, - task_id: index.to_owned(), - layers: None, - inputs: Value::Object(Map::new()), - secrets: Value::Object(Map::new()), - } - } - - /// Returns a new [`TaskRequestBuilder`]. - #[inline] - pub fn builder(index: &str, inner: T) -> TaskRequestBuilder { - TaskRequestBuilder::new(index, inner) - } - - /// Returns the inner data. - #[inline] - pub fn into_inner(self) -> T { - self.inner - } - - /// Applies the default [`TaskLayers`] if it has none. 
- pub(crate) fn apply_default_layers(&mut self, layers: Option<&TaskLayers>) { - if self.layers.is_some() { - return; - } - - // self.layers.is_none() && - if let Some(layers) = layers { - self.layers.replace(layers.clone()); - } - } -} - -impl fmt::Debug for TaskRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskRequest") - .field("inputs", &self.inputs) - .field("secrets", &"*****") - .finish_non_exhaustive() - } -} - -/// [`TaskHandler`] service request builder. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Clone)] -#[must_use = "requests do nothing unless you serialize them"] -pub struct TaskRequestBuilder { - inner: T, - index: String, - layers: Option, - inputs: Option, - secrets: Option, -} - -impl TaskRequestBuilder { - /// Returns a new [`TaskRequestBuilder`]. - #[inline] - pub fn new(index: &str, inner: T) -> Self { - Self { - inner, - index: index.to_owned(), - layers: None, - inputs: None, - secrets: None, - } - } - - /// Adds other key/value pair into the [`TaskRequest`]`::inputs` object. - pub fn with_inputs(mut self, key: &str, value: impl Into) -> Self { - let inputs = self - .inputs - .get_or_insert_with(|| Value::Object(Map::default())); - let Value::Object(object) = inputs else { - unreachable!(); - }; - - object.insert(key.to_owned(), value.into()); - self - } - - /// Adds other key/value pair into the [`TaskRequest`]`::secrets` object. - pub fn with_secrets(mut self, key: &str, value: impl Into) -> Self { - let inputs = self - .secrets - .get_or_insert_with(|| Value::Object(Map::default())); - let Value::Object(object) = inputs else { - unreachable!(); - }; - - object.insert(key.to_owned(), value.into()); - self - } - - /// Overrides the default value of [`TaskRequest`]`::layers`. - pub fn with_layers(mut self, layers: TaskLayers) -> Self { - self.layers = Some(layers); - self - } - - /// Returns a new [`TaskRequest`]. 
- pub fn build(self) -> TaskResult> { - Ok(TaskRequest { - inner: self.inner, - task_id: self.index, - layers: self.layers, - inputs: self.inputs.unwrap_or_default(), - secrets: self.secrets.unwrap_or_default(), - }) - } -} - -#[cfg(test)] -mod test { - use crate::context::layers::TaskLayers; - use crate::context::{TaskRequest, TaskResult}; - - #[test] - fn build_empty_request() -> TaskResult<()> { - let _request = TaskRequest::builder("builtin0", 5) - .with_inputs("input0", 5) - .with_secrets("secret0", "qwerty") - .with_layers(TaskLayers::new()) - .build()?; - - Ok(()) - } -} diff --git a/crates/task/context/response.rs b/crates/task/context/response.rs deleted file mode 100644 index 1214a61..0000000 --- a/crates/task/context/response.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::fmt; - -use derive_more::{Deref, DerefMut}; -use serde::{Deserialize, Serialize}; -use serde_json::map::Map; -use serde_json::Value; - -use crate::context::TaskResult; - -/// Deserializable [`TaskHandler`] service response. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Clone, Serialize, Deserialize, Deref, DerefMut)] -#[must_use = "responses do nothing unless you serialize them"] -pub struct TaskResponse { - #[deref] - #[deref_mut] - inner: T, - - #[serde(rename = "outputs")] - pub(crate) outputs: Value, - #[serde(rename = "metrics")] - pub(crate) metrics: Value, -} - -impl TaskResponse { - /// Returns a new [`TaskResponse`]. - #[inline] - pub fn new(inner: T) -> Self { - Self { - inner, - outputs: Value::Object(Map::new()), - metrics: Value::Object(Map::new()), - } - } - - /// Returns a new [`TaskResponseBuilder`]. - #[inline] - pub fn builder(inner: T) -> TaskResponseBuilder { - TaskResponseBuilder::new(inner) - } - - /// Returns the inner data. 
- #[inline] - pub fn into_inner(self) -> T { - self.inner - } -} - -impl fmt::Debug for TaskResponse { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskResponse") - .field("outputs", &self.outputs) - .field("metrics", &self.metrics) - .finish_non_exhaustive() - } -} - -/// [`TaskHandler`] service response builder. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Default, Clone)] -#[must_use = "responses do nothing unless you serialize them"] -pub struct TaskResponseBuilder { - inner: T, - outputs: Option, - metrics: Option, -} - -impl TaskResponseBuilder { - /// Returns a new [`TaskResponseBuilder`]. - #[inline] - pub fn new(inner: T) -> Self { - Self { - inner, - outputs: None, - metrics: None, - } - } - - /// Adds other key/value pair into the [`TaskResponse`]`::outputs` object. - pub fn with_outputs(mut self, values: Value) -> Self { - self.outputs = Some(values); - self - } - - /// Adds other key/value pair into the [`TaskResponse`]`::metrics` object. - pub fn with_metrics(mut self, values: Value) -> Self { - self.metrics = Some(values); - self - } - - /// Returns a new [`TaskResponse`]. - pub fn build(self) -> TaskResult> { - Ok(TaskResponse { - inner: self.inner, - outputs: self.outputs.unwrap_or_default(), - metrics: self.metrics.unwrap_or_default(), - }) - } -} - -#[cfg(test)] -mod test { - use serde_json::Value; - - use crate::context::{TaskResponse, TaskResult}; - - #[test] - fn build_empty_response() -> TaskResult<()> { - let _response = TaskResponse::builder(5) - .with_outputs(Value::default()) - .with_metrics(Value::default()) - .build()?; - - Ok(()) - } -} diff --git a/crates/task/handler/compose.rs b/crates/task/handler/compose.rs deleted file mode 100644 index 8161bad..0000000 --- a/crates/task/handler/compose.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! [`LayerCompose`]. 
- -use std::borrow::Cow; -use std::time::Duration; - -use tower::retry::backoff::{ExponentialBackoff, ExponentialBackoffMaker, MakeBackoff}; -use tower::retry::RetryLayer; -use tower::timeout::error::Elapsed; -use tower::timeout::TimeoutLayer; -use tower::util::rng::HasherRng; -use tower::util::{BoxCloneSyncService, Either, MapErrLayer}; -use tower::{BoxError, ServiceBuilder}; - -use crate::context::layers::TaskLayers; -use crate::context::policies::{RetryStrategy, TimeoutAction}; -use crate::context::{TaskError, TaskErrorKind, TaskRequest, TaskResponse}; -use crate::handler::retry::{BackoffError, BackoffPolicy}; - -/// TODO. -#[derive(Debug, Default, Clone)] -pub struct LayerCompose<'a> { - layers: Cow<'a, TaskLayers>, -} - -type BoxErrorHandler0 = fn(BoxError) -> TaskError; -type BoxErrorHandler1 = fn(TaskError) -> BoxError; - -type LinearRetryLayer = RetryLayer>; -type ExponentialRetryLayer = RetryLayer>; -type EitherRetryLayer = Either; - -impl<'a> LayerCompose<'a> { - /// Returns a new [`LayerCompose`]. - #[inline] - pub fn new(layers: &'a TaskLayers) -> Self { - Self { - layers: Cow::Borrowed(layers), - } - } - - fn error_handle_pre_layer(&self) -> MapErrLayer { - let handler = |x: TaskError| -> BoxError { Box::new(x) }; - MapErrLayer::new(handler) - } - - fn error_handle_post_layer(&self) -> MapErrLayer { - let handler = |x: BoxError| -> TaskError { - let box_error = match x.downcast::() { - Ok(backoff_error) => backoff_error.into_inner(), - Err(box_error) => box_error, - }; - - let box_error = match box_error.downcast::() { - Ok(_) => { - return TaskError::new( - TaskErrorKind::TimeoutPolicy, - "timeout policy error occurred", - ) - } - Err(box_error) => box_error, - }; - - match box_error.downcast::() { - Ok(task_error) => *task_error, - Err(_) => TaskError::new( - TaskErrorKind::Unknown, - "unknown (type-erased) error occurred", - ), - } - }; - - MapErrLayer::new(handler) - } - - /// Returns the optional `tower::`[`RetryLayer`]. 
- fn optional_retry_layer(&self) -> Option { - let Some(retry_policy) = &self.layers.retry_policy else { - return None; - }; - - // TODO: Make RetryStrategy::Linear actually linear. - let maker = match retry_policy.strategy { - RetryStrategy::Linear { - step_backoff, - max_backoff, - jitter_perc, - } => ExponentialBackoffMaker::new( - step_backoff, - max_backoff.unwrap_or_else(|| Duration::from_secs(u64::MAX)), - jitter_perc.unwrap_or_default(), - HasherRng::new(), - ), - RetryStrategy::Exponential { - base_backoff, - max_backoff, - jitter_perc, - } => ExponentialBackoffMaker::new( - base_backoff, - max_backoff.unwrap_or_else(|| Duration::from_secs(u64::MAX)), - jitter_perc.unwrap_or_default(), - HasherRng::new(), - ), - }; - - let backoff = maker.unwrap().make_backoff(); - let policy = BackoffPolicy::new(retry_policy.retries, backoff); - Some(Either::Right(RetryLayer::new(policy))) - } - - /// Returns the optional `tower::`[`TimeoutLayer`]. - fn optional_timeout_layer(&self) -> Option { - let Some(timeout_policy) = &self.layers.timeout_policy else { - return None; - }; - - Some(TimeoutLayer::new(timeout_policy.duration)) - } - - /// TODO. 
- pub fn apply_layers( - self, - task_handler: BoxCloneSyncService, TaskResponse, TaskError>, - ) -> BoxCloneSyncService, TaskResponse, TaskError> - where - T: 'static + Send + Clone, - U: 'static + Send, - { - let timeout_policy = self.layers.timeout_policy.as_ref(); - let is_retry = timeout_policy - .map(|x| matches!(x.action, TimeoutAction::Retry)) - .unwrap_or_default(); - - if is_retry { - let service = ServiceBuilder::new() - .layer(self.error_handle_post_layer()) - .option_layer(self.optional_retry_layer()) - .option_layer(self.optional_timeout_layer()) - .layer(self.error_handle_pre_layer()) - .service(task_handler); - - BoxCloneSyncService::new(service) - } else { - let service = ServiceBuilder::new() - .layer(self.error_handle_post_layer()) - .option_layer(self.optional_timeout_layer()) - .option_layer(self.optional_retry_layer()) - .layer(self.error_handle_pre_layer()) - .service(task_handler); - - BoxCloneSyncService::new(service) - } - } -} - -#[cfg(test)] -mod test { - use std::time::Duration; - - use crate::context::layers::TaskLayers; - use crate::context::policies::{RetryPolicy, TimeoutPolicy}; - use crate::context::TaskResult; - use crate::handler::compose::LayerCompose; - - #[test] - fn compose() -> TaskResult<()> { - let layers = TaskLayers::builder() - .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) - .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) - .build()?; - - let _compose = LayerCompose::new(&layers); - Ok(()) - } -} diff --git a/crates/task/handler/future.rs b/crates/task/handler/future.rs deleted file mode 100644 index 5cbbbcb..0000000 --- a/crates/task/handler/future.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! [`Future`] types for [`TaskHandler`]s. -//! -//! 
[`TaskHandler`]: crate::handler::TaskHandler - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use futures::future::BoxFuture; -use futures::FutureExt; -use pin_project_lite::pin_project; - -use crate::context::{TaskResponse, TaskResult}; -use crate::handler::metric::TaskMetricsLock; - -pin_project! { - /// Opaque [`Future`] return type for [`TaskHandler::call`]. - /// - /// Contains a single `futures::`[`BoxFuture`]. - /// - /// [`TaskHandler::call`]: crate::context::TaskHandler - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TaskFuture { - #[pin] fut: BoxFuture<'static, TaskResult>>, - metrics: Option, - } -} - -impl TaskFuture { - /// Returns a new [`TaskFuture`]. - #[inline] - pub fn new(fut: F) -> Self - where - F: Future>> + Sized + Send + 'static, - { - Self { - fut: fut.boxed(), - metrics: None, - } - } - - /// Returns a new [`TaskFuture`]. - #[inline] - pub fn with_metrics(fut: F, metrics: TaskMetricsLock) -> Self - where - F: Future>> + Sized + Send + 'static, - { - Self { - fut: fut.boxed(), - metrics: Some(metrics), - } - } -} - -impl From>>> for TaskFuture { - #[inline] - fn from(fut: BoxFuture<'static, TaskResult>>) -> Self { - Self { fut, metrics: None } - } -} - -impl Future for TaskFuture { - type Output = TaskResult>; - - #[inline] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - this.fut.poll(cx) - } -} - -#[cfg(test)] -mod test { - use crate::context::{TaskResponse, TaskResult}; - use crate::handler::future::TaskFuture; - - #[test] - fn from_async_block() -> TaskResult<()> { - let fut = async move { Ok(TaskResponse::new(5)) }; - let _fut = TaskFuture::new(fut); - - Ok(()) - } -} diff --git a/crates/task/handler/metric.rs b/crates/task/handler/metric.rs deleted file mode 100644 index f5f8a4a..0000000 --- a/crates/task/handler/metric.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! `tower::`[`Load`] metric types for [`TaskHandler`]s. -//! -//! 
[`Load`]: tower::load::Load -//! [`TaskHandler`]: crate::handler::TaskHandler - -use std::sync::{Arc, Mutex}; - -use serde::{Deserialize, Serialize}; - -/// Reference-counting wrapper for [`TaskMetrics`]. -/// -/// Use by [`TaskHandler`]s and [`TaskFuture`]s. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -/// [`TaskFuture`]: crate::handler::future::TaskFuture -#[derive(Debug, Default, Clone)] -#[must_use = "metrics do nothing unless you serialize them"] -pub struct TaskMetricsLock { - inner: Arc>, -} - -impl TaskMetricsLock { - /// Returns a new [`TaskMetricsLock`]. - #[inline] - pub fn new(metrics: TaskMetrics) -> Self { - Self { - inner: Arc::new(Mutex::new(metrics)), - } - } - - /// Returns a new [`TaskMetrics`]. - pub fn snapshot(&self) -> TaskMetrics { - let guard = self.inner.lock().expect("should not be locked"); - guard.clone() - } -} - -/// `tower::load::`[`Load`] metrics for [`TaskHandler`]s. -/// -/// [`Load`]: tower::load::Load -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Default, Clone, PartialOrd, PartialEq, Serialize, Deserialize)] -#[must_use = "metrics do nothing unless you serialize them"] -pub struct TaskMetrics { - // TODO: Implement all metrics. - - // pub average_waiting_time: Duration, - // pub average_recent_waiting_time: Duration, - // pub average_running_time: Duration, - // pub average_recent_running_time: Duration, - // pub total_success_runs: u32, - // pub total_failure_runs: u32, -} - -impl TaskMetrics { - /// Returns a new [`TaskMetrics`]. 
- #[inline] - pub fn new() -> Self { - Self::default() - } -} - -#[cfg(test)] -mod test { - use crate::context::TaskResult; - use crate::handler::metric::TaskMetricsLock; - - #[test] - fn metrics_lock() -> TaskResult<()> { - let metrics_lock = TaskMetricsLock::default(); - let _metrics = metrics_lock.snapshot(); - Ok(()) - } -} diff --git a/crates/task/handler/mod.rs b/crates/task/handler/mod.rs deleted file mode 100644 index 8e0e42c..0000000 --- a/crates/task/handler/mod.rs +++ /dev/null @@ -1,211 +0,0 @@ -//! [`TaskHandler`], [`TaskHandlerLayer`], its future and metrics. - -use std::fmt; -use std::marker::PhantomData; -use std::task::{Context, Poll}; - -use tower::load::Load; -use tower::util::BoxCloneSyncService; -use tower::{Layer, Service, ServiceBuilder}; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::compose::LayerCompose; -use crate::handler::future::TaskFuture; -use crate::handler::metric::{TaskMetrics, TaskMetricsLock}; - -mod compose; -pub mod future; -pub mod metric; -mod retry; - -/// Unified `tower::`[`Service`] for executing tasks. -/// -/// Opaque [`BoxCloneSyncService`]<[`TaskRequest`], [`TaskResponse`], [`TaskError`]>. -#[must_use = "services do nothing unless you `.poll_ready` or `.call` them"] -pub struct TaskHandler { - inner: BoxCloneSyncService, TaskResponse, TaskError>, - metrics: TaskMetricsLock, -} - -impl TaskHandler { - /// Returns a new [`TaskHandler`]. - pub fn new(inner: S) -> Self - where - T: 'static, - U: 'static, - S: Service + Clone + Send + Sync + 'static, - Req: From> + 'static, - S::Response: Into> + 'static, - S::Error: Into + 'static, - S::Future: Send + 'static, - { - Self::with_metrics(inner, TaskMetricsLock::default()) - } - - /// Returns a new [`TaskHandler`] with provided [`TaskMetricsLock`]. - /// - /// Allows to share [`TaskMetricsLock`] and the inner [`TaskMetrics`]. 
- pub fn with_metrics(inner: S, metrics: TaskMetricsLock) -> Self - where - T: 'static, - U: 'static, - S: Service + Clone + Send + Sync + 'static, - Req: From> + 'static, - S::Response: Into> + 'static, - S::Error: Into + 'static, - S::Future: Send + 'static, - { - let inner = ServiceBuilder::new() - .map_request(From::from) - .map_response(Into::into) - .map_err(Into::into) - .service(inner); - - Self { - inner: BoxCloneSyncService::new(inner), - metrics, - } - } - - /// Maps a `TaskHandler` to `TaskHandler` by applying a function to a contained service. - pub fn map(self, f: F) -> TaskHandler - where - F: FnOnce( - BoxCloneSyncService, TaskResponse, TaskError>, - ) -> BoxCloneSyncService, TaskResponse, TaskError>, - { - TaskHandler { - inner: f(self.inner), - metrics: self.metrics, - } - } - - /// Returns a new [`TaskMetrics`]. - #[inline] - pub fn snapshot(&self) -> TaskMetrics { - self.metrics.snapshot() - } -} - -impl Clone for TaskHandler { - #[inline] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - metrics: self.metrics.clone(), - } - } -} - -impl fmt::Debug for TaskHandler { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TaskHandler").finish_non_exhaustive() - } -} - -impl Service> for TaskHandler -where - T: 'static + Send + Clone, - U: 'static + Send, -{ - type Response = TaskResponse; - type Error = TaskError; - type Future = TaskFuture; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready(cx) - } - - #[inline] - fn call(&mut self, req: TaskRequest) -> Self::Future { - let Some(layers) = &req.layers else { - return TaskFuture::with_metrics(self.inner.call(req), self.metrics.clone()); - }; - - let compose = LayerCompose::new(layers); - let mut svc = compose.apply_layers(self.inner.clone()); - TaskFuture::with_metrics(svc.call(req), self.metrics.clone()) - } -} - -impl Load for TaskHandler { - type Metric = TaskMetrics; - - #[inline] - fn load(&self) -> 
Self::Metric { - self.metrics.snapshot() - } -} - -/// `tower::`[`Layer`] that produces a [`TaskHandler`] services. -pub struct TaskHandlerLayer { - metrics: TaskMetricsLock, - inner: PhantomData<(Req, T, U)>, -} - -impl TaskHandlerLayer { - /// Returns a new [`TaskHandlerLayer`]. - #[inline] - pub fn new(metrics: TaskMetricsLock) -> Self { - Self { - metrics, - inner: PhantomData, - } - } -} - -impl Default for TaskHandlerLayer { - #[inline] - fn default() -> Self { - Self { - metrics: TaskMetricsLock::default(), - inner: PhantomData, - } - } -} - -impl Layer for TaskHandlerLayer -where - T: 'static, - U: 'static, - S: Service + Clone + Send + Sync + 'static, - Req: From> + 'static, - S::Response: Into> + 'static, - S::Error: Into + 'static, - S::Future: Send + 'static, -{ - type Service = TaskHandler; - - #[inline] - fn layer(&self, inner: S) -> Self::Service { - TaskHandler::with_metrics(inner, self.metrics.clone()) - } -} - -#[cfg(test)] -mod test { - use tower::{service_fn, ServiceBuilder}; - - use crate::context::{TaskError, TaskRequest, TaskResponse}; - use crate::handler::{TaskHandler, TaskHandlerLayer}; - - async fn handle(request: TaskRequest) -> Result, TaskError> { - Ok(TaskResponse::new(request.into_inner())) - } - - #[test] - fn service_compose() -> Result<(), TaskError> { - let inner = service_fn(handle); - let _service = TaskHandler::new(inner); - Ok(()) - } - - #[test] - fn service_builder() -> Result<(), TaskError> { - let _service = ServiceBuilder::new() - .layer(TaskHandlerLayer::default()) - .service(service_fn(handle)); - Ok(()) - } -} diff --git a/crates/task/handler/retry.rs b/crates/task/handler/retry.rs deleted file mode 100644 index 643b76a..0000000 --- a/crates/task/handler/retry.rs +++ /dev/null @@ -1,72 +0,0 @@ -use replace_with::replace_with_or_abort; -use tower::retry::backoff::{Backoff, ExponentialBackoff, ExponentialBackoffMaker, MakeBackoff}; -use tower::retry::Policy; -use tower::BoxError; - -/// TODO. 
-#[derive(Debug, Clone)] -pub struct BackoffPolicy { - retries: u32, - backoff: B, -} - -impl BackoffPolicy { - /// Returns a new [`BackoffPolicy`]. - pub fn new(retries: u32, backoff: B) -> Self { - Self { retries, backoff } - } -} - -impl Default for BackoffPolicy { - fn default() -> Self { - let mut maker = ExponentialBackoffMaker::default(); - Self::new(3, maker.make_backoff()) - } -} - -impl Policy for BackoffPolicy -where - Req: 'static + Clone, - Resp: 'static, - B: Backoff, -{ - type Future = B::Future; - - fn retry(&mut self, _req: &mut Req, resp: &mut Result) -> Option { - match resp.as_mut() { - Ok(_) => return None, - Err(e) if self.retries == 0 => { - replace_with_or_abort(e, |e| Box::new(BackoffError::new(e))) - } - Err(_) => {} - } - - self.retries -= 1; - Some(self.backoff.next_backoff()) - } - - #[inline] - fn clone_request(&mut self, req: &Req) -> Option { - Some(req.clone()) - } -} - -#[derive(Debug, thiserror::Error)] -#[error("retry layer has failed: {inner}")] -#[must_use = "errors do nothing unless you use them"] -pub struct BackoffError { - inner: BoxError, -} - -impl BackoffError { - /// Returns a new [`BackoffError`]. - pub fn new(inner: BoxError) -> Self { - Self { inner } - } - - /// Returns the underlying boxed error. - #[inline] - pub fn into_inner(self) -> BoxError { - self.inner - } -} diff --git a/crates/task/lib.rs b/crates/task/lib.rs deleted file mode 100644 index 23786d7..0000000 --- a/crates/task/lib.rs +++ /dev/null @@ -1,76 +0,0 @@ -#![forbid(unsafe_code)] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc = include_str!("./README.md")] - -//! ```rust -//! use std::time::Duration; -//! use tower::{ServiceBuilder, service_fn}; -//! -//! use axiston_rt_task::context::{TaskRequest, TaskResponse, TaskResult}; -//! use axiston_rt_task::context::layers::TaskLayers; -//! use axiston_rt_task::context::policies::{RetryPolicy, TimeoutPolicy}; -//! use axiston_rt_task::handler::{TaskHandlerLayer, TaskHandler}; -//! 
use axiston_rt_task::routing::manifest::{TaskManifest, ServiceManifest}; -//! use axiston_rt_task::Router; -//! -//! async fn handler(request: TaskRequest) -> TaskResult> { -//! Ok(TaskResponse::new(request.into_inner())) -//! } -//! -//! #[tokio::main] -//! async fn main() -> TaskResult<()> { -//! let service_manifest = ServiceManifest::new("service"); -//! let task_manifest = TaskManifest::new("task"); -//! -//! let task_handler: TaskHandler = ServiceBuilder::new() -//! .layer(TaskHandlerLayer::default()) -//! .service(service_fn(handler)); -//! -//! let layers = TaskLayers::builder() -//! .with_retry_policy(RetryPolicy::linear(3, Duration::from_secs(2))) -//! .with_timeout_policy(TimeoutPolicy::retry(Duration::from_secs(12))) -//! .build()?; -//! -//! let router = Router::default() -//! .with_layers(layers) -//! .with_service(service_manifest) -//! .with_route(task_manifest, task_handler); -//! -//! let request = TaskRequest::builder("task", 5).build()?; -//! let response = router.route_task(request).await?; -//! assert_eq!(response.into_inner(), 5); -//! -//! Ok(()) -//! } -//! ``` - -use std::collections::HashMap; - -use crate::routing::index::{ServiceIndex, TaskIndex}; -use crate::routing::manifest::{ServiceManifest, TaskManifest}; -pub use crate::routing::Router; - -pub mod context; -pub mod handler; -pub mod routing; - -/// Lists all registered services and tasks. -/// -/// Also see [`Router::as_registry`]. -/// -/// [`Router::as_registry`]: routing::Router::as_registry -#[derive(Debug, Default)] -pub struct Registry { - /// List of all registered services. - pub services: HashMap, - /// List of all registered tasks. - pub tasks: HashMap, -} - -impl Registry { - /// Returns an empty [`Registry`]. - #[inline] - pub fn new() -> Self { - Self::default() - } -} diff --git a/crates/task/routing/index.rs b/crates/task/routing/index.rs deleted file mode 100644 index 4bd5d55..0000000 --- a/crates/task/routing/index.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! 
[`TaskIndex`] and [`ServiceIndex`]. - -use std::borrow::Cow; - -use derive_more::{Deref, DerefMut, From}; -use ecow::EcoString; - -/// Opaque and unique [`Service`] identifier. -/// -/// [`Service`]: crate::routing::manifest::ServiceManifest -#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut)] -#[must_use = "indexes do nothing unless you serialize them"] -pub struct ServiceIndex { - inner: EcoString, -} - -impl ServiceIndex { - /// Returns a new [`ServiceIndex`]. - #[inline] - pub fn new(inner: impl AsRef) -> Self { - let inner = EcoString::from(inner.as_ref()); - Self { inner } - } - - /// Returns the underlying index. - #[inline] - pub fn into_inner(self) -> EcoString { - self.inner.clone() - } -} - -/// Opaque and unique [`TaskHandler`] identifier. -/// -/// [`TaskHandler`]: crate::handler::TaskHandler -#[derive(Debug, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From)] -#[from(Cow<'static, str>, String, &'static str)] -#[must_use = "indexes do nothing unless you serialize them"] -pub struct TaskIndex { - inner: EcoString, -} - -impl TaskIndex { - /// Returns a new [`TaskIndex`]. - #[inline] - pub fn new(inner: impl AsRef) -> Self { - let inner = EcoString::from(inner.as_ref()); - Self { inner } - } - - /// Returns the underlying index. - #[inline] - pub fn into_inner(self) -> EcoString { - self.inner.clone() - } -} diff --git a/crates/task/routing/route.rs b/crates/task/routing/route.rs deleted file mode 100644 index ae35678..0000000 --- a/crates/task/routing/route.rs +++ /dev/null @@ -1,115 +0,0 @@ -use std::sync::Arc; -use std::task::{Context, Poll}; - -use tower::load::Load; -use tower::Service; - -use crate::context::{TaskError, TaskRequest, TaskResponse}; -use crate::handler::future::TaskFuture; -use crate::handler::metric::TaskMetrics; -use crate::handler::TaskHandler; -use crate::routing::manifest::TaskSchemaValidators; -use crate::routing::TaskManifest; - -/// Routing structure that wraps [`TaskHandler`] and req/resp validation. 
-#[must_use = "routes do nothing unless you use them"] -pub struct Route { - route_handler: Arc>, -} - -#[must_use = "routes do nothing unless you use them"] -struct RouteHandler { - task_handler: TaskHandler, - schema_validators: TaskSchemaValidators, - manifest: TaskManifest, -} - -impl Route { - /// Creates a new [`Route`]. - pub fn new( - task_handler: TaskHandler, - task_manifest: TaskManifest, - ) -> Result { - let schema_validators = task_manifest.create_schema_validators()?; - Ok(Self { - route_handler: Arc::new(RouteHandler { - task_handler, - schema_validators, - manifest: task_manifest, - }), - }) - } - - /// Returns the reference to the inner [`TaskHandler`]. - #[inline] - pub fn task_handler(&self) -> &TaskHandler { - &self.route_handler.task_handler - } - - /// Returns [`TaskMetrics`] of the inner [`TaskHandler`]. - #[inline] - pub fn task_handler_metrics(&self) -> TaskMetrics { - self.route_handler.task_handler.load() - } - - /// Returns the reference to the inner [`TaskManifest`]. 
- #[inline] - pub fn manifest(&self) -> &TaskManifest { - &self.route_handler.manifest - } -} - -impl Clone for Route { - fn clone(&self) -> Self { - Self { - route_handler: Arc::clone(&self.route_handler), - } - } -} - -impl Service> for Route -where - T: 'static + Send + Clone, - U: 'static + Send, -{ - type Response = TaskResponse; - type Error = TaskError; - type Future = TaskFuture; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - let mut handler = self.route_handler.task_handler.clone(); - handler.poll_ready(cx) - } - - fn call(&mut self, req: TaskRequest) -> Self::Future { - let this = self.clone(); - let fut = async move { - this.route_handler - .schema_validators - .validate_inputs(&req.inputs)?; - - let mut handler = this.route_handler.task_handler.clone(); - let response = handler.call(req).await; - - response - .and_then(|response| { - this.route_handler - .schema_validators - .validate_outputs(&response.outputs) - .and(Ok(response)) - }) - .map_err(|error| { - match this - .route_handler - .schema_validators - .validate_errors(error.values.as_ref()) - { - Ok(_) => error, - Err(v_error) => v_error, - } - }) - }; - - TaskFuture::new(fut) - } -} diff --git a/crates/jsvm/extension/route/ops.js b/docs/INSTALLATION.md similarity index 100% rename from crates/jsvm/extension/route/ops.js rename to docs/INSTALLATION.md diff --git a/docs/REQUIREMENTS.md b/docs/REQUIREMENTS.md new file mode 100644 index 0000000..e69de29 diff --git a/modules/assert/README.md b/modules/assert/README.md index 3b49c9d..ba40fc5 100644 --- a/modules/assert/README.md +++ b/modules/assert/README.md @@ -1,5 +1,13 @@ ### @axiston/assert +[![Build Status][action-badge]][action-url] +[![Crate Coverage][coverage-badge]][coverage-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[coverage-badge]: 
https://img.shields.io/codecov/c/github/axiston/runtime +[coverage-url]: https://app.codecov.io/gh/axiston/runtime + Lorem ipsum. Lorem ipsum. Lorem ipsum. #### Features diff --git a/modules/assert/deno.jsonc b/modules/assert/deno.jsonc index 8b49956..fd8c661 100644 --- a/modules/assert/deno.jsonc +++ b/modules/assert/deno.jsonc @@ -1,13 +1,13 @@ { - "name": "@axiston/assert", - "version": "0.1.0", - "exports": { - ".": "./mod.ts", - "./match": "./match.ts" - }, - "imports": { - "@std/assert": "jsr:@std/assert@^1.0.0", - "@std/internal": "jsr:@std/internal@^1.0.1", - "@std/text": "jsr:@std/text@^1.0.0" - } + "name": "@axiston/assert", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./match": "./match.ts" + }, + "imports": { + "@std/assert": "jsr:@std/assert@^1.0.0", + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } } diff --git a/modules/runtime/README.md b/modules/runtime/README.md index e0ebc4a..41499e2 100644 --- a/modules/runtime/README.md +++ b/modules/runtime/README.md @@ -1,5 +1,13 @@ ### @axiston/runtime +[![Build Status][action-badge]][action-url] +[![Crate Coverage][coverage-badge]][coverage-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[coverage-badge]: https://img.shields.io/codecov/c/github/axiston/runtime +[coverage-url]: https://app.codecov.io/gh/axiston/runtime + Lorem ipsum. Lorem ipsum. Lorem ipsum. 
#### Features diff --git a/modules/runtime/deno.jsonc b/modules/runtime/deno.jsonc index 112abce..3e32fc4 100644 --- a/modules/runtime/deno.jsonc +++ b/modules/runtime/deno.jsonc @@ -1,15 +1,15 @@ { - "name": "@axiston/runtime", - "version": "0.1.0", - "exports": { - ".": "./mod.ts", - "./lifecycle": "./lifecycle.ts", - "./request": "./request.ts", - "./response": "./response.ts" - }, - "imports": { - "@std/assert": "jsr:@std/assert@^1.0.0", - "@std/internal": "jsr:@std/internal@^1.0.1", - "@std/text": "jsr:@std/text@^1.0.0" - } + "name": "@axiston/runtime", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./lifecycle": "./lifecycle.ts", + "./request": "./request.ts", + "./response": "./response.ts" + }, + "imports": { + "@std/assert": "jsr:@std/assert@^1.0.0", + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } } diff --git a/modules/runtime/lifecycle.ts b/modules/runtime/lifecycle.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/runtime/lifecycle_test.ts b/modules/runtime/lifecycle_test.ts new file mode 100644 index 0000000..e69de29 diff --git a/modules/testing/README.md b/modules/testing/README.md index 021b0a3..461096b 100644 --- a/modules/testing/README.md +++ b/modules/testing/README.md @@ -1,5 +1,13 @@ ### @axiston/testing +[![Build Status][action-badge]][action-url] +[![Crate Coverage][coverage-badge]][coverage-url] + +[action-badge]: https://img.shields.io/github/actions/workflow/status/axiston/runtime/build.yaml +[action-url]: https://github.com/axiston/runtime/actions/workflows/build.yaml +[coverage-badge]: https://img.shields.io/codecov/c/github/axiston/runtime +[coverage-url]: https://app.codecov.io/gh/axiston/runtime + Lorem ipsum. Lorem ipsum. Lorem ipsum. 
#### Features diff --git a/modules/testing/deno.jsonc b/modules/testing/deno.jsonc index a8aae78..0cca5de 100644 --- a/modules/testing/deno.jsonc +++ b/modules/testing/deno.jsonc @@ -1,12 +1,12 @@ { - "name": "@axiston/testing", - "version": "0.1.0", - "exports": { - ".": "./mod.ts", - "./setup": "./setup.ts" - }, - "imports": { - "@std/internal": "jsr:@std/internal@^1.0.1", - "@std/text": "jsr:@std/text@^1.0.0" - } + "name": "@axiston/testing", + "version": "0.1.0", + "exports": { + ".": "./mod.ts", + "./setup": "./setup.ts" + }, + "imports": { + "@std/internal": "jsr:@std/internal@^1.0.1", + "@std/text": "jsr:@std/text@^1.0.0" + } }