Merge pull request #4 from AlexErrant/fsrs-browser
AlexErrant authored Apr 27, 2024
2 parents c973870 + fa38efe commit fff7b26
Showing 33 changed files with 892 additions and 470 deletions.
216 changes: 125 additions & 91 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -90,7 +90,7 @@ crossterm = "0.27"
futures-intrusive = "0.5"
text_placeholder = "0.5.0"
pollster = "0.3"
wgpu = "0.18.0"
wgpu = "0.19.4"

# Benchmarks and Burnbench
arboard = "3.3.2"
4 changes: 4 additions & 0 deletions backend-comparison/Cargo.toml
@@ -88,6 +88,10 @@ name = "custom-gelu"
path = "benches/custom_gelu.rs"
harness = false

[[bench]]
name = "autodiff"
harness = false

[[bin]]
name = "burnbench"
path = "src/bin/burnbench.rs"
81 changes: 81 additions & 0 deletions backend-comparison/benches/autodiff.rs
@@ -0,0 +1,81 @@
use backend_comparison::persistence::save;
use burn::{
module::Module,
nn,
tensor::{
backend::{AutodiffBackend, Backend},
Distribution, Tensor,
},
};
use burn_common::benchmark::{run_benchmark, Benchmark};

pub struct AutodiffOverheadBenchmark<B: AutodiffBackend> {
config: nn::LstmConfig,
lstm: nn::Lstm<B>,
device: B::Device,
}

impl<B: AutodiffBackend> Benchmark for AutodiffOverheadBenchmark<B> {
type Args = Tensor<B, 3>;

fn name(&self) -> String {
"autodiff_overhead".into()
}

fn shapes(&self) -> Vec<Vec<usize>> {
vec![]
}

fn execute(&self, input: Self::Args) {
for _ in 0..20 {
let input = input.clone().detach();
let mut cell = input.clone();
let lstm = self.lstm.clone().fork(&input.device());

for _ in 0..10 {
let (cells, _) = lstm.forward(input.clone(), None);
cell = cell + cells;
}

cell.backward();
}
}

fn prepare(&self) -> Self::Args {
let shape = [1, 3, self.config.d_hidden];
Tensor::random(shape, Distribution::Default, &self.device)
}

fn sync(&self) {
B::sync(&self.device)
}
}

#[allow(dead_code)]
fn bench<B: Backend>(
device: &B::Device,
feature_name: &str,
url: Option<&str>,
token: Option<&str>,
) {
let config = nn::LstmConfig::new(3, 3, true);
let lstm = config.init(device);
let benchmark = AutodiffOverheadBenchmark::<burn::backend::Autodiff<B>> {
lstm,
config,
device: device.clone(),
};

save::<B>(
vec![run_benchmark(benchmark)],
device,
feature_name,
url,
token,
)
.unwrap();
}

fn main() {
backend_comparison::bench_on_backend!();
}
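
A hedged usage sketch: with the `[[bench]]` entry registered in `backend-comparison/Cargo.toml`
above, the new benchmark should be runnable directly through cargo. The backend feature name below
is an assumption; substitute whichever backend feature the crate actually exposes.

```bash
# Run from the backend-comparison directory.
cargo bench --bench autodiff --features ndarray
```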
2 changes: 2 additions & 0 deletions backend-comparison/src/burnbenchapp/base.rs
@@ -98,6 +98,8 @@ enum BenchmarkValues {
MaxPool2d,
#[strum(to_string = "load-record")]
LoadRecord,
#[strum(to_string = "autodiff")]
Autodiff,
}

pub fn execute() {
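With the new `Autodiff` variant, the benchmark should also be selectable through the burnbench CLI.
A hedged invocation sketch, assuming the flag names follow the existing burnbench workflow and that
`wgpu` is an available backend name:

```bash
cargo run --release --bin burnbench -- run --benches autodiff --backends wgpu
```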
1 change: 1 addition & 0 deletions backend-comparison/src/persistence/system_info.rs
@@ -52,6 +52,7 @@ impl BenchmarkSystemInfo {
let instance = wgpu::Instance::default();
let adapters: Vec<wgpu::Adapter> = instance
.enumerate_adapters(burn_wgpu::AutoGraphicsApi::backend().into())
.into_iter()
.filter(|adapter| {
let info = adapter.get_info();
info.device_type == wgpu::DeviceType::DiscreteGpu
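A hedged note on this one-line change: in wgpu 0.19, `Instance::enumerate_adapters` returns a
`Vec<Adapter>` on native targets rather than an iterator, so an explicit `.into_iter()` is needed
before iterator adapters like `.filter`. A minimal standalone sketch of the pattern:

```rust
// Sketch only: assumes the wgpu 0.19 API, where enumerate_adapters
// returns Vec<wgpu::Adapter> instead of an iterator on native targets.
fn main() {
    let instance = wgpu::Instance::default();
    let discrete: Vec<wgpu::Adapter> = instance
        .enumerate_adapters(wgpu::Backends::all())
        .into_iter()
        .filter(|adapter| adapter.get_info().device_type == wgpu::DeviceType::DiscreteGpu)
        .collect();
    println!("found {} discrete GPU adapter(s)", discrete.len());
}
```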
1 change: 1 addition & 0 deletions burn-book/book.toml
@@ -5,6 +5,7 @@ authors = [
"Louis Fortier-Dubois",
"Dilshod Tadjibaev",
"Guillaume Lagrange",
"Sylvain Benner",
]
language = "en"
multilingual = false
16 changes: 13 additions & 3 deletions burn-book/src/basic-workflow/README.md
@@ -4,9 +4,19 @@ This guide will walk you through the process of creating a custom model built wi
train a simple convolutional neural network model on the MNIST dataset and prepare it for inference.

For clarity, we sometimes omit imports in our code snippets. For more details, please refer to the
corresponding code in the `examples/guide` [directory](https://github.com/tracel-ai/burn/tree/main/examples/guide). We
reproduce this example in a step-by-step fashion, from dataset creation to modeling and training in the following
sections.
corresponding code in the `examples/guide` [directory](https://github.com/tracel-ai/burn/tree/main/examples/guide).
We reproduce this example in a step-by-step fashion, from dataset creation to modeling and training
in the following sections. We recommend using the import-assist capabilities of your IDE or text
editor to automatically add the missing imports as you copy the code snippets into your project.

<div class="warning">

Be sure to check out the git branch corresponding to the version of Burn you are using to follow
this guide.

The current version of Burn is `0.13.1` and the corresponding branch to check out is `release/0.13`.
</div>

The code for this demo can be executed from Burn's base directory using the command:

```bash
69 changes: 10 additions & 59 deletions burn-book/src/basic-workflow/backend.md
@@ -7,76 +7,21 @@ entrypoint of our program, namely the `main` function defined in `src/main.rs`.
```rust , ignore
use burn::optim::AdamConfig;
use burn::backend::{Autodiff, Wgpu, wgpu::AutoGraphicsApi};
use guide::model::ModelConfig;
use crate::model::ModelConfig;

fn main() {
type MyBackend = Wgpu<AutoGraphicsApi, f32, i32>;
type MyAutodiffBackend = Autodiff<MyBackend>;

let device = burn::backend::wgpu::WgpuDevice::default();
guide::training::train::<MyAutodiffBackend>(
crate::training::train::<MyAutodiffBackend>(
"/tmp/guide",
guide::training::TrainingConfig::new(ModelConfig::new(10, 512), AdamConfig::new()),
crate::training::TrainingConfig::new(ModelConfig::new(10, 512), AdamConfig::new()),
device,
);
}
```
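
A hedged reading of the `guide::` → `crate::` change above: the guide now lives in a binary crate
whose modules are declared directly in `src/main.rs`, so sibling modules are reached through
`crate::` paths instead of through a `guide` library crate. The implied layout (hypothetical,
reusing the `my-first-burn-model` name from elsewhere in the book) would be:

```
my-first-burn-model
├── Cargo.toml
└── src
    ├── data.rs
    ├── inference.rs
    ├── main.rs
    ├── model.rs
    └── training.rs
```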

<details>
<summary><strong>🦀 Packages, Crates and Modules</strong></summary>

You might be wondering why we use the `guide` prefix to bring the different modules we just
implemented into scope. Instead of including the code in the current guide in a single file, we
separated it into different files which group related code into _modules_. The `guide` is simply the
name we gave to our _crate_, which contains the different files. If you named your project crate
as `my-first-burn-model`,
you can equivalently replace all usages of `guide` above with `my-first-burn-model`. Below is a brief explanation of the
different parts of the Rust module system.

A **package** is a bundle of one or more crates that provides a set of functionality. A package
contains a `Cargo.toml` file that describes how to build those crates. Burn is a package.

A **crate** is a compilation unit in Rust. It could be a single file, but it is often easier to
split up crates into multiple _modules_ and possibly multiple files. A crate can come in one of two
forms: a binary crate or a library crate. When compiling a crate, the compiler first looks in the
crate root file (usually `src/lib.rs` for a library crate or `src/main.rs` for a binary crate). Any
module declared in the crate root file will be inserted in the crate for compilation. For this demo example, we will
define a library crate where all the individual modules (model, data, training, etc.) are listed inside `src/lib.rs` as
follows:

```
pub mod data;
pub mod inference;
pub mod model;
pub mod training;
```

A **module** lets us organize code within a crate for readability and easy reuse. Modules also allow
us to control the _privacy_ of items. The `pub` keyword used above, for example, is employed to make a module publicly
available inside the crate.

The entry point of our program is the `main` function, defined in the `examples/guide.rs` file. The file structure
for this example is illustrated below:

```
guide
├── Cargo.toml
├── examples
│ └── guide.rs
└── src
├── data.rs
├── inference.rs
├── lib.rs
├── model.rs
└── training.rs
```

The source for this guide can be found in our
[GitHub repository](https://github.com/tracel-ai/burn/tree/main/examples/guide) which can be used to run this basic
workflow example end-to-end.\

</details><br>

In this example, we use the `Wgpu` backend, which is compatible with any operating system and will
use the GPU. For other options, see the Burn README. This backend type takes the graphics API, the
float type and the int type as generic arguments that will be used during the training. By leaving
@@ -89,6 +34,12 @@ the model (the number of digit classes is 10 and the hidden dimension is 512), t
configuration which in our case will be the default Adam configuration, and the device which can be
obtained from the backend.

When running the example, we can see the training progression through a basic CLI dashboard:
You can now train your freshly created model with the command:

```console
cargo run --release
```

When running the example, you should see the training progression through a basic CLI dashboard:

<img alt="Training output dashboard" src="./training-output.png">
15 changes: 15 additions & 0 deletions burn-book/src/basic-workflow/inference.md
@@ -35,3 +35,18 @@ Finally we can init the model with the configuration and the record. For simplic
same batcher used during the training to pass from a MnistItem to a tensor.

By running the infer function, you should see the predictions of your model!

Add the call to `infer` to the `main.rs` file after the `train` function call:

```rust , ignore
crate::inference::infer::<MyBackend>(
artifact_dir,
device,
burn::data::dataset::vision::MnistDataset::test()
.get(42)
.unwrap(),
);
```

The number `42` is the index of the image in the MNIST dataset. You can explore and verify the
dataset images using this [MNIST viewer](https://observablehq.com/@davidalber/mnist-viewer).
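
To spot-check a few more predictions, the same call can be wrapped in a loop. A minimal sketch,
assuming the device type implements `Clone` and that the MNIST test split holds 10,000 images
(valid indices `0..=9999`):

```rust , ignore
for index in [0, 42, 9999] {
    crate::inference::infer::<MyBackend>(
        artifact_dir,
        device.clone(),
        burn::data::dataset::vision::MnistDataset::test()
            .get(index)
            .unwrap(),
    );
}
```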
9 changes: 9 additions & 0 deletions burn-book/src/basic-workflow/model.md
@@ -11,6 +11,7 @@ As [mentioned previously](../getting-started.md#creating-a-burn-application), th
your `my-first-burn-model` project directory with a `Cargo.toml` and a `src/main.rs` file.

In the `Cargo.toml` file, add the `burn` dependency with `train`, `wgpu` and `vision` features.
Then run `cargo build` to build the project and download all the dependencies.

```toml
[package]
@@ -158,6 +159,14 @@ There are two major things going on in this code sample.

</details><br>

Note that each time you create a new file in the `src` directory, you also need to explicitly
declare this module in the `main.rs` file. For instance, after creating `model.rs`, you need to add
the following at the top of the main file:

```rust , ignore
mod model;
```
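
As the guide adds more files, `main.rs` accumulates one declaration per module. By the end, the set
of declarations would plausibly mirror the modules listed elsewhere in this book:

```rust , ignore
mod data;
mod inference;
mod model;
mod training;
```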

Next, we need to instantiate the model for training.

```rust , ignore
80 changes: 76 additions & 4 deletions burn-book/src/getting-started.md
@@ -199,14 +199,86 @@ For the sake of simplicity, the subsequent chapters of this book will all use th

</div>

## Running examples
## Explore examples

Many additional Burn examples available in the
[examples](https://github.com/tracel-ai/burn/tree/main/examples) directory. To run one, please refer
to the example's README.md for the specific command to execute.
In the [next chapter](./basic-workflow) you'll have the opportunity to implement the whole Burn
`guide` example yourself in a step-by-step manner.

Many additional Burn examples are available in the
[examples](https://github.com/tracel-ai/burn/tree/main/examples) directory. Burn examples are
organized as library crates with one or more examples that are executable binaries. An example
can then be executed with the following cargo command from the root of the Burn repository:

```bash
cargo run --example <example name>
```
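
For instance, the `guide` example that the next chapter builds can be run this way, assuming it is
wired up as an executable example under `examples/guide`:

```bash
cargo run --example guide
```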

To learn more about crates and examples, read the Rust section below.

<details>
<summary><strong>🦀 About Rust crates</strong></summary>

Each Burn example is a **package** in its own subdirectory of the `examples` directory. A package
is composed of one or more **crates**.

A package is a bundle of one or more crates that provides a set of functionality. A package
contains a `Cargo.toml` file that describes how to build those crates.

A crate is a compilation unit in Rust. It could be a single file, but it is often easier to
split up crates into multiple **modules**.

A module lets us organize code within a crate for readability and easy reuse. Modules also allow
us to control the _privacy_ of items. For instance, the `pub(crate)` keyword makes a module publicly
available inside the crate only. In the snippet below, four modules are declared: two are public and
visible to the users of the crate, one is public inside the crate only (crate users cannot see it),
and the last one, with no keyword, is private. These modules can be single files or directories with
a `mod.rs` file inside.

```rust, ignore
pub mod data;
pub mod inference;
pub(crate) mod model;
mod training;
```

A crate can come in one of two forms: a **binary crate** or a **library crate**. When compiling a crate,
the compiler first looks in the crate root file (`src/lib.rs` for a library crate and `src/main.rs`
for a binary crate). Any module declared in the crate root file will be inserted in the crate for
compilation.

All Burn examples are library crates, and they can contain one or more executable examples that
use the library. Some Burn examples even use the library crates of other examples.

The examples are standalone files under the `examples` directory. Each file produces an executable
of the same name, and each example can then be executed with `cargo run --example <executable name>`.

Below is the file tree of a typical Burn example package:

```
examples/burn-example
├── Cargo.toml
├── examples
│ ├── example1.rs
│ ├── example2.rs
│ └── ...
└── src
├── lib.rs
├── module1.rs
├── module2.rs
└── ...
```

</details><br>

For more information on each example, see its respective `README.md` file.

<div class="warning">

Note that some examples use the
[`datasets` library by HuggingFace](https://huggingface.co/docs/datasets/index) to download the
datasets required in the examples. This is a Python library, which means that you will need to
install Python before running these examples. This requirement will be clearly indicated in the
example's README when applicable.

</div>
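
When an example's README calls for it, installing the library is a single command. A hedged sketch,
assuming `pip` points at a Python 3 environment:

```bash
pip install datasets
```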

1 change: 1 addition & 0 deletions crates/burn-autodiff/Cargo.toml
@@ -14,6 +14,7 @@ version.workspace = true
default = ["std"]
export_tests = ["burn-tensor-testgen"]
std = []
async = [] # Requires std

[dependencies]
burn-common = { path = "../burn-common", version = "0.13.1" }
3 changes: 2 additions & 1 deletion crates/burn-autodiff/src/graph/base.rs
@@ -6,11 +6,12 @@ use std::collections::HashMap;
pub trait Step: Send + std::fmt::Debug {
/// Executes the step and consumes it.
fn step(self: Box<Self>, grads: &mut Gradients, checkpointer: &mut Checkpointer);
/// Depth of the operation relative to the first node added to a graph.
fn depth(&self) -> usize;
/// The node associated to the step.
fn node(&self) -> NodeID;
/// The parents of the node associated to the step.
fn parents(&self) -> Vec<NodeID>;
fn order(&self) -> usize;
}

pub type StepBoxed = Box<dyn Step>;
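A hedged sketch of why a `depth` accessor is useful during the backward pass: steps can be ordered
by their distance from the first node of the graph so that gradients propagate in reverse
topological order. This standalone toy is not Burn's actual implementation, only an illustration:

```rust
#[derive(Debug)]
struct RecordedStep {
    node: usize,
    depth: usize,
}

fn backward(mut steps: Vec<RecordedStep>) {
    // Deeper steps were recorded later in the forward pass, so drain them first.
    steps.sort_by(|a, b| b.depth.cmp(&a.depth));
    for step in steps {
        println!("propagate gradients for node {} (depth {})", step.node, step.depth);
    }
}

fn main() {
    backward(vec![
        RecordedStep { node: 0, depth: 0 },
        RecordedStep { node: 2, depth: 2 },
        RecordedStep { node: 1, depth: 1 },
    ]);
}
```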