From 5b4d354a98dcf5d765b42df1592432eb07ee3021 Mon Sep 17 00:00:00 2001
From: suttang
Date: Tue, 4 Jun 2019 10:49:10 +0900
Subject: [PATCH] Replace site URL blue-oil.org to blueoil.org

---
 README.md      |  2 +-
 package.json   |  8 ++++----
 src/index.html | 16 ++++++++--------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index 303667a..cffc823 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# blue-oil.org
+# blueoil.org
 
 ![CircleCI](https://circleci.com/gh/blue-oil/blue-oil.org/tree/master.svg?style=shield&&circle-token=bd2fcdbe8f614bcab7f4f6e25858c3d7f3114f7b)
 
diff --git a/package.json b/package.json
index fa600ed..f1d67fa 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
-  "name": "blue-oil.org",
+  "name": "blueoil.org",
   "version": "1.0.0",
-  "description": "The blue-oil.org website",
+  "description": "The blueoil.org website",
   "scripts": {
     "start": "npm run build",
     "build": "npm run build:html && npm run build:assets && npm run build:css && npm run build:js",
@@ -18,8 +18,8 @@
     "build:js-shape-overlays": "node-minify --compressor uglify-es --input 'src/assets/lib/ShapeOverlays/shape-overlays.js' --output 'dist/assets/lib/ShapeOverlays/shape-overlays.js'",
     "clean": "rimraf dist"
   },
-  "repository": "git@github.com:blue-oil/blue-oil.org.git",
-  "homepage": "https://blue-oil.org/",
+  "repository": "git@github.com:blue-oil/blueoil.org.git",
+  "homepage": "https://blueoil.org/",
   "private": true,
   "devDependencies": {
     "cpx": "^1.5.0",
diff --git a/src/index.html b/src/index.html
index 1694149..7723c63 100644
--- a/src/index.html
+++ b/src/index.html
@@ -20,9 +20,9 @@
@@ -32,13 +32,13 @@
@@ -59,9 +59,9 @@
@@ -84,7 +84,7 @@

Bring Deep Learning to small devices

An open source deep learning platform for low bit computation

- Get Started
+ Get Started
@@ -115,7 +115,7 @@

Key features

What is Blueoil?

Blueoil is a software stack dedicated to neural networks. It includes a special training method for quantization and original networks designed to be highly compatible with FPGA devices, so that a neural network can run at high speed on a low-power FPGA. New models can be trained easily just by preparing data, and the finished model can be converted with a single command into a binary that runs on FPGA or CPU devices.

- Get Started
+ Get Started
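
A quick way to see which references to the old domain remain after applying this patch is a repository-wide search. This is only an illustrative check, not part of the change itself; some hits are expected, since the CircleCI badge URL in README.md is left untouched by the diff and will still match.

  # List remaining occurrences of the old site URL in the tracked files.
  git grep -n "blue-oil\.org" -- README.md package.json src/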