Skip to content

Commit

Permalink
Merge pull request #1 from hanayik/get-working-with-module
Browse files Browse the repository at this point in the history
refactor and make things run
  • Loading branch information
neurolabusc authored Jul 15, 2024
2 parents 739896b + 6d36dbd commit 8563bd0
Show file tree
Hide file tree
Showing 6 changed files with 140 additions and 63 deletions.
43 changes: 43 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
.DS_Store
node_modules
/build
/dist
distForTests
tests-out
dist_intermediate
devdocs
/_md_docs
/docs/niivue*.js
/demos/niivue*.js
/demos/dist
/tests/niivue*.js
niivue.es.js
niivue.umd.js
/downloads
__diff_output__
/coverage
# local env files
.env.local
.env.*.local

# Log files
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*

# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.DS_Store
/test-results/
/playwright-report/
/blob-report/
/playwright/.cache/
/playwright/e2e/index.js
/tests/index.js
9 changes: 8 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,15 @@ You can serve a hot-reloadable web page that allows you to interactively modify

```bash
git clone git@github.com:neurolabusc/niivue-onnx.git
cd niivue-neglect
cd niivue-onnx
npm install
npm run dev
```

#### to build and serve the built version

```bash
npm run build
npx http-server dist/
```

2 changes: 1 addition & 1 deletion index.html
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="stylesheet" href="./niivue.css" />
<title>Niivue Neglect Predictions</title>
<title>Niivue ONNX</title>
</head>

<body>
Expand Down
138 changes: 82 additions & 56 deletions main.js
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import { Niivue } from '@niivue/niivue'
import * as ort from 'onnxruntime-web';
// IMPORTANT: we need to import this specific file.
import * as ort from "./node_modules/onnxruntime-web/dist/ort.all.mjs"
console.log(ort);
async function main() {
aboutBtn.onclick = function () {
let url = "https://github.com/axinging/mlmodel-convension-demo/blob/main/onnx/onnx-brainchop.html"
Expand All @@ -17,79 +19,103 @@ async function main() {
const nv1 = new Niivue(defaults)
nv1.attachToCanvas(gl1)
await nv1.loadVolumes([{ url: './t1_crop.nii.gz' }])
// FIXME: Do we want to conform?
const conformed = await nv1.conform(
nv1.volumes[0],
false,
true,
true
)
nv1.removeVolume(nv1.volumes[0])
nv1.addVolume(conformed)

let feedsInfo = [];
function getFeedInfo(feed, type, data, dims) {
const warmupTimes = 0;
const runTimes = 1;
for (let i = 0; i < warmupTimes + runTimes; i++) {
let typedArray;
let typeBytes;
if (type === 'bool') {
data = [data];
dims = [1];
typeBytes = 1;
} else if (type === 'int8') {
typedArray = Int8Array;
} else if (type === 'float16') {
typedArray = Uint16Array;
} else if (type === 'int32') {
typedArray = Int32Array;
} else if (type === 'uint32') {
typedArray = Uint32Array;
} else if (type === 'float32') {
typedArray = Float32Array;
} else if (type === 'int64') {
typedArray = BigInt64Array;
}
if (typeBytes === undefined) {
typeBytes = typedArray.BYTES_PER_ELEMENT;
}
const warmupTimes = 0;
const runTimes = 1;
for (let i = 0; i < warmupTimes + runTimes; i++) {
let typedArray;
let typeBytes;
if (type === 'bool') {
data = [data];
dims = [1];
typeBytes = 1;
} else if (type === 'int8') {
typedArray = Int8Array;
} else if (type === 'float16') {
typedArray = Uint16Array;
} else if (type === 'int32') {
typedArray = Int32Array;
} else if (type === 'uint32') {
typedArray = Uint32Array;
} else if (type === 'float32') {
typedArray = Float32Array;
} else if (type === 'int64') {
typedArray = BigInt64Array;
}
if (typeBytes === undefined) {
typeBytes = typedArray.BYTES_PER_ELEMENT;
}

let size, _data;
if (Array.isArray(data) || ArrayBuffer.isView(data)) {
size = data.length;
_data = data;
} else {
size = dims.reduce((a, b) => a * b);
if (data === 'random') {
_data = typedArray.from({ length: size }, () => getRandom(type));
} else {
_data = typedArray.from({ length: size }, () => data);
}
}
let size, _data;
if (Array.isArray(data) || ArrayBuffer.isView(data)) {
size = data.length;
_data = data;
} else {
size = dims.reduce((a, b) => a * b);
if (data === 'random') {
_data = typedArray.from({ length: size }, () => getRandom(type));
} else {
_data = typedArray.from({ length: size }, () => data);
}
}

if (i > feedsInfo.length - 1) {
feedsInfo.push(new Map());
}
feedsInfo[i].set(feed, [type, _data, dims, Math.ceil(size * typeBytes / 16) * 16]);
if (i > feedsInfo.length - 1) {
feedsInfo.push(new Map());
}
return feedsInfo;
feedsInfo[i].set(feed, [type, _data, dims, Math.ceil(size * typeBytes / 16) * 16]);
}
return feedsInfo;
}
const option = {
executionProviders: [
{
//name: 'webgpu',
name: 'webgl',
},
],
graphOptimizationLevel: 'extended',
optimizedModelFilepath: 'opt.onnx'
executionProviders: [
{
name: 'webgpu',
},
],
graphOptimizationLevel: 'extended',
optimizedModelFilepath: 'opt.onnx'
};

const session = await ort.InferenceSession.create('./model_5_channels.onnx', option);
const shape = [1, 1, 256, 256, 256];
const temp = getFeedInfo("input.1", "float32", 0, shape);
// FIXME: Do we want to use a real image for inference?
const imgData = nv1.volumes[0].img;
const expectedLength = shape.reduce((a, b) => a * b);
// FIXME: Do we need this?
if (imgData.length !== expectedLength) {
throw new Error(`imgData length (${imgData.length}) does not match expected tensor length (${expectedLength})`);
}

const temp = getFeedInfo("input.1", "float32", imgData, shape);
let dataA = temp[0].get('input.1')[1];
// let dataTemp = await loadJSON("./onnx-branchchop-input64.jsonc");
// dataA = dataTemp['data'];
const tensorA = new ort.Tensor('float32', dataA, shape);

const feeds = { "input.1": tensorA };
// feed inputs and run
console.log("before run");
const results = await session.run(feeds);
console.log("after run");
console.log(results);
console.log("after run")
// FIXME: is this really the output data? It doesn't make sense when rendered,
// but then again, maybe the input was wrong?
const outData = results[39].data
const newImg = nv1.cloneVolume(0);
newImg.img = outData
// Add the output to niivue
nv1.addVolume(newImg)
nv1.setColormap(newImg.id, "red")
nv1.setOpacity(1, 0.5)
}

main()
3 changes: 1 addition & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 5 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,14 @@
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"build": "vite build && npm run copyMJS && npm run copyWASM",
"copyMJS": "cp ./node_modules/onnxruntime-web/dist/*.mjs ./dist/assets/",
"copyWASM": "cp ./node_modules/onnxruntime-web/dist/*.wasm ./dist/assets/",
"preview": "vite preview"
},
"dependencies": {
"@niivue/niivue":"^0.43.3",
"onnxruntime-web": "1.19.0-dev.20240713-281ed8c12d"
"@niivue/niivue": "^0.43.3",
"onnxruntime-web": "^1.19.0-dev.20240713-281ed8c12d"
},
"devDependencies": {
"vite": "^5.2.0"
Expand Down

0 comments on commit 8563bd0

Please sign in to comment.