Skip to content

Commit 35ac905

Browse files
authored
Merge branch 'mlcommons:master' into master
2 parents 3b597ba + b9f22d6 commit 35ac905

34 files changed

+4647
-6
lines changed

.github/workflows/format.yml

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ name: "Code formatting"
33
on:
44
push:
55
branches:
6-
- "**"
6+
- "**"
77

88
env:
99
python_version: "3.9"
@@ -12,16 +12,25 @@ jobs:
1212
format-code:
1313
runs-on: ubuntu-latest
1414
steps:
15-
- uses: actions/checkout@v4
15+
- name: Retrieve secrets from Keeper
16+
id: ksecrets
17+
uses: Keeper-Security/ksm-action@master
18+
with:
19+
keeper-secret-config: ${{ secrets.KSM_CONFIG }}
20+
secrets: |-
21+
v2h4jKiZlJywDSoKzRMnRw/field/Access Token > env:PAT # Fetch PAT and store in environment variable
22+
23+
- name: Checkout code
24+
uses: actions/checkout@v4
1625
with:
1726
fetch-depth: 0
18-
ssh-key: ${{ secrets.DEPLOY_KEY }}
27+
1928
- name: Set up Python ${{ env.python_version }}
2029
uses: actions/setup-python@v3
2130
with:
2231
python-version: ${{ env.python_version }}
2332

24-
- name: Format modified python files
33+
- name: Format modified Python files
2534
env:
2635
filter: ${{ github.event.before }}
2736
run: |
@@ -49,12 +58,15 @@ jobs:
4958
done
5059
5160
- name: Commit and push changes
61+
env:
62+
PAT: ${{ env.PAT }} # Use PAT fetched from Keeper
5263
run: |
5364
HAS_CHANGES=$(git diff --staged --name-only)
5465
if [ ${#HAS_CHANGES} -gt 0 ]; then
5566
git config --global user.name mlcommons-bot
5667
git config --global user.email "[email protected]"
5768
# Commit changes
5869
git commit -m '[Automated Commit] Format Codebase'
59-
git push
60-
fi
70+
# Use the PAT to push changes
71+
git push https://x-access-token:${PAT}@github.com/${{ github.repository }} HEAD:${{ github.ref_name }}
72+
fi
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
## Reference implementation for automotive 3D detection benchmark
2+
3+
## TODO: Instructions for dataset download after it is uploaded somewhere appropriate
4+
5+
## TODO: Instructions for checkpoints downloads after it is uploaded somewhere appropriate
6+
7+
## Running with docker
8+
```
9+
docker build -t auto_inference -f dockerfile.gpu .
10+
11+
docker run --gpus=all -it -v <directory to inference repo>/inference/:/inference -v <directory to waymo dataset>/waymo:/waymo --rm auto_inference
12+
13+
cd /inference/automotive/3d-object-detection
14+
python main.py --dataset waymo --dataset-path /waymo/kitti_format/ --lidar-path <checkpoint_path>/pp_ep36.pth --segmentor-path <checkpoint_path>/best_deeplabv3plus_resnet50_waymo_os16.pth --mlperf_conf /inference/mlperf.conf
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
"""
2+
Tool to calculate accuracy for loadgen accuracy output found in mlperf_log_accuracy.json
3+
We assume that loadgen's query index is in the same order as
4+
the images in coco's annotations/instances_val2017.json.
5+
"""
6+
7+
from __future__ import division
8+
from __future__ import print_function
9+
from __future__ import unicode_literals
10+
11+
import argparse
12+
import json
13+
import os
14+
15+
import numpy as np
16+
from waymo import Waymo
17+
from tools.evaluate import do_eval
18+
# pylint: disable=missing-docstring
19+
CLASSES = Waymo.CLASSES
20+
LABEL2CLASSES = {v: k for k, v in CLASSES.items()}
21+
22+
23+
def get_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    # Required inputs: the loadgen accuracy log and the dataset root.
    parser.add_argument("--mlperf-accuracy-file", required=True,
                        help="path to mlperf_log_accuracy.json")
    parser.add_argument("--waymo-dir", required=True,
                        help="waymo dataset directory")
    # Optional flags controlling output and verbosity.
    parser.add_argument("--verbose", action="store_true",
                        help="verbose messages")
    parser.add_argument("--output-file", default="openimages-results.json",
                        help="path to output file")
    parser.add_argument("--use-inv-map", action="store_true",
                        help="use inverse label map")
    return parser.parse_args()
48+
49+
50+
def main():
    """Score a loadgen accuracy log against the Waymo validation set.

    Reads ``mlperf_log_accuracy.json``, decodes the hex-encoded float32
    detection records, writes the reconstructed detections to
    ``--output-file`` as JSON, then runs ``tools.evaluate.do_eval`` and
    prints the resulting mAP statistics.
    """
    args = get_args()

    with open(args.mlperf_accuracy_file, "r") as f:
        results = json.load(f)

    detections = {}
    image_ids = set()
    seen = set()
    no_results = 0

    val_dataset = Waymo(
        data_root=args.waymo_dir,
        split='val',
        painted=True,
        cam_sync=False)

    for j in results:
        idx = j['qsl_idx']
        # de-dupe in case loadgen sends the same image multiple times
        if idx in seen:
            continue
        seen.add(idx)

        # Reconstruct from the mlperf accuracy log. Each detection is a
        # fixed-size record of 14 float32 values:
        #   dimensions[3], location[3], rotation_y, bbox[4], label, score,
        #   image index (an index into the validation set, not a raw image id)
        data = np.frombuffer(bytes.fromhex(j['data']), np.float32)
        if len(data) == 0:
            # This query produced no detections. Previously the counter was
            # never incremented, so the verbose summary always reported 0.
            no_results += 1
            continue

        for i in range(0, len(data), 14):
            dimension = [float(x) for x in data[i:i + 3]]
            location = [float(x) for x in data[i + 3:i + 6]]
            rotation_y = float(data[i + 6])
            bbox = [float(x) for x in data[i + 7:i + 11]]
            label = int(data[i + 11])
            score = float(data[i + 12])
            image_idx = int(data[i + 13])
            if image_idx not in detections:
                detections[image_idx] = {
                    'name': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'bbox': [],
                    'score': []
                }

            detections[image_idx]['name'].append(LABEL2CLASSES[label])
            detections[image_idx]['dimensions'].append(dimension)
            detections[image_idx]['location'].append(location)
            detections[image_idx]['rotation_y'].append(rotation_y)
            detections[image_idx]['bbox'].append(bbox)
            detections[image_idx]['score'].append(score)
            image_ids.add(image_idx)

    # Persist the raw reconstructed detections for later inspection.
    with open(args.output_file, "w") as fp:
        json.dump(detections, fp, sort_keys=True, indent=4)
    # do_eval expects numpy arrays, not Python lists.
    format_results = {}
    for key in detections.keys():
        format_results[key] = {k: np.array(v)
                               for k, v in detections[key].items()}
    map_stats = do_eval(
        format_results,
        val_dataset.data_infos,
        CLASSES,
        cam_sync=False)

    print(map_stats)
    if args.verbose:
        print("found {} results".format(len(results)))
        print("found {} images".format(len(image_ids)))
        print("found {} images with no results".format(no_results))
        print("ignored {} dupes".format(len(results) - len(seen)))
125+
126+
127+
if __name__ == "__main__":
128+
main()
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
"""
2+
abstract backend class
3+
"""
4+
5+
6+
class Backend:
    """Abstract base class defining the backend interface.

    Concrete backends override :meth:`version`, :meth:`name`, :meth:`load`
    and :meth:`predict`; each base implementation raises
    ``NotImplementedError`` so an incomplete subclass fails loudly.
    """

    def __init__(self):
        # Input/output tensor names, filled in by concrete backends at load().
        self.inputs = []
        self.outputs = []

    def version(self):
        """Return the version string of the underlying framework."""
        raise NotImplementedError("Backend:version")

    def name(self):
        """Return a short, human-readable backend name."""
        raise NotImplementedError("Backend:name")

    def load(self, model_path, inputs=None, outputs=None):
        """Load the model at *model_path*, optionally binding tensor names."""
        raise NotImplementedError("Backend:load")

    def predict(self, feed):
        """Run one inference over *feed* and return the outputs."""
        raise NotImplementedError("Backend:predict")
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import torch
2+
import backend
3+
4+
5+
class BackendDebug(backend.Backend):
    """Debug backend: accepts prompts and always returns an empty image list.

    Useful for exercising the harness and dataset plumbing without running
    a real model.
    """

    def __init__(self, image_size=None, **kwargs):
        """Create the debug backend.

        Args:
            image_size: [C, H, W] image shape; defaults to [3, 1024, 1024].
                A ``None`` sentinel replaces the previous mutable default
                list, which was shared across all calls.
            **kwargs: accepted and ignored for interface compatibility.
        """
        super(BackendDebug, self).__init__()
        self.image_size = [3, 1024, 1024] if image_size is None else image_size

    def version(self):
        """Return the torch version string."""
        return torch.__version__

    def name(self):
        """Return the SUT name."""
        return "debug-SUT"

    def image_format(self):
        """Return the image memory layout."""
        return "NCHW"

    def load(self):
        """Nothing to load; return self so calls can be chained."""
        return self

    def predict(self, prompts):
        """Return an empty list of images regardless of *prompts*."""
        images = []
        return images

0 commit comments

Comments
 (0)