Skip to content

Commit

Permalink
Merge branch 'mlcommons:master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
arjunsuresh authored Jan 6, 2025
2 parents 3b597ba + b9f22d6 commit 35ac905
Show file tree
Hide file tree
Showing 34 changed files with 4,647 additions and 6 deletions.
24 changes: 18 additions & 6 deletions .github/workflows/format.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ name: "Code formatting"
on:
push:
branches:
- "**"
- "**"

env:
python_version: "3.9"
Expand All @@ -12,16 +12,25 @@ jobs:
format-code:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Retrieve secrets from Keeper
id: ksecrets
uses: Keeper-Security/ksm-action@master
with:
keeper-secret-config: ${{ secrets.KSM_CONFIG }}
secrets: |-
v2h4jKiZlJywDSoKzRMnRw/field/Access Token > env:PAT # Fetch PAT and store in environment variable
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
ssh-key: ${{ secrets.DEPLOY_KEY }}

- name: Set up Python ${{ env.python_version }}
uses: actions/setup-python@v3
with:
python-version: ${{ env.python_version }}

- name: Format modified python files
- name: Format modified Python files
env:
filter: ${{ github.event.before }}
run: |
Expand Down Expand Up @@ -49,12 +58,15 @@ jobs:
done
- name: Commit and push changes
env:
PAT: ${{ env.PAT }} # Use PAT fetched from Keeper
run: |
HAS_CHANGES=$(git diff --staged --name-only)
if [ ${#HAS_CHANGES} -gt 0 ]; then
git config --global user.name mlcommons-bot
git config --global user.email "[email protected]"
# Commit changes
git commit -m '[Automated Commit] Format Codebase'
git push
fi
# Use the PAT to push changes
git push https://x-access-token:${PAT}@github.com/${{ github.repository }} HEAD:${{ github.ref_name }}
fi
14 changes: 14 additions & 0 deletions automotive/3d-object-detection/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
## Reference implementation for the automotive 3D detection benchmark

## TODO: Instructions for dataset download after it is uploaded somewhere appropriate

## TODO: Instructions for checkpoints downloads after it is uploaded somewhere appropriate

## Running with docker
```
docker build -t auto_inference -f dockerfile.gpu .
docker run --gpus=all -it -v <directory to inference repo>/inference/:/inference -v <directory to waymo dataset>/waymo:/waymo --rm auto_inference
cd /inference/automotive/3d-object-detection
python main.py --dataset waymo --dataset-path /waymo/kitti_format/ --lidar-path <checkpoint_path>/pp_ep36.pth --segmentor-path <checkpoint_path>/best_deeplabv3plus_resnet50_waymo_os16.pth --mlperf_conf /inference/mlperf.conf
128 changes: 128 additions & 0 deletions automotive/3d-object-detection/accuracy_waymo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
"""
Tool to calculate accuracy for loadgen accuracy output found in mlperf_log_accuracy.json
We assume that loadgen's query index is in the same order as
the samples in the Waymo validation set.
"""

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import json
import os

import numpy as np
from waymo import Waymo
from tools.evaluate import do_eval
# pylint: disable=missing-docstring
CLASSES = Waymo.CLASSES
LABEL2CLASSES = {v: k for k, v in CLASSES.items()}


def get_args():
    """Parse and return the command-line arguments for the accuracy tool."""
    p = argparse.ArgumentParser()
    p.add_argument(
        "--mlperf-accuracy-file",
        required=True,
        help="path to mlperf_log_accuracy.json")
    p.add_argument(
        "--waymo-dir",
        required=True,
        help="waymo dataset directory")
    p.add_argument(
        "--verbose",
        action="store_true",
        help="verbose messages")
    p.add_argument(
        "--output-file",
        default="openimages-results.json",
        help="path to output file")
    p.add_argument(
        "--use-inv-map",
        action="store_true",
        help="use inverse label map")
    return p.parse_args()


def main():
    """Compute Waymo 3D-detection accuracy from a loadgen accuracy log.

    Reads mlperf_log_accuracy.json, reconstructs per-image detections from
    the hex-encoded float32 records written by the benchmark, dumps them to
    ``args.output_file``, and prints the mAP statistics produced by the
    Waymo evaluator (``do_eval``).
    """
    args = get_args()

    with open(args.mlperf_accuracy_file, "r") as f:
        results = json.load(f)

    detections = {}
    image_ids = set()
    seen = set()
    no_results = 0

    val_dataset = Waymo(
        data_root=args.waymo_dir,
        split='val',
        painted=True,
        cam_sync=False)

    for j in results:
        idx = j['qsl_idx']
        # de-dupe in case loadgen sends the same image multiple times
        if idx in seen:
            continue
        seen.add(idx)

        # Reconstruct from the mlperf accuracy log. Each detection is a
        # record of 14 float32 values:
        #   dimensions[3], location[3], rotation_y, bbox[4], label, score,
        #   image_idx
        data = np.frombuffer(bytes.fromhex(j['data']), np.float32)
        # Fix: previously no_results was never incremented, so the verbose
        # summary always reported 0 queries with no results.
        if len(data) == 0:
            no_results += 1
            continue

        for i in range(0, len(data), 14):
            dimension = [float(x) for x in data[i:i + 3]]
            location = [float(x) for x in data[i + 3:i + 6]]
            rotation_y = float(data[i + 6])
            bbox = [float(x) for x in data[i + 7:i + 11]]
            label = int(data[i + 11])
            score = float(data[i + 12])
            image_idx = int(data[i + 13])
            if image_idx not in detections:
                detections[image_idx] = {
                    'name': [],
                    'dimensions': [],
                    'location': [],
                    'rotation_y': [],
                    'bbox': [],
                    'score': []
                }

            detections[image_idx]['name'].append(LABEL2CLASSES[label])
            detections[image_idx]['dimensions'].append(dimension)
            detections[image_idx]['location'].append(location)
            detections[image_idx]['rotation_y'].append(rotation_y)
            detections[image_idx]['bbox'].append(bbox)
            detections[image_idx]['score'].append(score)
            image_ids.add(image_idx)

    # Dump the raw reconstructed detections for inspection/debugging.
    with open(args.output_file, "w") as fp:
        json.dump(detections, fp, sort_keys=True, indent=4)
    # The evaluator expects numpy arrays per field.
    format_results = {}
    for key in detections.keys():
        format_results[key] = {k: np.array(v)
                               for k, v in detections[key].items()}
    map_stats = do_eval(
        format_results,
        val_dataset.data_infos,
        CLASSES,
        cam_sync=False)

    print(map_stats)
    if args.verbose:
        print("found {} results".format(len(results)))
        print("found {} images".format(len(image_ids)))
        print("found {} images with no results".format(no_results))
        print("ignored {} dupes".format(len(results) - len(seen)))


# Entry point: run the accuracy evaluation when executed as a script.
if __name__ == "__main__":
    main()
21 changes: 21 additions & 0 deletions automotive/3d-object-detection/backend.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
"""
abstract backend class
"""


class Backend:
    """Abstract interface that every inference backend must implement.

    Concrete backends override every method below; calling any of them on
    the base class raises NotImplementedError.
    """

    def __init__(self):
        # Input/output tensor names, populated by concrete backends.
        self.inputs, self.outputs = [], []

    def version(self):
        """Return the backend's version string."""
        raise NotImplementedError("Backend:version")

    def name(self):
        """Return a human-readable backend name."""
        raise NotImplementedError("Backend:name")

    def load(self, model_path, inputs=None, outputs=None):
        """Load the model located at ``model_path``."""
        raise NotImplementedError("Backend:load")

    def predict(self, feed):
        """Run inference on ``feed`` and return the predictions."""
        raise NotImplementedError("Backend:predict")
24 changes: 24 additions & 0 deletions automotive/3d-object-detection/backend_debug.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import torch
import backend


class BackendDebug(backend.Backend):
    """Debug backend: loads no model and returns empty predictions.

    Useful for exercising the harness end-to-end without a real
    inference engine.
    """

    def __init__(self, image_size=None, **kwargs):
        """Initialize the debug backend.

        image_size: [C, H, W] of the dummy input; defaults to
        [3, 1024, 1024]. Fix: the original signature used a mutable list
        as the default argument (shared across calls) — replaced with a
        None sentinel, and caller-supplied sequences are copied so the
        backend's state cannot be mutated from outside.
        """
        super(BackendDebug, self).__init__()
        if image_size is None:
            self.image_size = [3, 1024, 1024]
        else:
            self.image_size = list(image_size)

    def version(self):
        return torch.__version__

    def name(self):
        return "debug-SUT"

    def image_format(self):
        # Channels-first layout (batch, channel, height, width).
        return "NCHW"

    def load(self):
        # Nothing to load for the debug backend.
        return self

    def predict(self, prompts):
        # Debug stub: ignores prompts and returns no results.
        images = []
        return images
Loading

0 comments on commit 35ac905

Please sign in to comment.