Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Detect target precision profile #167

Draft
wants to merge 23 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
90a0aa7
Initialized file with bootcamp code
SubwayMan Oct 14, 2023
306a928
Merge branch 'main' into decision_command_struct
SubwayMan Oct 14, 2023
d7e87e8
Renamed and modified functions to match list from asana, adjusted data
SubwayMan Oct 14, 2023
342fd65
added z axis to command struct
SubwayMan Oct 15, 2023
a17d917
changed documentation and docstrings
SubwayMan Oct 15, 2023
fa95d05
Merge branch 'main' into decision_command_struct
SubwayMan Oct 16, 2023
aba88a9
Renamed class to DecisionCommand
SubwayMan Oct 17, 2023
0fd0635
added relative landing command
SubwayMan Oct 17, 2023
1516e26
modified docstrings and command parameter names
SubwayMan Oct 17, 2023
30bfaf9
Updated all coordinate command descriptions with NED
SubwayMan Oct 18, 2023
7e92cb4
PR fixes: fixed argument indentation and corrected small docstring mi…
SubwayMan Oct 20, 2023
2158f7b
removed extraneous newline
SubwayMan Oct 23, 2023
6de6668
Merge branch 'main' into decision_command_struct
SubwayMan Nov 21, 2023
ad8b210
Single image profiling code
SubwayMan Nov 26, 2023
b9009b2
moved profiling
SubwayMan Nov 26, 2023
1b163f9
removed worker
SubwayMan Dec 3, 2023
749d8d6
added profiling functionality
KarthiU Feb 26, 2024
0f7373c
Merge branch 'detect_target_precision_profile' of https://github.com/…
KarthiU Feb 26, 2024
dd7cd28
pulled and renamed file(s)
KarthiU Feb 26, 2024
6c6389c
Integrate data merge worker (#166)
DylanFinlay Feb 26, 2024
4ccf36e
fixed profiler
KarthiU Mar 20, 2024
c22952f
bug fixes
KarthiU Mar 20, 2024
96d3da4
removed imgs
KarthiU Mar 20, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
queue_max_size: 10

log_directory_path: "logs"
profiling_length: 300

video_input:
camera_name: 0
Expand All @@ -19,3 +20,6 @@ flight_interface:
address: "tcp:127.0.0.1:14550"
timeout: 10.0 # seconds
worker_period: 0.1 # seconds

data_merge:
timeout: 10.0 # seconds
81 changes: 48 additions & 33 deletions main_2024.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from modules.detect_target import detect_target_worker
from modules.flight_interface import flight_interface_worker
from modules.video_input import video_input_worker
from modules.data_merge import data_merge_worker
from utilities.workers import queue_proxy_wrapper
from utilities.workers import worker_controller
from utilities.workers import worker_manager
Expand Down Expand Up @@ -76,6 +77,8 @@ def main() -> int:
FLIGHT_INTERFACE_ADDRESS = config["flight_interface"]["address"]
FLIGHT_INTERFACE_TIMEOUT = config["flight_interface"]["timeout"]
FLIGHT_INTERFACE_WORKER_PERIOD = config["flight_interface"]["worker_period"]

DATA_MERGE_TIMEOUT = config["data_merge"]["timeout"]
except KeyError:
print("Config key(s) not found")
return -1
Expand All @@ -90,13 +93,17 @@ def main() -> int:
mp_manager,
QUEUE_MAX_SIZE,
)
detect_target_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
detect_target_to_data_merge_queue = queue_proxy_wrapper.QueueProxyWrapper(
mp_manager,
QUEUE_MAX_SIZE,
)
flight_interface_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
flight_interface_to_data_merge_queue = queue_proxy_wrapper.QueueProxyWrapper(
mp_manager,
QUEUE_MAX_SIZE
QUEUE_MAX_SIZE,
)
data_merge_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
mp_manager,
QUEUE_MAX_SIZE,
)

video_input_manager = worker_manager.WorkerManager()
Expand All @@ -123,7 +130,7 @@ def main() -> int:
DETECT_TARGET_SHOW_ANNOTATED,
DETECT_TARGET_SAVE_PREFIX,
video_input_to_detect_target_queue,
detect_target_to_main_queue,
detect_target_to_data_merge_queue,
controller,
),
)
Expand All @@ -136,7 +143,20 @@ def main() -> int:
FLIGHT_INTERFACE_ADDRESS,
FLIGHT_INTERFACE_TIMEOUT,
FLIGHT_INTERFACE_WORKER_PERIOD,
flight_interface_to_main_queue,
flight_interface_to_data_merge_queue,
controller,
),
)

data_merge_manager = worker_manager.WorkerManager()
data_merge_manager.create_workers(
1,
data_merge_worker.data_merge_worker,
(
DATA_MERGE_TIMEOUT,
detect_target_to_data_merge_queue,
flight_interface_to_data_merge_queue,
data_merge_to_main_queue,
controller,
),
)
Expand All @@ -145,36 +165,29 @@ def main() -> int:
video_input_manager.start_workers()
detect_target_manager.start_workers()
flight_interface_manager.start_workers()
data_merge_manager.start_workers()

while True:
try:
detections = detect_target_to_main_queue.queue.get_nowait()
merged_data = data_merge_to_main_queue.queue.get_nowait()
except queue.Empty:
detections = None

if detections is not None:
print("timestamp: " + str(detections.timestamp))
print("detections: " + str(len(detections.detections)))
for detection in detections.detections:
print(" label: " + str(detection.label))
print(" confidence: " + str(detection.confidence))
print("")

odometry_and_time_info: "odometry_and_time.OdometryAndTime | None" = \
flight_interface_to_main_queue.queue.get()

if odometry_and_time_info is not None:
timestamp = odometry_and_time_info.timestamp
position = odometry_and_time_info.odometry_data.position
orientation = odometry_and_time_info.odometry_data.orientation.orientation

print("timestamp: " + str(timestamp))
print("north: " + str(position.north))
print("east: " + str(position.east))
print("down: " + str(position.down))
print("yaw: " + str(orientation.yaw))
print("roll: " + str(orientation.roll))
print("pitch: " + str(orientation.pitch))
merged_data = None

if merged_data is not None:
position = merged_data.odometry_local.position
orientation = merged_data.odometry_local.orientation.orientation
detections = merged_data.detections

print("merged north: " + str(position.north))
print("merged east: " + str(position.east))
print("merged down: " + str(position.down))
print("merged yaw: " + str(orientation.yaw))
print("merged roll: " + str(orientation.roll))
print("merged pitch: " + str(orientation.pitch))
print("merged detections count: " + str(len(detections)))
for detection in detections:
print("merged label: " + str(detection.label))
print("merged confidence: " + str(detection.confidence))
print("")

if cv2.waitKey(1) == ord('q'):
Expand All @@ -185,12 +198,14 @@ def main() -> int:
controller.request_exit()

video_input_to_detect_target_queue.fill_and_drain_queue()
detect_target_to_main_queue.fill_and_drain_queue()
flight_interface_to_main_queue.fill_and_drain_queue()
detect_target_to_data_merge_queue.fill_and_drain_queue()
flight_interface_to_data_merge_queue.fill_and_drain_queue()
data_merge_to_main_queue.fill_and_drain_queue()

video_input_manager.join_workers()
detect_target_manager.join_workers()
flight_interface_manager.join_workers()
data_merge_manager.join_workers()

cv2.destroyAllWindows()

Expand Down
18 changes: 18 additions & 0 deletions modules/detect_target/detect_target.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,8 @@ def run(self,

Return: Success and the detections.
"""
start_time = time.time()

image = data.image
predictions = self.__model.predict(
source=image,
Expand Down Expand Up @@ -90,6 +92,22 @@ def run(self,
assert detection is not None
detections.append(detection)

stop_time = time.time()

elapsed_time = stop_time - start_time

for pred in predictions:
with open('profiler.txt', 'a') as file:
speeds = pred.speed
Comment on lines +97 to +101
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We don't want to add logic within the detect_target.py class for profiling. Rather we could time the worker outside of the call to detect target.

Copy link
Collaborator

@Xierumeng Xierumeng Feb 26, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The worker isn't the thing to test, something like this:

# profiling_or_whatever_file_name.py

def profile_detector(detector: detect_target.DetectTarget, images: "list[np.ndarray]") -> ...:
    for image in images:
        gc.disable()  # This disables the garbage collector
        start = time.time_ns()
        result, value = detector.run(image)  # Might or might not want to keep the bounding boxes
        end = time.time_ns()
        gc.enable()  # This enables the garbage collector
        if not result:
            # Handle error
      
        # Save results somewhere
        time_ns = end - start
        ...

def main() -> int:
    images = load_many_images()
    detector_half = detect_target.DetectTarget(...)
    detector_full = detect_target.DetectTarget(...)

    # Initial run just to warm up CUDA
    _ = profile_detector(detector_full, images[:10])
    time_half = profile_detector(detector_half, images)
    time_full = profile_detector(detector_full, images)

    # Record the results
    ...

preprocess_speed = round(speeds['preprocess'], 3)
inference_speed = round(speeds['inference'], 3)
postprocess_speed = round(speeds['postprocess'], 3)
elapsed_time_ms = elapsed_time * 1000
precision_string = "half" if self.__enable_half_precision else "full"


file.write(f"{preprocess_speed}, {inference_speed}, {postprocess_speed}, {elapsed_time_ms}, {precision_string}\n")

# Logging
if self.__filename_prefix != "":
filename = self.__filename_prefix + str(self.__counter)
Expand Down
Empty file added profiler/__init__.py
Empty file.
185 changes: 185 additions & 0 deletions profiler_detect_target_2024.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,185 @@
"""
Profile detect target using full/half precision.
"""
import multiprocessing as mp
import time
import gc
import pathlib
import yaml
import argparse
import cv2


import numpy as np
import os
import pandas as pd


from modules.detect_target import detect_target
from modules.image_and_time import ImageAndTime






# Path to the shared project configuration file
CONFIG_FILE_PATH = pathlib.Path("config.yaml")


# Directories containing the sample .png images for each surface type
GRASS_DATA_DIR = "profiler/profile_data/Grass"
ASPHALT_DATA_DIR = "profiler/profile_data/Asphalt"
FIELD_DATA_DIR = "profiler/profile_data/Field"


# Nanoseconds per millisecond; divide a time.time_ns() duration by this to get ms
MS_TO_NS_CONV = 1000000




def load_images(dir: str) -> "list[ImageAndTime]":
    """
    Load all PNG images in a directory and wrap them with timestamps.

    dir: Path of the directory containing .png images.

    Return: List of successfully loaded and wrapped images. Files that fail
    to read or wrap are silently skipped; an empty list if none load.
    """
    images = []
    for filename in os.listdir(dir):
        # Case-insensitive match so files named ".PNG" are not skipped
        if not filename.lower().endswith(".png"):
            continue
        img = cv2.imread(os.path.join(dir, filename))
        if img is None:
            continue
        success, image_with_time = ImageAndTime.create(img)
        if success:
            images.append(image_with_time)
    return images


def profile_detector(detector: detect_target.DetectTarget, images: "list[np.ndarray]") -> dict:
    """
    Time the detector over a list of images.

    detector: Detector instance to profile.
    images: Images to run through the detector.

    Return: Timing statistics in milliseconds over the successful runs,
    with keys "Average (ms)", "Min (ms)", "Max (ms)", "Median (ms)".
    All values are -1 when no image was processed successfully.
    """
    times_arr = []
    failure_count = 0
    for image in images:
        # Disable the garbage collector so a collection pause cannot
        # pollute an individual timing sample
        gc.disable()
        try:
            start = time.time_ns()
            result, _ = detector.run(image)  # Bounding boxes are not kept
            end = time.time_ns()
        finally:
            # Re-enable even if the detector raises
            gc.enable()

        if not result:
            failure_count += 1
        else:
            times_arr.append(end - start)

    # Report failures instead of silently swallowing them
    if failure_count > 0:
        print(f"Warning: {failure_count} image(s) failed to process")

    if len(times_arr) > 0:
        average = np.nanmean(times_arr) / MS_TO_NS_CONV
        mins = np.nanmin(times_arr) / MS_TO_NS_CONV
        maxs = np.nanmax(times_arr) / MS_TO_NS_CONV
        median = np.median(times_arr) / MS_TO_NS_CONV
    else:
        # Sentinel values when every run failed or no images were given
        average, mins, maxs, median = -1, -1, -1, -1

    return {
        "Average (ms)": average,
        "Min (ms)": mins,
        "Max (ms)": maxs,
        "Median (ms)": median,
    }


def run_detector(detector_full: detect_target.DetectTarget, detector_half: detect_target.DetectTarget, images: "list[np.ndarray]") -> pd.DataFrame:
    """
    Profile both precision variants of the detector on the same image set.

    detector_full: Detector forced to full precision.
    detector_half: Detector allowed to use half precision.
    images: Images to run through both detectors.

    Return: DataFrame of timing statistics with one row per precision,
    indexed "half" then "full".
    """
    # Warm-up pass so CUDA initialization does not skew the first samples
    _ = profile_detector(detector_full, images[:10])

    # Half precision is profiled before full, matching the row order below
    stats_by_precision = {
        "half": profile_detector(detector_half, images),
        "full": profile_detector(detector_full, images),
    }

    frames = [
        pd.DataFrame(stats, index=[label])
        for label, stats in stats_by_precision.items()
    ]
    return pd.concat(frames)


def main() -> int:
    """
    Profile detect target in full and half precision over sample image sets.

    Return: 0 on success, -1 on failure.
    """
    # Configuration
    try:
        with CONFIG_FILE_PATH.open("r", encoding="utf8") as file:
            try:
                config = yaml.safe_load(file)
            except yaml.YAMLError as exc:
                print(f"Error parsing YAML file: {exc}")
                return -1
    except FileNotFoundError:
        print(f"File not found: {CONFIG_FILE_PATH}")
        return -1
    except IOError as exc:
        print(f"Error when opening file: {exc}")
        return -1

    parser = argparse.ArgumentParser()
    parser.add_argument("--cpu", action="store_true", help="option to force cpu")
    args = parser.parse_args()

    # Guard config reads so a missing key reports cleanly instead of crashing
    try:
        DETECT_TARGET_MODEL_PATH = config["detect_target"]["model_path"]
        DETECT_TARGET_DEVICE = "cpu" if args.cpu else config["detect_target"]["device"]
    except KeyError:
        print("Config key(s) not found")
        return -1

    # Creating detector instances.
    # 3rd argument overrides full precision; empty save prefix disables image logging
    detector_half = detect_target.DetectTarget(
        DETECT_TARGET_DEVICE,
        DETECT_TARGET_MODEL_PATH,
        False,
        "",
    )
    detector_full = detect_target.DetectTarget(
        DETECT_TARGET_DEVICE,
        DETECT_TARGET_MODEL_PATH,
        True,
        "",
    )

    # Loading images
    grass_images = load_images(GRASS_DATA_DIR)
    asphalt_images = load_images(ASPHALT_DATA_DIR)
    field_images = load_images(FIELD_DATA_DIR)

    # Running detector
    grass_results = run_detector(detector_full, detector_half, grass_images)
    asphalt_results = run_detector(detector_full, detector_half, asphalt_images)
    field_results = run_detector(detector_full, detector_half, field_images)

    # Printing results to console
    print("=================GRASS==================")
    print(grass_results)
    print("=================ASPHALT==================")
    print(asphalt_results)
    print("=================FIELD==================")
    print(field_results)

    # Save to CSVs, creating the output directory if it does not exist yet
    results_dir = "profiler/profile_data/results"
    os.makedirs(results_dir, exist_ok=True)
    grass_results.to_csv(f"{results_dir}/results_grass.csv")
    asphalt_results.to_csv(f"{results_dir}/results_asphalt.csv")
    field_results.to_csv(f"{results_dir}/results_field.csv")

    return 0


# Script entry point; the return value of main() is intentionally not
# propagated as the process exit status here
if __name__ == "__main__":
    main()







Loading