
chore: Enhance type checking with mypy and improve code quality #285


Merged
merged 3 commits on Jul 16, 2024
19 changes: 19 additions & 0 deletions .dockerignore
@@ -1 +1,20 @@
# Python cache files
__pycache__
*.pyc

# IDE settings
.vscode/

# Version control
.git/

# Distribution / packaging
build/
dist/
*.egg-info/

# Virtual environments
.venv

# Testing
/tests/manual/data
6 changes: 2 additions & 4 deletions .github/workflows/test.yml
@@ -27,10 +27,8 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install ".[dev]"
- name: 🧹 Lint
- name: 🧹 Check code quality
run: |
make check_code_quality
- name: Check types with mypy
run: mypy .
- name: 🧪 Test
- name: 🧪 Run tests
run: "python -m unittest"
3 changes: 2 additions & 1 deletion Makefile
@@ -1,4 +1,4 @@
.PHONY: style check_code_quality
.PHONY: style check_code_quality publish

export PYTHONPATH = .
check_dirs := roboflow
@@ -10,6 +10,7 @@ style:
check_code_quality:
ruff format $(check_dirs) --check
ruff check $(check_dirs)
mypy $(check_dirs)

publish:
python setup.py sdist bdist_wheel
4 changes: 1 addition & 3 deletions pyproject.toml
@@ -11,9 +11,7 @@ target-version = "py38"
line-length = 120

[tool.ruff.lint]
select = [
"ALL",
]
select = ["ALL"]
ignore = [
"A",
"ANN",
29 changes: 8 additions & 21 deletions roboflow/__init__.py
@@ -3,6 +3,7 @@
import sys
import time
from getpass import getpass
from pathlib import Path
from urllib.parse import urlparse

import requests
@@ -59,27 +60,16 @@ def check_key(api_key, model, notebook, num_retries=0):
return "onboarding"


def auth(api_key):

Contributor: It is backward incompatible. I'd prefer to keep this function.

Contributor Author: Okay, but I am curious about the use case; at the moment, calling this function raises an exception:
TypeError: auth() missing 1 required positional argument: 'api_key'

Contributor: Ah ok, I didn't realize it is totally broken. 👍🏻 Yes, we can remove it.

Contributor: Can you remove it again? 😅 I have already reviewed the whole PR (all good); I'll wait for this last change.

Contributor Author: Sure! Done!

r = check_key(api_key)
w = r["workspace"]

return Roboflow(api_key, w)
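
For context on why dropping auth() is safe: the helper passes a single argument to check_key(), whose signature at the top of this hunk also requires model and notebook, so any call would fail at runtime before reaching the Roboflow constructor. A minimal sketch of the mismatch, using only the signatures visible in this diff:

def check_key(api_key, model, notebook, num_retries=0):
    ...

def auth(api_key):
    r = check_key(api_key)  # TypeError: missing 'model' and 'notebook'
    return r
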


def login(workspace=None, force=False):
os_name = os.name

if os_name == "nt":
default_path = os.path.join(os.getenv("USERPROFILE"), "roboflow/config.json")
default_path = str(Path.home() / "roboflow" / "config.json")
else:
default_path = os.path.join(os.getenv("HOME"), ".config/roboflow/config.json")
default_path = str(Path.home() / ".config" / "roboflow" / "config.json")

# default configuration location
conf_location = os.getenv(
"ROBOFLOW_CONFIG_DIR",
default=default_path,
)

conf_location = os.getenv("ROBOFLOW_CONFIG_DIR", default=default_path)
if os.path.isfile(conf_location) and not force:
write_line("You are already logged into Roboflow. To make a different login," "run roboflow.login(force=True).")
return None
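
A quick note on why the Path.home() rewrite also satisfies mypy: os.getenv() is typed as returning Optional[str], so passing it to os.path.join() fails strict checking whenever HOME or USERPROFILE could be unset, whereas Path.home() always returns a Path. A minimal sketch (the config path is the one shown in this diff):

from pathlib import Path

# Path.home() typically resolves to the same directory HOME/USERPROFILE point at,
# but is typed as Path rather than Optional[str], so no None handling is needed.
default_path = str(Path.home() / ".config" / "roboflow" / "config.json")
print(default_path)  # e.g. /home/user/.config/roboflow/config.json
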
@@ -141,10 +131,7 @@ def initialize_roboflow(the_workspace=None):

global active_workspace

conf_location = os.getenv(
"ROBOFLOW_CONFIG_DIR",
default=os.getenv("HOME") + "/.config/roboflow/config.json",
)
conf_location = os.getenv("ROBOFLOW_CONFIG_DIR", default=str(Path.home() / ".config" / "roboflow" / "config.json"))

if not os.path.isfile(conf_location):
raise RuntimeError("To use this method, you must first login - run roboflow.login()")
@@ -176,7 +163,7 @@ def load_model(model_url):
project = path_parts[2]
version = int(path_parts[-1])
else:
raise ("Model URL must be from either app.roboflow.com or universe.roboflow.com")
raise ValueError("Model URL must be from either app.roboflow.com or universe.roboflow.com")

project = operate_workspace.project(project)
version = project.version(version)
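
The switch to raise ValueError here (and in download_dataset just below) is more than a typing fix: raising a plain string has never been valid in Python 3, so the old line would itself crash. A minimal illustration:

# Old form fails before the message is ever shown:
#   raise "Model URL must be from either app.roboflow.com or universe.roboflow.com"
#   TypeError: exceptions must derive from BaseException
# New form raises a real exception carrying the intended message:
raise ValueError("Model URL must be from either app.roboflow.com or universe.roboflow.com")
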
@@ -204,7 +191,7 @@ def download_dataset(dataset_url, model_format, location=None):
version = int(path_parts[-1])
the_workspace = path_parts[1]
else:
raise ("Model URL must be from either app.roboflow.com or universe.roboflow.com")
raise ValueError("Model URL must be from either app.roboflow.com or universe.roboflow.com")
operate_workspace = initialize_roboflow(the_workspace=the_workspace)

project = operate_workspace.project(project)
@@ -239,7 +226,7 @@ def auth(self):
self.universe = True
return self
else:
w = r["workspace"]
w = r["workspace"] # type: ignore[arg-type]
self.current_workspace = w
return self

20 changes: 10 additions & 10 deletions roboflow/core/project.py
@@ -230,7 +230,7 @@ def generate_version(self, settings):
try:
r_json = r.json()
except Exception:
raise "Error when requesting to generate a new version for project."
raise RuntimeError("Error when requesting to generate a new version for project.")

# if the generation succeeds, return the version that is being generated
if r.status_code == 200:
@@ -256,7 +256,7 @@ def train(
speed=None,
checkpoint=None,
plot_in_notebook=False,
) -> bool:
):
"""
Ask the Roboflow API to train a previously exported version's dataset.

@@ -503,7 +503,7 @@ def single_upload(
sequence_size=sequence_size,
**kwargs,
)
image_id = uploaded_image["id"]
image_id = uploaded_image["id"] # type: ignore[index]
upload_retry_attempts = retry.retries
except BaseException as e:
uploaded_image = {"error": e}
@@ -518,10 +518,10 @@
uploaded_annotation = rfapi.save_annotation(
self.__api_key,
project_url,
annotation_name,
annotation_str,
annotation_name, # type: ignore[type-var]
annotation_str, # type: ignore[type-var]
image_id,
job_name=batch_name,
job_name=batch_name, # type: ignore[type-var]
is_prediction=is_prediction,
annotation_labelmap=annotation_labelmap,
overwrite=annotation_overwrite,
@@ -543,10 +543,10 @@ def _annotation_params(self, annotation_path):
if isinstance(annotation_path, dict) and annotation_path.get("rawText"):
annotation_name = annotation_path["name"]
annotation_string = annotation_path["rawText"]
elif os.path.exists(annotation_path):
with open(annotation_path):
annotation_string = open(annotation_path).read()
annotation_name = os.path.basename(annotation_path)
elif os.path.exists(annotation_path): # type: ignore[arg-type]
with open(annotation_path): # type: ignore[arg-type]
annotation_string = open(annotation_path).read() # type: ignore[arg-type]
annotation_name = os.path.basename(annotation_path) # type: ignore[arg-type]
elif self.type == "classification":
print(f"-> using {annotation_path} as classname for classification project")
annotation_string = annotation_path
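
One observation on the branch above: the with open(annotation_path): statement never binds the file object, and the second open() call is never closed; the type: ignore comments silence mypy but leave that as-is. A tidier equivalent could look like this (a sketch, not part of this PR; the helper name is made up):

import os

def _read_annotation_file(annotation_path: str):
    # Read the annotation once, closing the handle via the context manager.
    with open(annotation_path) as f:
        annotation_string = f.read()
    annotation_name = os.path.basename(annotation_path)
    return annotation_name, annotation_string
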
2 changes: 1 addition & 1 deletion roboflow/core/version.py
@@ -766,7 +766,7 @@ def bar_progress(current, total, width=80):

# write the zip file to the desired location
with open(location + "/roboflow.zip", "wb") as f:
total_length = int(response.headers.get("content-length"))
total_length = int(response.headers.get("content-length")) # type: ignore[arg-type]
desc = None if TQDM_DISABLE else f"Downloading Dataset Version Zip in {location} to {format}:"
for chunk in tqdm(
response.iter_content(chunk_size=1024),
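
The type: ignore[arg-type] on the content-length line (and its twin in roboflow/models/inference.py) works around headers.get() being typed as Optional[str]. Should the ignore ever be revisited, a None-safe variant is straightforward (a sketch with a stand-in URL, not part of this PR):

import requests

response = requests.get("https://example.com", stream=True)  # stand-in request
content_length = response.headers.get("content-length")      # Optional[str]
total_length = int(content_length) if content_length is not None else None
print(total_length)  # tqdm accepts total=None, so the fallback stays usable
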
12 changes: 6 additions & 6 deletions roboflow/core/workspace.py
@@ -3,7 +3,7 @@
import json
import os
import sys
from typing import List
from typing import Any, List

import numpy as np
import requests
@@ -179,7 +179,7 @@ def two_stage(
print(self.project(first_stage_model_name))

# perform first inference
predictions = stage_one_model.predict(image)
predictions = stage_one_model.predict(image) # type: ignore[attribute-error]

if stage_one_project.type == "object-detection" and stage_two_project == "classification":
# interact with each detected object from stage one inference results
@@ -199,7 +199,7 @@
croppedImg.save("./temp.png")

# capture results of second stage inference from cropped image
results.append(stage_two_model.predict("./temp.png")[0])
results.append(stage_two_model.predict("./temp.png")[0]) # type: ignore[attribute-error]

# delete the written image artifact
try:
@@ -244,7 +244,7 @@ def two_stage_ocr(
stage_one_model = stage_one_project.version(first_stage_model_version).model

# perform first inference
predictions = stage_one_model.predict(image)
predictions = stage_one_model.predict(image) # type: ignore[attribute-error]

# interact with each detected object from stage one inference results
if stage_one_project.type == "object-detection":
@@ -391,7 +391,7 @@ def active_learning(
upload_destination: str = "",
conditionals: dict = {},
use_localhost: bool = False,
) -> str:
) -> Any:
"""perform inference on each image in directory and upload based on conditions
@params:
raw_data_location: (str) = folder of frames to be processed
@@ -470,7 +470,7 @@ def active_learning(
print(image2 + " --> similarity too high to --> " + image1)
continue # skip this image if too similar or counter hits limit

predictions = inference_model.predict(image).json()["predictions"]
predictions = inference_model.predict(image).json()["predictions"] # type: ignore[attribute-error]
# collect all predictions to return to user at end
prediction_results.append({"image": image, "predictions": predictions})

2 changes: 1 addition & 1 deletion roboflow/models/classification.py
@@ -63,7 +63,7 @@ def __init__(
print(f"initalizing local classification model hosted at : {local}")
self.base_url = local

def predict(self, image_path, hosted=False):
def predict(self, image_path, hosted=False): # type: ignore[override]
"""
Run inference on an image.

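
For readers wondering about the recurring type: ignore[override] on predict() (here and in instance_segmentation.py, keypoint_detection.py, and object_detection.py below): each subclass accepts different parameters than the base predict() defined in roboflow/models/inference.py, which mypy treats as an unsafe override under the Liskov substitution rule. A condensed sketch of the pattern (class names are assumed for illustration):

class InferenceModel:
    def predict(self, image_path, prediction_type=None, **kwargs): ...

class ClassificationModel(InferenceModel):
    # The narrower signature is intentional, so the override error is suppressed.
    def predict(self, image_path, hosted=False): ...  # type: ignore[override]
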
5 changes: 2 additions & 3 deletions roboflow/models/inference.py
@@ -121,8 +121,7 @@ def predict(self, image_path, prediction_type=None, **kwargs):
params["api_key"] = self.__api_key

params.update(**kwargs)

url = f"{self.api_url}?{urllib.parse.urlencode(params)}"
url = f"{self.api_url}?{urllib.parse.urlencode(params)}" # type: ignore[attr-defined]
response = requests.post(url, **request_kwargs)
response.raise_for_status()

@@ -390,7 +389,7 @@ def download(self, format="pt", location="."):

# write the zip file to the desired location
with open(location + "/weights.pt", "wb") as f:
total_length = int(response.headers.get("content-length"))
total_length = int(response.headers.get("content-length")) # type: ignore[arg-type]
for chunk in tqdm(
response.iter_content(chunk_size=1024),
desc=f"Downloading weights to {location}/weights.pt",
2 changes: 1 addition & 1 deletion roboflow/models/instance_segmentation.py
@@ -35,7 +35,7 @@ def __init__(
self.colors = {} if colors is None else colors
self.preprocessing = {} if preprocessing is None else preprocessing

def predict(self, image_path, confidence=40):
def predict(self, image_path, confidence=40): # type: ignore[override]
"""
Infers detections based on image from a specified model and image path.

2 changes: 1 addition & 1 deletion roboflow/models/keypoint_detection.py
@@ -58,7 +58,7 @@ def __init__(
print(f"initalizing local keypoint detection model hosted at : {local}")
self.base_url = local

def predict(self, image_path, hosted=False):
def predict(self, image_path, hosted=False): # type: ignore[override]
"""
Run inference on an image.

13 changes: 7 additions & 6 deletions roboflow/models/object_detection.py
@@ -125,7 +125,7 @@ def load_model(
format=format,
)

def predict(
def predict( # type: ignore[override]
self,
image_path,
hosted=False,
@@ -175,6 +175,7 @@ def predict(
self.__exception_check(image_path_check=image_path)

resize = False
original_dimensions = None
# If image is local image
if not hosted:
if isinstance(image_path, str):
@@ -219,7 +220,7 @@
retval, buffer = cv2.imencode(".jpg", image_path)
# Currently cv2.imencode does not properly return shape
dimensions = buffer.shape
img_str = base64.b64encode(buffer)
img_str = base64.b64encode(buffer) # type: ignore[arg-type]
img_str = img_str.decode("ascii")
resp = requests.post(
self.api_url,
@@ -243,7 +244,7 @@
if self.format == "json":
resp_json = resp.json()

if resize:
if resize and original_dimensions is not None:
new_preds = []
for p in resp_json["predictions"]:
p["x"] = int(p["x"] * (int(original_dimensions[0]) / int(self.preprocessing["resize"]["width"])))
@@ -310,8 +311,8 @@ def plot_one_box(x, img, color=None, label=None, line_thickness=None, colors=Non

self.colors = {} if colors is None else colors

if label in colors.keys() and label is not None:
color = colors[label]
if label in self.colors and label is not None:
color = self.colors[label]
color = color.lstrip("#")
color = tuple(int(color[i : i + 2], 16) for i in (0, 2, 4))
else:
@@ -391,7 +392,7 @@ def view(button):
frame = cv2.flip(frame, 1) # if your camera reverses your image

_, frame_upload = cv2.imencode(".jpeg", frame)
img_str = base64.b64encode(frame_upload)
img_str = base64.b64encode(frame_upload) # type: ignore[arg-type]
img_str = img_str.decode("ascii")

# post frame to the Roboflow API
2 changes: 1 addition & 1 deletion roboflow/util/image_utils.py
@@ -40,7 +40,7 @@ def mask_image(image, encoded_mask, transparency=60):
:param transparency: alpha transparency of masks for semantic overlays
:returns: CV2 image / numpy.ndarray matrix
"""
np_data = np.fromstring(base64.b64decode(encoded_mask), np.uint8)
np_data = np.fromstring(base64.b64decode(encoded_mask), np.uint8) # type: ignore[no-overload]
mask = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)

# Fallback in case the API returns an incorrectly sized mask
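
A note on the ignore above: np.fromstring has long been deprecated for binary input, and np.frombuffer is the drop-in replacement, which should also type-check without an ignore (a sketch with a stand-in payload, not part of this PR):

import base64

import numpy as np

encoded_mask = base64.b64encode(bytes(range(16)))  # stand-in payload
np_data = np.frombuffer(base64.b64decode(encoded_mask), np.uint8)
print(np_data.shape)  # (16,)
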
5 changes: 3 additions & 2 deletions roboflow/util/prediction.py
@@ -38,7 +38,7 @@ def plot_image(image_path):
img = Image.open(io.BytesIO(response.content))

figure, axes = plt.subplots()
axes.imshow(img)
axes.imshow(img) # type: ignore[attr-defined]
return figure, axes


@@ -55,7 +55,7 @@ def plot_annotation(axes, prediction=None, stroke=1, transparency=60, colors=Non
# Object Detection annotation

colors = {} if colors is None else colors

prediction = prediction or {}
stroke_color = "r"

if prediction["prediction_type"] == OBJECT_DETECTION_MODEL:
@@ -283,6 +283,7 @@ def add_prediction(self, prediction=None):

:param prediction: Prediction to add to the prediction group
"""
prediction = prediction or {}
# If not a Prediction object then do not allow into the prediction group
# Also checks if prediction types are the same
# (i.e. object detection predictions in object detection groups)