Skip to content

Commit 5d01515

Browse files
committed
Add python samples and code
1 parent 7a7bcaa commit 5d01515

File tree

5 files changed

+459
-0
lines changed

5 files changed

+459
-0
lines changed

python/README.md

+41
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
# Python samples and code
2+
3+
The `files` directory contains skeleton code that can be modified to run a GRIP-generated pipeline.
4+
5+
The `samples` directory has sample programs that can be run directly. Just clone this repository and run the main python files.
6+
7+
## Using the samples
8+
9+
The samples use Python 3 and require three libraries:
10+
11+
1. OpenCV
12+
2. numpy (required by OpenCV)
13+
3. pynetworktables
14+
15+
These can all be installed with pip3.
16+
17+
```bash
18+
$ pip3 install numpy opencv-python pynetworktables
19+
```
20+
21+
(If your default Python version is 3 or newer, the plain `pip` command should work.)
22+
23+
24+
Then `cd` into the directory of the sample you want to run and use the `python3` command to run the sample, eg
25+
26+
```bash
27+
$ cd samples/frc_find_red_areas
28+
$ python3 frc_find_red_areas.py
29+
```
30+
31+
(Again, if your default Python version is 3 or newer, the plain `python` command should work.)
32+
33+
34+
## Binaries
35+
36+
Since the Raspberry Pi 3 is a cheap ($35) and easy-to-use co-processor, we've created a Python 3 OpenCV package for it, which is available on the releases page. To install it, you'll need the Python `wheel` tool:
37+
38+
```bash
39+
$ pip3 install wheel
40+
$ python3 -m wheel install opencv_python_rpi3-...whl
41+
```

python/files/GripRunner.py

+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
#!/usr/bin/python3
2+
3+
"""
4+
Simple skeleton program for running an OpenCV pipeline generated by GRIP and using NetworkTables to send data.
5+
6+
Users need to:
7+
8+
1. Import the generated GRIP pipeline, which should be generated in the same directory as this file.
9+
2. Set the network table server IP. This is usually the robots address (roborio-TEAM-frc.local) or localhost
10+
3. Handle putting the generated code into NetworkTables
11+
"""
12+
13+
import cv2
14+
from networktables import NetworkTable
15+
from grip import GripPipeline # TODO change the default module and class, if needed
16+
17+
18+
def extra_processing(pipeline: GripPipeline):
19+
"""
20+
Performs extra processing on the pipeline's outputs and publishes data to NetworkTables.
21+
:param pipeline: the pipeline that just processed an image
22+
:return: None
23+
"""
24+
# TODO: Users need to implement this.
25+
# Useful for converting OpenCV objects (e.g. contours) to something NetworkTables can understand.
26+
pass
27+
28+
29+
def main():
30+
NetworkTable.setTeam('0000') # TODO set your team number
31+
NetworkTable.initialize()
32+
cap = cv2.VideoCapture(0)
33+
pipeline = GripPipeline()
34+
while True:
35+
ret, frame = cap.read()
36+
if ret:
37+
pipeline.process(frame) # TODO add extra parameters if the pipeline takes more than just a single image
38+
extra_processing(pipeline)
39+
40+
41+
if __name__ == '__main__':
42+
main()
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
#!/usr/bin/python3
2+
3+
"""
4+
Sample program that uses a generated GRIP pipeline to detect red areas in an image and publish them to NetworkTables.
5+
"""
6+
7+
import cv2
8+
from ntcore import NetworkTables
9+
from grip import GripPipeline
10+
11+
12+
def extra_processing(pipeline):
13+
"""
14+
Performs extra processing on the pipeline's outputs and publishes data to NetworkTables.
15+
:param pipeline: the pipeline that just processed an image
16+
:return: None
17+
"""
18+
center_x_positions = []
19+
center_y_positions = []
20+
widths = []
21+
heights = []
22+
23+
# Find the bounding boxes of the contours to get x, y, width, and height
24+
for contour in pipeline.filter_contours_output:
25+
x, y, w, h = cv2.boundingRect(contour)
26+
center_x_positions.append(x + w / 2) # X and Y are coordinates of the top-left corner of the bounding box
27+
center_y_positions.append(y + h / w)
28+
widths.append(w)
29+
heights.append(y)
30+
31+
# Publish to the '/vision/red_areas' network table
32+
table = NetworkTables.getTable('/vision/red_areas')
33+
table.putNumberArray('x', center_x_positions)
34+
table.putNumberArray('y', center_y_positions)
35+
table.putNumberArray('width', widths)
36+
table.putNumberArray('height', heights)
37+
38+
39+
def main():
40+
print('Initializing NetworkTables')
41+
NetworkTables.setClientMode()
42+
NetworkTables.setIPAddress('localhost')
43+
NetworkTables.initialize()
44+
45+
print('Creating video capture')
46+
cap = cv2.VideoCapture(0)
47+
48+
print('Creating pipeline')
49+
pipeline = GripPipeline()
50+
51+
print('Running pipeline')
52+
while cap.isOpened():
53+
have_frame, frame = cap.read()
54+
if have_frame:
55+
pipeline.process(frame)
56+
extra_processing(pipeline)
57+
58+
print('Capture closed')
59+
60+
61+
if __name__ == '__main__':
62+
main()
+160
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,160 @@
1+
import cv2
2+
import numpy
3+
import math
4+
from enum import Enum
5+
6+
class GripPipeline:
7+
"""
8+
An OpenCV pipeline generated by GRIP.
9+
"""
10+
11+
def __init__(self):
12+
"""initializes all values to presets or None if need to be set
13+
"""
14+
15+
self.__resize_image_width = 320.0
16+
self.__resize_image_height = 240.0
17+
self.__resize_image_interpolation = cv2.INTER_CUBIC
18+
19+
self.resize_image_output = None
20+
21+
self.__rgb_threshold_input = self.resize_image_output
22+
self.__rgb_threshold_red = [91.72661870503596, 255.0]
23+
self.__rgb_threshold_green = [0.0, 53.684210526315795]
24+
self.__rgb_threshold_blue = [0.0, 73.16638370118847]
25+
26+
self.rgb_threshold_output = None
27+
28+
self.__find_contours_input = self.rgb_threshold_output
29+
self.__find_contours_external_only = True
30+
31+
self.find_contours_output = None
32+
33+
self.__filter_contours_contours = self.find_contours_output
34+
self.__filter_contours_min_area = 20.0
35+
self.__filter_contours_min_perimeter = 0
36+
self.__filter_contours_min_width = 0
37+
self.__filter_contours_max_width = 1000
38+
self.__filter_contours_min_height = 0
39+
self.__filter_contours_max_height = 1000
40+
self.__filter_contours_solidity = [0, 100]
41+
self.__filter_contours_max_vertices = 1000000
42+
self.__filter_contours_min_vertices = 0
43+
self.__filter_contours_min_ratio = 0
44+
self.__filter_contours_max_ratio = 1000
45+
46+
self.filter_contours_output = None
47+
48+
49+
def process(self, source0):
50+
"""
51+
Runs the pipeline and sets all outputs to new values.
52+
"""
53+
# Step Resize_Image0:
54+
self.__resize_image_input = source0
55+
(self.resize_image_output) = self.__resize_image(self.__resize_image_input, self.__resize_image_width, self.__resize_image_height, self.__resize_image_interpolation)
56+
57+
# Step RGB_Threshold0:
58+
self.__rgb_threshold_input = self.resize_image_output
59+
(self.rgb_threshold_output) = self.__rgb_threshold(self.__rgb_threshold_input, self.__rgb_threshold_red, self.__rgb_threshold_green, self.__rgb_threshold_blue)
60+
61+
# Step Find_Contours0:
62+
self.__find_contours_input = self.rgb_threshold_output
63+
(self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
64+
65+
# Step Filter_Contours0:
66+
self.__filter_contours_contours = self.find_contours_output
67+
(self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
68+
69+
70+
@staticmethod
71+
def __resize_image(input, width, height, interpolation):
72+
"""Scales and image to an exact size.
73+
Args:
74+
input: A numpy.ndarray.
75+
Width: The desired width in pixels.
76+
Height: The desired height in pixels.
77+
interpolation: Opencv enum for the type fo interpolation.
78+
Returns:
79+
A numpy.ndarray of the new size.
80+
"""
81+
return cv2.resize(input, ((int)(width), (int)(height)), 0, 0, interpolation)
82+
83+
@staticmethod
84+
def __rgb_threshold(input, red, green, blue):
85+
"""Segment an image based on color ranges.
86+
Args:
87+
input: A BGR numpy.ndarray.
88+
red: A list of two numbers the are the min and max red.
89+
green: A list of two numbers the are the min and max green.
90+
blue: A list of two numbers the are the min and max blue.
91+
Returns:
92+
A black and white numpy.ndarray.
93+
"""
94+
out = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
95+
return cv2.inRange(out, (red[0], green[0], blue[0]), (red[1], green[1], blue[1]))
96+
97+
@staticmethod
98+
def __find_contours(input, external_only):
99+
"""Sets the values of pixels in a binary image to their distance to the nearest black pixel.
100+
Args:
101+
input: A numpy.ndarray.
102+
external_only: A boolean. If true only external contours are found.
103+
Return:
104+
A list of numpy.ndarray where each one represents a contour.
105+
"""
106+
if(external_only):
107+
mode = cv2.RETR_EXTERNAL
108+
else:
109+
mode = cv2.RETR_LIST
110+
method = cv2.CHAIN_APPROX_SIMPLE
111+
im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
112+
return contours
113+
114+
@staticmethod
115+
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
116+
min_height, max_height, solidity, max_vertex_count, min_vertex_count,
117+
min_ratio, max_ratio):
118+
"""Filters out contours that do not meet certain criteria.
119+
Args:
120+
input_contours: Contours as a list of numpy.ndarray.
121+
min_area: The minimum area of a contour that will be kept.
122+
min_perimeter: The minimum perimeter of a contour that will be kept.
123+
min_width: Minimum width of a contour.
124+
max_width: MaxWidth maximum width.
125+
min_height: Minimum height.
126+
max_height: Maximimum height.
127+
solidity: The minimum and maximum solidity of a contour.
128+
min_vertex_count: Minimum vertex Count of the contours.
129+
max_vertex_count: Maximum vertex Count.
130+
min_ratio: Minimum ratio of width to height.
131+
max_ratio: Maximum ratio of width to height.
132+
Returns:
133+
Contours as a list of numpy.ndarray.
134+
"""
135+
output = []
136+
for contour in input_contours:
137+
x,y,w,h = cv2.boundingRect(contour)
138+
if (w < min_width or w > max_width):
139+
continue
140+
if (h < min_height or h > max_height):
141+
continue
142+
area = cv2.contourArea(contour)
143+
if (area < min_area):
144+
continue
145+
if (cv2.arcLength(contour, True) < min_perimeter):
146+
continue
147+
hull = cv2.convexHull(contour)
148+
solid = 100 * area / cv2.contourArea(hull)
149+
if (solid < solidity[0] or solid > solidity[1]):
150+
continue
151+
if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
152+
continue
153+
ratio = (float)(w) / h
154+
if (ratio < min_ratio or ratio > max_ratio):
155+
continue
156+
output.append(contour)
157+
return output
158+
159+
160+

0 commit comments

Comments
 (0)