
Commit b3c9750

Author: reinvantveer
Commit message: so far so good (WIP)
1 parent: ab9afe3

7 files changed, +197 -0 lines changed

__init__.py

Whitespace-only changes.

model/__init__.py

Whitespace-only changes.

model/loss_function.py

+25
@@ -0,0 +1,25 @@
import torch


def loss_function(reconstruction_target: torch.Tensor, model_output: torch.Tensor) -> torch.Tensor:
    assert type(reconstruction_target).__name__ == 'Tensor', 'The reconstruction target should be a tensor'
    assert type(model_output).__name__ == 'Tensor', 'The model output should be a tensor'
    assert reconstruction_target.size(0) > 0, 'The reconstruction target should be a tensor with length > 0'
    assert model_output.size(0) > 0, 'The model output should be a tensor with length > 0'
    assert reconstruction_target.size(1) == 7, 'The reconstruction target should be a tensor of shape (?, 7)'
    assert model_output.size(1) == 7, 'The model output should be a tensor of shape (?, 7)'

    # Get rid of anything from and after the final stop: column 6 holds the "final stop" flag
    reconstruction_final_stop_entries = reconstruction_target[:, 6]
    reconstruction_final_stop_index = (torch.round(reconstruction_final_stop_entries) == 1.).nonzero()
    reconstruction_target = reconstruction_target[:reconstruction_final_stop_index]

    model_output_final_stop_entries = model_output[:, 6]
    model_output_final_stop_index = (torch.round(model_output_final_stop_entries) == 1.).nonzero()
    model_output = model_output[:model_output_final_stop_index]

    # Sort every column in descending order (a full-length topk), so node ordering does not affect the difference
    reconstruction_target, _ = torch.topk(reconstruction_target, k=reconstruction_target.shape[0], dim=0)
    model_output, _ = torch.topk(model_output, k=model_output.shape[0], dim=0)
    difference = torch.abs(torch.sum(reconstruction_target - model_output))

    return difference
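
As a quick orientation (not part of the committed files): a minimal usage sketch of this loss, assuming the deep_geometry vectorizer used in the tests below produces the expected (?, 7) geometry tensors whose column 6 carries the final-stop flag. The WKT strings are purely illustrative.

import torch
from deep_geometry import vectorizer as gv

from model.loss_function import loss_function

# Vectorize two illustrative WKT polygons into (num_nodes, 7) arrays and wrap them as tensors
target = torch.from_numpy(gv.vectorize_wkt('POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))'))
prediction = torch.from_numpy(gv.vectorize_wkt('POLYGON((1 0, 2 1, 1 2, 0 1, 1 0))'))

loss = loss_function(target, prediction)
print(loss.item())  # a single scalar: the absolute summed difference of the column-sorted sequences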

model/pytorch_efd.py

+48
@@ -0,0 +1,48 @@
import math
import torch


def efd(polygon: torch.Tensor, order=2) -> torch.Tensor:
    """
    PyTorch function for creating elliptic fourier descriptors. Refactored from
    https://github.com/hbldh/pyefd/blob/master/pyefd.py, thank you Henrik Blidh
    :param order: number of harmonics for the elliptic fourier descriptors
    :param polygon: a PyTorch tensor of shape (?, 2)
    :return: a PyTorch tensor of shape (order, 4) with the coefficients (a_n, b_n, c_n, d_n) per harmonic
    """
    assert type(polygon).__name__ == 'Tensor', 'The polygon should be a tensor'

    # Following https://discuss.pytorch.org/t/equivalent-function-like-numpy-diff-in-pytorch/35327/2
    distances_between_nodes = polygon[1:] - polygon[:-1]

    positive_distances = torch.pow(distances_between_nodes, 2)
    positive_distances = torch.sum(positive_distances, dim=1)
    positive_distances = torch.sqrt(positive_distances)

    cumulative_distances = torch.cumsum(positive_distances, dim=0)
    zeros = torch.zeros((1,), dtype=torch.double)
    cumulative_distances = torch.cat((zeros, cumulative_distances))

    total_distance = cumulative_distances[-1]
    normalized_distances = (2 * math.pi * cumulative_distances) / total_distance  # pyefd: phi

    efd_orders = torch.arange(1, order + 1, dtype=torch.double)

    const = total_distance / (2 * efd_orders * efd_orders * math.pi * math.pi)  # shape (order,)
    print('normalized distances:', normalized_distances.numpy())
    print('efd_orders:', efd_orders.numpy())
    # Outer product: one row of pyefd's phi_n = phi * n per harmonic order, shape (order, number of nodes)
    normalized_distances_for_order = efd_orders[:, None] * normalized_distances[None, :]
    d_cos_phi_n = torch.cos(normalized_distances_for_order[:, 1:]) - torch.cos(normalized_distances_for_order[:, :-1])
    d_sin_phi_n = torch.sin(normalized_distances_for_order[:, 1:]) - torch.sin(normalized_distances_for_order[:, :-1])
    coefficient_a_for_order = const * torch.sum((distances_between_nodes[:, 0] / positive_distances) * d_cos_phi_n, dim=1)
    coefficient_b_for_order = const * torch.sum((distances_between_nodes[:, 0] / positive_distances) * d_sin_phi_n, dim=1)
    coefficient_c_for_order = const * torch.sum((distances_between_nodes[:, 1] / positive_distances) * d_cos_phi_n, dim=1)
    coefficient_d_for_order = const * torch.sum((distances_between_nodes[:, 1] / positive_distances) * d_sin_phi_n, dim=1)

    efd_coefficients = torch.stack((coefficient_a_for_order,
                                    coefficient_b_for_order,
                                    coefficient_c_for_order,
                                    coefficient_d_for_order), dim=1)  # shape (order, 4), like pyefd
    print('efd coeffs:', efd_coefficients.numpy())

    return efd_coefficients
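
For context, a minimal sketch (not part of the commit) of calling efd directly on a closed ring of coordinates, here a unit square. It assumes the coordinates are passed as a double-precision (?, 2) tensor, matching the double dtype used inside the function, and the (order, 4) output layout described in the docstring above.

import torch

from model.pytorch_efd import efd

# A closed unit square: the first node is repeated to close the ring (illustrative data)
square = torch.tensor([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]],
                      dtype=torch.double)

coefficients = efd(square, order=2)
print(coefficients.shape)  # torch.Size([2, 4]): (a_n, b_n, c_n, d_n) for harmonics n = 1, 2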

model/stand_in_efd.py

+41
@@ -0,0 +1,41 @@
import numpy as np


def elliptic_fourier_descriptors(contour, order=2, normalize=False):
    """Calculate elliptical Fourier descriptors for a contour.
    :param numpy.ndarray contour: A contour array of size ``[M x 2]``.
    :param int order: The order of Fourier coefficients to calculate.
    :param bool normalize: If the coefficients should be normalized;
        see references for details.
    :return: A ``[order x 4]`` array of Fourier coefficients.
    :rtype: :py:class:`numpy.ndarray`
    """
    dxy = np.diff(contour, axis=0)

    dt = dxy ** 2
    dt = dt.sum(axis=1)
    dt = np.sqrt(dt)

    t = np.cumsum(dt)
    t = np.concatenate([([0.]), t])
    T = t[-1]

    phi = (2 * np.pi * t) / T
    coeffs = np.zeros((order, 4))

    order = 1  # NOTE: overrides the order argument, so only the first harmonic is filled in (WIP)
    for n in range(1, order + 1):
        const = T / (2 * n * n * np.pi * np.pi)
        phi_n = phi * n
        print('phi_n:', phi_n)
        d_cos_phi_n = np.cos(phi_n[1:]) - np.cos(phi_n[:-1])
        d_sin_phi_n = np.sin(phi_n[1:]) - np.sin(phi_n[:-1])
        a_n = const * np.sum((dxy[:, 0] / dt) * d_cos_phi_n)
        b_n = const * np.sum((dxy[:, 0] / dt) * d_sin_phi_n)
        c_n = const * np.sum((dxy[:, 1] / dt) * d_cos_phi_n)
        d_n = const * np.sum((dxy[:, 1] / dt) * d_sin_phi_n)

        coeffs[n - 1, :] = a_n, b_n, c_n, d_n
    # print('pyefd coeffs:', coeffs)

    return coeffs
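
For comparison against the torch version, a small sketch (not part of the commit) of the numpy reference on the same illustrative unit square; because of the order = 1 override in the loop setup, only the first row of the returned array is populated.

import numpy as np

from model.stand_in_efd import elliptic_fourier_descriptors

# The same closed unit square used above, as a plain numpy contour (illustrative data)
square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
coeffs = elliptic_fourier_descriptors(square, order=2)
print(coeffs)  # shape (2, 4); the second row stays zero due to the order = 1 override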

model/test_loss_function.py

+52
@@ -0,0 +1,52 @@
import unittest

import torch
from deep_geometry import vectorizer as gv

from model.loss_function import loss_function


class TestLossFunction(unittest.TestCase):
    def test_loss_function(self):
        geom1 = 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))'
        geom2 = 'POLYGON((1 1, 0 1, 0 0, 1 0, 1 1))'
        geom_extra_node = 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0.5, 0 0))'

        diamond = 'POLYGON((1 0, 2 1, 1 2, 0 1, 1 0))'

        test_square = gv.vectorize_wkt(geom1)
        test_square = torch.from_numpy(test_square)

        test_square_extra_node = gv.vectorize_wkt(geom_extra_node)
        test_square_extra_node = torch.from_numpy(test_square_extra_node)

        output_square = gv.vectorize_wkt(geom2)
        output_square = torch.from_numpy(output_square)

        test_diamond = gv.vectorize_wkt(diamond)
        test_diamond = torch.from_numpy(test_diamond)

        with self.subTest('It accepts only tensors of length 7 on the second axis'):
            loss_function(test_square, output_square)

        with self.subTest('It returns a tensor'):
            loss = loss_function(test_square, output_square)
            self.assertEqual(type(loss).__name__, 'Tensor')

        with self.subTest('It returns a loss of 0 for geometries that are really identical'):
            loss = loss_function(test_square, test_square)
            loss = loss.numpy()
            self.assertEqual(loss, 0.)

        with self.subTest('It returns a loss of 0 for geometries that are almost identical'):
            loss = loss_function(test_square, output_square)
            loss = loss.numpy()
            self.assertEqual(loss, 0.)

        with self.subTest('It returns a non-zero tensor for non-identical geometries'):
            loss = loss_function(test_square, test_diamond)
            loss = loss.numpy()
            self.assertGreater(loss, 0.)

        # with self.subTest('It returns a zero or almost zero loss for a set of same squares where one has an extra node'):
        #     loss = loss_function(test_square, test_square_extra_node)

model/test_pytorch_efd.py

+31
@@ -0,0 +1,31 @@
import unittest

import torch
from deep_geometry import vectorizer as gv

from model.stand_in_efd import elliptic_fourier_descriptors
from model.pytorch_efd import efd


class TestEllipticFourierDescriptor(unittest.TestCase):
    def test_efd(self):
        geom1 = 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))'
        geom1 = gv.vectorize_wkt(geom1)

        geom2 = 'POLYGON((1 1, 0 1, 0 0, 1 0, 1 1))'
        geom_extra_node = 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0.5, 0 0))'

        reference_descriptors = elliptic_fourier_descriptors(geom1[:, :2]).flatten().tolist()

        with self.subTest('It does not accept a numpy ndarray'):
            with self.assertRaises(AssertionError):
                efd(geom1)

        with self.subTest('It creates an elliptic fourier descriptor of a geometry, the same as pyefd creates'):
            polygon_tensor = geom1[:, :2]
            polygon_tensor = torch.from_numpy(polygon_tensor)

            our_descriptors = efd(polygon_tensor).numpy()

            print('torch efd:', our_descriptors)
            # Flatten to a plain list so it is comparable to the flattened reference descriptors
            self.assertListEqual(reference_descriptors, our_descriptors.flatten().tolist())
