Skip to content

Commit ee77812

Browse files
Reduce the complexity of other/scoring_algorithm.py (TheAlgorithms#8045)
* Increase the --max-complexity threshold in the file .flake8
1 parent 069a14b commit ee77812

File tree

1 file changed: +43 −14 lines

1 file changed

+43
-14
lines changed

other/scoring_algorithm.py

+43-14
Original file line numberDiff line numberDiff line change
@@ -23,29 +23,29 @@
2323
"""
2424

2525

26-
def procentual_proximity(
27-
source_data: list[list[float]], weights: list[int]
28-
) -> list[list[float]]:
26+
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """
    Transpose row-oriented source data into per-column lists of floats.

    Each inner list of ``source_data`` is one record; the result groups the
    i-th value of every record into column i, converted to ``float``.

    >>> get_data([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]])
    [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]]
    """
    columns: list[list[float]] = []
    for row in source_data:
        for col_index, value in enumerate(row):
            # Grow the column list lazily so ragged rows are tolerated:
            # a longer row simply introduces a new (shorter) column.
            while len(columns) <= col_index:
                columns.append([])
            columns[col_index].append(float(value))
    return columns
38+
4639

40+
def calculate_each_score(
41+
data_lists: list[list[float]], weights: list[int]
42+
) -> list[list[float]]:
43+
"""
44+
>>> calculate_each_score([[20, 23, 22], [60, 90, 50], [2012, 2015, 2011]],
45+
... [0, 0, 1])
46+
[[1.0, 0.0, 0.33333333333333337], [0.75, 0.0, 1.0], [0.25, 1.0, 0.0]]
47+
"""
4748
score_lists: list[list[float]] = []
48-
# calculating each score
4949
for dlist, weight in zip(data_lists, weights):
5050
mind = min(dlist)
5151
maxd = max(dlist)
@@ -72,14 +72,43 @@ def procentual_proximity(
7272

7373
score_lists.append(score)
7474

75+
return score_lists
76+
77+
78+
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """
    Sum per-criterion score lists element-wise into one final score per item.

    ``score_lists[c][j]`` is item j's score for criterion c; the result's
    j-th entry is the sum of item j's scores over all criteria.

    Returns an empty list for empty input (the previous version raised
    ``IndexError`` on ``score_lists[0]``).

    >>> generate_final_scores([[1.0, 0.0, 0.33333333333333337],
    ...                        [0.75, 0.0, 1.0],
    ...                        [0.25, 1.0, 0.0]])
    [2.0, 1.0, 1.3333333333333335]
    >>> generate_final_scores([])
    []
    """
    if not score_lists:
        return []
    # Start from 0.0 (not int 0) to match the declared list[float] result.
    final_scores: list[float] = [0.0] * len(score_lists[0])
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] += ele
    return final_scores
93+
94+
95+
def procentual_proximity(
96+
source_data: list[list[float]], weights: list[int]
97+
) -> list[list[float]]:
98+
"""
99+
weights - int list
100+
possible values - 0 / 1
101+
0 if lower values have higher weight in the data set
102+
1 if higher values have higher weight in the data set
103+
104+
>>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
105+
[[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
106+
"""
107+
108+
data_lists = get_data(source_data)
109+
score_lists = calculate_each_score(data_lists, weights)
110+
final_scores = generate_final_scores(score_lists)
111+
83112
# append scores to source data
84113
for i, ele in enumerate(final_scores):
85114
source_data[i].append(ele)

0 commit comments

Comments
 (0)