Skip to content

Commit 81ca965

Browse files
authored
Introducing WorkUnit model names (#98)
Now the `WorkUnit` fields `name: str` and `measurement: str` move to `model_name: dict[str, Any]`, and `name` becomes `granular_name`. This introduces a universal format for the unique identification of works.
1 parent ab3e103 commit 81ca965

File tree

25 files changed

+249
-156
lines changed

25 files changed

+249
-156
lines changed

examples/simple_generation.ipynb

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,41 @@
9999
" uniq_resources=100)"
100100
]
101101
},
102+
{
103+
"cell_type": "code",
104+
"execution_count": 4,
105+
"outputs": [
106+
{
107+
"name": "stdout",
108+
"output_type": "stream",
109+
"text": [
110+
"{1: 'hello'}\n"
111+
]
112+
},
113+
{
114+
"data": {
115+
"text/plain": "dict"
116+
},
117+
"execution_count": 4,
118+
"metadata": {},
119+
"output_type": "execute_result"
120+
}
121+
],
122+
"source": [
123+
"d = {1: 'hello'}\n",
124+
"print(d)\n",
125+
"\n",
126+
"v = eval(str(d))\n",
127+
"type(v)"
128+
],
129+
"metadata": {
130+
"collapsed": false,
131+
"ExecuteTime": {
132+
"end_time": "2025-09-17T08:15:05.619946Z",
133+
"start_time": "2025-09-17T08:15:05.611943300Z"
134+
}
135+
}
136+
},
102137
{
103138
"cell_type": "markdown",
104139
"metadata": {

examples/simple_synthetic_graph_scheduling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131

3232
# Get information about created WorkGraph's attributes
3333
works_count = len(wg.nodes)
34-
work_names_count = len(set(n.work_unit.name for n in wg.nodes))
34+
work_names_count = len(set(n.work_unit.model_name for n in wg.nodes))
3535
res_kind_count = len(set(req.kind for req in chain(*[n.work_unit.worker_reqs for n in wg.nodes])))
3636
print(works_count, work_names_count, res_kind_count)
3737

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "sampo"
3-
version = "0.1.1.353"
3+
version = "0.1.2"
44
description = "Open-source framework for adaptive manufacturing processes scheduling"
55
authors = ["iAirLab <[email protected]>"]
66
license = "BSD-3-Clause"

sampo/generator/pipeline/cluster.py

Lines changed: 47 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -17,15 +17,18 @@ def _add_addition_work(probability: float, rand: Random | None = None) -> bool:
1717
def _get_roads(parents: list[GraphNode], cluster_name: str, dist: float,
1818
rand: Random | None = None) -> tuple[dict[str, GraphNode], int]:
1919
road_nodes = dict()
20-
min_r = WorkUnit(uuid_str(rand), 'minimal road',
21-
scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
20+
min_r = WorkUnit(uuid_str(rand), {'granular_name': 'minimal road',
21+
'measurement': 'km'},
22+
scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
2223
road_nodes['min'] = GraphNode(min_r, parents)
23-
temp_r = WorkUnit(uuid_str(rand), 'temporary road',
24-
scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
24+
temp_r = WorkUnit(uuid_str(rand), {'granular_name': 'temporary road',
25+
'measurement': 'km'},
26+
scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
2527
road_nodes['temp'] = GraphNode(temp_r, [(road_nodes['min'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])
2628

27-
final_r = WorkUnit(uuid_str(rand), 'final road', scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road',
28-
volume=dist, volume_type='km')
29+
final_r = WorkUnit(uuid_str(rand), {'granular_name': 'final road',
30+
'measurement': 'km'},
31+
scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
2932
road_nodes['final'] = GraphNode(final_r, [(road_nodes['temp'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])
3033
return road_nodes, len(road_nodes)
3134

@@ -43,21 +46,27 @@ def _get_engineering_preparation(parents: list[GraphNode], cluster_name: str, bo
4346
def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: float,
4447
dist_high_line: float | None = None, rand: Random | None = None) -> tuple[list[GraphNode], int]:
4548
worker_req = wr.scale_reqs(wr.POWER_LINE, dist_line)
46-
power_line_1 = WorkUnit(uuid_str(rand), 'power line', worker_req,
49+
power_line_1 = WorkUnit(uuid_str(rand),
50+
{'granular_name': 'power line', 'measurement': 'km'},
51+
worker_req,
4752
group=f'{cluster_name}:electricity',
48-
volume=dist_line, volume_type='km')
49-
power_line_2 = WorkUnit(uuid_str(rand), 'power line', worker_req,
53+
volume=dist_line)
54+
power_line_2 = WorkUnit(uuid_str(rand),
55+
{'granular_name': 'power line', 'measurement': 'km'},
56+
worker_req,
5057
group=f'{cluster_name}:electricity',
51-
volume=dist_line, volume_type='km')
58+
volume=dist_line)
5259

5360
power_lines = [
5461
GraphNode(power_line_1, parents),
5562
GraphNode(power_line_2, parents),
5663
]
5764
if dist_high_line is not None:
5865
worker_req_high = wr.scale_reqs(wr.POWER_LINE, dist_high_line)
59-
high_power_line = WorkUnit(uuid_str(rand), 'high power line', worker_req_high,
60-
group=f'{cluster_name}:electricity', volume=dist_high_line, volume_type='km')
66+
high_power_line = WorkUnit(uuid_str(rand),
67+
{'granular_name': 'high power line', 'measurement': 'km'},
68+
worker_req_high,
69+
group=f'{cluster_name}:electricity', volume=dist_high_line)
6170
power_lines.append(GraphNode(high_power_line, parents))
6271

6372
return power_lines, len(power_lines)
@@ -66,23 +75,32 @@ def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: flo
6675
def _get_pipe_lines(parents: list[GraphNode], cluster_name: str, pipe_dists: list[float],
6776
rand: Random | None = None) -> tuple[list[GraphNode], int]:
6877
worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
69-
first_pipe = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
70-
volume=pipe_dists[0], volume_type='km')
78+
first_pipe = WorkUnit(uuid_str(rand),
79+
{'granular_name': 'pipe', 'measurement': 'km'},
80+
worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
81+
volume=pipe_dists[0])
7182

7283
graph_nodes = [GraphNode(first_pipe, parents)]
7384
for i in range(1, len(pipe_dists)):
74-
node_work = WorkUnit(uuid_str(rand), 'node', wr.PIPE_NODE,
85+
node_work = WorkUnit(uuid_str(rand),
86+
{'granular_name': 'node'},
87+
wr.PIPE_NODE,
7588
group=f'{cluster_name}:oil_gas_long_pipes')
7689
graph_nodes.append(GraphNode(node_work, parents))
7790
worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[i])
78-
pipe_work = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe,
91+
pipe_work = WorkUnit(uuid_str(rand),
92+
{'granular_name': 'pipe', 'measurement': 'km'},
93+
worker_req_pipe,
7994
group=f'{cluster_name}:oil_gas_long_pipes',
80-
volume=pipe_dists[i], volume_type='km')
95+
volume=pipe_dists[i])
8196
graph_nodes.append(GraphNode(pipe_work, parents))
8297

8398
worker_req_loop = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
84-
looping = WorkUnit(uuid_str(rand), 'looping', worker_req_loop, group=f'{cluster_name}:oil_gas_long_pipes',
85-
volume=pipe_dists[0], volume_type='km')
99+
looping = WorkUnit(uuid_str(rand),
100+
{'granular_name': 'looping', 'measurement': 'km'},
101+
worker_req_loop,
102+
group=f'{cluster_name}:oil_gas_long_pipes',
103+
volume=pipe_dists[0])
86104
graph_nodes.append(GraphNode(looping, graph_nodes[0:1]))
87105
return graph_nodes, len(graph_nodes)
88106

@@ -147,18 +165,23 @@ def _get_boreholes_equipment_general(parents: list[GraphNode], cluster_name: str
147165
dist = gen_c.DIST_BETWEEN_BOREHOLES.rand_float(rand)
148166
dists_sum += dist
149167
worker_req_pipe = scale_reqs(wr.POWER_NETWORK, dist)
150-
pipe_net_work = WorkUnit(uuid_str(rand), 'elem of pipe_network', worker_req_pipe,
151-
group=f'{cluster_name}:oil_gas_pipe_net', volume=dist, volume_type='km')
168+
pipe_net_work = WorkUnit(uuid_str(rand),
169+
{'granular_name': 'elem of pipe_network', 'measurement': 'km'},
170+
worker_req_pipe,
171+
group=f'{cluster_name}:oil_gas_pipe_net',
172+
volume=dist)
152173
nodes.append(GraphNode(pipe_net_work, parents))
153174

154175
worker_req_power = scale_reqs(wr.POWER_NETWORK, dists_sum)
155-
power_net_work = WorkUnit(uuid_str(rand), 'power network', worker_req_power,
176+
power_net_work = WorkUnit(uuid_str(rand), {'granular_name': 'power network', 'measurement': 'km'},
177+
worker_req_power,
156178
group=f'{cluster_name}:electricity',
157-
volume=dists_sum, volume_type='km')
179+
volume=dists_sum)
158180
nodes.append(GraphNode(power_net_work, parents))
159181

160182
for i in range(masts_count):
161-
light_mast_work = WorkUnit(uuid_str(rand), 'mast', wr.LIGHT_MAST,
183+
light_mast_work = WorkUnit(uuid_str(rand), {'granular_name': 'mast', 'measurement': 'km'},
184+
wr.LIGHT_MAST,
162185
group=f'{cluster_name}:light_masts')
163186
nodes.append(GraphNode(light_mast_work, parents))
164187
return nodes, len(nodes)

sampo/scheduler/multi_agency/block_generator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def generate_wg(mode, i):
9393
bg.add_edge(global_start, node)
9494
bg.add_edge(node, global_end)
9595

96-
logger(f'{graph_type.name} ' + ' '.join([str(mode.name) for i, mode in enumerate(modes)
96+
logger(f'{graph_type.name} ' + ' '.join([str(mode.model_name) for i, mode in enumerate(modes)
9797
if nodes[i].vertex_count != EMPTY_GRAPH_VERTEX_COUNT]))
9898
return bg
9999

sampo/scheduler/utils/local_optimization.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -247,16 +247,16 @@ def optimize(self, scheduled_works: dict[GraphNode, ScheduledWork], node_order:
247247
satisfy = True
248248

249249
for candidate_worker in candidate_schedule.workers:
250-
my_worker = my_workers.get(candidate_worker.name, None)
250+
my_worker = my_workers.get(candidate_worker.model_name, None)
251251
if my_worker is None: # these two works are not compete for this worker
252252
continue
253253

254-
need_me = my_workers[candidate_worker.name].count
254+
need_me = my_workers[candidate_worker.model_name].count
255255
need_candidate = candidate_worker.count
256256

257257
total = need_me + need_candidate
258-
my_req = my_schedule_reqs[candidate_worker.name]
259-
candidate_req = candidate_schedule_reqs[candidate_worker.name]
258+
my_req = my_schedule_reqs[candidate_worker.model_name]
259+
candidate_req = candidate_schedule_reqs[candidate_worker.model_name]
260260
needed_min = my_req.min_count + candidate_req.min_count
261261

262262
if needed_min > total: # these two works can't run in parallel
@@ -273,17 +273,17 @@ def optimize(self, scheduled_works: dict[GraphNode, ScheduledWork], node_order:
273273
my_worker_count += add_me
274274
candidate_worker_count += add_candidate
275275

276-
new_my_workers[candidate_worker.name] = my_worker_count
277-
new_candidate_workers[candidate_worker.name] = candidate_worker_count
276+
new_my_workers[candidate_worker.model_name] = my_worker_count
277+
new_candidate_workers[candidate_worker.model_name] = candidate_worker_count
278278

279279
if satisfy: # replacement found, apply changes and leave candidates bruteforce
280280
print(f'Found! {candidate.work_unit.name} {node.work_unit.name}')
281281
for worker in my_schedule.workers:
282-
worker_count = new_my_workers.get(worker.name, None)
282+
worker_count = new_my_workers.get(worker.model_name, None)
283283
if worker_count is not None:
284284
worker.count = worker_count
285285
for worker in candidate_schedule.workers:
286-
worker_count = new_candidate_workers.get(worker.name, None)
286+
worker_count = new_candidate_workers.get(worker.model_name, None)
287287
if worker_count is not None:
288288
worker.count = worker_count
289289
# candidate_schedule.start_time = my_schedule.start_time

sampo/schemas/schedule.py

Lines changed: 30 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,34 @@
11
from copy import deepcopy
22
from datetime import datetime
33
from functools import lru_cache
4-
from typing import Iterable, Union
4+
from operator import itemgetter
5+
from typing import Iterable, Union, Any
56

67
from pandas import DataFrame
78

89
from sampo.schemas.graph import WorkGraph, GraphNode
910
from sampo.schemas.scheduled_work import ScheduledWork
1011
from sampo.schemas.serializable import JSONSerializable, T
1112
from sampo.schemas.time import Time
13+
from sampo.utilities.collections_util import first
1214
from sampo.utilities.schedule import fix_split_tasks, offset_schedule
1315

1416
ResourceSchedule = dict[str, list[tuple[Time, Time]]]
1517
ScheduleWorkDict = dict[str, ScheduledWork]
1618

1719

20+
def _get_granular_name_columns(sworks: Iterable[ScheduledWork] | None = None):
21+
return list(sorted(first(sworks).model_name.keys()))
22+
23+
1824
# TODO: Rebase object onto ScheduleWorkDict and ordered ScheduledWork list
1925
class Schedule(JSONSerializable['Schedule']):
2026
"""
2127
Represents work schedule. Is a wrapper around DataFrame with specific structure.
2228
"""
2329

24-
_data_columns: list[str] = ['idx', 'task_id', 'task_name', 'task_name_mapped', 'contractor', 'cost',
25-
'volume', 'measurement', 'start',
30+
_data_columns: list[str] = ['idx', 'task_id', 'task_name', 'contractor', 'cost',
31+
'volume', 'start',
2632
'finish', 'duration', 'workers']
2733
_scheduled_work_column: str = 'scheduled_work_object'
2834

@@ -47,7 +53,7 @@ def pure_schedule_df(self) -> DataFrame:
4753
return self._schedule[~self._schedule.apply(
4854
lambda row: row[self._scheduled_work_column].is_service_unit,
4955
axis=1
50-
)][self._data_columns]
56+
)][self._data_columns + _get_granular_name_columns(self._schedule[self._scheduled_work_column])]
5157

5258
@property
5359
def works(self) -> Iterable[ScheduledWork]:
@@ -118,7 +124,7 @@ def unite_stages(self) -> 'Schedule':
118124
def f(row):
119125
swork: ScheduledWork = deepcopy(row[self._scheduled_work_column])
120126
row[self._scheduled_work_column] = swork
121-
swork.name = row['task_name_mapped']
127+
swork.model_name['granular_name'] = row['granular_name']
122128
swork.display_name = row['task_name']
123129
swork.volume = float(row['volume'])
124130
swork.start_end_time = Time(int(row['start'])), Time(int(row['finish']))
@@ -141,7 +147,7 @@ def from_scheduled_works(works: Iterable[ScheduledWork],
141147
"""
142148
ordered_task_ids = order_nodes_by_start_time(works, wg) if wg else None
143149

144-
def sed(time1, time2, swork) -> tuple:
150+
def sed(time1, time2) -> tuple:
145151
"""
146152
Sorts times and calculates difference.
147153
:param time1: time 1.
@@ -151,19 +157,24 @@ def sed(time1, time2, swork) -> tuple:
151157
start, end = tuple(sorted((time1, time2)))
152158
return start, end, end - start
153159

154-
data_frame = [(i, # idx
155-
w.id, # task_id
156-
w.display_name, # task_name
157-
w.name, # task_name_mapped
158-
w.contractor, # contractor info
159-
w.cost, # work cost
160-
w.volume, # work volume
161-
w.volume_type, # work volume type
162-
*sed(*(t.value for t in w.start_end_time), w), # start, end, duration
163-
repr(dict((i.name, i.count) for i in w.workers)), # workers
164-
w # full ScheduledWork info
160+
model_name_columns = _get_granular_name_columns(works)
161+
162+
def make_model_name_columns(swork: ScheduledWork) -> list[Any]:
163+
return list(map(itemgetter(1), sorted(swork.model_name.items(), key=itemgetter(0))))
164+
165+
data_frame = [(i, # idx
166+
w.id, # task_id
167+
w.display_name, # task_name
168+
w.contractor, # contractor info
169+
w.cost, # work cost
170+
w.volume, # work volume
171+
*sed(*(t.value for t in w.start_end_time)), # start, end, duration
172+
repr(dict((i.name, i.count) for i in w.workers)), # workers
173+
w, # full ScheduledWork info
174+
*make_model_name_columns(w), # model_name columns
165175
) for i, w in enumerate(works)]
166-
data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns)
176+
177+
data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns + model_name_columns)
167178

168179
data_frame = data_frame.set_index('idx', drop=False)
169180

@@ -173,7 +184,7 @@ def sed(time1, time2, swork) -> tuple:
173184
data_frame = data_frame.sort_values(['task_id'])
174185
data_frame.task_id = data_frame.task_id.astype(str)
175186

176-
data_frame = data_frame.reindex(columns=Schedule._columns)
187+
data_frame = data_frame.reindex(columns=Schedule._columns + model_name_columns)
177188
data_frame = data_frame.reset_index(drop=True)
178189

179190
return Schedule(data_frame)

sampo/schemas/scheduled_work.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,10 @@ def __init__(self,
3838
materials: MaterialDelivery | None = None,
3939
c_object: ConstructionObject | None = None):
4040
self.id = work_unit.id
41-
self.name = work_unit.name
41+
self.model_name = work_unit.model_name
4242
self.display_name = work_unit.display_name
4343
self.is_service_unit = work_unit.is_service_unit
4444
self.volume = work_unit.volume
45-
self.volume_type = work_unit.volume_type
4645
self.priority = work_unit.priority
4746
self.start_end_time = start_end_time
4847
self.workers = workers if workers is not None else []
@@ -123,7 +122,7 @@ def duration(self) -> Time:
123122
def to_dict(self) -> dict[str, Any]:
124123
return {
125124
'task_id': self.id,
126-
'task_name': self.name,
125+
'task_name': self.model_name,
127126
'start': self.start_time.value,
128127
'finish': self.finish_time.value,
129128
'contractor_id': self.contractor,

0 commit comments

Comments
 (0)