Commit 49b79da

Merge pull request #1 from CSS-Electronics/develop
Restructuring of script to enable increased modularity
2 parents 53fa6be + 1023db4

File tree: 5 files changed (+233 -173 lines)

.gitignore (+1 -2)

```diff
@@ -1,4 +1,3 @@
 __pycache__
-inputs_test.py
 *.whl
-temp-variables.py
+lambda_handler.py
```

README.md (+19 -11)

```diff
@@ -24,27 +24,35 @@ To use the script, install dependencies via the `requirements.txt`:
 ---
 ## Deployment
 
-### 1: Test script with sample data
-1. Clone the script folder incl. the J1939 sample data and demo DBC
+### 1: Test script with sample data
+1. Download this repository incl. the J1939 data and demo DBC
 2. In `inputs.py` add your InfluxDB details, then run `python main.py` via the command line
 
 *Note: If you use a free InfluxDB Cloud user, the sample data will be removed after a period (as it is >30 days old).*
 
-### 2: Modify script with your own details
-1. Local data: Add your own data next to the scripts as per the SD structure:
+### 2: Modify script with your own details
+1. Local disk: Add your own data next to the scripts as per the SD structure:
 `LOG/<device_ID>/<session>/<split>.MF4`
-2. S3 server: Add your S3 server details in `inputs.py` and set `use_s3 = True`
-3. In `inputs.py` update the DBC path and the device list to match yours
-4. In `last_run.txt`, specify from when you wish to load log files (e.g. `2020-01-13 00:00:00`)
+2. S3 server: Add your S3 server details in `inputs.py` and set `s3 = True`
+3. In `inputs.py` update the DBC path list and the device list to match yours
+4. In `last_run.txt`, specify when you wish to load log files from (e.g. `2020-01-13 00:00:00`)
 5. Optionally modify the signal filters or resampling frequency
 
 
 ### 3: Enable dynamic start time
-1. In `inputs.py` set `dynamic = True`
+1. In `inputs.py` set `dynamic = True`
 2. Follow the CANedge Intro guide for setting up e.g. Windows Task Scheduler
 
 ---
-## Deleting data from InfluxDB
-If you need to delete data in InfluxDB that you e.g. uploaded as part of a test, you can use the `delete_influx(name)` function from `utils.py`. Call it by passing the name of the 'measurement' to delete (i.e. the device serial number):
+## Other practical information
 
-``delete_influx("958D2219")``
+### Change verbosity
+By default, summary information is printed as part of the processing. You can pass `verbose=False` as an input argument to `list_log_files`, `SetupInflux` and `DataWriter` to avoid this.
+
+### Delete data from InfluxDB
+If you need to delete data in InfluxDB that you e.g. uploaded as part of a test, you can use the `delete_influx(name)` function from the `SetupInflux` class. Call it by passing the name of the 'measurement' to delete (i.e. the device serial number):
+
+``influx.delete_influx("958D2219")``
+
+### Multiple channels
+If your log files contain data from two CAN channels, you may need to adjust the script in case you have duplicate signal names across both channels, e.g. if you're extracting the signal `EngineSpeed` from both channels.
```
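As context for the README notes above, a minimal usage sketch combining the verbosity and deletion features. It assumes the call signatures shown in the new `main.py` below; the exact placement of the `verbose` keyword is an assumption based on the README text:

```python
# Usage sketch based on the README notes above; assumes the signatures shown
# in the new main.py, and that verbose is accepted as a keyword argument.
import inputs
from utils import setup_fs, list_log_files, SetupInflux

fs = setup_fs(inputs.s3, inputs.key, inputs.secret, inputs.endpoint)

# suppress the default summary printing while listing log files
log_files = list_log_files(fs, inputs.devices, inputs.dynamic, verbose=False)

influx = SetupInflux(influx_url=inputs.influx_url, token=inputs.token,
                     org_id=inputs.org_id, influx_bucket=inputs.influx_bucket)

# delete a test 'measurement' (the device serial number) from InfluxDB
influx.delete_influx("958D2219")
```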

dashboard-writer/inputs.py (+12 -15)

```diff
@@ -1,5 +1,3 @@
-from datetime import datetime, timezone
-
 # -----------------------------------------------
 # specify your InfluxDB details
 influx_bucket = "influx_bucket_name"
@@ -9,28 +7,27 @@
 
 # -----------------------------------------------
 # specify devices to process (from a local folder or S3 bucket)
-# If local, ensure files are organized as on SD (folder/device_id/session/split.MF4)
-devices = ["LOG/958D2219"] # local: ["folder/device_id"] | S3: ["bucket/device_id"]
+# If local, ensure logs are organized correctly: folder/device_id/session/split.MF4
+# Syntax: Local: ["folder/device_id"] | S3: ["bucket/device_id"]
+devices = ["LOG/958D2219"]
+
 
 # -----------------------------------------------
-# specify DBC path and the list of signals to process ([]: process all signals)
-dbc_path = "CSS-Electronics-SAE-J1939-DEMO.dbc"
+# specify DBC paths and a list of signals to process ([]: include all signals)
+dbc_paths = ["CSS-Electronics-SAE-J1939-DEMO.dbc"]
 signals = []
 
-# optionally modify resampling frequency ("": disable resampling)
+# specify resampling frequency ("": no resampling)
 res = "1S"
 
 # -----------------------------------------------
-# specify your S3 details (if relevant)
-use_s3 = False
+# specify whether to load data from S3 (and add server details if relevant)
+s3 = True
 key = "s3_key"
 secret = "s3_secret"
 endpoint = "s3_endpoint"
-# cert = "path/to/cert.crt" # if MinIO + TLS, add path to cert and update utils.py/setup_fs_s3 to verify
-
+# cert = "path/to/cert.crt" # if MinIO + TLS, add path to cert and update utils.py/setup_fs to verify
 
+# -----------------------------------------------
 # toggle whether to update the last execution datetime on each script execution
-use_dynamic = False
-
-# set stop date to a point in the future to load all log files after start date
-stop = datetime(year=2099, month=1, day=1, tzinfo=timezone.utc)
+dynamic = False
```
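The restructured `utils.py` (where the bulk of the added lines presumably live) is not included in this extract. As a hint of how the simplified `inputs.py` is consumed, here is a hypothetical sketch of the unified `setup_fs` referenced in the new `main.py` and in the `cert` comment above, assuming fsspec-style filesystems via `s3fs` and `canedge_browser`; the actual implementation may differ:

```python
# Hypothetical sketch of utils.py/setup_fs (utils.py is not shown in this
# extract). Assumes fsspec-style filesystems via s3fs and canedge_browser;
# the actual implementation may differ.
import canedge_browser
import s3fs

def setup_fs(s3, key="", secret="", endpoint=""):
    """Return an S3 or local filesystem, per the s3 flag in inputs.py."""
    if s3:
        fs = s3fs.S3FileSystem(
            key=key,
            secret=secret,
            client_kwargs={"endpoint_url": endpoint},
        )
    else:
        # local disk, with logs organized as folder/device_id/session/split.MF4
        fs = canedge_browser.LocalFileSystem(base_path=".")
    return fs
```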

dashboard-writer/main.py (+10 -61)

```diff
@@ -1,65 +1,14 @@
-import mdf_iter, canedge_browser, can_decoder
-from utils import setup_fs, setup_fs_s3, write_influx, load_last_run, set_last_run, print_summary
+from utils import setup_fs, load_dbc_files, list_log_files, SetupInflux, DataWriter
 import inputs
-from pathlib import Path
 
+# setup filesystem (local or S3), load DBC files and list log files for processing
+fs = setup_fs(inputs.s3, inputs.key, inputs.secret, inputs.endpoint)
+db_list = load_dbc_files(inputs.dbc_paths)
+log_files = list_log_files(fs, inputs.devices, inputs.dynamic)
 
-# function for loading raw CAN data from S3, DBC converting it and writing it to InfluxDB
-def process_data_and_write():
+# initialize connection to InfluxDB
+influx = SetupInflux(influx_url=inputs.influx_url, token=inputs.token, org_id=inputs.org_id, influx_bucket=inputs.influx_bucket)
 
-    # initialize DBC converter, file loader and start date
-    db = can_decoder.load_dbc(Path(__file__).parent / inputs.dbc_path)
-    df_decoder = can_decoder.DataFrameDecoder(db)
-
-    if inputs.use_s3:
-        fs = setup_fs_s3()
-    else:
-        fs = setup_fs()
-
-    start = load_last_run()
-
-    if inputs.use_dynamic:
-        set_last_run()
-
-    log_files = canedge_browser.get_log_files(fs, inputs.devices, start_date=start, stop_date=inputs.stop)
-    print(f"Found {len(log_files)} log files")
-
-    for log_file in log_files:
-        # open log file, get device_id and extract dataframe with raw CAN data
-        with fs.open(log_file, "rb") as handle:
-            mdf_file = mdf_iter.MdfFile(handle)
-            device_id = mdf_file.get_metadata()["HDComment.Device Information.serial number"]["value_raw"]
-            df_raw = mdf_file.get_data_frame()
-
-        # DBC convert the raw CAN dataframe
-        df_phys = df_decoder.decode_frame(df_raw)
-        if df_phys.empty:
-            continue
-
-        print_summary(device_id, log_file, df_phys)
-
-        # group the data to enable a signal-by-signal loop
-        df_phys_grouped = df_phys.groupby("Signal")["Physical Value"]
-
-        # for each signal in your list, resample the data and write to InfluxDB
-        for signal, group in df_phys_grouped:
-            if signal in inputs.signals or len(inputs.signals) == 0:
-                df_signal = group.to_frame().rename(columns={"Physical Value": signal})
-                print(f"Signal: {signal} (mean: {round(df_signal[signal].mean(),2)})")
-
-                if inputs.res != "":
-                    cnt = len(df_signal)
-                    df_signal = df_signal.resample(inputs.res).pad().dropna()
-                    print(f"- Resampling to {inputs.res} ({cnt} --> {len(df_signal)} records)")
-
-                # print(df_signal)
-                write_influx(device_id, df_signal)
-
-    return
-
-
-# execute the script
-if __name__ == "__main__":
-
-    process_data_and_write()
-    pass
+# process the log files and write extracted signals to InfluxDB
+writer = DataWriter(fs=fs, db_list=db_list, signals=inputs.signals, res=inputs.res, db_func=influx.write_influx)
+writer.decode_log_files(log_files)
```
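The decoding loop deleted from `main.py` moves into the `DataWriter` class in `utils.py`, which this extract does not show. Below is a hypothetical skeleton of how that logic could map onto the new interface; only the constructor arguments and the `decode_log_files()` call are confirmed by the diff, and the method body simply re-uses the loop removed above:

```python
# Hypothetical skeleton of the DataWriter class used in the new main.py.
# Only the constructor arguments and decode_log_files() are confirmed by this
# diff; the method body below re-uses the loop removed from the old main.py.
import mdf_iter
import can_decoder

class DataWriter:
    def __init__(self, fs, db_list, signals, res, db_func, verbose=True):
        self.fs = fs                # local or S3 filesystem from setup_fs
        self.db_list = db_list      # DBC databases from load_dbc_files
        self.signals = signals      # [] means include all signals
        self.res = res              # resampling frequency, "" disables it
        self.db_func = db_func      # writer callback, e.g. influx.write_influx
        self.verbose = verbose

    def decode_log_files(self, log_files):
        for db in self.db_list:
            df_decoder = can_decoder.DataFrameDecoder(db)

            for log_file in log_files:
                # open the log file and extract device ID and raw CAN data
                with self.fs.open(log_file, "rb") as handle:
                    mdf_file = mdf_iter.MdfFile(handle)
                    device_id = mdf_file.get_metadata()["HDComment.Device Information.serial number"]["value_raw"]
                    df_raw = mdf_file.get_data_frame()

                # DBC convert the raw CAN dataframe; skip files with no matches
                df_phys = df_decoder.decode_frame(df_raw)
                if df_phys.empty:
                    continue

                # write each listed signal (optionally resampled) via db_func
                for signal, group in df_phys.groupby("Signal")["Physical Value"]:
                    if signal in self.signals or len(self.signals) == 0:
                        df_signal = group.to_frame().rename(columns={"Physical Value": signal})

                        if self.res != "":
                            df_signal = df_signal.resample(self.res).pad().dropna()

                        self.db_func(device_id, df_signal)
```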
