Merge v24.1.16 into main (#1053)
* Update VERSION to 24.1.13

* Fixes for parameter file value lists (#1025)

* rm conversion to str on value list

* add check to ensure list is not empty

* Add method to check and skip duplicate content uploads to S3 - for 24.1-release (#1037)

* get Kim's code on 24.1-release branch

* fix flake8

* fix flake8

* remove pyblake2 from pip requirements.txt file as LORIS-MRI uses hashlib now (#1038)

* update version to 24.1.14 for bug fix release (#1039)

* Add a script to correct incorrectly saved list in parameter file (#1034)

* query parameter_file for the lists incorrectly saved

* finalize the update query

* finalize script help section

* [EEG BIDS] Support assembly_bids source for datasets (LORIS 24.1) (#1046)

* [EEG BIDS] Support assembly_bids source for datasets

* flake8

* Add executable permission

* QA comment

* Add support for config prePackagedDownloadPath and EEGChunksPath

* Update the bids_import script logic to explicitly add the --no-copy option

* Update set file path override and chunks logic (#1050)

* Update set file path override and chunks logic

* flake8

* Update set file path override and chunks logic - bugfix (#1051)

* fix bad resolution

* fix bad resolution

---------

Co-authored-by: breen0074 <[email protected]>
Co-authored-by: Laetitia Fesselier <[email protected]>
3 people authored Mar 18, 2024
1 parent 9fb842c commit a021200
Showing 4 changed files with 58 additions and 45 deletions.
20 changes: 19 additions & 1 deletion python/extract_eeg_bids_archive.py
@@ -11,6 +11,7 @@
 from lib.database_lib.config import Config
 from lib.exitcode import SUCCESS, BAD_CONFIG_SETTING
 from lib.log import Log
+import lib.utilities as utilities

 __license__ = "GPLv3"

@@ -191,8 +192,25 @@ def main():
     if not error:
         for modality in modalities:
             tmp_eeg_modality_path = os.path.join(tmp_eeg_session_path, modality)
-            s3_data_dir = config_db_obj.get_config("EEGS3DataPath")

+            # if the EEG file was a set file, then update the filename for the .set
+            # and .fdt files in the .set file so it can find the proper file for
+            # visualization and analyses
+            set_files = [
+                os.path.join(tmp_eeg_modality_path, file)
+                for file in os.listdir(tmp_eeg_modality_path)
+                if os.path.splitext(file)[1] == '.set'
+            ]
+            for set_full_path in set_files:
+                with_fdt_file = os.path.isfile(set_full_path.replace(".set", ".fdt"))
+
+                file_paths_updated = utilities.update_set_file_path_info(set_full_path, with_fdt_file)
+                if not file_paths_updated:
+                    message = "WARNING: cannot update the set file " \
+                              + os.path.basename(set_full_path) + " path info"
+                    print(message)
+
+            s3_data_dir = config_db_obj.get_config("EEGS3DataPath")
             if s3_obj and s3_data_dir and s3_data_dir.startswith('s3://'):
                 s3_data_eeg_modality_path = os.path.join(s3_data_dir, eeg_session_rel_path, modality)

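For context, the discovery logic added above pairs every extracted EEGLAB .set file with an optional companion .fdt data file of the same basename. A minimal standalone sketch of that pairing check (the directory is a stand-in; the real script walks its temporary extraction folder):

import os

tmp_eeg_modality_path = '.'  # stand-in for the temporary extraction directory

# collect every EEGLAB .set file in the modality directory
set_files = [
    os.path.join(tmp_eeg_modality_path, f)
    for f in os.listdir(tmp_eeg_modality_path)
    if os.path.splitext(f)[1] == '.set'
]

for set_full_path in set_files:
    # EEGLAB keeps samples either inside the .set file itself or in a
    # sibling .fdt file with the same basename; detect which layout applies
    with_fdt_file = os.path.isfile(set_full_path.replace('.set', '.fdt'))
    print(set_full_path, 'has companion .fdt:', with_fdt_file)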
29 changes: 15 additions & 14 deletions python/lib/eeg.py
@@ -528,19 +528,20 @@ def fetch_and_insert_eeg_files(self, derivatives=False, detect=True):
                     eeg_file_info, eeg_file_data
                 )

-                # if the EEG file was a set file, then update the filename for the .set
-                # and .fdt files in the .set file so it can find the proper file for
-                # visualization and analyses
-                file_paths_updated = file_type != 'set'
-                if not file_paths_updated:
-                    set_full_path = os.path.join(self.data_dir, eeg_path)
-                    fdt_full_path = eeg_file_data['fdt_file'] if 'fdt_file' in eeg_file_data.keys() else None
-
-                    if fdt_full_path:
-                        fdt_full_path = os.path.join(self.data_dir, eeg_file_data['fdt_file'])
-                    file_paths_updated = utilities.update_set_file_path_info(set_full_path, fdt_full_path)
-
-                if file_paths_updated:
+                if self.loris_bids_root_dir:
+                    # If we copy the file in assembly_bids and
+                    # if the EEG file was a set file, then update the filename for the .set
+                    # and .fdt files in the .set file so it can find the proper file for
+                    # visualization and analyses
+                    if file_type == 'set':
+                        set_full_path = os.path.join(self.data_dir, eeg_path)
+                        with_fdt_file = 'fdt_file' in eeg_file_data.keys()
+
+                        file_paths_updated = utilities.update_set_file_path_info(set_full_path, with_fdt_file)
+                        if not file_paths_updated:
+                            message = "WARNING: cannot update the set file " + eeg_path + " path info"
+                            print(message)
+
                     inserted_eegs.append({
                         'file_id': physio_file_id,
                         'file_path': eeg_path,
@@ -1145,7 +1146,7 @@ def create_and_insert_event_archive(self, files_to_archive, archive_rel_name, eeg_file_id):
         # insert the archive into the physiological_annotation_archive table
         blake2 = utilities.compute_blake2b_hash(archive_full_path)
         physiological_event_archive_obj.insert(eeg_file_id, blake2, archive_rel_name)
-
+
     def create_archive(self, files_to_archive, archive_rel_name):
         # create the archive file
         package_path = self.config_db_obj.get_config("prePackagedDownloadPath")
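If the first hunk above is read as old block then new block, the behavioural change is that a failed .set path update no longer prevents the file from being appended to inserted_eegs; it now only prints a warning. A toy sketch of the new control flow (update_paths is a hypothetical stand-in for utilities.update_set_file_path_info, not the LORIS-MRI API):

def update_paths(set_path, with_fdt):
    # hypothetical stand-in that pretends the .set update failed
    return False

inserted_eegs = []
eeg_path, file_type = 'sub-001_task-rest_eeg.set', 'set'  # illustrative values

if file_type == 'set':
    if not update_paths(eeg_path, True):
        print('WARNING: cannot update the set file ' + eeg_path + ' path info')

# the append now happens whether or not the update succeeded
inserted_eegs.append({'file_path': eeg_path})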
10 changes: 5 additions & 5 deletions python/lib/utilities.py
@@ -162,16 +162,16 @@ def create_archive(files_to_archive, archive_path):
     tar.close()


-def update_set_file_path_info(set_file, fdt_file):
+def update_set_file_path_info(set_file, with_fdt_file):
     """
     Updates the path info of the set file with the correct filenames for .set and
     .fdt files (for cases that had to be relabelled to include a Visit Label at
     the time of import).
     :param set_file: complete path of the .set file
     :type set_file: str
-    :param fdt_file: complete path of the .fdt file
-    :type fdt_file: str
+    :param with_fdt_file: confirms presence of a matching .fdt file
+    :type with_fdt_file: bool
     """

     # grep the basename without the extension of set_file
@@ -190,7 +190,7 @@ def update_set_file_path_info(set_file, fdt_file):
     dataset['setname'] = numpy.array(basename)
     if 'EEG' in dataset.keys():
         dataset['EEG'][0][0][1] = set_file_name
-    if fdt_file and 'EEG' in dataset.keys():
+    if with_fdt_file and 'EEG' in dataset.keys():
         dataset['EEG'][0][0][15] = fdt_file_name
         dataset['EEG'][0][0][40] = fdt_file_name

@@ -206,7 +206,7 @@ def update_set_file_path_info(set_file, fdt_file):
               .format(set_file_name))
         return False

-    if fdt_file:
+    if with_fdt_file:
         if 'datfile' not in dataset.keys() or \
                 dataset['datfile'] != fdt_file_name:
             print('Expected `datfile` field: {}'
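With the renamed flag, the helper no longer receives the .fdt path; it derives fdt_file_name from the .set basename and only needs to know whether a companion file exists. A hedged, self-contained sketch of the kind of .set surgery the helper performs (scipy.io is inferred from the loadmat-style indexing above; the filename and fields are illustrative, not LORIS-MRI's actual reader):

import os

import numpy
import scipy.io

set_file = 'sub-001_ses-V1_eeg.set'  # hypothetical relabelled file name

# build a toy .set file so the sketch is self-contained; a real EEGLAB file
# also carries the full 'EEG' struct whose entries 1, 15 and 40 hold filenames
scipy.io.savemat(set_file, {'setname': 'old_name', 'datfile': 'old_name.fdt'})

basename = os.path.splitext(os.path.basename(set_file))[0]

# drop the '__header__'/'__version__'/'__globals__' bookkeeping keys
dataset = {k: v for k, v in scipy.io.loadmat(set_file).items()
           if not k.startswith('__')}
dataset['setname'] = numpy.array(basename)           # realign with the on-disk name
dataset['datfile'] = numpy.array(basename + '.fdt')  # only when an .fdt exists
scipy.io.savemat(set_file, dataset)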
44 changes: 19 additions & 25 deletions python/react-series-data-viewer/chunking.py
@@ -87,35 +87,22 @@ def write_index_json(
         chunk_dir,
         time_interval,
         series_range,
-        from_channel_index,
-        channel_count,
-        channel_names,
-        channel_ranges,
+        channel_metadata,
         chunk_size,
-        downsamplings,
-        channel_chunks_list,
         valid_samples_in_last_chunk,
+        downsamplings,
+        shapes,
         trace_types={}
 ):
     json_dict = OrderedDict([
         ('timeInterval', list(time_interval)),
         ('seriesRange', series_range),
         ('chunkSize', chunk_size),
-        ('downsamplings', list(downsamplings)),
-        ('shapes', [
-            list(downsampled.shape)
-            for downsampled in channel_chunks_list
-        ]),
         ('validSamples', valid_samples_in_last_chunk),
+        ('downsamplings', downsamplings),
+        ('shapes', shapes),
         ('traceTypes', trace_types),
-        ('channelMetadata', [
-            {
-                'name': channel_names[i],
-                'seriesRange': channel_ranges[i],
-                'index': from_channel_index + i
-            }
-            for i in range(len(channel_ranges))
-        ])
+        ('channelMetadata', channel_metadata)
     ])
     create_path_dirs(chunk_dir)

@@ -229,17 +216,24 @@ def write_chunk_directory(path, chunk_size, loader, from_channel_index=0, from_c

     if downsamplings is not None:
         channel_chunks_list = channel_chunks_list[:downsamplings]

+    channel_metadata = [
+        {
+            'name': channel_names[i],
+            'seriesRange': channel_ranges[i],
+            'index': from_channel_index + i
+        }
+        for i in range(len(channel_ranges))
+    ]
+
     write_index_json(
         chunk_dir,
         time_interval,
         signal_range,
-        from_channel_index,
-        channel_count,
-        channel_names,
-        channel_ranges,
+        channel_metadata,
         chunk_size,
-        range(len(channel_chunks_list)),
-        channel_chunks_list,
-        valid_samples_in_last_chunk
+        valid_samples_in_last_chunk,
+        list(range(len(channel_chunks_list))),
+        [list(downsampled.shape) for downsampled in channel_chunks_list]
     )
     write_chunks(chunk_dir, channel_chunks_list, from_channel_index)

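The net effect of the refactor: write_index_json no longer computes anything from the raw numpy chunks; the caller hands it ready-to-serialize channel_metadata, downsamplings and shapes, and the range is wrapped in list() because a bare range object is not JSON-serializable. A small sketch of the values the new call assembles (channel names, ranges and chunk arrays are illustrative):

import json

import numpy

channel_names = ['Fp1', 'Fp2']
channel_ranges = [[-40.0, 40.0], [-35.0, 38.0]]
from_channel_index = 0
channel_chunks_list = [numpy.zeros((2, 4, 10)), numpy.zeros((2, 2, 10))]

channel_metadata = [
    {
        'name': channel_names[i],
        'seriesRange': channel_ranges[i],
        'index': from_channel_index + i
    }
    for i in range(len(channel_ranges))
]
downsamplings = list(range(len(channel_chunks_list)))  # list(): a range is not JSON-serializable
shapes = [list(downsampled.shape) for downsampled in channel_chunks_list]

print(json.dumps({'downsamplings': downsamplings, 'shapes': shapes,
                  'channelMetadata': channel_metadata}))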