
Commit 9d54630

Backwards compatibility for sync stream in OpenEphysBinary (#3839)
1 parent 12b3470 commit 9d54630
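
In short: load_sync_channel=True is deprecated in favor of selecting the sync stream explicitly via stream_name or stream_id, and when a recording exposes exactly one neural stream plus one SYNC stream the neural stream is now picked automatically. A minimal usage sketch (the folder path and the sync stream name below are hypothetical placeholders, not values taken from this commit):

    from spikeinterface.extractors import OpenEphysBinaryRecordingExtractor

    folder = "path/to/openephys_recording"  # hypothetical folder path

    # deprecated: still works but emits a DeprecationWarning (removal planned for 0.104)
    # rec = OpenEphysBinaryRecordingExtractor(folder, load_sync_channel=True)

    # new: request the sync stream explicitly; the stream name is recording-specific (placeholder here)
    sync_rec = OpenEphysBinaryRecordingExtractor(folder, stream_name="Record Node 101#NI-DAQmx-103.SYNC")

    # with exactly two streams (neural + SYNC) and no stream specified,
    # the neural stream is selected automatically
    rec = OpenEphysBinaryRecordingExtractor(folder)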

File tree

1 file changed: 128 additions, 93 deletions

src/spikeinterface/extractors/neoextractors/openephys.py

Lines changed: 128 additions & 93 deletions
@@ -139,21 +139,47 @@ class OpenEphysBinaryRecordingExtractor(NeoBaseRecordingExtractor):
     all_annotations : bool, default: False
         Load exhaustively all annotation from neo

+    Notes
+    -----
+    If no stream is explicitly specified and there are exactly two streams (neural data and
+    synchronization data), the neural data stream will be automatically selected.
     """

     NeoRawIOClass = "OpenEphysBinaryRawIO"

     def __init__(
         self,
-        folder_path,
-        load_sync_channel=False,
-        load_sync_timestamps=False,
-        experiment_names=None,
-        stream_id=None,
-        stream_name=None,
-        block_index=None,
-        all_annotations=False,
+        folder_path: str | Path,
+        load_sync_channel: bool = False,
+        load_sync_timestamps: bool = False,
+        experiment_names: str | list | None = None,
+        stream_id: str = None,
+        stream_name: str = None,
+        block_index: int = None,
+        all_annotations: bool = False,
     ):
+
+        if load_sync_channel:
+            import warnings
+
+            warning_message = (
+                "OpenEphysBinaryRecordingExtractor: load_sync_channel is deprecated and will"
+                "be removed in version 0.104, use the stream_name or stream_id to load the sync stream if needed"
+            )
+            warnings.warn(warning_message, DeprecationWarning, stacklevel=2)
+
+        stream_is_not_specified = stream_name is None and stream_id is None
+        if stream_is_not_specified:
+            available_stream_names, _ = self.get_streams(folder_path, load_sync_channel, experiment_names)
+
+            # Auto-select neural data stream when there are exactly two streams (neural + sync)
+            # and no stream was explicitly specified
+            if len(available_stream_names) == 2:
+                has_sync_stream = any("SYNC" in stream for stream in available_stream_names)
+                if has_sync_stream:
+                    neural_stream_name = next(stream for stream in available_stream_names if "SYNC" not in stream)
+                    stream_name = neural_stream_name
+
         neo_kwargs = self.map_to_neo_kwargs(folder_path, load_sync_channel, experiment_names)
         NeoBaseRecordingExtractor.__init__(
             self,
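
The deprecation added in this hunk only warns for now. A small sketch of how the warning surfaces, assuming a valid Open Ephys binary folder (the path is a placeholder):

    import warnings

    from spikeinterface.extractors import OpenEphysBinaryRecordingExtractor

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # still accepted, but flagged for removal in version 0.104
        rec = OpenEphysBinaryRecordingExtractor("path/to/openephys_recording", load_sync_channel=True)

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)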
@@ -163,95 +189,104 @@ def __init__(
             all_annotations=all_annotations,
             **neo_kwargs,
         )
-        # get streams to find correct probe
-        stream_names, stream_ids = self.get_streams(folder_path, load_sync_channel, experiment_names)
-        if stream_name is None and stream_id is None:
-            stream_name = stream_names[0]
-        elif stream_name is None:
-            stream_name = stream_names[stream_ids.index(stream_id)]
-
-        # find settings file
-        if "#" in stream_name:
-            record_node, oe_stream = stream_name.split("#")
-        else:
-            record_node = ""
-            oe_stream = stream_name
-        exp_ids = sorted(list(self.neo_reader.folder_structure[record_node]["experiments"].keys()))
-        if block_index is None:
-            exp_id = exp_ids[0]
-        else:
-            exp_id = exp_ids[block_index]
-        rec_ids = sorted(
-            list(self.neo_reader.folder_structure[record_node]["experiments"][exp_id]["recordings"].keys())
-        )
-
-        # do not load probe for NIDQ stream or if load_sync_channel is True
-        if "NI-DAQmx" not in stream_name and not load_sync_channel:
-            settings_file = self.neo_reader.folder_structure[record_node]["experiments"][exp_id]["settings_file"]

-            if Path(settings_file).is_file():
-                probe = probeinterface.read_openephys(
-                    settings_file=settings_file, stream_name=stream_name, raise_error=False
-                )
+        stream_is_sync = "SYNC" in self.stream_name
+        if not stream_is_sync:
+            # get streams to find correct probe
+            stream_names, stream_ids = self.get_streams(folder_path, load_sync_channel, experiment_names)
+            if stream_name is None and stream_id is None:
+                stream_name = stream_names[0]
+            elif stream_name is None:
+                stream_name = stream_names[stream_ids.index(stream_id)]
+
+            # find settings file
+            if "#" in stream_name:
+                record_node, oe_stream = stream_name.split("#")
+            else:
+                record_node = ""
+                oe_stream = stream_name
+            exp_ids = sorted(list(self.neo_reader.folder_structure[record_node]["experiments"].keys()))
+            if block_index is None:
+                exp_id = exp_ids[0]
             else:
-                probe = None
+                exp_id = exp_ids[block_index]
+            rec_ids = sorted(
+                list(self.neo_reader.folder_structure[record_node]["experiments"][exp_id]["recordings"].keys())
+            )

-            if probe is not None:
-                if probe.shank_ids is not None:
-                    self.set_probe(probe, in_place=True, group_mode="by_shank")
-                else:
-                    self.set_probe(probe, in_place=True)
-
-                # this handles a breaking change in probeinterface after v0.2.18
-                # in the new version, the Neuropixels model name is stored in the "model_name" annotation,
-                # rather than in the "probe_name" annotation
-                model_name = probe.annotations.get("model_name", None)
-                if model_name is None:
-                    model_name = probe.annotations["probe_name"]
-
-                # load num_channels_per_adc depending on probe type
-                if "2.0" in model_name:
-                    num_channels_per_adc = 16
-                    num_cycles_in_adc = 16
-                    total_channels = 384
-                else:  # NP1.0
-                    num_channels_per_adc = 12
-                    num_cycles_in_adc = 13 if "AP" in stream_name else 12
-                    total_channels = 384
-
-                # sample_shifts is generated from total channels (384) channels
-                # when only some channels are saved we need to slice this vector (like we do for the probe)
-                sample_shifts = get_neuropixels_sample_shifts(total_channels, num_channels_per_adc, num_cycles_in_adc)
-                if self.get_num_channels() != total_channels:
-                    # need slice because not all channel are saved
-                    chans = probeinterface.get_saved_channel_indices_from_openephys_settings(settings_file, oe_stream)
-                    # lets clip to 384 because this contains also the synchro channel
-                    chans = chans[chans < total_channels]
-                    sample_shifts = sample_shifts[chans]
-                self.set_property("inter_sample_shift", sample_shifts)
-
-        # load synchronized timestamps and set_times to recording
-        recording_folder = Path(folder_path) / record_node
-        stream_folders = []
-        for segment_index, rec_id in enumerate(rec_ids):
-            stream_folder = recording_folder / f"experiment{exp_id}" / f"recording{rec_id}" / "continuous" / oe_stream
-            stream_folders.append(stream_folder)
-            if load_sync_timestamps:
-                if (stream_folder / "sample_numbers.npy").is_file():
-                    # OE version>=v0.6
-                    sync_times = np.load(stream_folder / "timestamps.npy")
-                elif (stream_folder / "synchronized_timestamps.npy").is_file():
-                    # version<v0.6
-                    sync_times = np.load(stream_folder / "synchronized_timestamps.npy")
+            # do not load probe for NIDQ stream or if load_sync_channel is True
+            if "NI-DAQmx" not in stream_name and not load_sync_channel:
+                settings_file = self.neo_reader.folder_structure[record_node]["experiments"][exp_id]["settings_file"]
+
+                if Path(settings_file).is_file():
+                    probe = probeinterface.read_openephys(
+                        settings_file=settings_file, stream_name=stream_name, raise_error=False
+                    )
                 else:
-                    sync_times = None
-                try:
-                    self.set_times(times=sync_times, segment_index=segment_index, with_warning=False)
-                except:
-                    warnings.warn(f"Could not load synchronized timestamps for {stream_name}")
-
-        self.annotate(experiment_name=f"experiment{exp_id}")
-        self._stream_folders = stream_folders
+                    probe = None
+
+                if probe is not None:
+                    if probe.shank_ids is not None:
+                        self.set_probe(probe, in_place=True, group_mode="by_shank")
+                    else:
+                        self.set_probe(probe, in_place=True)
+
+                    # this handles a breaking change in probeinterface after v0.2.18
+                    # in the new version, the Neuropixels model name is stored in the "model_name" annotation,
+                    # rather than in the "probe_name" annotation
+                    model_name = probe.annotations.get("model_name", None)
+                    if model_name is None:
+                        model_name = probe.annotations["probe_name"]
+
+                    # load num_channels_per_adc depending on probe type
+                    if "2.0" in model_name:
+                        num_channels_per_adc = 16
+                        num_cycles_in_adc = 16
+                        total_channels = 384
+                    else:  # NP1.0
+                        num_channels_per_adc = 12
+                        num_cycles_in_adc = 13 if "AP" in stream_name else 12
+                        total_channels = 384
+
+                    # sample_shifts is generated from total channels (384) channels
+                    # when only some channels are saved we need to slice this vector (like we do for the probe)
+                    sample_shifts = get_neuropixels_sample_shifts(
+                        total_channels, num_channels_per_adc, num_cycles_in_adc
+                    )
+                    if self.get_num_channels() != total_channels:
+                        # need slice because not all channel are saved
+                        chans = probeinterface.get_saved_channel_indices_from_openephys_settings(
+                            settings_file, oe_stream
+                        )
+                        # lets clip to 384 because this contains also the synchro channel
+                        chans = chans[chans < total_channels]
+                        sample_shifts = sample_shifts[chans]
+                    self.set_property("inter_sample_shift", sample_shifts)
+
+            # load synchronized timestamps and set_times to recording
+            recording_folder = Path(folder_path) / record_node
+            stream_folders = []
+            for segment_index, rec_id in enumerate(rec_ids):
+                stream_folder = (
+                    recording_folder / f"experiment{exp_id}" / f"recording{rec_id}" / "continuous" / oe_stream
+                )
+                stream_folders.append(stream_folder)
+                if load_sync_timestamps:
+                    if (stream_folder / "sample_numbers.npy").is_file():
+                        # OE version>=v0.6
+                        sync_times = np.load(stream_folder / "timestamps.npy")
+                    elif (stream_folder / "synchronized_timestamps.npy").is_file():
+                        # version<v0.6
+                        sync_times = np.load(stream_folder / "synchronized_timestamps.npy")
+                    else:
+                        sync_times = None
+                    try:
+                        self.set_times(times=sync_times, segment_index=segment_index, with_warning=False)
+                    except:
+                        warnings.warn(f"Could not load synchronized timestamps for {stream_name}")
+
+            self.annotate(experiment_name=f"experiment{exp_id}")
+            self._stream_folders = stream_folders

         self._kwargs.update(
             dict(