record.py
import datetime
import multiprocessing.dummy
import posixpath
import os
import re
import numpy as np
import pandas as pd
from wfdb.io import _header
from wfdb.io import _signal
from wfdb.io import _url
from wfdb.io import download
from wfdb.io import header
from wfdb.io import util
# -------------- WFDB Signal Calibration and Classification ---------- #
# Unit scales used for default display scales. Each key is a measurement
# class and its value lists the unit strings that class may use. 'No Unit'
# will also be allowed in all cases.
# * Will it always be 1?
unit_scale = {
"voltage": ["pV", "nV", "uV", "mV", "V", "kV"],
"temperature": ["C", "F"],
"pressure": ["mmHg"],
"no_unit": ["NU"],
"percentage": ["%"],
"heart_rate": ["bpm"],
}
"""
Signal classes that WFDB signals should fall under. The indexes are the
abbreviated class names.
Notes
-----
This will be used to automatically classify signals in classes based
on their names.
"""
SIGNAL_CLASSES = pd.DataFrame(
index=[
"bp",
"co2",
"co",
"ecg",
"eeg",
"emg",
"eog",
"hr",
"mmg",
"o2",
"pleth",
"resp",
"scg",
"stat",
"st",
"temp",
"unknown",
],
columns=["description", "unit_scale", "signal_names"],
data=[
["Blood Pressure", "pressure", ["bp", "abp", "pap", "cvp"]], # bp
["Carbon Dioxide", "percentage", ["co2", "pco2"]], # co2
["Carbon Monoxide", "percentage", ["co"]], # co
[
"Electrocardiogram",
"voltage",
["i", "ii", "iii", "iv", "v", "avr"],
], # ecg
["Electroencephalogram", "voltage", ["eeg"]], # eeg
["Electromyograph", "voltage", ["emg"]], # emg
["Electrooculograph", "voltage", ["eog"]], # eog
["Heart Rate", "heart_rate", ["hr"]], # hr
["Magnetomyograph", "voltage", ["mmg"]], # mmg
["Oxygen", "percentage", ["o2", "spo2"]], # o2
["Plethysmograph", "pressure", ["pleth"]], # pleth
["Respiration", "no_unit", ["resp"]], # resp
["Seismocardiogram", "no_unit", ["scg"]], # scg
["Status", "no_unit", ["stat", "status"]], # stat
["ST Segment", "", ["st"]], # st. This is not a signal?
["Temperature", "temperature", ["temp"]], # temp
["Unknown Class", "no_unit", []], # unknown. special class.
],
)
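# Illustrative only (not a function provided by this module): a signal name
# could be matched to one of the classes above by lowercasing it and checking
# the per-class "signal_names" lists, e.g.
#
#   name = "AVR".lower()
#   [c for c in SIGNAL_CLASSES.index
#    if name in SIGNAL_CLASSES.loc[c, "signal_names"]]   # -> ["ecg"]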
"""
All of the default units to be used if the unit value read
from a file is "N/A".
Note
----
All of the key values here are in all lowercase characters
to remove duplicates by different cases.
"""
SIG_UNITS = {
"a": "uV",
"abdomen": "uV",
"abdo": "V",
"abp": "mmHg",
"airflow": "V",
"ann": "units",
"art": "mmHg",
"atip": "mV",
"av": "mV",
"bp": "mmHg",
"c": "uV",
"c.o.": "lpm",
"co": "Lpm",
"cs": "mV",
"cvp": "mmHg",
"direct": "uV",
"ecg": "mV",
"edr": "units",
"eeg": "mV",
"emg": "mV",
"eog": "mV",
"event": "mV",
"f": "uV",
"fecg": "mV",
"fhr": "bpm",
"foobar": "mmHg",
"hr": "bpm",
"hva": "mV",
"i": "mV",
"ibp": "mmHg",
"mcl": "mV",
"nbp": "mmHg",
"o": "uV",
"p": "mmHg",
"pap": "mmHg",
"pawp": "mmHg",
"pcg": "mV",
"pleth": "mV",
"pr": "bpm",
"pulse": "bpm",
"record": "mV",
"resp": "l",
"sao2": "%",
"so2": "%",
"spo2": "%",
"sv": "ml",
"t": "uV",
"tblood": "degC",
"temp": "degC",
"thorax": "mV",
"thor": "V",
"v": "mV",
"uc": "nd",
"vtip": "mV",
}
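# Illustrative only: per the note above, the keys are all lowercase, so a
# signal name should be lowercased before being looked up, e.g.
#   SIG_UNITS.get("ECG".lower(), "NU")   # -> "mV"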
class BaseRecord(object):
"""
The base WFDB class extended by the Record and MultiRecord classes.
Attributes
----------
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : int, float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
comments : list, optional
A list of string comments to be written to the header file.
sig_name : list, optional
A list of strings giving the signal name of each signal channel.
"""
# The base WFDB class extended by the Record and MultiRecord classes.
def __init__(
self,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
comments=None,
sig_name=None,
):
self.record_name = record_name
self.n_sig = n_sig
self.fs = fs
self.counter_freq = counter_freq
self.base_counter = base_counter
self.sig_len = sig_len
if base_datetime is not None:
if base_time is not None:
raise TypeError(
"cannot specify both base_time and base_datetime"
)
if base_date is not None:
raise TypeError(
"cannot specify both base_date and base_datetime"
)
self.base_datetime = base_datetime
else:
self.base_time = base_time
self.base_date = base_date
self.comments = comments
self.sig_name = sig_name
@property
def base_datetime(self):
if self.base_date is None or self.base_time is None:
return None
else:
return datetime.datetime.combine(
date=self.base_date, time=self.base_time
)
@base_datetime.setter
def base_datetime(self, value):
if value is None:
self.base_date = None
self.base_time = None
elif isinstance(value, datetime.datetime) and value.tzinfo is None:
self.base_date = value.date()
self.base_time = value.time()
else:
raise TypeError(f"invalid base_datetime value: {value!r}")
def get_frame_number(self, time_value):
"""
Convert a time value to a frame number.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Note that this function may return a value that is less than zero
or greater than the actual length of the record.
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
frame_number : float
Frame number (possibly a fractional frame number).
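Examples
--------
A minimal sketch; the record here is built by hand rather than read from
disk, and its starting time is arbitrary:
>>> import datetime
>>> record = wfdb.Record(fs=250, base_datetime=datetime.datetime(2000, 1, 1))
>>> record.get_frame_number(2.0)
2.0
>>> record.get_frame_number(datetime.timedelta(seconds=2))
500.0
>>> record.get_frame_number(datetime.datetime(2000, 1, 1, 0, 0, 4))
1000.0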
"""
if hasattr(time_value, "__float__"):
return float(time_value)
if isinstance(time_value, datetime.datetime):
if not self.base_datetime:
raise ValueError(
"base_datetime is unknown; cannot convert absolute "
"date/time to a frame number"
)
time_value -= self.base_datetime
if isinstance(time_value, datetime.timedelta):
return time_value.total_seconds() * self.fs
raise TypeError(f"invalid time value: {time_value!r}")
def get_elapsed_time(self, time_value):
"""
Convert a time value to an elapsed time in seconds.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
elapsed_time : timedelta
Elapsed time from the start of the record.
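Examples
--------
A minimal sketch using a hand-built record:
>>> record = wfdb.Record(fs=360)
>>> record.get_elapsed_time(720)
datetime.timedelta(seconds=2)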
"""
time_value = self.get_frame_number(time_value)
return datetime.timedelta(seconds=time_value / self.fs)
def get_absolute_time(self, time_value):
"""
Convert a time value to an absolute date and time.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
absolute_time : datetime
Absolute date and time.
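Examples
--------
A minimal sketch; the starting date and time are arbitrary:
>>> import datetime
>>> record = wfdb.Record(fs=125, base_datetime=datetime.datetime(2020, 5, 1, 8, 30))
>>> record.get_absolute_time(250)
datetime.datetime(2020, 5, 1, 8, 30, 2)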
"""
time_value = self.get_elapsed_time(time_value)
if not self.base_datetime:
raise ValueError(
"base_datetime is unknown; cannot convert frame number "
"to an absolute date/time"
)
return time_value + self.base_datetime
def check_field(self, field, required_channels="all"):
"""
Check whether a single field is valid in its basic form. Does
not check compatibility with other fields.
Parameters
----------
field : str
The field name.
required_channels : list, optional
Used for signal specification fields. All channels are
checked for their integrity if present, but channels that do
not lie in this field may be None.
Returns
-------
N/A
Notes
-----
This function is called from wrheader to check fields before
writing. It is also supposed to be usable at any point to
check a specific field.
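Examples
--------
A minimal sketch checking individual fields of a hand-built record:
>>> record = wfdb.Record(record_name='r1', fs=250)
>>> record.check_field('record_name')
>>> record.check_field('fs')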
"""
item = getattr(self, field)
if item is None:
raise Exception("Missing required field: %s" % field)
# We should have a list specifying these automatically.
# Whether the item should be a list. Watch out for required_channels for `segments`
expect_list = field in LIST_FIELDS
# Check the type of the field (and of its elements if it should
# be a list)
_check_item_type(
item,
field_name=field,
allowed_types=ALLOWED_TYPES[field],
expect_list=expect_list,
required_channels=required_channels,
)
# Individual specific field checks
if field in ["d_signal", "p_signal"]:
check_np_array(
item=item,
field_name=field,
ndim=2,
parent_class=np.integer if field == "d_signal" else np.floating,
)
elif field in ["e_d_signal", "e_p_signal"]:
for ch in range(len(item)):
check_np_array(
item=item[ch],
field_name=field,
ndim=1,
parent_class=np.integer if field == "e_d_signal" else np.floating,
channel_num=ch,
)
# Record specification fields
elif field == "record_name":
# Allow letters, digits, hyphens, and underscores.
accepted_string = re.match(r"[-\w]+", self.record_name)
if (
not accepted_string
or accepted_string.group() != self.record_name
):
raise ValueError(
"record_name must only comprise of letters, digits, hyphens, and underscores."
)
elif field == "n_seg":
if self.n_seg <= 0:
raise ValueError("n_seg must be a positive integer")
elif field == "n_sig":
if self.n_sig <= 0:
raise ValueError("n_sig must be a positive integer")
elif field == "fs":
if self.fs <= 0:
raise ValueError("fs must be a positive number")
elif field == "counter_freq":
if self.counter_freq <= 0:
raise ValueError("counter_freq must be a positive number")
elif field == "base_counter":
if self.base_counter <= 0:
raise ValueError("base_counter must be a positive number")
elif field == "sig_len":
if self.sig_len < 0:
raise ValueError("sig_len must be a non-negative integer")
# Signal specification fields
elif field in _header.SIGNAL_SPECS.index:
if required_channels == "all":
required_channels = range(len(item))
for ch in range(len(item)):
# If the element is allowed to be None
if ch not in required_channels:
if item[ch] is None:
continue
if field == "file_name":
# Check for file_name characters
accepted_string = re.match(r"[-\w]+\.?[\w]+", item[ch])
if (
not accepted_string
or accepted_string.group() != item[ch]
):
raise ValueError(
"File names should only contain alphanumerics, hyphens, and an extension. eg. record-100.dat"
)
# Check that dat files are grouped together
if not util.is_monotonic(self.file_name):
raise ValueError(
"Signals in a record that share a given file must be consecutive."
)
elif field == "fmt":
if item[ch] not in _signal.DAT_FMTS:
raise ValueError(
"File formats must be valid WFDB dat formats:",
_signal.DAT_FMTS,
)
elif field == "samps_per_frame":
if item[ch] < 1:
raise ValueError(
"samps_per_frame values must be positive integers"
)
elif field == "skew":
if item[ch] < 0:
raise ValueError(
"skew values must be non-negative integers"
)
elif field == "byte_offset":
if item[ch] < 0:
raise ValueError(
"byte_offset values must be non-negative integers"
)
elif field == "adc_gain":
if item[ch] <= 0:
raise ValueError("adc_gain values must be positive")
elif field == "baseline":
# Original WFDB library 10.5.24 only has 4 bytes for baseline.
if item[ch] < -2147483648 or item[ch] > 2147483647:
raise ValueError(
"baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)"
)
elif field == "units":
if re.search(r"\s", item[ch]):
raise ValueError(
"units strings may not contain whitespaces."
)
elif field == "adc_res":
if item[ch] < 0:
raise ValueError(
"adc_res values must be non-negative integers"
)
elif field == "block_size":
if item[ch] < 0:
raise ValueError(
"block_size values must be non-negative integers"
)
elif field == "sig_name":
if item[ch][:1].isspace() or item[ch][-1:].isspace():
raise ValueError(
"sig_name strings may not begin or end with "
"whitespace."
)
if re.search(r"[\x00-\x1f\x7f-\x9f]", item[ch]):
raise ValueError(
"sig_name strings may not contain "
"control characters."
)
if len(set(item)) != len(item):
raise ValueError("sig_name strings must be unique.")
# Segment specification fields and comments
elif field in _header.SEGMENT_SPECS.index:
for ch in range(len(item)):
if field == "seg_name":
# Segment names must be alphanumerics or just a single '~'
if item[ch] == "~":
continue
accepted_string = re.match(r"[-\w]+", item[ch])
if (
not accepted_string
or accepted_string.group() != item[ch]
):
raise ValueError(
"Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'"
)
elif field == "seg_len":
# For records with more than 1 segment, the first
# segment may be the layout specification segment
# with a length of 0
min_len = 0 if ch == 0 else 1
if item[ch] < min_len:
raise ValueError(
"seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment"
)
# Comment field
elif field == "comments":
if item[ch].startswith("#"):
print(
"Note: comment strings do not need to begin with '#'. This library adds them automatically."
)
if re.search("[\t\n\r\f\v]", item[ch]):
raise ValueError(
"comments may not contain tabs or newlines (they may contain spaces and underscores)."
)
def check_read_inputs(
self, sampfrom, sampto, channels, physical, smooth_frames, return_res
):
"""
Ensure that input read parameters (from rdsamp) are valid for
the record.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
sampto : int, 'end'
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
smooth_frames : bool
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
return_res : int
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
Returns
-------
N/A
"""
# Data Type Check
if not hasattr(sampfrom, "__index__"):
raise TypeError("sampfrom must be an integer")
if not hasattr(sampto, "__index__"):
raise TypeError("sampto must be an integer")
if not isinstance(channels, list):
raise TypeError("channels must be a list of integers")
# Duration Ranges
if sampfrom < 0:
raise ValueError("sampfrom must be a non-negative integer")
if sampfrom > self.sig_len:
raise ValueError("sampfrom must be shorter than the signal length")
if sampto < 0:
raise ValueError("sampto must be a non-negative integer")
if sampto > self.sig_len:
raise ValueError("sampto must be shorter than the signal length")
if sampto <= sampfrom:
raise ValueError("sampto must be greater than sampfrom")
# Channel Ranges
if len(channels):
if min(channels) < 0:
raise ValueError(
"Input channels must all be non-negative integers"
)
if max(channels) > self.n_sig - 1:
raise ValueError(
"Input channels must all be lower than the total number of channels"
)
if return_res not in [64, 32, 16, 8]:
raise ValueError(
"return_res must be one of the following: 64, 32, 16, 8"
)
if physical and return_res == 8:
raise ValueError(
"return_res must be one of the following when physical is True: 64, 32, 16"
)
def _adjust_datetime(self, sampfrom: int):
"""
Adjust date and time fields to reflect user input if possible.
Helper function for the `_arrange_fields` of both Record and
MultiRecord objects.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
Returns
-------
N/A
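Notes
-----
For example, with `fs` = 125 and `sampfrom` = 250, the starting time
fields (when present) are advanced by 250 / 125 = 2 seconds.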
"""
if sampfrom:
dt_seconds = sampfrom / self.fs
if self.base_date and self.base_time:
self.base_datetime += datetime.timedelta(seconds=dt_seconds)
# We can calculate the time even if there is no date
elif self.base_time:
tmp_datetime = datetime.datetime.combine(
datetime.datetime.today().date(), self.base_time
)
self.base_time = (
tmp_datetime + datetime.timedelta(seconds=dt_seconds)
).time()
# Cannot calculate date or time if there is only date
class Record(BaseRecord, _header.HeaderMixin, _signal.SignalMixin):
"""
The class representing single segment WFDB records.
Record objects can be created using the initializer, by reading a WFDB
header with `rdheader`, or a WFDB record (header and associated dat files)
with `rdrecord`.
The attributes of the Record object give information about the record as
specified by: https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the d_signal and p_signal attributes store the digital and
physical signals of WFDB records with at least one channel.
Attributes
----------
p_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
physical signal values intended to be written. Either p_signal or
d_signal must be set, but not both. If p_signal is set, this method will
use it to perform analogue-digital conversion, writing the resultant
digital values to the dat file(s). If fmt is set, gain and baseline must
be set or unset together. If fmt is unset, gain and baseline must both
be unset.
d_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
digital signal values intended to be directly written to the dat
file(s). The dtype must be an integer type. Either p_signal or d_signal
must be set, but not both. In addition, if d_signal is set, fmt, gain
and baseline must also all be set.
e_p_signal : ndarray, optional
The expanded physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
e_d_signal : ndarray, optional
The expanded digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
file_name : str, optional
The name of the file used for analysis.
fmt : list, optional
A list of strings giving the WFDB format of each file used to store each
channel. Accepted formats are: '80','212','16','24', and '32'. There are
other WFDB formats as specified by:
https://www.physionet.org/physiotools/wag/signal-5.htm
but this library will not write (though it will read) those file types.
samps_per_frame : int, optional
The total number of samples per frame.
skew : float, optional
The offset used to align signals.
byte_offset : int, optional
The byte offset used to align signals.
adc_gain : list, optional
A list of numbers specifying the ADC gain.
baseline : list, optional
A list of integers specifying the digital baseline.
units : list, optional
A list of strings giving the units of each signal channel.
adc_res : int, optional
The resolution of the analog-to-digital converter, in bits.
adc_zero: int, optional
The value produced by the ADC given a 0 Volt input.
init_value : list, optional
The initial value of the signal.
checksum : list, int, optional
The checksum of the signal.
block_size : str, optional
The dimensions of the field data.
sig_name : list, optional
A list of strings giving the signal name of each signal channel.
comments : list, optional
A list of string comments to be written to the header file.
Examples
--------
>>> record = wfdb.Record(record_name='r1', fs=250, n_sig=2, sig_len=1000,
file_name=['r1.dat','r1.dat'])
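>>> # Records are more often obtained by reading an existing record; the
>>> # record name below is hypothetical:
>>> record2 = wfdb.rdrecord('100', sampto=1000)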
"""
def __init__(
self,
p_signal=None,
d_signal=None,
e_p_signal=None,
e_d_signal=None,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
file_name=None,
fmt=None,
samps_per_frame=None,
skew=None,
byte_offset=None,
adc_gain=None,
baseline=None,
units=None,
adc_res=None,
adc_zero=None,
init_value=None,
checksum=None,
block_size=None,
sig_name=None,
comments=None,
):
# Note the lack of the 'n_seg' field. Single segment records cannot
# have this field. Even n_seg = 1 makes the header a multi-segment
# header.
super(Record, self).__init__(
record_name=record_name,
n_sig=n_sig,
fs=fs,
counter_freq=counter_freq,
base_counter=base_counter,
sig_len=sig_len,
base_time=base_time,
base_date=base_date,
base_datetime=base_datetime,
comments=comments,
sig_name=sig_name,
)
self.p_signal = p_signal
self.d_signal = d_signal
self.e_p_signal = e_p_signal
self.e_d_signal = e_d_signal
self.file_name = file_name
self.fmt = fmt
self.samps_per_frame = samps_per_frame
self.skew = skew
self.byte_offset = byte_offset
self.adc_gain = adc_gain
self.baseline = baseline
self.units = units
self.adc_res = adc_res
self.adc_zero = adc_zero
self.init_value = init_value
self.checksum = checksum
self.block_size = block_size
# Equal comparison operator for objects of this type
def __eq__(self, other, verbose=False):
"""
Equal comparison operator for objects of this type.
Parameters
----------
other : object
The object that is being compared to self.
verbose : bool, optional
Whether to print details about equality (True) or not (False).
Returns
-------
bool
Determines if the objects are equal (True) or not equal (False).
"""
att1 = self.__dict__
att2 = other.__dict__
if set(att1.keys()) != set(att2.keys()):
if verbose:
print("Attributes members mismatch.")
return False
for k in att1.keys():
v1 = att1[k]
v2 = att2[k]
if type(v1) != type(v2):
if verbose:
print("Mismatch in attribute: %s" % k, v1, v2)
return False
if isinstance(v1, np.ndarray):
# Necessary for nans
np.testing.assert_array_equal(v1, v2)
elif (
isinstance(v1, list)
and len(v1) == len(v2)
and all(isinstance(e, np.ndarray) for e in v1)
):
for e1, e2 in zip(v1, v2):
np.testing.assert_array_equal(e1, e2)
else:
if v1 != v2:
if verbose:
print("Mismatch in attribute: %s" % k, v1, v2)
return False
return True
def wrsamp(self, expanded=False, write_dir=""):
"""
Write a WFDB header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
Returns
-------
N/A
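Examples
--------
A minimal sketch of a read/write round trip; the record name is
hypothetical and `write_dir` is assumed to already exist:
>>> record = wfdb.rdrecord('100', physical=False)
>>> record.wrsamp(write_dir='out')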
"""
# Update the checksum field (except for channels that did not have
# a checksum to begin with, or where the checksum was already
# valid.)
if self.checksum is not None:
checksums = self.calc_checksum(expanded=expanded)
for ch, old_val in enumerate(self.checksum):
if old_val is None or (checksums[ch] - old_val) % 65536 == 0:
checksums[ch] = old_val
self.checksum = checksums
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir, expanded=expanded)
if self.n_sig > 0:
# Perform signal validity and cohesion checks, and write the
# associated dat files.
self.wr_dats(expanded=expanded, write_dir=write_dir)
def _arrange_fields(self, channels, sampfrom, smooth_frames):
"""
Arrange/edit object fields to reflect user channel and/or signal
range input.
Parameters
----------
channels : list
List of channel numbers specified.
sampfrom : int
Starting sample number read.
smooth_frames : bool
Whether to convert the expanded signal array (e_d_signal) into
a smooth signal array (d_signal).
Returns
-------
N/A
"""
# Rearrange signal specification fields
for field in _header.SIGNAL_SPECS.index:
item = getattr(self, field)
setattr(self, field, [item[c] for c in channels])
# Expanded signals - multiple samples per frame.
if not smooth_frames:
# Checksum and init_value to be updated if present
# unless the whole signal length was input
if self.sig_len != int(
len(self.e_d_signal[0]) / self.samps_per_frame[0]
):
self.checksum = self.calc_checksum(True)
self.init_value = [s[0] for s in self.e_d_signal]
self.n_sig = len(channels)
self.sig_len = int(
len(self.e_d_signal[0]) / self.samps_per_frame[0]
)
# MxN numpy array d_signal
else:
self.d_signal = self.smooth_frames("digital")
self.e_d_signal = None
# Checksum and init_value to be updated if present
# unless the whole signal length was input
if self.sig_len != self.d_signal.shape[0]:
if self.checksum is not None:
self.checksum = self.calc_checksum()
if self.init_value is not None:
ival = list(self.d_signal[0, :])
self.init_value = [int(i) for i in ival]
# Update record specification parameters
# Important that these get updated after^^
self.n_sig = len(channels)
self.sig_len = self.d_signal.shape[0]
# Adjust date and time if necessary