Commit 8b13986

adding PLProcessing test data (#6)

* PLProcessing working
* datasets tweak

1 parent 4473377 commit 8b13986

File tree

5 files changed: +2646 −170


examples/PLProcessing.py

Lines changed: 8 additions & 9 deletions
@@ -1,27 +1,27 @@
 
 # Process PL Data from Wright group
 
-import pathlib
 import makeitwright as mw
-
+from makeitwright import datasets
 
 andor = mw.andor
 roi = mw.helpers.roi
 parse = mw.parsers.parse
 plot = mw.spectra.plot_spectra
 
+p = datasets.PL
+filepath = p.parent
+filename = p.stem
 
-filepath = pathlib.Path().expanduser() / "Desktop" / "Research Data" / "Wright Table" / "Original" / "test"
-filename = "PEAPbI on FPEASnI PL 77K 4 2 hour wait for cool"
-obj = 10  # Objective magnification (5x, 10x, 50x, 100x)
-ROI_lower = 1000  # Lower and upper bounds of ROI
-ROI_upper = 1047
+obj = 10  # Objective magnification (5x, 10x, 50x, 100x)
+ROI_lower = 575  # Lower and upper bounds of ROI
+ROI_upper = 600
 plotx_lower = 500  # Lower and upper bounds of built-in plot x-axis
 plotx_upper = 800
 
 
 # Read data
-data = parse(filepath, objective=obj, keywords=filename + ".asc")
+data = parse(filepath, objective=str(obj), keywords=filename + ".asc")
 
 
 # Check object area
@@ -34,7 +34,6 @@
 if con == '1':
     quit()
 
-
 # Process PL data
 PL_ROI = roi(data, {'y': ([ROI_lower, ROI_upper], 'average')})
 plot(PL_ROI, channel=0, xrange=[plotx_lower, plotx_upper])  # Can add vrange=[ , ] (y axis scale)
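
Note on the example changes: the script now resolves the bundled test file through makeitwright.datasets instead of a hard-coded Desktop path, and it passes objective=str(obj) because the parser (next file) looks the objective up in a string-keyed px_per_um table. A minimal sketch of the pathlib handling, assuming datasets.PL behaves like a pathlib.Path pointing at the packaged .asc export (the concrete path below is illustrative only):

from pathlib import Path

# Hypothetical stand-in for makeitwright.datasets.PL; the real attribute is
# whichever Path the package ships with.
p = Path("makeitwright/datasets/PL/example PL data.asc")

filepath = p.parent   # directory handed to parse()
filename = p.stem     # file name without ".asc", reused as the parse keyword
print(filepath)       # makeitwright/datasets/PL
print(filename)       # example PL data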

makeitwright/core/parsers/__init__.py

Lines changed: 19 additions & 6 deletions
@@ -11,6 +11,18 @@
 from .xrd import fromBruker
 
 
+px_per_um = {
+    '5x-Jin' : 0.893,
+    '20x-Jin' : 3.52,
+    '100x-Wright' : 18.2,
+    '5' : 0.893,
+    '20' : 3.52,
+    '100' : 18.2,
+}
+# add 10x for now with approximation
+px_per_um["10"] = px_per_um["10x-Jin"] = 2 * px_per_um["5"]
+
+
 def typeID(*fpaths):
     """
     Infer what kind of data the file contains.
@@ -78,14 +90,14 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
             keywords = [keywords]
         for kw in keywords:
             for i, f in enumerate(files):
-                if kw not in f:
+                if kw not in str(f):
                     include[i]=0
     if exclude:
         if type(exclude) is not list:
             exclude = [exclude]
         for x in exclude:
             for i, f in enumerate(files):
-                if x in f:
+                if x in str(f):
                     include[i]=0
 
     files = [file for i, file in zip(include, files) if i]
@@ -118,15 +130,15 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
         too_much_data = True
     if len(ftypes) > 200:
         too_much_data = True
-    if sum([f.state()["st_size"] for f in files]) > virtual_memory().available:
+    if sum([f.stat().st_size for f in files]) > virtual_memory().available:
         too_much_data = True
 
     if too_much_data:
         raise MemoryError("too much data in directory, parsing cancelled to prevent storage overflow")
 
     d = []
     for fpath, dtype in ftypes.items():
-        basename = fpath.split('/')[-1].split('.')[0]
+        basename = fpath.stem
 
         if dtype.startswith('LabramHR'):
             d.append(fromLabramHR(fpath, name=basename))
@@ -148,9 +160,10 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
 
         elif dtype=='ASCII':
             try:
-                d.append(fromAndorNeo(fpath, name=basename, objective_lens=objective))
-            except:
+                d.append(fromAndorNeo(fpath, name=basename, px_per_um=px_per_um[objective] if objective else None))
+            except Exception as e:
                 print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module')
+                raise e
                 print(basename)
 
         elif dtype=='wt5':
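
Note on the parser changes: files are handled as pathlib.Path objects here, so the keyword and exclude filters need str(f) (a substring check with `in` raises TypeError on a Path), the size guard needs f.stat().st_size (f.state() is not a real method), and fpath.stem replaces the hand-rolled split('/') basename logic. The larger addition is the module-level px_per_um table, which parse() uses to turn the objective argument into a scale factor for fromAndorNeo. A minimal sketch of that lookup, mirroring the table and the call-site expression from the diff (scale_for is a hypothetical helper, not part of the package):

# Pixel-per-micron table as added above; "10" / "10x-Jin" are an explicit
# 2x approximation of the 5x entry.
px_per_um = {
    '5x-Jin': 0.893, '20x-Jin': 3.52, '100x-Wright': 18.2,
    '5': 0.893, '20': 3.52, '100': 18.2,
}
px_per_um["10"] = px_per_um["10x-Jin"] = 2 * px_per_um["5"]


def scale_for(objective):
    # Mirrors the call site: px_per_um[objective] if objective else None.
    # An unknown key (e.g. "50") raises KeyError rather than silently guessing.
    return px_per_um[objective] if objective else None


print(scale_for("10"))   # 1.786
print(scale_for(None))   # None (spatial axes stay as a unitless pixel index)

Because the integer keys carried by the old objective_lenses dict were dropped when the table moved here, only string keys work, which is why the example script now calls parse(..., objective=str(obj)).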

makeitwright/core/parsers/andor.py

Lines changed: 22 additions & 155 deletions
@@ -1,10 +1,11 @@
 import WrightTools as wt
-import numpy as np
-import pathlib
-from os import fspath
 
 
-def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False):
+# builtin conversion from pixels to um (from somebody's records)
+# for different objectives
+# roughly, pixel size in microns / magnification
+
+def fromAndorNeo(fpath, name=None, px_per_um=None):
     """Create a data object from Andor Solis software (ascii exports).
 
     Parameters
@@ -16,169 +17,35 @@ def fromAndorNeo(fpath, name=None, px_per_um=None):
     name : string (optional)
         Name to give to the created data object. If None, filename is used.
         Default is None.
+    px_per_um : float-like (optional)
+        if present, camera spatial dimensions will be mapped in micron units.
+        if not present, spatial variables of camera will be a unitless index
 
     Returns
     -------
     data
         New data object.
     """
-
-    objective_lenses = {
-        '5x-Jin' : 0.893,
-        '20x-Jin' : 3.52,
-        '100x-Wright' : 18.2,
-        '5' : 0.893,
-        '20' : 3.52,
-        '100' : 18.2,
-        5 : 0.893,
-        20 : 3.52,
-        100 : 18.2
-    }
-
     # parse filepath
-    filepath = pathlib.Path(fpath)
-
-    if not ".asc" in filepath.suffixes:
-        wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
-    # parse name
-    if name is None:
-        name = filepath.name.split("/")[-1]
+    data:wt.Data = wt.data.from_Solis(fpath, name=name, verbose=True)
+    data.rename_variables(xindex="x", yindex="y", wm="wl")
+    data.rename_channels(signal="sig")
 
-    if objective_lens=='prompt':
-        objective_lens = input(f'enter magnification for data at {name}: ')
-        if not objective_lens:
-            objective_lens = 0
+    for var in {"x", "y"} & set(data.variable_names):
+        if px_per_um:
+            data[var][:] = data[var][:] / px_per_um
+            data[var].units = 'µm'
 
-    # create data
-    ds = np.DataSource(None)
-    f = ds.open(fspath(fpath), "rt")
-    axis0 = []
-    arr = []
-    attrs = {}
+    dtype = "image" if "x" in data.variable_names else "spectralprofile"
+    data.attrs.update(dtype=dtype)
 
-    line0 = f.readline().strip()[:-1]
-    line0 = [float(x) for x in line0.split(",")]  # TODO: robust to space, tab, comma
-    axis0.append(line0.pop(0))
-    arr.append(line0)
-
-    def get_frames(f, arr, axis0):
-        axis0_written = False
-        while True:
-            line = f.readline().strip()[:-1]
-            if len(line) == 0:
-                break
-            else:
-                line = [float(x) for x in line.split(",")]
-                # signature of new frames is restart of axis0
-                if not axis0_written and (line[0] == axis0[0]):
-                    axis0_written = True
-                if axis0_written:
-                    line.pop(0)
-                else:
-                    axis0.append(line.pop(0))
-                arr.append(line)
-        return arr, axis0
+    if "wl" in data.variable_names:
+        data["wl"].attrs['label'] = "wavelength (nm)" if data["wl"].units == "nm" else "wavenumber (cm-1)"
 
-    arr, axis0 = get_frames(f, arr, axis0)
-    nframes = len(arr) // len(axis0)
-
-    i = 0
-    while i < 3:
-        line = f.readline().strip()
-        if len(line) == 0:
-            i += 1
-        else:
-            try:
-                key, val = line.split(":", 1)
-            except ValueError:
-                pass
-            else:
-                attrs[key.strip()] = val.strip()
-
-    f.close()
-
-    #create data object
-    arr = np.array(arr)
-    axis0 = np.array(axis0)
-    data = wt.Data(name=name)
-    if float(attrs["Grating Groove Density (l/mm)"]) == 0:
-        xname = 'x'
-        dtype = 'image'
-        try:
-            axis0 = axis0/objective_lenses[objective_lens]
-            xunits = 'µm'
-        except KeyError:
-            xunits = 'px'
+    if data.sig.units == "Hz":
+        data.sig.label = "intensity (cps)"
     else:
-        xname = 'wl'
-        xunits = 'nm'
-        dtype = 'spectralprofile'
-
-    axis1 = np.arange(arr.shape[-1])
-    yname='y'
-    try:
-        axis1 = axis1/objective_lenses[objective_lens]
-        yunits = 'µm'
-    except KeyError:
-        yunits = 'px'
-
-    axes = [xname, yname]
+        data.sig.label = "counts"
 
-    if nframes == 1:
-        arr = np.array(arr)
-        data.create_variable(name=xname, values=axis0[:, None], units=xunits)
-        data.create_variable(name=yname, values=axis1[None, :], units=yunits)
-    else:
-        frames = np.arange(nframes)
-        try:
-            ct = float(attrs["Kinetic Cycle Time (secs)"])
-            frames = frames*ct
-            tname = 't'
-            tunits = 's'
-        except KeyError:
-            tname = 'frame'
-            tunits = None
-        arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))
-        data.create_variable(name=tname, values=frames[:, None, None], units=tunits)
-        data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)
-        data.create_variable(name=yname, values=axis1[None, None, :], units=yunits)
-        axes = [tname] + axes
-
-    if xname=='wl':
-        if xunits=='nm':
-            data[xname].attrs['label'] = "wavelength (nm)"
-        if xunits=='wn':
-            data[xname].attrs['label'] = "wavenumber (cm-1)"
-    if xname=='x':
-        data[xname].attrs['label'] = "x (µm)"
-    if yname=='y':
-        data[yname].attrs['label'] = "y (µm)"
-
-    data.transform(*axes)
-    if cps:
-        try:
-            arr = arr/float(attrs["Exposure Time (secs)"])
-        except KeyError:
-            pass
-        try:
-            arr = arr/int(attrs["Number of Accumulations"])
-        except KeyError:
-            pass
-
-    data.create_channel(name='sig', values=arr, signed=False)
-    if cps:
-        data['sig'].attrs['label'] = "intensity (cps)"
-    else:
-        data['sig'].attrs['label'] = "counts"
-
-    for key, val in attrs.items():
-        data.attrs[key] = val
-
-    # finish
-    print("data created at {0}".format(data.fullpath))
-    print("  axes: {0}".format(data.axis_names))
-    print("  shape: {0}".format(data.shape))
-    data.attrs['dtype']=dtype
 
     return data
-
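
Note on the andor rewrite: fromAndorNeo now delegates all file reading to WrightTools' from_Solis and keeps only the post-processing, i.e. renaming variables and channels, scaling the camera's pixel indices to microns when px_per_um is supplied, tagging a dtype attribute, and labeling the signal from its units instead of the old cps flag. A standalone sketch of the pixel-to-micron step with made-up numbers (no .asc file or WrightTools object required):

import numpy as np

# Stand-in for data["x"][:] or data["y"][:]: the camera variables start out
# as plain pixel indices.
pixel_index = np.arange(5)

px_per_um = 1.786                   # e.g. the parsers' approximate 10x entry
microns = pixel_index / px_per_um   # same operation as data[var][:] / px_per_um
print(np.round(microns, 2))         # [0.   0.56 1.12 1.68 2.24]

When px_per_um is None, the loop leaves the variables untouched, so the spatial axes remain a unitless index, matching the new docstring.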
