Skip to content

Commit 9ca7b72

Browse files
authored
Merge pull request #58 from softwareunderground/pr_jan
[BUG] Add `location` when specifying `position` in welly, and fix for pandas versions larger than 1.4. Further fixes for bugs introduced by newer versions of xarray and welly.
2 parents fb5e416 + d6ef289 commit 9ca7b72

File tree

15 files changed

+113
-44
lines changed

15 files changed

+113
-44
lines changed

.github/workflows/docs.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ jobs:
4747

4848
- name: Setup Headless Display
4949
run: |
50-
sudo apt-get install libgl1-mesa-glx
50+
sudo apt-get update && sudo apt-get install libgl1-mesa-glx xvfb -y
5151
sudo apt-get install -y xvfb
5252
Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
5353
sleep 3

.github/workflows/main.yml

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,11 +43,7 @@ jobs:
4343

4444
steps:
4545

46-
# Cancel any previous run of the test job; [pin v0.8.0 (2021-02-13)]
47-
- name: Cancel Previous Runs
48-
uses: styfle/cancel-workflow-action@3d86a7cc43670094ac248017207be0295edbc31d
49-
with:
50-
access_token: ${{ github.token }}
46+
5147

5248
- name: Set up Python
5349
uses: actions/setup-python@v2
@@ -57,7 +53,7 @@ jobs:
5753
- uses: actions/checkout@v2
5854
- name: Setup Headless Display
5955
run: |
60-
sudo apt-get install libgl1-mesa-glx
56+
sudo apt-get update && sudo apt-get install libgl1-mesa-glx xvfb -y
6157
sudo apt-get install -y xvfb
6258
Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
6359
sleep 3

.gitignore

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,3 +85,21 @@ venv.bak/
8585
/.idea/subsurface.iml
8686
/.idea/vcs.xml
8787
/.idea/other.xml
88+
89+
.local_settings.json
90+
91+
# User-specific stuff:
92+
.idea/workspace.xml
93+
.idea/tasks.xml
94+
.idea/dictionaries
95+
.idea/vcs.xml
96+
.idea/jsLibraryMappings.xml
97+
98+
# Sensitive or high-churn files:
99+
.idea/dataSources.ids
100+
.idea/dataSources.xml
101+
.idea/dataSources.local.xml
102+
.idea/sqlDataSources.xml
103+
.idea/dynamic.xml
104+
.idea/uiDesigner.xml
105+
/.idea/codeStyles/codeStyleConfig.xml

subsurface/interfaces/liquid_earth/rest_client.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,11 @@ def add_data_to_project(self, project_id: str, data_name: str, data_type: DataTy
9595
self._put_file_in_project(project_id, data_name + ".le", data_type, body)
9696
self._put_file_in_project(project_id, data_name + ".json", data_type, json.dumps(header))
9797

98+
def add_texture_to_mesh(self, project_id: str, data_name: str, data_type: DataTypes,
99+
texture):
100+
self._post_update_meta_data(project_id, data_name, data_type)
101+
self._put_file_in_project(project_id, data_name + ".png", data_type, texture)
102+
98103
def _put_file_in_project(self, project_id: str, data_name, data_type: DataTypes, file):
99104
blob_path = data_type.value + "/" + data_name
100105

@@ -105,9 +110,18 @@ def _put_file_in_project(self, project_id: str, data_name, data_type: DataTypes,
105110
raise Exception(f"Request failed: {response.text}")
106111
elif response.status_code >= 200:
107112
print(response.text)
113+
else:
114+
print(response.status_code)
108115

109116
def _post_update_meta_data(self, project_id: str, data_name: str, data_type: DataTypes):
110-
query_param = f"?project_id={project_id}&data_id={data_name}&data_type={data_type.value}"
117+
118+
# ! data_type Collar and tubes should be just mapped to well
119+
if data_type == DataTypes.collars or data_type == DataTypes.cylinder:
120+
address_in_cosmos = "wells"
121+
else:
122+
address_in_cosmos = data_type.value
123+
124+
query_param = f"?project_id={project_id}&data_id={data_name}&data_type={address_in_cosmos}"
111125
end_point = "subsurface-lite/v1/update_project_meta" + query_param
112126

113127
response = requests.post(self.host + end_point, headers=self.header)

subsurface/reader/readers_data.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,18 +6,25 @@
66
import numpy as np
77
import pandas as pd
88
import xarray as xr
9-
from pandas._typing import FilePath, ReadCsvBuffer, ReadCsvBuffer
9+
1010

1111
from subsurface.utils.utils_core import get_extension
1212

1313

1414
__all__ = ['ReaderFilesHelper', 'ReaderUnstructuredHelper',
1515
'ReaderWellsHelper', 'RawDataOptions', 'RawDataUnstructured']
1616

17+
if pd.__version__ < '1.4.0':
18+
from pandas._typing import FilePathOrBuffer
19+
fb = FilePathOrBuffer
20+
elif pd.__version__ >= '1.4.0':
21+
from pandas._typing import FilePath, ReadCsvBuffer
22+
fb = Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]]
1723

1824
@dataclass
1925
class ReaderFilesHelper:
20-
file_or_buffer: Union[FilePath, ReadCsvBuffer[bytes], ReadCsvBuffer[str]]
26+
file_or_buffer: fb
27+
2128

2229
usecols: Union[List[str], List[int]] = None # Use a subset of columns
2330
col_names: List[Union[str, int]] = None # Give a name

subsurface/reader/topography/topo_core.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,12 @@ def read_structured_topography_to_unstructured(path) -> UnstructuredData:
2929

3030
def rasterio_dataset_to_structured_data(dataset):
3131
data = dataset.read(1)
32+
data = np.fliplr(data.T)
3233
shape = data.shape
3334
coords = {
3435
'x': np.linspace(
35-
dataset.bounds.right,
3636
dataset.bounds.left,
37+
dataset.bounds.right,
3738
shape[0]
3839
),
3940
'y': np.linspace(

subsurface/reader/volume/volume_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def interpolate_unstructured_data_to_structured_data(
3131
grid = np.meshgrid(*coords.values())
3232

3333
interpolated_attributes = griddata(ud.vertex,
34-
ud.points_attributes.loc[:, attr_name],
34+
ud.attributes.loc[:, attr_name],
3535
tuple(grid), method=interpolation_method.value)
3636

3737
sd = StructuredData.from_numpy(

subsurface/reader/wells/pandas_to_welly.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,8 @@ def add_datum(self, data: pd.DataFrame):
9898
w = self.p.get_well(b)
9999
datum = data.loc[[b]]
100100
assert datum.shape[1] == 3, 'datum must be XYZ coord'
101-
w.position = datum.values[0]
101+
102+
w.location.position = datum.values[0]
102103

103104
return self.p
104105

@@ -114,8 +115,10 @@ def add_striplog(self, data: pd.DataFrame):
114115
for b in unique_borehole:
115116
w = self.p.get_well(b)
116117
data_dict = data.loc[[b]].to_dict('list')
117-
118-
s = Striplog.from_dict_advanced(data_dict, points=True)
118+
data_csv = data.loc[[b]].to_csv()
119+
#s = Striplog.from_dict_advanced(data_dict, points=True)
120+
#s = Striplog.from_dict(data_dict)
121+
s = Striplog.from_csv(text=data_csv)
119122

120123
try:
121124
n_basis = w.location.md.shape[0]

subsurface/reader/wells/well_files_reader.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import warnings
22
from typing import Dict
33

4+
import pandas
45
import pandas as pd
56
import numpy as np
67

@@ -89,7 +90,7 @@ def read_survey_df_from_las(reader_helper: ReaderFilesHelper, well_name: str) ->
8990
welly_well = _create_welly_well_from_las(well_name, reader_helper.file_or_buffer)
9091
survey_df = welly_well.df()[reader_helper.usecols]
9192
map_rows_and_cols_inplace(survey_df, reader_helper)
92-
survey_df["well_name"] = "Cottessen"
93+
survey_df["well_name"] = well_name
9394
survey_df.set_index("well_name", inplace=True)
9495
return survey_df
9596

subsurface/reader/wells/welly_reader.py

Lines changed: 17 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,13 @@ def vertex_and_cells_from_welly_trajectory(cells: np.ndarray, elev: bool,
9797
welly_trajectory_kwargs: dict,
9898
last_index: int, n_vertex_for_well: int,
9999
vertex: np.ndarray, w: Well):
100-
xyz = w.location.trajectory(w.position, elev, n_vertex_for_well, **welly_trajectory_kwargs)
100+
101+
try:
102+
datum = w.location.datum
103+
except AttributeError:
104+
datum = None
105+
106+
xyz = w.location.trajectory(datum=datum, elev=elev, points=n_vertex_for_well, **welly_trajectory_kwargs) #w.location.position
101107
# Make sure deviation is there
102108
a = np.arange(0 + last_index, xyz.shape[0] - 1 + last_index, dtype=np.int_)
103109
b = np.arange(1 + last_index, xyz.shape[0] + last_index, dtype=np.int_)
@@ -145,6 +151,16 @@ def _create_well(uwi: str = 'dummy_uwi'):
145151
def add_curves_from_las(w: Well, las_folder: str) -> Well:
146152
""" Add curves from las file. """
147153

154+
def _read_curves_to_welly_object(well: Well, curve_path: str = '.') -> Well:
155+
las_files = glob.glob(curve_path + '*.las')
156+
# throw error if no las files found
157+
if len(las_files) == 0:
158+
raise Exception('No las files found in ' + curve_path)
159+
160+
for curve in las_files:
161+
well.add_curves_from_las(curve)
162+
return well
163+
148164
_read_curves_to_welly_object(w, curve_path=las_folder)
149165

150166
w_df = w.df()
@@ -166,12 +182,4 @@ def _make_deviation_df(well_df: DataFrame, inclination_header: str, azimuth_head
166182
return deviation_complete
167183

168184

169-
def _read_curves_to_welly_object(well: Well, curve_path: str = '.') -> Well:
170-
las_files = glob.glob(curve_path + '*.las')
171-
# throw error if no las files found
172-
if len(las_files) == 0:
173-
raise Exception('No las files found in ' + curve_path)
174185

175-
for curve in las_files:
176-
well.add_curves_from_las(curve)
177-
return well

subsurface/structs/base_structures/unstructured_data.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
from subsurface.reader.readers_data import RawDataUnstructured
99

10-
1110
__all__ = ['UnstructuredData', ]
1211

1312

@@ -119,14 +118,15 @@ def from_data_arrays_dict(cls, xarray_dict: Dict[str, xr.DataArray],
119118
default_cells_attributes_name="cell_attrs",
120119
default_points_attributes_name="vertex_attrs"):
121120

121+
# TODO: xr.Dataset seems to have been changed with 2022.06. needs to be adapted for indexing
122122
ds = xr.Dataset(xarray_dict, coords=coords, attrs=xarray_attributes)
123123

124124
# Try to unstack pandas dataframe if exist
125125
# TODO: This is an issue in wells. If it is only there maybe we should move it there
126126
try:
127127
ds = ds.reset_index('cell')
128-
except KeyError:
129-
pass
128+
except (KeyError, ValueError) as e:
129+
print(f"{e} xarray dataset must include 'cell' key (KeyError) or xarray 'cell' has no index (ValueError).")
130130

131131
return cls(ds, default_cells_attributes_name, default_points_attributes_name)
132132

@@ -256,7 +256,7 @@ def points_attributes_to_dict(self, orient='list'):
256256
def extent(self):
257257
max = self.vertex.max(axis=0)
258258
min = self.vertex.min(axis=0)
259-
extent = np.stack((min, max), axis = 1).ravel()
259+
extent = np.stack((min, max), axis=1).ravel()
260260
return extent
261261

262262
def to_xarray(self):

subsurface/writer/to_binary.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,10 @@
22

33

44
def base_structs_to_binary_file(path, base_struct, order='F'):
5-
bytearray_le, header = base_struct.default_data_array_to_binary(order=order)
5+
try:
6+
bytearray_le, header = base_struct.default_data_array_to_binary(order=order)
7+
except AttributeError:
8+
bytearray_le, header = base_struct.to_binary(order=order)
69
with open(path+'.json', 'w') as outfile:
710
json.dump(header, outfile)
811

2.16 KB
Binary file not shown.

tests/test_interfaces/test_to_binary.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,7 @@ def wells(data_path):
3030

3131

3232
def test_wells_to_binary(wells):
33-
bytearray_le, header = wells.default_data_array_to_binary()
34-
print(header)
33+
bytearray_le, header = wells.to_binary()
3534

3635
with open('well_f.json', 'w') as outfile:
3736
json.dump(header, outfile)
@@ -78,7 +77,7 @@ def test_profile_to_binary(data_path):
7877
import pandas as pd
7978

8079
unstruct = UnstructuredData.from_array(v, e, vertex_attr=pd.DataFrame(uv, columns=['u', 'v']))
81-
mesh_binary, mesh_header = unstruct.default_data_array_to_binary()
80+
mesh_binary, mesh_header = unstruct.to_binary()
8281

8382
with open('mesh_uv.json', 'w') as outfile:
8483
import json

tests/test_io/test_welly_to_subsurface.py

Lines changed: 30 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919

2020
pf = pathlib.Path(__file__).parent.absolute()
2121
data_path = pf.joinpath('../data/borehole/')
22+
import sys
23+
sys.path.insert(0, '../../../striplog_miguel/striplog/')
2224
from striplog import Striplog, Component
2325

2426

@@ -228,13 +230,19 @@ def test_excel_to_subsurface():
228230
well_names = data[well_name_column].unique()
229231

230232
foo = data.groupby(well_name_column).get_group(well_names[0])
231-
data_dict = foo.to_dict('list')
233+
foo.columns = foo.columns.map({'DEPTH_FROM': 'top',
234+
'DEPTH_TO': 'base',
235+
'LITHOLOGY': 'component lith',
236+
'SITE_ID': 'description'})
237+
foo_csv = foo.to_csv(index=False)
238+
#data_dict = foo.to_dict('list')
232239

233240
# Load striplog
234-
s = Striplog.from_dict_advanced(data_dict, remap={'DEPTH_FROM': 'top',
235-
'DEPTH_TO': 'base',
236-
'LITHOLOGY': 'component lith',
237-
'SITE_ID': 'description'})
241+
s = Striplog.from_csv(text=foo_csv)
242+
# s = Striplog.from_dict_advanced(data_dict, remap={'DEPTH_FROM': 'top',
243+
# 'DEPTH_TO': 'base',
244+
# 'LITHOLOGY': 'component lith',
245+
# 'SITE_ID': 'description'})
238246

239247
s.plot()
240248
plt.show(block=False)
@@ -272,12 +280,23 @@ def test_striplog_2():
272280
well_name_column = 'SITE_ID'
273281
well_names = data[well_name_column].unique()
274282
foo = data.groupby(well_name_column).get_group(well_names[0])
275-
data_dict = foo.to_dict('list')
276283

277-
s = Striplog.from_dict_advanced(data_dict, remap={'DEPTH_FROM': 'top',
278-
'DEPTH_TO': 'base',
279-
'LITHOLOGY': 'component lith',
280-
'SITE_ID': 'description'})
284+
foo.columns = foo.columns.map({'DEPTH_FROM': 'top',
285+
'DEPTH_TO': 'base',
286+
'LITHOLOGY': 'component lith',
287+
'SITE_ID': 'description'})
288+
foo_csv = foo.to_csv(index=False)
289+
# data_dict = foo.to_dict('list')
290+
291+
# Load striplog
292+
s = Striplog.from_csv(text=foo_csv)
293+
# TODO: Maybe get rid of from_dict_advanced, as it does not show a benefit over csv?
294+
# data_dict = foo.to_dict('list')
295+
#
296+
# s = Striplog.from_dict_advanced(data_dict, remap={'DEPTH_FROM': 'top',
297+
# 'DEPTH_TO': 'base',
298+
# 'LITHOLOGY': 'component lith',
299+
# 'SITE_ID': 'description'})
281300

282301
s.plot()
283302
plt.show(block=False)
@@ -417,7 +436,7 @@ def test_read_kim_default_component_table():
417436
subsurface.visualization.pv_plot([pyvista_mesh], image_2d=True)
418437

419438

420-
def test_read_wells():
439+
def test_read_wells(): #TODO: fix trajectory IndexError of Well.location.trajectory() Missing NC-10 End, added a dummy
421440

422441
file_name = 'wells-database-small.xlsx'
423442

0 commit comments

Comments
 (0)