Skip to content

Commit 9ecb7d8

Browse files
michaelgrund, Meghan Jones, weiji14, seisman
authored
Add new function to load fractures sample data (#1101)
Co-authored-by: Meghan Jones <[email protected]> Co-authored-by: Wei Ji <[email protected]> Co-authored-by: Dongdong Tian <[email protected]>
1 parent 65f5aee commit 9ecb7d8

File tree

4 files changed

+38
-0
lines changed

4 files changed

+38
-0
lines changed

doc/api/index.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ and store them in the GMT cache folder.
142142
datasets.load_ocean_ridge_points
143143
datasets.load_sample_bathymetry
144144
datasets.load_usgs_quakes
145+
datasets.load_fractures_compilation
145146

146147
.. automodule:: pygmt.exceptions
147148

pygmt/datasets/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
from pygmt.datasets.earth_relief import load_earth_relief
66
from pygmt.datasets.samples import (
7+
load_fractures_compilation,
78
load_japan_quakes,
89
load_ocean_ridge_points,
910
load_sample_bathymetry,

pygmt/datasets/samples.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,3 +101,25 @@ def load_usgs_quakes():
101101
fname = which("@usgs_quakes_22.txt", download="c")
102102
data = pd.read_csv(fname)
103103
return data
104+
105+
106+
def load_fractures_compilation():
    """
    Load a table of fracture lengths and azimuths as hypothetically digitized
    from geological maps as a pandas.DataFrame.

    This is the ``@fractures_06.txt`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Use ``print(data.describe())`` to see the available
        columns.
    """
    # Fetch the remote sample file into the local GMT cache (download="c").
    cached_file = which("@fractures_06.txt", download="c")
    # The raw file has no header row; columns appear as azimuth then length.
    raw_columns = ["azimuth", "length"]
    table = pd.read_csv(cached_file, header=None, sep=r"\s+", names=raw_columns)
    # Present the columns in (length, azimuth) order for callers.
    return table[["length", "azimuth"]]

pygmt/tests/test_datasets_samples.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
Test basic functionality for loading sample datasets.
33
"""
44
from pygmt.datasets import (
5+
load_fractures_compilation,
56
load_japan_quakes,
67
load_ocean_ridge_points,
78
load_sample_bathymetry,
@@ -58,3 +59,16 @@ def test_usgs_quakes():
5859
"""
5960
data = load_usgs_quakes()
6061
assert data.shape == (1197, 22)
62+
63+
64+
def test_fractures_compilation():
    """
    Check that the @fractures_06.txt dataset loads without errors.
    """
    data = load_fractures_compilation()
    assert data.shape == (361, 2)
    summary = data.describe()
    # Spot-check the extremes of both columns against known dataset values.
    expected_extremes = {
        ("min", "length"): 98.6561,
        ("max", "length"): 984.652,
        ("min", "azimuth"): 0.0,
        ("max", "azimuth"): 360.0,
    }
    for (stat, column), expected in expected_extremes.items():
        assert summary.loc[stat, column] == expected

0 commit comments

Comments
 (0)