-
Notifications
You must be signed in to change notification settings - Fork 90
Expand file tree
/
Copy pathcartesian.py
More file actions
137 lines (110 loc) · 4.76 KB
/
cartesian.py
File metadata and controls
137 lines (110 loc) · 4.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# Copyright (c) 2018 The Harmonica Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Gridding and upward continuation
================================
Most potential field surveys gather data along scattered and uneven flight
lines or ground measurements. For a great number of applications we may need to
interpolate these data points onto a regular grid at a constant altitude.
Upward-continuation is also a routine task for smoothing, noise attenuation,
source separation, etc.
Both tasks can be done simultaneously through an *equivalent sources* technique
[Dampney1969]_ (a.k.a. an *equivalent layer*). We will use
:class:`harmonica.EquivalentSources` to estimate the coefficients of a set of
point sources that fit the observed data. The fitted sources can then be used
to predict data values wherever we want, like on a grid at a certain altitude.
By default, the sources for :class:`~harmonica.EquivalentSources` are placed
one beneath each data point at a relative depth from the elevation of the data
point following [Cordell1992]_.
The advantage of using equivalent sources is that it takes into account the 3D
nature of the observations, not just their horizontal positions. It also allows
data uncertainty to be taken into account and noise to be suppressed through the
least-squares fitting process. The main disadvantage is the increased
computational load (both in terms of time and memory).
"""
import ensaio
import numpy as np
import pandas as pd
import pygmt
import pyproj
import verde as vd
import harmonica as hm
# Download the sample total-field magnetic anomaly data over Great Britain and
# load it into a DataFrame
fname = ensaio.fetch_britain_magnetic(version=1)
data = pd.read_csv(fname)
# Keep only a small window of the survey (west, east, south, north in degrees)
# so this example runs quickly
region = [-5.5, -4.7, 57.8, 58.5]
data = data[vd.inside((data.longitude, data.latitude), region)]
print("Number of data points:", data.shape[0])
print("Mean height of observations:", data.height_m.mean())
# The window is small, so a Mercator projection (true scale at the mean
# latitude) lets us work in Cartesian coordinates from here on
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())
easting, northing = projection(data.longitude.values, data.latitude.values)
# Observation coordinates: projected horizontals plus the flight height
coordinates = (easting, northing, data.height_m)
xy_region = vd.get_region((easting, northing))
# Build the equivalent-sources gridder. The default configuration places one
# point source at a relative depth beneath each observation point; the damping
# regularizes the least-squares fit, smoothing predictions and keeping the
# solution stable.
eqs = hm.EquivalentSources(depth=1000, damping=1)
observed = data.total_field_anomaly_nt
# Estimate the source coefficients from the observed magnetic anomaly
eqs.fit(coordinates, observed)
# The R² score measures how well the sources reproduce the observations only —
# it says nothing about the quality of the interpolation itself
print("R² score:", eqs.score(coordinates, observed))
# Predict on a regular grid with 500 m spacing. The grid needs an upward
# (height) coordinate: using 1500 m effectively upward-continues the data,
# since the mean flight height is around 500 m.
grid_height = 1500
grid_coords = vd.grid_coordinates(
    region=xy_region, spacing=500, extra_coords=grid_height
)
grid = eqs.grid(coordinates=grid_coords, data_names=["magnetic_anomaly"])
# The result is an xarray.Dataset carrying values, coordinates, and metadata
print("\nGenerated grid:\n", grid)
# Figure layout: fix the panel height at 10 cm and derive the width from the
# aspect ratio of the projected region
w, e, s, n = xy_region
fig_height = 10
fig_width = fig_height * (e - w) / (n - s)
# GMT Cartesian scale "x1:ratio" — projected meters per unit of page length,
# with the height converted from cm to m
fig_ratio = (n - s) / (fig_height / 100)
fig_proj = "x1:" + str(fig_ratio)
# Draw the observed point data side by side with the gridded and
# upward-continued version
fig = pygmt.Figure()
# Symmetric color limits from the 99.9th percentile of the absolute anomaly,
# so a few outliers don't wash out the colormap
vmax = np.quantile(np.abs(data.total_field_anomaly_nt), 0.999)
pygmt.makecpt(
    series=[-vmax, vmax],
    cmap="balance+h0",
    background=True,
)
left_title = "Observed magnetic anomaly data"
with pygmt.config(FONT_TITLE="12p"):
    fig.plot(
        x=easting,
        y=northing,
        fill=data.total_field_anomaly_nt,
        style="c0.1c",
        cmap=True,
        projection=fig_proj,
        region=xy_region,
        frame=[f"WSne+t{left_title}", "xa10000", "ya10000"],
    )
fig.colorbar(cmap=True, frame=["x+lnT"], position="+e")
# Shift right to draw the second panel next to the first
fig.shift_origin(xshift=fig_width + 1)
right_title = "Gridded and upward-continued"
with pygmt.config(FONT_TITLE="12p"):
    fig.grdimage(
        grid=grid.magnetic_anomaly,
        cmap=True,
        frame=[f"ESnw+t{right_title}", "xa10000", "ya10000"],
    )
fig.colorbar(cmap=True, frame=["x+lnT"], position="+e")
fig.show()