-
Notifications
You must be signed in to change notification settings - Fork 90
Expand file tree
/
Copy pathblock_averaged_sources.py
More file actions
147 lines (118 loc) · 5.25 KB
/
block_averaged_sources.py
File metadata and controls
147 lines (118 loc) · 5.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
# Copyright (c) 2018 The Harmonica Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Gridding with block-averaged equivalent sources
===============================================

By default, the :class:`harmonica.EquivalentSources` class locates one point
source beneath each data point during the fitting process. Alternatively, we
can use another strategy: the *block-averaged sources*, introduced in
[Soler2021]_.

This method divides the survey region (defined by the data) into square blocks
of equal size, computes the median coordinates of the data points that fall
inside each block and locates one source beneath every averaged position. This
way, we define one equivalent source per block, with the exception of empty
blocks that won't get any source.

This method has two main benefits:

- It lowers the amount of sources involved in the interpolation, therefore it
  reduces the computer memory requirements and the computation time of the
  fitting and prediction processes.
- It might avoid to produce aliasing on the output grids, specially for
  surveys with oversampling along a particular direction, like airborne ones.

We can make use of the block-averaged sources within the
:class:`harmonica.EquivalentSources` class by passing a value to the
``block_size`` parameter, which controls the size of the blocks. We recommend
using a ``block_size`` not larger than the desired resolution of the
interpolation grid.

The depth of the sources can be set analogously to the regular equivalent
sources: we can use a ``constant`` depth (every source is located at the same
depth) or a ``relative`` depth (where each source is located at a constant
shift beneath the median location obtained during the block-averaging process).
The depth of the sources can be set through the ``depth`` parameter.
"""
import ensaio
import numpy as np
import pandas as pd
import pygmt
import pyproj
import verde as vd
import harmonica as hm
# Download the Great Britain total-field magnetic anomaly sample dataset
data_path = ensaio.fetch_britain_magnetic(version=1)
survey = pd.read_csv(data_path)

# Keep only a small window of the survey so this example runs quickly
geographic_region = [-5.5, -4.7, 57.8, 58.5]
in_window = vd.inside((survey.longitude, survey.latitude), geographic_region)
survey = survey[in_window]
print("Number of data points:", survey.shape[0])
print("Mean height of observations:", survey.height_m.mean())

# The window is small enough that a map projection lets us work with
# Cartesian coordinates from here on
projection = pyproj.Proj(proj="merc", lat_ts=survey.latitude.mean())
easting, northing = projection(survey.longitude.values, survey.latitude.values)
observation_coords = (easting, northing, survey.height_m)
cartesian_region = vd.get_region((easting, northing))

# Build the gridder with block-averaged sources located at a fixed depth
# beneath the observations. The blocks are 500 m wide to match the target
# grid spacing, and the damping regularizes the fit so the prediction is
# smooth and numerically stable.
equivalent_sources = hm.EquivalentSources(depth=1000, damping=1, block_size=500)

# Estimate the source coefficients from the observed magnetic anomaly
equivalent_sources.fit(observation_coords, survey.total_field_anomaly_nt)

# Report an R² score against the observations. This quantifies how well the
# sources reproduce the data, NOT how good the interpolation will be.
print("R² score:", equivalent_sources.score(observation_coords, survey.total_field_anomaly_nt))

# Predict on a regular grid with 500 m spacing. The prediction needs an
# upward coordinate for the grid points: using 1500 m (mean flight height is
# about 500 m) effectively upward-continues the data as well.
target_coords = vd.grid_coordinates(
    region=cartesian_region, spacing=500, extra_coords=1500
)
anomaly_grid = equivalent_sources.grid(
    coordinates=target_coords, data_names=["magnetic_anomaly"]
)

# The result is an xarray.Dataset holding values, coordinates, and metadata
print("\nGenerated grid:\n", anomaly_grid)

# Derive the figure dimensions from the Cartesian region
x_min, x_max, y_min, y_max = cartesian_region
map_height = 10
map_width = map_height * (x_max - x_min) / (y_max - y_min)
map_scale = (y_max - y_min) / (map_height / 100)
map_projection = f"x1:{map_scale}"

# Plot the observed anomaly next to the gridded, upward-continued version
fig = pygmt.Figure()
title = "Observed magnetic anomaly data"

# Symmetric color limits taken from the 99.9th percentile of the absolute
# anomaly values
color_limit = np.quantile(np.abs(survey.total_field_anomaly_nt), 0.999)

# Build the colormap for the data
pygmt.makecpt(
    cmap="balance+h0",
    series=[-color_limit, color_limit],
    background=True,
)

with pygmt.config(FONT_TITLE="12p"):
    fig.plot(
        projection=map_projection,
        region=cartesian_region,
        frame=[f"WSne+t{title}", "xa10000", "ya10000"],
        x=easting,
        y=northing,
        fill=survey.total_field_anomaly_nt,
        style="c0.1c",
        cmap=True,
    )
fig.colorbar(cmap=True, frame=["x+lnT"], position="+e")

fig.shift_origin(xshift=map_width + 1)
title = "Gridded and upward-continued"

with pygmt.config(FONT_TITLE="12p"):
    fig.grdimage(
        frame=[f"ESnw+t{title}", "xa10000", "ya10000"],
        grid=anomaly_grid.magnetic_anomaly,
        cmap=True,
    )
fig.colorbar(cmap=True, frame=["x+lnT"], position="+e")
fig.show()