- changed log format: too many tabs
- added more logs
- changed the generator time step on GridCollection: to work, it needed more files than necessary
- added coherence computation
ludwigVonKoopa committed Jun 18, 2021
commit 0834ed25301e0672957a0277f3e3f30e69fd6352
3 changes: 3 additions & 0 deletions CHANGELOG.rst
@@ -12,9 +12,12 @@ Changed
^^^^^^^
Fixed
^^^^^
+- GridCollection get_next_time_step & get_previous_time_step needed more files than necessary in the dataset list.
+  The loop explicitly accessed self.datasets[i - 1] (or [i + 1]) even when i == 0, so the index went out of range.
Added
^^^^^


[3.4.0] - 2021-03-29
--------------------
Changed
Expand Down
4 changes: 1 addition & 3 deletions src/py_eddy_tracker/__init__.py
@@ -32,9 +32,7 @@


def start_logger():
-    FORMAT_LOG = (
-        "%(levelname)-8s %(asctime)s %(module)s.%(funcName)s :\n\t\t\t\t\t%(message)s"
-    )
+    FORMAT_LOG = "%(levelname)-8s %(asctime)s %(module)s.%(funcName)s :\n\t%(message)s"
    logger = logging.getLogger("pet")
    if len(logger.handlers) == 0:
        # set up logging to CONSOLE
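For context, a minimal standalone sketch (not part of the diff) of what the reduced single-tab format produces with the standard logging module:

```python
# Standalone sketch, not part of the diff: exercising the new one-tab format.
import logging

FORMAT_LOG = "%(levelname)-8s %(asctime)s %(module)s.%(funcName)s :\n\t%(message)s"
logging.basicConfig(format=FORMAT_LOG, level=logging.DEBUG)
logging.getLogger("pet").debug("the message now sits on its own line, one tab in")
# Prints something like:
# DEBUG    2021-06-18 12:00:00,000 sketch.<module> :
# 	the message now sits on its own line, one tab in
```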
22 changes: 11 additions & 11 deletions src/py_eddy_tracker/dataset/grid.py
@@ -2257,11 +2257,13 @@ def from_netcdf_cube(cls, filename, x_name, y_name, t_name, heigth=None):
@classmethod
def from_netcdf_list(cls, filenames, t, x_name, y_name, indexs=None, heigth=None):
    new = cls()
-    for i, t in enumerate(t):
-        d = RegularGridDataset(filenames[i], x_name, y_name, indexs=indexs)
+    for i, _t in enumerate(t):
+        filename = filenames[i]
+        logger.debug(f"load file {i:02d}/{len(t)} t={_t} : {filename}")
+        d = RegularGridDataset(filename, x_name, y_name, indexs=indexs)
        if heigth is not None:
            d.add_uv(heigth)
-        new.datasets.append((t, d))
+        new.datasets.append((_t, d))
    return new
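A hedged usage sketch for the loader above; the file names, time values, and variable names below are assumptions, not values from the diff:

```python
# Hypothetical usage; file names, time values, and variable names are assumptions.
from py_eddy_tracker.dataset.grid import GridCollection

c = GridCollection.from_netcdf_list(
    filenames=["dt_20210101.nc", "dt_20210102.nc"],  # one file per time step
    t=[23741, 23742],                                # one time value per file
    x_name="longitude",
    y_name="latitude",
    heigth="adt",  # optional: compute u/v from this height variable
)
```

With the "pet" logger at DEBUG level, each file load is now traced by the new logger.debug call.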

def shift_files(self, t, filename, heigth=None, **rgd_kwargs):
@@ -2273,6 +2275,7 @@ def shift_files(self, t, filename, heigth=None, **rgd_kwargs):
    if heigth is not None:
        d.add_uv(heigth)
    self.datasets.append((t, d))
+    logger.debug(f"shift and adding i={len(self.datasets)} t={t} : {filename}")

def interp(self, grid_name, t, lons, lats, method="bilinear"):
    """
@@ -2433,6 +2436,7 @@ def advect(
    else:
        mask_particule += isnan(x) + isnan(y)
    while True:
+        logger.debug(f"advect : t={t}")
        if (backward and t <= t1) or (not backward and t >= t1):
            t0, u0, v0, m0 = t1, u1, v1, m1
            t1, d1 = generator.__next__()
@@ -2459,25 +2463,21 @@
        yield t, x, y

def get_next_time_step(self, t_init):
-    first = True
    for i, (t, dataset) in enumerate(self.datasets):
        if t < t_init:
            continue
-        if first:
-            first = False
-            yield self.datasets[i - 1]
-
+        logger.debug(f"i={i}, t={t}, dataset={dataset}")
        yield t, dataset

def get_previous_time_step(self, t_init):
-    first = True
    i = len(self.datasets)
    for t, dataset in reversed(self.datasets):
        i -= 1
        if t > t_init:
            continue
-        if first:
-            first = False
-            yield self.datasets[i + 1]
-
+        logger.debug(f"i={i}, t={t}, dataset={dataset}")
        yield t, dataset
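A toy reproduction (stand-in data, not the real class) of the bug these rewrites remove: before the first matching step was yielded, the old code also yielded a neighbouring entry, which leaves the valid range when the match sits at the list boundary:

```python
# Toy reproduction of the removed pattern; the (t, dataset) pairs are stand-ins.
datasets = [(0, "g0"), (1, "g1"), (2, "g2")]

def old_get_previous_time_step(t_init):
    first = True
    i = len(datasets)
    for t, dataset in reversed(datasets):
        i -= 1
        if t > t_init:
            continue
        if first:
            first = False
            yield datasets[i + 1]  # i + 1 == len(datasets) when the match is the last element
        yield t, dataset

try:
    list(old_get_previous_time_step(5))
except IndexError as e:
    print("old behaviour:", e)  # -> old behaviour: list index out of range
```

(In get_next_time_step the sibling lookup, datasets[i - 1] with i == 0, wraps to the last element in Python and returned the wrong dataset instead of raising.)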


66 changes: 57 additions & 9 deletions src/py_eddy_tracker/observations/groups.py
@@ -3,9 +3,10 @@

from numba import njit
from numba import types as nb_types
-from numpy import arange, int32, interp, median, where, zeros
+from numpy import arange, int32, interp, median, where, zeros, meshgrid, concatenate

from .observation import EddiesObservations
+from ..poly import group_obs

logger = logging.getLogger("pet")

@@ -87,7 +88,53 @@ def advect(x, y, c, t0, n_days):
    return t, x, y


-def particle_candidate(x, y, c, eddies, t_start, i_target, pct, **kwargs):
+def create_particles(eddies, step):
+    """Create particles only inside the speed contour. Avoids creating a too-large numpy array only to mask it.
+
+    :param eddies: network where eddies are
+    :type eddies: network
+    :param step: step for particles
+    :type step: float
+    :return: lon, lat and indices of particles inside the speed contour
+    :rtype: tuple(np.array)
+    """
+    lon = eddies.contour_lon_s
+    lat = eddies.contour_lat_s
+
+    # compute the bounding box of each eddy
+    lonMins = lon.min(axis=1)
+    lonMins = lonMins - (lonMins % step)
+    lonMaxs = lon.max(axis=1)
+    lonMaxs = lonMaxs - (lonMaxs % step) + step * 2
+
+    latMins = lat.min(axis=1)
+    latMins = latMins - (latMins % step)
+    latMaxs = lat.max(axis=1)
+    latMaxs = latMaxs - (latMaxs % step) + step * 2
+
+    lon = []
+    lat = []
+    # for each eddy, create a mesh of particles, then concatenate
+    for lonMin, lonMax, latMin, latMax in zip(lonMins, lonMaxs, latMins, latMaxs):
+        x0, y0 = meshgrid(arange(lonMin, lonMax, step), arange(latMin, latMax, step))
+        x0, y0 = x0.reshape(-1), y0.reshape(-1)
+        lon.append(x0)
+        lat.append(y0)
+
+    x = concatenate(lon)
+    y = concatenate(lat)
+
+    _, i = group_obs(x, y, 1, 360)
+    x, y = x[i], y[i]
+
+    i_start = eddies.contains(x, y, intern=True)
+    m = i_start != -1
+    return x[m], y[m], i_start[m]
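For intuition, a worked example of the bounding-box snapping performed above; the numbers are illustrative only:

```python
# Worked example of the snapping in create_particles; values are illustrative.
step = 0.125
lon_min, lon_max = 5.03, 7.91
snapped_min = lon_min - (lon_min % step)             # ~5.0: floored onto the step grid
snapped_max = lon_max - (lon_max % step) + step * 2  # ~8.125: floored, plus two steps of margin
# arange(snapped_min, snapped_max, step) then yields particle positions aligned across eddies
```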


+def particle_candidate(c, eddies, step_mesh, t_start, i_target, pct, **kwargs):
"""Select particles within eddies, advect them, return target observation and associated percentages

:param np.array(float) x: longitude of particles
@@ -100,27 +147,28 @@ def particle_candidate(x, y, c, eddies, t_start, i_target, pct, **kwargs):
    :params dict kwargs: dict of params given to `advect`

    """

    # Obs from initial time
    m_start = eddies.time == t_start
    e = eddies.extract_with_mask(m_start)

    # to be able to get global index
    translate_start = where(m_start)[0]
-    # Identify particle in eddies (only in core)
-    i_start = e.contains(x, y, intern=True)
-    m = i_start != -1
-    x, y, i_start = x[m], y[m], i_start[m]
-    # Advect
+    x, y, i_start = create_particles(e, step_mesh)
+
+    # Advection
    t_end, x, y = advect(x, y, c, t_start, **kwargs)

    # eddies at last date
    m_end = eddies.time == t_end / 86400
    e_end = eddies.extract_with_mask(m_end)

    # to be able to get global index
    translate_end = where(m_end)[0]

    # Id eddies for each alive particle (in core and extern)
    i_end = e_end.contains(x, y)

    # compute matrix and fill target array
    get_matrix(i_start, i_end, translate_start, translate_end, i_target, pct)
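Two conventions of this function are worth making explicit: i_target and pct are output arrays filled in place by get_matrix, and advect works in seconds while eddies.time is in days, hence the division by 86400. A minimal sketch of the assumed setup; the shapes and dtypes below are assumptions, not values from the diff:

```python
# Sketch of the calling conventions; shapes and dtypes are assumptions.
from numpy import full, zeros

n = 500                       # assumed number of observations in the network
i_target = full(n, -1, "i4")  # filled in place: index of the matched target eddy
pct = zeros(n, "f4")          # filled in place: percentage of matching particles

seconds_per_day = 86400
t_end = 14 * seconds_per_day      # advect() returns t in seconds...
assert t_end / 86400 == 14        # ...eddies.time is in days, hence the division
```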
