"""app.services.location.jhu.py"""
import csv
import logging
import os
from datetime import datetime
from pprint import pformat as pf
from asyncache import cached
from cachetools import TTLCache
from ...caches import check_cache, load_cache
from ...coordinates import Coordinates
from ...location import TimelinedLocation
from ...models import Timeline
from ...utils import countries
from ...utils import date as date_util
from ...utils import httputils
from . import LocationService
LOGGER = logging.getLogger("services.location.jhu")
PID = os.getpid()
class JhuLocationService(LocationService):
"""
Service for retrieving locations from Johns Hopkins CSSE (https://github.com/CSSEGISandData/COVID-19).
"""
async def get_all(self):
# Get the locations.
locations = await get_locations()
return locations
async def get(self, loc_id): # pylint: disable=arguments-differ
# Get location at the index equal to provided id.
locations = await self.get_all()
return locations[loc_id]
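
# Example usage (an illustrative sketch, not part of the module): the service is
# normally driven by the API layer, but it can be exercised directly from an
# asyncio event loop. This assumes httputils.CLIENT_SESSION has already been
# initialized by the application's startup code.
#
#   import asyncio
#
#   async def main():
#       service = JhuLocationService()
#       locations = await service.get_all()
#       first = await service.get(0)
#
#   asyncio.run(main())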

# ---------------------------------------------------------------
# Base URL for fetching category data.
BASE_URL = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
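# For example, for the "confirmed" category, get_category below requests:
#   https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv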

@cached(cache=TTLCache(maxsize=4, ttl=1800))
async def get_category(category):
    """
    Retrieves the data for the provided category. The data is cached for 30 minutes
    locally and for 1 hour via the shared Redis cache.

    :returns: The data for the category.
    :rtype: dict
    """
    # Adhere to the category naming standard.
    category = category.lower()
    data_id = f"jhu.{category}"

    # Check the shared cache first.
    cache_results = await check_cache(data_id)
    if cache_results:
        LOGGER.info(f"{data_id} using shared cache results")
        results = cache_results
    else:
        LOGGER.info(f"{data_id} shared cache empty")

        # URL to request the data from.
        url = BASE_URL + f"time_series_covid19_{category}_global.csv"

        # Request the data.
        LOGGER.info(f"{data_id} Requesting data...")
        async with httputils.CLIENT_SESSION.get(url) as response:
            text = await response.text()
        LOGGER.debug(f"{data_id} Data received")

        # Parse the CSV.
        data = list(csv.DictReader(text.splitlines()))
        LOGGER.debug(f"{data_id} CSV parsed")

        # The normalized locations.
        locations = []

        for item in data:
            # Filter out all the date columns.
            dates = dict(filter(lambda element: date_util.is_date(element[0]), item.items()))

            # Make the location history from the dates; missing values count as 0.
            history = {date: int(float(amount or 0)) for date, amount in dates.items()}

            # The latest value is the last entry in the history.
            latest = list(history.values())[-1]

            # Country for this location.
            country = item["Country/Region"]

            # Normalize the item and append it to the locations.
            locations.append(
                {
                    "country": country,
                    "country_code": countries.country_code(country),
                    "province": item["Province/State"],
                    "coordinates": {
                        "lat": item["Lat"],
                        "long": item["Long"],
                    },
                    "history": history,
                    "latest": int(latest or 0),
                }
            )
        LOGGER.debug(f"{data_id} Data normalized")

        # Latest total across all locations.
        latest = sum(map(lambda location: location["latest"], locations))

        # Assemble the final data.
        results = {
            "locations": locations,
            "latest": latest,
            "last_updated": datetime.utcnow().isoformat() + "Z",
            "source": "https://github.com/ExpDev07/coronavirus-tracker-api",
        }
        # Save the results to the distributed cache.
        await load_cache(data_id, results)

    LOGGER.info(f"{data_id} results:\n{pf(results, depth=1)}")
    return results
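
# For reference, the dict returned by get_category has the following shape
# (keys taken from the normalization above; the values are illustrative):
#
#   {
#       "locations": [
#           {
#               "country": "Thailand",
#               "country_code": "TH",
#               "province": "",
#               "coordinates": {"lat": "15", "long": "101"},
#               "history": {"1/22/20": 2, "1/23/20": 3, ...},
#               "latest": 3,
#           },
#           ...
#       ],
#       "latest": 3,
#       "last_updated": "2020-03-21T12:00:00Z",
#       "source": "https://github.com/ExpDev07/coronavirus-tracker-api",
#   }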

@cached(cache=TTLCache(maxsize=1, ttl=1800))
async def get_locations():
    """
    Retrieves the locations from the categories. The locations are cached for 30 minutes.

    :returns: The locations.
    :rtype: List[Location]
    """
    data_id = "jhu.locations"
    LOGGER.info(f"pid:{PID}: {data_id} Requesting data...")

    # Get the locations for all of the data categories.
    confirmed = await get_category("confirmed")
    deaths = await get_category("deaths")
    recovered = await get_category("recovered")

    locations_confirmed = confirmed["locations"]
    locations_deaths = deaths["locations"]
    locations_recovered = recovered["locations"]

    # Final locations to return.
    locations = []

    # ***************************************************************************
    # TODO: This iteration approach assumes the indexes remain the same
    # and opens us to a CRITICAL ERROR. The removal of a column in the data source
    # would break the API or SHIFT all the data (confirmed, deaths, recovered),
    # producing incorrect data for consumers.
    # ***************************************************************************
    # Go through the locations.
    for index, location in enumerate(locations_confirmed):
        # Get the timelines.
        # TEMP: Fix for merging recovery data. See the TODO above for more details.
        key = (location["country"], location["province"])
        timelines = {
            "confirmed": location["history"],
            "deaths": parse_history(key, locations_deaths),
            "recovered": parse_history(key, locations_recovered),
        }

        # Grab the coordinates.
        coordinates = location["coordinates"]

        # Create the location (supporting timelines) and append it.
        locations.append(
            TimelinedLocation(
                id=index,
                country=location["country"],
                province=location["province"],
                coordinates=Coordinates(latitude=coordinates["lat"], longitude=coordinates["long"]),
                last_updated=datetime.utcnow().isoformat() + "Z",
                # Convert the JHU date keys ("%m/%d/%y") to ISO timestamps.
                timelines={
                    "confirmed": Timeline(
                        timeline={
                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                            for date, amount in timelines["confirmed"].items()
                        }
                    ),
                    "deaths": Timeline(
                        timeline={
                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                            for date, amount in timelines["deaths"].items()
                        }
                    ),
                    "recovered": Timeline(
                        timeline={
                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
                            for date, amount in timelines["recovered"].items()
                        }
                    ),
                },
            )
        )
    LOGGER.info(f"{data_id} Data normalized")

    return locations

def parse_history(key: tuple, locations: list):
    """
    Helper for validating and extracting the history content from the locations
    data based on the given key. Matching on the country/province key guards
    against the index/column misalignment described in the TODO above.
    """
    for location in locations:
        if (location["country"], location["province"]) == key:
            return location["history"]

    LOGGER.debug(f"iteration data merge error: {key}")
    return {}
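
# Example (illustrative): merging the deaths history for a single location.
#
#   key = ("Australia", "New South Wales")
#   deaths_history = parse_history(key, locations_deaths)
#   # -> {"1/22/20": 0, "1/23/20": 0, ...}, or {} if no matching row exists.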