diff --git a/Procfile b/Procfile
index aa7dd193..517e2a0c 100644
--- a/Procfile
+++ b/Procfile
@@ -1 +1 @@
-web: gunicorn app.main:APP -w 2 --max-requests 1000 --max-requests-jitter 400 -k uvicorn.workers.UvicornWorker
+web: gunicorn app.main:APP -w 3 -k uvicorn.workers.UvicornWorker
diff --git a/app/services/location/jhu.py b/app/services/location/jhu.py
index 06fa3fe0..6f488742 100644
--- a/app/services/location/jhu.py
+++ b/app/services/location/jhu.py
@@ -140,22 +140,31 @@ async def get_locations():
     # Get all of the data categories locations.
     confirmed = await get_category("confirmed")
     deaths = await get_category("deaths")
-    # recovered = await get_category("recovered")
+    recovered = await get_category("recovered")
 
     locations_confirmed = confirmed["locations"]
     locations_deaths = deaths["locations"]
-    # locations_recovered = recovered["locations"]
+    locations_recovered = recovered["locations"]
 
     # Final locations to return.
     locations = []
-
+    # ***************************************************************************
+    # TODO: This iteration approach assumes the indexes remain the same
+    # and opens us to a CRITICAL ERROR. The removal of a column in the data
+    # source would break the API or SHIFT all the confirmed, deaths, and
+    # recovered data, producing incorrect data for consumers.
+    # ***************************************************************************
     # Go through locations.
     for index, location in enumerate(locations_confirmed):
         # Get the timelines.
+
+        # TEMP: Fix for merging recovery data. See TODO above for more details.
+        key = (location["country"], location["province"])
+
         timelines = {
-            "confirmed": locations_confirmed[index]["history"],
-            "deaths": locations_deaths[index]["history"],
-            # 'recovered' : locations_recovered[index]['history'],
+            "confirmed": location["history"],
+            "deaths": parse_history(key, locations_deaths, index),
+            "recovered": parse_history(key, locations_recovered, index),
         }
 
         # Grab coordinates.
@@ -186,7 +195,12 @@ async def get_locations():
                             for date, amount in timelines["deaths"].items()
                         }
                     ),
-                    "recovered": Timeline({}),
+                    "recovered": Timeline(
+                        {
+                            datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
+                            for date, amount in timelines["recovered"].items()
+                        }
+                    ),
                 },
             )
         )
@@ -194,3 +208,21 @@ async def get_locations():
 
     # Finally, return the locations.
     return locations
+
+
+def parse_history(key: tuple, locations: list, index: int):
+    """
+    Helper for validating and extracting history content from
+    locations data based on index. Validates against the current
+    country/province key to guard against index/column mismatches.
+
+    TEMP: solution until a more efficient approach is implemented in the refactor.
+    """
+    location_history = {}
+    try:
+        if key == (locations[index]["country"], locations[index]["province"]):
+            location_history = locations[index]["history"]
+    except (IndexError, KeyError):
+        LOGGER.debug(f"iteration data merge error: {index} {key}")
+
+    return location_history
diff --git a/tests/test_jhu.py b/tests/test_jhu.py
index 3790218d..f6af4b9e 100644
--- a/tests/test_jhu.py
+++ b/tests/test_jhu.py
@@ -22,3 +22,27 @@ async def test_get_locations(mock_client_session):
     # `jhu.get_locations()` creates id based on confirmed list
     location_confirmed = await jhu.get_category("confirmed")
     assert len(output) == len(location_confirmed["locations"])
+
+    # `jhu.get_locations()` creates id based on deaths list
+    location_deaths = await jhu.get_category("deaths")
+    assert len(output) == len(location_deaths["locations"])
+
+    # `jhu.get_locations()` creates id based on recovered list
+    location_recovered = await jhu.get_category("recovered")
+    assert len(output) == len(location_recovered["locations"])
+
+
+@pytest.mark.parametrize(
+    "key, locations, index, expected",
+    [
+        (("Thailand", "TH"), [{"country": "Thailand", "province": "TH", "history": {"test": "yes"}}], 0, {"test": "yes"}),  # Success
+        (("Deutschland", "DE"), [{"country": "Deutschland", "province": "DE", "history": {"test": "no"}}], 1, {}),  # IndexError
+        (("US", "NJ"), [{"country": "Deutschland", "province": "DE", "history": {"test": "no"}}], 0, {}),  # Invalid key merge
+    ],
+)
+def test_parse_history(key, locations, index, expected):
+    """
+    Test validating and extracting history content from
+    locations data based on index.
+    """
+    assert jhu.parse_history(key, locations, index) == expected
diff --git a/tests/test_routes.py b/tests/test_routes.py
index eea153bc..5c81641b 100644
--- a/tests/test_routes.py
+++ b/tests/test_routes.py
@@ -112,8 +112,7 @@ async def test_v2_locations(self):
         with open(filepath, "r") as file:
             expected_json_output = file.read()
 
-        # TODO: Why is this failing?
-        # assert return_data == json.loads(expected_json_output)
+        assert return_data == json.loads(expected_json_output)
 
     async def test_v2_locations_id(self):
         state = "locations"
@@ -132,8 +131,7 @@ async def test_v2_locations_id(self):
         with open(filepath, "r") as file:
             expected_json_output = file.read()
 
-        # TODO: Why is this failing?
-        # assert return_data == expected_json_output
+        assert return_data == json.loads(expected_json_output)
 
 
 @pytest.mark.asyncio
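Reviewer note: a minimal standalone sketch of the failure mode the TODO describes and how the `parse_history` guard handles it. The helper body is copied from the diff above; the module-level logger, the return annotation, and the sample rows (including the `recovered_rows` name) are illustrative assumptions, not part of the patch.

```python
import logging

LOGGER = logging.getLogger(__name__)


def parse_history(key: tuple, locations: list, index: int) -> dict:
    """Return the history at `index` only when its (country, province) matches `key`."""
    location_history = {}
    try:
        if key == (locations[index]["country"], locations[index]["province"]):
            location_history = locations[index]["history"]
    except (IndexError, KeyError):
        LOGGER.debug(f"iteration data merge error: {index} {key}")
    return location_history


# Hypothetical scenario: the recovered list lost its first row, so index 0
# now holds Germany's data. Blind index-based merging would silently attach
# it to Thailand's location; the key check returns {} instead of wrong data.
recovered_rows = [{"country": "Germany", "province": "DE", "history": {"3/1/20": 1}}]
key = ("Thailand", "TH")  # key built from the confirmed row at the same index
assert parse_history(key, recovered_rows, 0) == {}
```

Returning an empty history on a mismatch trades completeness for correctness: a consumer sees a missing timeline rather than another location's numbers, which matches the "TEMP until the refactor" intent stated in the docstring.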