
Commit
Merge pull request #80 from sfbrigade/marin-county
Add Marin County Scraper
elaguerta authored Sep 3, 2020
2 parents 9f6cb7f + 6d55284 commit df5bff8
Showing 4 changed files with 279 additions and 6 deletions.
3 changes: 2 additions & 1 deletion covid19_sfbayarea/data/__init__.py
@@ -1,13 +1,14 @@
from typing import Dict, Any
from . import alameda
from . import san_francisco
from . import marin
from . import sonoma
from . import solano

scrapers: Dict[str, Any] = {
'alameda': alameda,
# 'contra_costa': None,
# 'marin': None,
'marin': marin,
# 'napa': None,
'san_francisco': san_francisco,
# 'san_mateo': None,
256 changes: 256 additions & 0 deletions covid19_sfbayarea/data/marin.py
@@ -0,0 +1,256 @@
#!/usr/bin/env python3
import csv
from typing import List, Dict, Tuple
from bs4 import BeautifulSoup # type: ignore
from urllib.parse import unquote_plus
from datetime import datetime
from contextlib import contextmanager
import time


from ..webdriver import get_firefox
from .utils import get_data_model

def get_county() -> Dict:
"""Main method for populating county data"""

url = 'https://coronavirus.marinhhs.org/surveillance'
model = get_data_model()

chart_ids = {"cases": "Eq6Es", "deaths": "Eq6Es", "age": "zSHDs", "gender": "FEciW", "race_eth": "aBeEd", "tests": "7sHQq"}
# The time series data for negative tests is no longer published, so positive test data is scraped from the tests chart referenced above.

model['name'] = "Marin County"
model['update_time'] = datetime.today().isoformat()
model["meta_from_baypd"] = ""
model['source_url'] = url
model['meta_from_source'] = get_chart_meta(url, chart_ids)

model["series"]["cases"] = get_series_data(chart_ids["cases"], url, ['Date', 'Total Cases', 'Total Recovered*', 'Total Hospitalized', 'Total Deaths'], "cumul_cases", 'Total Cases', 'cases')
model["series"]["deaths"] = get_series_data(chart_ids["deaths"], url, ['Date', 'Total Cases', 'Total Recovered*', 'Total Hospitalized', 'Total Deaths'], "cumul_deaths", 'Total Deaths', 'deaths')

model["series"]["tests"] = get_test_series(chart_ids["tests"], url)
model["case_totals"]["age_group"], model["death_totals"]["age_group"] = get_breakdown_age(chart_ids["age"], url)
model["case_totals"]["gender"], model["death_totals"]["gender"] = get_breakdown_gender(chart_ids["gender"], url)
model["case_totals"]["race_eth"], model["death_totals"]["race_eth"] = get_breakdown_race_eth(chart_ids["race_eth"], url)
return model

@contextmanager
def chart_frame(driver, chart_id: str): # type: ignore
# The driver and frame are left untyped because Selenium's classes lack type stubs, hence the ignore above.
frame = driver.find_element_by_css_selector(f'iframe[src*="//datawrapper.dwcdn.net/{chart_id}/"]')
driver.switch_to.frame(frame)
try:
yield frame
finally:
driver.switch_to.default_content()
driver.quit()

def get_chart_data(url: str, chart_id: str) -> List[str]:
"""This method extracts parsed csv data from the csv linked in the data wrapper charts."""
with get_firefox() as driver:
driver.implicitly_wait(30)
driver.get(url)

with chart_frame(driver, chart_id):
csv_data = driver.find_element_by_class_name('dw-data-link').get_attribute('href')
# The chart's data link is a data: URI; split off the media type and decode the CSV payload.
if csv_data.startswith('data:'):
media, data = csv_data[5:].split(',', 1)
# Datawrapper consistently serves the CSV with this media type.
if media != 'application/octet-stream;charset=utf-8':
raise ValueError(f'Cannot handle media type "{media}"')
csv_string = unquote_plus(data)
csv_data = csv_string.splitlines()
else:
raise ValueError('Cannot handle this csv_data href')

return csv_data
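
# Note: the list of lines returned by get_chart_data is ready for csv.DictReader,
# e.g. csv.DictReader(get_chart_data('https://coronavirus.marinhhs.org/surveillance', '7sHQq')).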

def get_chart_meta(url: str, chart_ids: Dict[str, str]) -> Tuple[List, List]:
"""This method gets all the metadata underneath the data wrapper charts and the metadata at the top of the county dashboard."""
metadata: set = set()
chart_metadata: set = set()

with get_firefox() as driver:
driver.implicitly_wait(30)
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html5lib')

for soup_obj in soup.findAll('div', attrs={"class":"surveillance-data-text"}):
if soup_obj.findAll('p'):
metadata = {paragraph.text.replace("\u2014", "").replace("\u00a0", "").replace("\u2019", "") for paragraph in soup_obj.findAll('p')}
else:
raise ValueError('Metadata location has changed.')

with get_firefox() as driver: # A fresh driver is used here because reusing the first one intermittently raised connection errors.
driver.implicitly_wait(30)
driver.get(url)
# Collect the iframe source URL for each chart so that the notes block beneath
# each chart can be scraped from the iframe directly; the notes block does not
# show up when parsing the parent dashboard page.
source_list: set = set()
for chart_id in chart_ids.values():
driver.implicitly_wait(30)
source = driver.find_element_by_css_selector(f'iframe[src*="//datawrapper.dwcdn.net/{chart_id}/"]').get_attribute('src')
source_list.add(source)

with get_firefox() as driver:
for source in source_list:
driver.get(source)
time.sleep(5) # Give the chart page time to render; driver.get() returns before the notes block is populated.
soup = BeautifulSoup(driver.page_source, 'html5lib')
for data in soup.findAll('div', attrs = {'class': 'notes-block'}):
chart_metadata.add(data.text.strip())

# Manually adding in metadata about testing data
chart_metadata.add("Negative and pending tests are excluded from the Marin County test data.")
chart_metadata.add("Note that this test data is about tests done by Marin County residents, not about all tests done in Marin County (includes residents and non-residents).")

# Both collections are sets, so repeated metadata strings are deduplicated before returning.
return list(metadata), list(chart_metadata)

def get_series_data(chart_id: str, url: str, headers: list, model_typ: str, typ: str, new_count: str) -> List:
"""This method extracts the date, number of cases/deaths, and new cases/deaths."""

csv_data = get_chart_data(url, chart_id)
csv_reader = csv.DictReader(csv_data)

keys = csv_reader.fieldnames

series: list = list()

if keys != headers:
raise ValueError('The headers have changed')

history: list = list()

for row in csv_reader:
daily: dict = dict()
date_time_obj = datetime.strptime(row['Date'], '%m/%d/%Y')
daily["date"] = date_time_obj.strftime('%Y-%m-%d')
# Collect the case totals in order to compute the change in cases per day
history.append(int(row[typ]))
daily[model_typ] = int(row[typ])
series.append(daily)

history_diff: list = list()
# Since we're subtracting pairwise elements, the range is shortened by one to avoid an off-by-one error.
for i in range(0, len(history)-1):
history_diff.append(int(history[i+1]) - int(history[i]))
# The first day's new count is just that day's cumulative total, which may not be 0.
history_diff.insert(0, int(series[0][model_typ]))

for val, num in enumerate(history_diff):
series[val][new_count] = num
return series
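
# Each series entry pairs a date with the cumulative and new counts, e.g.
# {'date': '2020-03-01', 'cumul_cases': 1, 'cases': 1} for the cases series.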

def get_breakdown_age(chart_id: str, url: str) -> Tuple[List, List]:
"""This method gets the breakdown of cases and deaths by age."""
csv_data = get_chart_data(url, chart_id)
csv_reader = csv.DictReader(csv_data)

keys = csv_reader.fieldnames

c_brkdown: list = list()
d_brkdown: list = list()

if keys != ['Age Category', 'POPULATION', 'Cases', 'Hospitalizations', 'Deaths']:
raise ValueError('The headers have changed')

key_mapping = {"0-9": "0_to_9", "10-18": "10_to_18", "19-34": "19_to_34", "35-49": "35_to_49", "50-64": "50_to_64", "65-79": "65_to_79", "80-94": "80_to_94", "95+": "95_and_older"}

for row in csv_reader:
c_age: dict = dict()
d_age: dict = dict()
# Extracting the age group and the raw count for both cases and deaths.
c_age["group"], d_age["group"] = row['Age Category'], row['Age Category']
if c_age["group"] not in key_mapping:
raise ValueError(str(c_age["group"]) + ' is not in the list of age groups. The age groups have changed.')
else:
c_age["group"] = key_mapping[c_age["group"]]
c_age["raw_count"] = int(row["Cases"])
d_age["group"] = key_mapping[d_age["group"]]
d_age["raw_count"] = int(row["Deaths"])
c_brkdown.append(c_age)
d_brkdown.append(d_age)

return c_brkdown, d_brkdown

def get_breakdown_gender(chart_id: str, url: str) -> Tuple[Dict, Dict]:
"""This method gets the breakdown of cases and deaths by gender."""
csv_data = get_chart_data(url, chart_id)
csv_reader = csv.DictReader(csv_data)

keys = csv_reader.fieldnames

if keys != ['Gender', 'POPULATION', 'Cases', 'Hospitalizations', 'Deaths']:
raise ValueError('The headers have changed.')

genders = ['male', 'female']
c_gender: dict = dict()
d_gender: dict = dict()

for row in csv_reader:
# Each row holds the data for one gender; extract the gender plus the raw case and death counts.
gender = row["Gender"].lower()
if gender not in genders:
raise ValueError("The genders have changed.")
c_gender[gender] = int(row["Cases"])
d_gender[gender] = int(row["Deaths"])

return c_gender, d_gender

def get_breakdown_race_eth(chart_id: str, url: str) -> Tuple[Dict, Dict]:
"""This method gets the breakdown of cases and deaths by race/ethnicity."""

csv_data = get_chart_data(url, chart_id)
csv_reader = csv.DictReader(csv_data)

keys = csv_reader.fieldnames

if keys != ['Race/Ethnicity', 'COUNTY POPULATION', 'Cases', 'Case Percent', 'Hospitalizations', 'Hospitalizations Percent', 'Deaths', 'Deaths Percent']:
raise ValueError("The headers have changed.")

key_mapping = {"Black/African American":"African_Amer", "Hispanic/Latino": "Latinx_or_Hispanic", "White": "White", "Asian": "Asian", "Native Hawaiian/Pacific Islander": "Pacific_Islander", "American Indian/Alaska Native": "Native_Amer", "Multi or Other Race": "Multi_or_Other"}

c_race_eth: dict = dict()
d_race_eth: dict = dict()

for row in csv_reader:
race_eth = row["Race/Ethnicity"]
if race_eth not in key_mapping:
raise ValueError("The race_eth groups have changed.")
else:
c_race_eth[key_mapping[race_eth]] = int(row["Cases"])
d_race_eth[key_mapping[race_eth]] = int(row["Deaths"])

return c_race_eth, d_race_eth

def get_test_series(chart_id: str, url: str) -> List:
"""This method gets the date, the number of new positive tests on that date, and the number of cumulative positive tests."""
csv_data = get_chart_data(url, chart_id)
csv_reader = csv.DictReader(csv_data)

keys = csv_reader.fieldnames

if keys != ['Test Date', 'Positive Tests']:
raise ValueError("The headers have changed.")

test_series: list = list()

cumul_pos = 0
for row in csv_reader:
daily: dict = dict()
date_time_obj = datetime.strptime(row['Test Date'], '%m/%d/%Y')
daily["date"] = date_time_obj.strftime('%Y-%m-%d')
daily["positive"] = int(row["Positive Tests"])
cumul_pos += daily["positive"]
daily["cumul_positive"] = cumul_pos
test_series.append(daily)

return test_series
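
# Each test-series entry looks like {'date': '2020-04-01', 'positive': 3, 'cumul_positive': 10};
# the cumulative count is accumulated here because the chart only reports daily positives.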
16 changes: 15 additions & 1 deletion data_models/README.md
@@ -86,6 +87,7 @@ Below are the tabulations we are making by gender, age group, race/ethnicity, an
"Pacific_Islander":-1,
"White":-1,
"Unknown":-1
"Multi_or_Other": -1
},
"underlying_cond": {
"none":-1,
@@ -137,7 +138,18 @@ The fields will be used for normalizing the county case and death tabulations, a
}
```

5. __Hospitalization Data__
5. __Inmate Data__

Data collection is pending resolution of #108.

```
"inmates": {
"cases": -1,
"deaths": -1
}
```

6. __Hospitalization Data__

California COVID-19 hospitalization data is retrieved separately from the
[California Health and Human Services Open Data Portal
@@ -205,6 +217,8 @@ Scraper authors, please keep an eye out for amendments to the data model.
# Race and Ethnicity
We need to collapse counties that report race and ethnicity into one race/ethnicity dimension. This section will be updated pending information about San Francisco County's methods for reporting race and ethnicity.

The category "Multi_or_Other" was included because Marin rolls up the numbers from "Multi" and "Other" into one. Please note that this category is not relevant for counties that report 'Multiple Race' and 'Other Race' separately.

# Gender
One potential future issue is that some counties still lump non-binary and transgender people under "Other", while other counties have started to differentiate. Our data model would ideally match the most detailed county's gender categories. A county with only the "Other" category would have the value of -1 for the non male/female categories, indicating that they are not collecting that information. However, this means that our `"Other"` category would not be internally comparable or consistent. The `"Other"` category for a county that has "Male, Female, Other, MTF, FTM" as separate datapoints should really be called `"Other - not MTF, not FTM"` and is not comparable to the `"Other"` category for a county that only has "Male, Female, Other".
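
As an illustration with hypothetical counts, the two "Other" values below measure different things and cannot be compared directly:

```
# County A reports only three categories, so "Other" includes MTF and FTM people;
# the unreported categories get -1 to mark that they are not collected.
county_a_gender = {"male": 410, "female": 390, "other": 12, "mtf": -1, "ftm": -1}
# County B differentiates, so its "other" is effectively "Other - not MTF, not FTM".
county_b_gender = {"male": 410, "female": 390, "other": 4, "mtf": 5, "ftm": 3}
```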

10 changes: 6 additions & 4 deletions data_models/data_model.json
@@ -6,12 +6,12 @@
"meta_from_baypd": "STORE IMPORTANT NOTES ABOUT OUR METHODS HERE",
"series": {
"cases": [
{ "date": "yyyy-mm-dd", "cases": -1, "cumul_cases": -1},
{ "date": "yyyy-mm-dd", "cases": -1, "cumul_cases": -1 },
{ "date": "yyyy-mm-dd", "cases": -1, "cumul_cases": -1 }
],
"deaths": [
{ "date": "yyyy-mm-dd", "deaths": -1, "cumul_deaths": -1 },
{ "date": "yyyy-mm-dd", "deaths": -1, "cumul_deaths": -1}
{ "date": "yyyy-mm-dd", "deaths": -1, "cumul_deaths": -1 }
],
"tests": [
{
@@ -57,7 +57,8 @@
"Other": -1,
"Pacific_Islander":-1,
"White":-1,
"Unknown":-1
"Unknown":-1,
"Multi_or_Other": -1
},
"transmission_cat": {
"community": -1,
@@ -84,7 +85,8 @@
"Other": -1,
"Pacific_Islander":-1,
"White":-1,
"Unknown":-1
"Unknown":-1,
"Multi_or_Other": -1
},
"underlying_cond": {
"none":-1,
