Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Different policies for node groups #37

Open
wants to merge 19 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,4 @@
*.swp

.DS_Store
/seirsplus/.ipynb_checkpoints/
2 changes: 1 addition & 1 deletion examples/Extended_SEIRS_Workplace_TTI_Demo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -3279,4 +3279,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
}
}
Binary file added seirsplus/__pycache__/FARZ.cpython-37.pyc
Binary file not shown.
Binary file added seirsplus/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary file added seirsplus/__pycache__/models.cpython-37.pyc
Binary file not shown.
Binary file added seirsplus/__pycache__/networks.cpython-37.pyc
Binary file not shown.
Binary file added seirsplus/__pycache__/sim_loops.cpython-37.pyc
Binary file not shown.
Binary file added seirsplus/__pycache__/utilities.cpython-37.pyc
Binary file not shown.
35 changes: 29 additions & 6 deletions seirsplus/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1880,6 +1880,7 @@ def __init__(self, G, beta, sigma, lamda, gamma,
# Initialize other node metadata:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tested = numpy.array([False]*self.numNodes).reshape((self.numNodes,1))
self.testedTime = numpy.array([-1] * self.numNodes).reshape((self.numNodes, 1)) # the time that the node was last tested: negative means it was not tested
self.positive = numpy.array([False]*self.numNodes).reshape((self.numNodes,1))
self.numTested = numpy.zeros(6*self.numNodes)
self.numPositive = numpy.zeros(6*self.numNodes)
Expand Down Expand Up @@ -1933,6 +1934,21 @@ def __init__(self, G, beta, sigma, lamda, gamma,

#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Two helper functions to get the node list and mask for a group.
# If groupName is `all` then they return the node list / mask covering
# all vertices, even if nodeGroups were not set.
def get_nodes(self, groupName='all'):
    """Return the node indices belonging to *groupName*.

    The pseudo-group ``'all'`` yields every node index (as a ``range``)
    and works even when no node groups were configured.
    """
    if groupName != 'all':
        return self.nodeGroupData[groupName]['nodes']
    return range(self.numNodes)

def get_mask(self, groupName='all'):
    """Return the (numNodes, 1) membership mask for *groupName*.

    The pseudo-group ``'all'`` returns an all-ones column vector and
    works even when no node groups were configured.
    """
    if groupName != 'all':
        return self.nodeGroupData[groupName]['mask']
    return numpy.ones(shape=(self.numNodes, 1))

# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

def update_parameters(self):

Expand Down Expand Up @@ -2470,6 +2486,8 @@ def set_isolation(self, node, isolate):

def set_tested(self, node, tested):
    """Record the tested status of *node*.

    Updates both the overall tested flag and the tested-in-current-state
    flag; a positive *tested* also stamps the current model time into
    ``testedTime`` so test-prioritization policies can sort by recency.
    """
    self.tested[node] = tested
    self.testedInCurrentState[node] = tested
    if tested:
        # remember when this node was last given a test
        self.testedTime[node] = self.t

#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Expand All @@ -2480,12 +2498,17 @@ def set_positive(self, node, positive):
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

def introduce_exposures(self, num_new_exposures):
    """Expose randomly chosen nodes to the disease (exogenous introductions).

    num_new_exposures may be an int (exposures drawn from the whole
    population) or a dictionary of the form
    {"group_1": num_1, "group_2": num_2, ...}, in which case num_i
    exposures are introduced into group_i.
    """
    # Normalize the scalar form to the dict form over the pseudo-group "all".
    if not isinstance(num_new_exposures,dict):
        num_new_exposures = {"all": num_new_exposures }
    for group,num in num_new_exposures.items():
        # Sample nodes without replacement from the group's node list.
        exposedNodes = numpy.random.choice(self.get_nodes(group), size=num, replace=False)
        for exposedNode in exposedNodes:
            # Only susceptible nodes transition; a quarantined-susceptible
            # node becomes quarantined-exposed. Nodes in any other state
            # are left unchanged, so fewer than `num` exposures may occur.
            if(self.X[exposedNode]==self.S):
                self.X[exposedNode] = self.E
            elif(self.X[exposedNode]==self.Q_S):
                self.X[exposedNode] = self.Q_E


#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Expand Down
104 changes: 89 additions & 15 deletions seirsplus/sim_loops.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import numpy

import time

import random


def run_tti_sim(model, T,
Expand All @@ -17,7 +17,18 @@ def run_tti_sim(model, T,
isolation_compliance_positive_individual=[None], isolation_compliance_positive_groupmate=[None],
isolation_compliance_positive_contact=[None], isolation_compliance_positive_contactgroupmate=[None],
isolation_lag_symptomatic=1, isolation_lag_positive=1, isolation_lag_contact=0, isolation_groups=None,
cadence_testing_days=None, cadence_cycle_length=28, temporal_falseneg_rates=None
cadence_testing_days=None, cadence_cycle_length=28, temporal_falseneg_rates=None,
test_priority = 'random',
# test_priority: how to to choose which nodes to test:
# 'random' - use test budget for random fraction of eligible population, 'last_tested' - sort according to the time passed since testing (breaking ties randomly)
# if test_priority is callable then use as a key to sort nodes (lower value is higher priority)
history = None,
# history is a dictionary that, if provided, will be updated with history and summary information for logging
# it preferably should be OrderedDict if we want to preserve ordering of logs
stopping_policy=None,
# stopping_policy: function that takes as input the model and current history and decides whether to stop execution
# returns True to stop, False to continue running
verbose = True, # suppress printing if verbose is false - useful for running many simulations in parallel
):

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expand Down Expand Up @@ -75,23 +86,65 @@ def run_tti_sim(model, T,

model.tmax = T
running = True


def log(d):
    # Merge the key/value pairs of d into the `history` dict (from the
    # enclosing scope), keyed by the current model time. No-op when the
    # caller did not supply a history dict. Note: `history` and `model`
    # are only read/mutated, never rebound, so no nonlocal is required.
    #nonlocal history # uncomment for Python 3.x
    #nonlocal model # uncomment for Python 3.x
    if history is None: return #o/w assume it's a dictionary
    if model.t in history:
        # Same timestamp already logged: merge rather than overwrite.
        history[model.t].update(d)
    else:
        # Copy d so later mutations by the caller don't alter the log.
        history[model.t] = dict(d)

def vprint(s):
    """Print *s* unless the enclosing run was started with verbose=False.

    Used to silence per-iteration status output when many simulations
    are run in parallel.
    """
    if not verbose:
        return
    print(s)


while running:

running = model.run_iteration()

if not (history is None): # log current state of the model
d = {}
statistics = ["numS","numE","numI_pre","numI_sym","numI_asym","numH","numR","numF","numQ_S","numQ_E","numQ_pre","numQ_sym","numQ_asym","numQ_R"]
for att in statistics:
d[att] = getattr(model,att)[model.tidx]
if (model.nodeGroupData):
for groupName in model.nodeGroupData:
groupData = model.nodeGroupData[groupName]
d[groupName+"/"+att] = groupData[att][model.tidx]
log(d)


if running and stopping_policy:
running = not stopping_policy(model,history)
if not running:
model.finalize_data_series()



#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Introduce exogenous exposures randomly:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(int(model.t)!=int(timeOfLastIntroduction)):

    timeOfLastIntroduction = model.t

    # average_introductions_per_day may be a scalar rate or a dict of
    # per-group rates {"group": rate, ...}; draw one Poisson count per
    # group (or one overall) accordingly.
    if isinstance(average_introductions_per_day,dict):
        numNewExposures = {}
        for group,num in average_introductions_per_day.items():
            numNewExposures[group] = numpy.random.poisson(lam=num)
        totalNewExposures = sum(numNewExposures.values())
    else:
        numNewExposures = numpy.random.poisson(lam=average_introductions_per_day)
        totalNewExposures = numNewExposures

    model.introduce_exposures(num_new_exposures=numNewExposures)
    log({"numNewExposures": numNewExposures})

    # BUG FIX: when per-group rates are given, numNewExposures is a dict,
    # and both `dict > 0` and "%d" % dict raise TypeError in Python 3.
    # Compare and report the total count across groups instead.
    if(totalNewExposures > 0):
        vprint("[NEW EXPOSURE @ t = %.2f (%d exposed)]" % (model.t, totalNewExposures))

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Execute testing policy at designated intervals:
Expand All @@ -105,14 +158,15 @@ def run_tti_sim(model, T,

currentNumInfected = model.total_num_infected()[model.tidx]
currentPctInfected = model.total_num_infected()[model.tidx]/model.numNodes
log({"currentNumInfected": currentNumInfected})

if(currentPctInfected >= intervention_start_pct_infected and not interventionOn):
interventionOn = True
interventionStartTime = model.t

if(interventionOn):

print("[INTERVENTIONS @ t = %.2f (%d (%.2f%%) infected)]" % (model.t, currentNumInfected, currentPctInfected*100))
vprint("[INTERVENTIONS @ t = %.2f (%d (%.2f%%) infected)]" % (model.t, currentNumInfected, currentPctInfected*100))

nodeStates = model.X.flatten()
nodeTestedStatuses = model.tested.flatten()
Expand Down Expand Up @@ -257,8 +311,15 @@ def run_tti_sim(model, T,
testingPool_degrees = model.degree.flatten()[testingPool]
testingPool_degreeWeights = numpy.power(testingPool_degrees,random_testing_degree_bias)/numpy.sum(numpy.power(testingPool_degrees,random_testing_degree_bias))

if(len(testingPool) > 0):
randomSelection = testingPool[numpy.random.choice(len(testingPool), numRandomTests, p=testingPool_degreeWeights, replace=False)]
poolSize = len(testingPool)
if(poolSize > 0):
if callable(test_priority):
randomSelection = sorted(testingPool, key=test_priority)[:numRandomTests]
elif test_priority == 'last_tested':
# sort the pool according to the time they were last tested, breaking ties randomly
randomSelection = sorted(testingPool,key = lambda i: (model.testedTime[i], random.randint(0,poolSize*poolSize)))[:numRandomTests]
else:
randomSelection = testingPool[numpy.random.choice(len(testingPool), numRandomTests, p=testingPool_degreeWeights, replace=False)]


#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expand Down Expand Up @@ -372,14 +433,14 @@ def run_tti_sim(model, T,
tracingPoolQueue.append(newTracingPool)


print("\t"+str(numTested_symptomatic) +"\ttested due to symptoms [+ "+str(numPositive_symptomatic)+" positive (%.2f %%) +]" % (numPositive_symptomatic/numTested_symptomatic*100 if numTested_symptomatic>0 else 0))
print("\t"+str(numTested_tracing) +"\ttested as traces [+ "+str(numPositive_tracing)+" positive (%.2f %%) +]" % (numPositive_tracing/numTested_tracing*100 if numTested_tracing>0 else 0))
print("\t"+str(numTested_random) +"\ttested randomly [+ "+str(numPositive_random)+" positive (%.2f %%) +]" % (numPositive_random/numTested_random*100 if numTested_random>0 else 0))
print("\t"+str(numTested) +"\ttested TOTAL [+ "+str(numPositive)+" positive (%.2f %%) +]" % (numPositive/numTested*100 if numTested>0 else 0))
vprint("\t"+str(numTested_symptomatic) +"\ttested due to symptoms [+ "+str(numPositive_symptomatic)+" positive (%.2f %%) +]" % (numPositive_symptomatic/numTested_symptomatic*100 if numTested_symptomatic>0 else 0))
vprint("\t"+str(numTested_tracing) +"\ttested as traces [+ "+str(numPositive_tracing)+" positive (%.2f %%) +]" % (numPositive_tracing/numTested_tracing*100 if numTested_tracing>0 else 0))
vprint("\t"+str(numTested_random) +"\ttested randomly [+ "+str(numPositive_random)+" positive (%.2f %%) +]" % (numPositive_random/numTested_random*100 if numTested_random>0 else 0))
vprint("\t"+str(numTested) +"\ttested TOTAL [+ "+str(numPositive)+" positive (%.2f %%) +]" % (numPositive/numTested*100 if numTested>0 else 0))

print("\t"+str(numSelfIsolated_symptoms) +" will isolate due to symptoms ("+str(numSelfIsolated_symptomaticGroupmate)+" as groupmates of symptomatic)")
print("\t"+str(numPositive) +" will isolate due to positive test ("+str(numIsolated_positiveGroupmate)+" as groupmates of positive)")
print("\t"+str(numSelfIsolated_positiveContact) +" will isolate due to positive contact ("+str(numSelfIsolated_positiveContactGroupmate)+" as groupmates of contact)")
vprint("\t"+str(numSelfIsolated_symptoms) +" will isolate due to symptoms ("+str(numSelfIsolated_symptomaticGroupmate)+" as groupmates of symptomatic)")
vprint("\t"+str(numPositive) +" will isolate due to positive test ("+str(numIsolated_positiveGroupmate)+" as groupmates of positive)")
vprint("\t"+str(numSelfIsolated_positiveContact) +" will isolate due to positive contact ("+str(numSelfIsolated_positiveContactGroupmate)+" as groupmates of contact)")

#----------------------------------------
# Update the status of nodes who are to be isolated:
Expand All @@ -402,7 +463,20 @@ def run_tti_sim(model, T,
model.set_isolation(isolationNode, True)
numIsolated += 1

print("\t"+str(numIsolated)+" entered isolation")
vprint("\t"+str(numIsolated)+" entered isolation")
log({"numTested_symptomatic": numTested_symptomatic,
"numPositive_symptomatic" : numPositive_symptomatic,
"numTested_tracing" : numTested_tracing,
"numPositive_tracing" : numPositive_tracing,
"numTested" : numTested,
"numSelfIsolated_symptoms": numSelfIsolated_symptoms,
"numSelfIsolated_symptomaticGroupmate": numSelfIsolated_symptomaticGroupmate,
"numPositive" : numPositive,
"numIsolated_positiveGroupmate" : numIsolated_positiveGroupmate,
"numSelfIsolated_positiveContact" : numSelfIsolated_positiveContact,
"numIsolated" : numIsolated
})


#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Expand Down
38 changes: 38 additions & 0 deletions seirsplus/utilities.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import sys
import numpy
import matplotlib.pyplot as pyplot

Expand Down Expand Up @@ -61,6 +62,43 @@ def results_summary(model):
print("peak pct hospitalized: %0.2f%%" % (numpy.max(model.numH)/model.numNodes * 100) )


#########################################################################################################################################
# Logging packages - requires pandas

try:
    import pandas as pd

    def last(x):
        """Return last element of a pandas Series"""
        return x.iloc[-1]


    def hist2df(history):
        """Take history dictionary and return:
        pandas DataFrame of all history
        pandas Series of the summary of history, taking the last value and the sum, as well average over time (sum of scaled)"""
        # Flatten {time: {stat: value}} into one row-dict per timestamp.
        # NOTE(review): assumes `history` preserves insertion order by time
        # and is non-empty (L[-1] raises IndexError otherwise) — confirm callers.
        L = [{'time': t, **d} for t, d in history.items()]
        tmax = L[-1]['time']
        n = len(L)  # NOTE(review): currently unused
        df = pd.DataFrame(L)
        # Duration of each simulation interval; first row has no predecessor.
        df['interval_length'] = (df['time'] - df['time'].shift(1)).fillna(0)
        temp = df.copy().fillna(0)
        for col in df.columns:
            if col == 'time': continue
            # Time-weighted contribution of each column: summing the
            # "/scaled" column gives the column's time average over [0, tmax].
            temp[col + "/scaled"] = temp[col] * temp['interval_length'] / tmax
        summary = temp.agg([last, numpy.sum])
        # Collapse the (aggregation, column) index into "column/agg" labels,
        # e.g. "numPositive/sum", "numS/last".
        summary = summary.stack()
        summary.index = ['/'.join(reversed(col)).strip() for col in summary.index.values]
        return df, summary

except ImportError:
    # pandas is optional: keep this module importable without it and fail
    # only when the logging helpers are actually called.
    print("Warning: pandas missing - some logging functions will not work", file=sys.stderr)
    def last(x):
        raise NotImplementedError("This function requires pandas to work")

    def hist2df(history):
        raise NotImplementedError("This function requires pandas to work")




Expand Down