MC.py

import numpy as np
import math
import random
from matplotlib import pyplot as plt
from IPython.display import clear_output

# numerical constants used throughout (math.pi and math.e are higher-precision alternatives)
PI = 3.1415926
e = 2.71828

def get_rand_number(min_value, max_value):
    """
    This function gets a random number from a uniform distribution between
    the two input values [min_value, max_value] inclusive.
    Args:
    - min_value (float)
    - max_value (float)
    Return:
    - Random number in this range (float)
    """
    spread = max_value - min_value  # width of the sampling interval
    choice = random.uniform(0, 1)
    return min_value + spread*choice

def f_of_x(x):
    """
    This is the main function we want to integrate over.
    Args:
    - x (float) : input to the function
    Return:
    - output of function f(x) (float)
    """
    return (e**(-1*x))/(1+(x-1)**2)
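
# Quick sanity check (illustrative): f(0) = e**0 / (1 + (0 - 1)**2) = 0.5
print(f"f(0) = {f_of_x(0)}")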

def crude_monte_carlo(num_samples=5000):
    """
    This function performs the crude Monte Carlo estimate for our
    specific function f(x) on the range x=0 to x=5. This bound is
    sufficient because f(x) has decayed to nearly 0 by around x = PI.
    Args:
    - num_samples (int) : number of samples
    Return:
    - Crude Monte Carlo estimate (float)
    """
    lower_bound = 0
    upper_bound = 5
    sum_of_samples = 0
    for i in range(num_samples):
        x = get_rand_number(lower_bound, upper_bound)
        sum_of_samples += f_of_x(x)
    return (upper_bound - lower_bound) * float(sum_of_samples/num_samples)
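
# Illustrative usage of the crude estimator (the 10,000-sample count here is
# an arbitrary choice): the printed value should land near the true value of
# the integral, roughly 0.7.
crude_estimate = crude_monte_carlo(10000)
print(f"Crude Monte Carlo estimate: {crude_estimate}")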

def get_crude_MC_variance(num_samples):
    """
    This function returns the variance of a single crude Monte Carlo
    sample, i.e. of int_max*f(X) with X uniform on the integration range.
    Note that the input number of samples does not necessarily need to
    correspond to the number of samples used in the Monte Carlo simulation.
    Args:
    - num_samples (int)
    Return:
    - Variance for the crude Monte Carlo approximation of f(x) (float)
    """
    int_max = 5  # this is the max of our integration range

    # get the average of the squared (scaled) samples
    running_total = 0
    for i in range(num_samples):
        x = get_rand_number(0, int_max)
        running_total += f_of_x(x)**2
    sum_of_sqs = running_total * int_max**2 / num_samples

    # get the square of the average (scaled) sample
    running_total = 0
    for i in range(num_samples):
        x = get_rand_number(0, int_max)
        running_total += f_of_x(x)
    sq_ave = (int_max*running_total/num_samples)**2

    return sum_of_sqs - sq_ave
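
# Illustrative follow-up: dividing this single-sample variance by the number of
# samples in a run gives the variance of that run's estimate, so a rough error
# bar for a 10,000-sample crude run (an arbitrary choice here) is:
crude_variance = get_crude_MC_variance(10000)
print(f"Crude MC variance: {crude_variance}")
print(f"Crude MC error estimate: {math.sqrt(crude_variance / 10000)}")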

# visualization of the integrand f(x)
xs = [float(i/50) for i in range(int(50*PI*2))]
ys = [f_of_x(x) for x in xs]
plt.plot(xs, ys)
plt.title("f(x)");

# this is the template of our weight function g(x)
def g_of_x(x, A, lamda):
    e = 2.71828
    return A*math.pow(e, -1*lamda*x)

def inverse_G_of_r(r, lamda):
    # inverse CDF of the exponential proposal: for r uniform on (0, 1),
    # -ln(r)/lamda is distributed according to g(x) when A = lamda
    return (-1 * math.log(float(r)))/lamda
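
# Minimal sketch of inverse-transform sampling with the proposal above
# (the lambda value of 1.0 below is only an illustrative choice):
demo_lamda = 1.0
demo_draws = [inverse_G_of_r(get_rand_number(0, 1), demo_lamda) for _ in range(5)]
print(f"Example draws from g(x) with lambda={demo_lamda}: {demo_draws}")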

def get_IS_variance(lamda, num_samples):
    """
    This function calculates the variance of a Monte Carlo
    estimate that uses importance sampling with weight function g(x).
    Args:
    - lamda (float) : lambda value of g(x) being tested
    - num_samples (int) : number of samples used for the variance estimate
    Return:
    - Variance
    """
    A = lamda  # with A = lamda, g(x) integrates to 1 over [0, infinity)
    int_max = 5

    # get sum of squares
    running_total = 0
    for i in range(num_samples):
        x = get_rand_number(0, int_max)
        running_total += (f_of_x(x)/g_of_x(x, A, lamda))**2
    sum_of_sqs = running_total / num_samples

    # get squared average
    running_total = 0
    for i in range(num_samples):
        x = get_rand_number(0, int_max)
        running_total += f_of_x(x)/g_of_x(x, A, lamda)
    sq_ave = (running_total/num_samples)**2

    return sum_of_sqs - sq_ave

# get variance as a function of lambda by testing many different lambdas
test_lamdas = [i*0.05 for i in range(1, 61)]
variances = []

for i, lamda in enumerate(test_lamdas):
    print(f"lambda {i+1}/{len(test_lamdas)}: {lamda}")
    variances.append(get_IS_variance(lamda, 10000))
    clear_output(wait=True)

best_index = np.argmin(np.asarray(variances))
optimal_lamda = test_lamdas[best_index]
IS_variance = variances[best_index]

print(f"Optimal Lambda: {optimal_lamda}")
print(f"Optimal Variance: {IS_variance}")
print(f"Error: {(IS_variance/10000)**0.5}")