|
7 | 7 | import tqdm
|
8 | 8 |
|
9 | 9 | from lib.dataset_generation import assemble_dataset_from_gpickle
|
10 |
| -from solvers.pCQO_MIS import pCQOMIS |
11 |
| -from solvers.pCQO_MIS_GD import pCQOMIS_GD |
12 |
| -from solvers.CPSAT_MIS import CPSATMIS |
13 |
| -from solvers.Gurobi_MIS import GurobiMIS |
14 |
| -from solvers.KaMIS import ReduMIS |
15 |
| -from solvers.dNN_Alkhouri_MIS import DNNMIS |
| 10 | +from solvers.pCQO_MIS import pCQOMIS_MGD |
| 11 | +# from solvers.CPSAT_MIS import CPSATMIS |
| 12 | +# from solvers.Gurobi_MIS import GurobiMIS |
| 13 | +# from solvers.KaMIS import ReduMIS |
| 14 | +# from solvers.dNN_Alkhouri_MIS import DNNMIS |
16 | 15 |
|
# Module-level logger for this benchmark script.
logger = logging.getLogger(__name__)

# NOTE(review): level=ERROR suppresses the logging.info("CSV: ...") records
# emitted by the benchmarking loop further down, so those CSV rows will not
# reach benchmark.log — confirm this is intended, or raise the level to INFO.
# style="{" only affects the handler's format string, not call-site args.
logging.basicConfig(filename='benchmark.log', level=logging.ERROR, style="{")

# Interval for saving solution checkpoints
# (a checkpoint is persisted once every SOLUTION_SAVE_INTERVAL stages).
SOLUTION_SAVE_INTERVAL = 2
|
|
25 | 24 | # List of directories containing graph data
|
26 | 25 | graph_directories = [
|
27 | 26 | ### ER 700-800 Graphs ###
|
28 |
| - # "./graphs/er_700-800" |
| 27 | + "./graphs/er_700-800" |
| 28 | + ### ER 9000-11000 Graphs ### |
| 29 | + # "./graphs/er_9000_11000" |
29 | 30 | ### GNM 300 Convergence Graphs ###
|
30 | 31 | # "./graphs/gnm_random_graph_convergence",
|
31 | 32 | ### SATLIB Graphs ###
|
32 |
| - "./graphs/satlib/m403", |
33 |
| - "./graphs/satlib/m411", |
34 |
| - "./graphs/satlib/m418", |
35 |
| - "./graphs/satlib/m423", |
36 |
| - "./graphs/satlib/m429", |
37 |
| - "./graphs/satlib/m435", |
38 |
| - "./graphs/satlib/m441", |
39 |
| - "./graphs/satlib/m449", |
| 33 | + # "./graphs/satlib/m403", |
| 34 | + # "./graphs/satlib/m411", |
| 35 | + # "./graphs/satlib/m418", |
| 36 | + # "./graphs/satlib/m423", |
| 37 | + # "./graphs/satlib/m429", |
| 38 | + # "./graphs/satlib/m435", |
| 39 | + # "./graphs/satlib/m441", |
| 40 | + # "./graphs/satlib/m449", |
40 | 41 | ### ER density test Graphs ###
|
41 | 42 | # "./graphs/er_05",
|
42 | 43 | # "./graphs/er_10",
|
|
# Solver configurations to benchmark. Each entry pairs a display "name",
# the solver "class" to instantiate, and the keyword "params" dict passed
# to that class's constructor. Commented entries are kept as ready-made
# configurations for other solvers/datasets.
base_solvers = [
    # {"name": "Gurobi", "class": GurobiMIS, "params": {"time_limit": 30}},
    # {"name": "CPSAT", "class": CPSATMIS, "params": {"time_limit": 30}},
    # {"name": "ReduMIS", "class": ReduMIS, "params": {"time_limit":30}},
    {
        # pCQO-MIS with momentum gradient descent, tuned for ER 700-800 graphs.
        "name": "pCQO_MIS ER 700-800 MGD",
        "class": pCQOMIS_MGD,
        "params": {
            "learning_rate": 0.000009,
            "momentum": 0.9,
            "number_of_steps": 225000,
            # gamma / gamma_prime: penalty coefficients of the pCQO objective
            # (assumed — confirm their exact roles against pCQOMIS_MGD).
            "gamma": 350,
            "gamma_prime": 7,
            "batch_size": 256,
            "std": 2.25,
            "threshold": 0.00,
            "steps_per_batch": 450,
            # output_interval > number_of_steps, so no periodic output fires.
            "output_interval": 225002,
            "value_initializer": "degree",
            # Checkpoint after the first batch (step 450), then every
            # 4500 steps up to and including 225000.
            "checkpoints": [450] + list(range(4500, 225001, 4500))
        },
    },
    # Uncomment and configure the following solver for SATLIB datasets if needed
    # {
    #     "name": "pCQO_MIS SATLIB MGD",
    #     "class": pCQOMIS_MGD,
    #     "params": {
    #         "learning_rate": 0.0003,
    #         "momentum": 0.875,
    #         "number_of_steps": 3000,
    #         "gamma": 900,
    #         "gamma_prime": 1,
    #         "batch_size": 256,
    #         "std": 2.25,
    #         "threshold": 0.00,
    #         "steps_per_batch": 30,
    #         "output_interval": 10000,
    #         "value_initializer": "degree",
    #         "number_of_terms": "three",
    #         "sample_previous_batch_best": True,
    #         "checkpoints": [30] + list(range(300, 3300, 300)),
    #     },
    # }
]
|
144 | 98 |
|
# Active solver list for this run.
# Use a shallow copy rather than an alias: the (currently commented) grid
# search below appends modified configs to `solvers`, and with a plain
# alias (`solvers = base_solvers`) that would silently mutate
# `base_solvers` as well. Copying keeps the base configurations pristine
# while remaining backward-compatible for all read-only uses.
solvers = list(base_solvers)
146 | 100 |
|
147 | 101 | ## Grid Search (Commented Out)
|
148 | 102 | # Uncomment and configure the following section for hyperparameter tuning
|
|
151 | 105 | # for learning_rate in [0.0003]:
|
152 | 106 | # for momentum in [0.875]:
|
153 | 107 | # for steps_per_batch in [30]:
|
154 |
| -# for gamma_beta in [(900, 1)]: |
| 108 | +# for gamma_gamma_prime in [(900, 1)]: |
155 | 109 | # for batch_size in [256]:
|
156 | 110 | # for terms in ["three"]:
|
157 | 111 | # modified_solver = deepcopy(solver)
|
158 | 112 | # modified_solver["name"] = (
|
159 |
| -# f"{modified_solver['name']} batch_size={batch_size}, learning_rate={learning_rate}, momentum={momentum}, steps_per_batch={steps_per_batch}, gamma={gamma_beta[0]}, beta={gamma_beta[1]}, terms={terms}" |
| 113 | +# f"{modified_solver['name']} batch_size={batch_size}, learning_rate={learning_rate}, momentum={momentum}, steps_per_batch={steps_per_batch}, gamma={gamma_gamma_prime[0]}, gamma_prime={gamma_gamma_prime[1]}, terms={terms}" |
160 | 114 | # )
|
161 | 115 | # modified_solver["params"]["learning_rate"] = learning_rate
|
162 | 116 | # modified_solver["params"]["momentum"] = momentum
|
163 | 117 | # modified_solver["params"]["steps_per_batch"] = steps_per_batch
|
164 |
| -# modified_solver["params"]["gamma"] = gamma_beta[0] |
165 |
| -# modified_solver["params"]["beta"] = gamma_beta[1] |
| 118 | +# modified_solver["params"]["gamma"] = gamma_gamma_prime[0] |
| 119 | +# modified_solver["params"]["gamma_prime"] = gamma_gamma_prime[1] |
166 | 120 | # modified_solver["params"]["number_of_terms"] = terms
|
167 | 121 | # modified_solver["params"]["batch_size"] = batch_size
|
168 | 122 | # solvers.append(modified_solver)
|
169 | 123 |
|
170 |
| -# solvers = [] |
171 |
| -# for solver in base_solvers: |
172 |
| -# for terms in ["two","three"]: |
173 |
| -# modified_solver = deepcopy(solver) |
174 |
| -# modified_solver["name"] = ( |
175 |
| -# f"{modified_solver['name']} terms={terms}" |
176 |
| -# ) |
177 |
| -# modified_solver["params"]["number_of_terms"] = terms |
178 |
| -# solvers.append(modified_solver) |
179 |
| - |
180 | 124 |
|
181 | 125 | #### SOLUTION OUTPUT FUNCTION ####
|
182 | 126 | def table_output(solutions, datasets, current_stage, total_stages):
|
@@ -226,35 +170,37 @@ def table_output(solutions, datasets, current_stage, total_stages):
|
226 | 170 |
|
#### BENCHMARKING CODE ####
# Accumulates one result record per (graph, solver) pair — or one record per
# checkpoint when a solver exposes intermediate `solutions` (see loop below).
solutions = []
# NOTE(review): initialized but never populated in the code visible here —
# confirm it is filled further down, otherwise remove.
path_solutions = []

# Calculate total number of stages
# (one stage = one solver applied to one graph; used for progress reporting
# and checkpointing against SOLUTION_SAVE_INTERVAL).
stage = 0
stages = len(solvers) * len(dataset)
|
234 |
| -# Optional: Load SDP initializer if needed |
235 |
| -# initializations = pickle.load(open("./solutions/SDP/SDP_Generation_SATLIB", "rb")) |
236 |
| - |
237 | 179 | # Iterate over each graph in the dataset
|
238 | 180 | for graph in tqdm.tqdm(dataset, desc=" Iterating Through Graphs", position=0):
|
239 | 181 | for solver in tqdm.tqdm(solvers, desc=" Iterating Solvers for Each Graph"):
|
240 | 182 | solver_instance = solver["class"](graph["data"], solver["params"])
|
241 | 183 |
|
242 |
| - # Optional: Apply SDP-based initializer if needed |
243 |
| - # solver_instance.value_initializer = lambda _: torch.normal( |
244 |
| - # mean=initializations[graph["name"]]["SDP_solution"], |
245 |
| - # std=torch.sqrt(torch.ones((len(initializations[graph["name"]]["SDP_solution"])))) * solver["params"]["std"] |
246 |
| - # ) |
247 |
| - |
248 | 184 | # Solve the problem using the current solver
|
249 | 185 | solver_instance.solve()
|
250 |
| - solution = { |
251 |
| - "solution_method": solver["name"], |
252 |
| - "dataset_name": graph["name"], |
253 |
| - "data": deepcopy(solver_instance.solution), |
254 |
| - "time_taken": deepcopy(solver_instance.solution_time), |
255 |
| - } |
256 |
| - logging.info("CSV: %s, %s, %s", graph['name'], solution['data']['size'], solution['time_taken']) |
257 |
| - solutions.append(solution) |
| 186 | + if hasattr(solver_instance, "solutions") and len(solver_instance.solutions) > 0: |
| 187 | + for solution in solver_instance.solutions: |
| 188 | + pretty_solution = { |
| 189 | + "solution_method": f"{solver['name']} at step {solution['number_of_steps']}", |
| 190 | + "dataset_name": graph["name"], |
| 191 | + "data": deepcopy(solution), |
| 192 | + "time_taken": deepcopy(solution["time"]), |
| 193 | + } |
| 194 | + solutions.append(pretty_solution) |
| 195 | + else: |
| 196 | + solution = { |
| 197 | + "solution_method": solver["name"], |
| 198 | + "dataset_name": graph["name"], |
| 199 | + "data": deepcopy(solver_instance.solution), |
| 200 | + "time_taken": deepcopy(solver_instance.solution_time), |
| 201 | + } |
| 202 | + logging.info("CSV: %s, %s, %s", graph['name'], solution['data']['size'], solution['time_taken']) |
| 203 | + solutions.append(solution) |
258 | 204 | del solver_instance
|
259 | 205 |
|
260 | 206 | # Update progress and save checkpoint if necessary
|
|
0 commit comments