Skip to content

Commit a1de07b

Browse files
bobrenjc93 and facebook-github-bot
authored and committed
delete code that died a long time ago (#154802)
Summary: X-link: pytorch/pytorch#154802 Approved by: https://github.com/Skylion007 Reviewed By: izaitsevfb Differential Revision: D75767761 fbshipit-source-id: 86068e684b098f4823d62a3d4573a940069a62b6
1 parent 7efb806 commit a1de07b

File tree

1 file changed

+0
-79
lines changed

1 file changed

+0
-79
lines changed

userbenchmark/dynamo/dynamobench/common.py

Lines changed: 0 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -1018,9 +1018,6 @@ def speedup_experiment(args, model_iter_fn, model, example_inputs, **kwargs):
10181018
10191019
Writes to ./speedups.csv
10201020
"""
1021-
# if args.dynamic_shapes:
1022-
# return speedup_experiment_ds(args, model_iter_fn, model, example_inputs)
1023-
10241021
timings = np.zeros((args.repeat, 2), np.float64)
10251022
# if we randomize the input, we should also check the result is correct
10261023
should_randomize_input = args.randomize_input
@@ -1179,82 +1176,6 @@ def maybe_mark_profile(*args, **kwargs):
11791176
return msg
11801177

11811178

1182-
# WARNING: This code is currently dead
1183-
def speedup_experiment_ds(args, model_iter_fn, model, example_inputs):
1184-
"""
1185-
Run dynamic shapes benchmarks.
1186-
1187-
Requires dynamic shape compatible models, which provide a list of example inputs.
1188-
1189-
Warms up using the first input example and then iterates the inputs,
1190-
measuring (and expecting minimal) variance between the runtime for different examples.
1191-
1192-
"""
1193-
timings = np.zeros((args.repeat, len(example_inputs), 2), np.float64)
1194-
1195-
if args.repeat > 5:
1196-
print(
1197-
f"\ndynamic shapes experiments are slow, consider setting --repeat less than {args.repeat}\n"
1198-
)
1199-
1200-
nwarmup = 4
1201-
for rep in range(args.repeat):
1202-
# Start each rep fresh, e.g. only warmup on example 0
1203-
torch._dynamo.reset()
1204-
optimized_model_iter_fn = optimize_ctx(model_iter_fn)
1205-
for _ in range(nwarmup):
1206-
optimized_model_iter_fn(model, example_inputs[0])
1207-
1208-
for input_idx, inputs in enumerate(example_inputs):
1209-
# interleave the runs to handle frequency scaling and load changes
1210-
timings[rep, input_idx, 0] = timed(
1211-
model, model_iter_fn, inputs, return_result=False
1212-
)
1213-
# different from regular speedup_experiment, we _DO_ want to allow recompilation
1214-
timings[rep, input_idx, 1] = timed(
1215-
model, optimized_model_iter_fn, inputs, return_result=False
1216-
)
1217-
medians = np.median(timings, axis=0)
1218-
speedups = list(medians[:, 0] / medians[:, 1])
1219-
speedups_mean = np.mean(speedups)
1220-
speedups_median = np.median(speedups)
1221-
speedups_var = np.var(speedups)
1222-
1223-
# TODO this x[0] is not going to work in general but bert only has 1 input
1224-
shapes = [x[0].shape for x in example_inputs]
1225-
shape_keys = sorted(set(shapes))
1226-
shape_speedups = {
1227-
shape: [
1228-
it[1] for it in filter(lambda it: it[0] == shape, zip(shapes, speedups))
1229-
]
1230-
for shape in shape_keys
1231-
}
1232-
output_str = (
1233-
f"mean: {speedups_mean:.3f}, median: {speedups_median:.3f}, var: {speedups_var:.3f}"
1234-
+ "\nSpeedups by shape: "
1235-
+ "\n".join(
1236-
[
1237-
f"{shape}: "
1238-
+ ", ".join([f"{speedup: .3g}" for speedup in shape_speedups[shape]])
1239-
for shape in shape_keys
1240-
]
1241-
)
1242-
)
1243-
write_outputs(
1244-
output_filename,
1245-
("dev", "name", "batch_size", "speedup mean", "speedup median", "speedup var"),
1246-
[
1247-
current_device,
1248-
current_name,
1249-
current_batch_size,
1250-
speedups_mean,
1251-
speedups_median,
1252-
speedups_var,
1253-
],
1254-
)
1255-
return output_str
1256-
1257-
12581179
def overhead_experiment(*args, model_iter_fn):
12591180
"""
12601181
Measure overheads of TorchDynamo by running with no backend (only

0 commit comments

Comments (0)