IlyasMoutawwakil (HF staff) committed
Commit 7724866
1 Parent(s): 6ea5c03

fix speedups and savings

Files changed (3)
  1. app.py +1 -1
  2. configs.py → config_store.py +0 -0
  3. run.py +44 -35
app.py CHANGED
@@ -5,7 +5,7 @@ from optimum_benchmark.task_utils import (
     infer_task_from_model_name_or_path,
 )
 from run import run_benchmark
-from configs import (
+from config_store import (
     get_training_config,
     get_inference_config,
     get_neural_compressor_config,
configs.py → config_store.py RENAMED
File without changes
run.py CHANGED
@@ -97,41 +97,7 @@ def run_benchmark(kwargs):
         table = pd.read_csv(f"runs/{experiment_name}/{benchmark}_results.csv", index_col=0)
         # concat tables
         table = pd.concat([baseline_table, table], axis=0)
-        table["experiment_name"] = ["baseline", experiment_name]
-        table = table.set_index("experiment_name")
-        table.reset_index(inplace=True)
-        # compute speedups
-        if "forward.latency(s)" in table.columns:
-            table["forward.latency.speedup(%)"] = (
-                table["forward.latency(s)"] / table["forward.latency(s)"].iloc[0] - 1
-            ) * 100
-            table["forward.latency.speedup(%)"] = table["forward.latency.speedup(%)"].round(2)
-        if "forward.throughput(samples/s)" in table.columns:
-            table["forward.throughput.speedup(%)"] = (
-                table["forward.throughput(samples/s)"] / table["forward.throughput(samples/s)"].iloc[0] - 1
-            ) * 100
-            table["forward.throughput.speedup(%)"] = table["forward.throughput.speedup(%)"].round(2)
-        if "forward.peak_memory(MB)" in table.columns:
-            table["forward.peak_memory.savings(%)"] = (
-                table["forward.peak_memory(MB)"] / table["forward.peak_memory(MB)"].iloc[0] - 1
-            ) * 100
-            table["forward.peak_memory.savings(%)"] = table["forward.peak_memory.savings(%)"].round(2)
-        if "generate.latency(s)" in table.columns:
-            table["generate.latency.speedup(%)"] = (
-                table["generate.latency(s)"] / table["generate.latency(s)"].iloc[0] - 1
-            ) * 100
-            table["generate.latency.speedup(%)"] = table["generate.latency.speedup(%)"].round(2)
-        if "generate.throughput(tokens/s)" in table.columns:
-            table["generate.throughput.speedup(%)"] = (
-                table["generate.throughput(tokens/s)"] / table["generate.throughput(tokens/s)"].iloc[0] - 1
-            ) * 100
-            table["generate.throughput.speedup(%)"] = table["generate.throughput.speedup(%)"].round(2)
-        if "generate.peak_memory(MB)" in table.columns:
-            table["generate.peak_memory.savings(%)"] = (
-                table["generate.peak_memory(MB)"] / table["generate.peak_memory(MB)"].iloc[0] - 1
-            ) * 100
-            table["generate.peak_memory.savings(%)"] = table["generate.peak_memory.savings(%)"].round(2)
-
+        table = postprocess_table(table, experiment_name)
     else:
         table = pd.read_csv(f"runs/{experiment_name}/{benchmark}_results.csv", index_col=0)
 
@@ -176,3 +142,46 @@ def run_experiment(args, html_text=""):
         yield gr.update(value=cumul_html_text), gr.update(interactive=False), gr.update(visible=False)
 
     return process.returncode, cumul_html_text
+
+
+def postprocess_table(table, experiment_name):
+    table["experiment_name"] = ["baseline", experiment_name]
+    table = table.set_index("experiment_name")
+    table.reset_index(inplace=True)
+    if "forward.latency(s)" in table.columns:
+        table["forward.latency.speedup(%)"] = (
+            1 - table["forward.latency(s)"] / table["forward.latency(s)"].iloc[0]
+        ) * 100
+        table["forward.latency.speedup(%)"] = table["forward.latency.speedup(%)"].round(2)
+
+    if "forward.throughput(samples/s)" in table.columns:
+        table["forward.throughput.speedup(%)"] = (
+            table["forward.throughput(samples/s)"] / table["forward.throughput(samples/s)"].iloc[0] - 1
+        ) * 100
+        table["forward.throughput.speedup(%)"] = table["forward.throughput.speedup(%)"].round(2)
+
+    if "forward.peak_memory(MB)" in table.columns:
+        table["forward.peak_memory.savings(%)"] = (
+            1 - table["forward.peak_memory(MB)"] / table["forward.peak_memory(MB)"].iloc[0]
+        ) * 100
+        table["forward.peak_memory.savings(%)"] = table["forward.peak_memory.savings(%)"].round(2)
+
+    if "generate.latency(s)" in table.columns:
+        table["generate.latency.speedup(%)"] = (
+            1 - table["generate.latency(s)"] / table["generate.latency(s)"].iloc[0]
+        ) * 100
+        table["generate.latency.speedup(%)"] = table["generate.latency.speedup(%)"].round(2)
+
+    if "generate.throughput(tokens/s)" in table.columns:
+        table["generate.throughput.speedup(%)"] = (
+            table["generate.throughput(tokens/s)"] / table["generate.throughput(tokens/s)"].iloc[0] - 1
+        ) * 100
+        table["generate.throughput.speedup(%)"] = table["generate.throughput.speedup(%)"].round(2)
+
+    if "generate.peak_memory(MB)" in table.columns:
+        table["generate.peak_memory.savings(%)"] = (
+            1 - table["generate.peak_memory(MB)"] / table["generate.peak_memory(MB)"].iloc[0]
+        ) * 100
+        table["generate.peak_memory.savings(%)"] = table["generate.peak_memory.savings(%)"].round(2)
+
+    return table
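
For illustration only (not part of the commit): a minimal standalone sketch, with made-up numbers, of what the sign flip changes. Latency and peak memory are lower-is-better metrics, so the old `value / baseline - 1` formula reported an improvement as a negative percentage; the corrected `1 - value / baseline` makes improvements positive. Throughput is higher-is-better, so it keeps `value / baseline - 1`.

import pandas as pd

# Hypothetical two-row results table: baseline first, experiment second.
table = pd.DataFrame(
    {
        "forward.latency(s)": [2.0, 1.5],               # lower is better
        "forward.throughput(samples/s)": [50.0, 66.0],  # higher is better
        "forward.peak_memory(MB)": [1000.0, 800.0],     # lower is better
    },
    index=["baseline", "experiment"],
)

baseline = table.iloc[0]

# Corrected formulas, mirroring postprocess_table:
latency_speedup = (1 - table["forward.latency(s)"] / baseline["forward.latency(s)"]) * 100
throughput_speedup = (table["forward.throughput(samples/s)"] / baseline["forward.throughput(samples/s)"] - 1) * 100
memory_savings = (1 - table["forward.peak_memory(MB)"] / baseline["forward.peak_memory(MB)"]) * 100

print(latency_speedup.round(2)["experiment"])     # 25.0  (old formula gave -25.0)
print(throughput_speedup.round(2)["experiment"])  # 32.0  (unchanged by the fix)
print(memory_savings.round(2)["experiment"])      # 20.0  (old formula gave -20.0)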