def convert_to_pytorch_benchmark_format(
args: argparse.Namespace, metrics: dict[str, list], extra_info: dict[str, Any]
) -> list:
"""
Save the benchmark results in the format used by PyTorch OSS benchmark with
on metric per record
https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
"""
records = []
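    # Only emit records when SAVE_TO_PYTORCH_BENCHMARK_FORMAT is set to a
    # non-empty value in the environment; otherwise return an empty list.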
if not os.environ.get("SAVE_TO_PYTORCH_BENCHMARK_FORMAT", False):
return records
for name, benchmark_values in metrics.items():
if not isinstance(benchmark_values, list):
raise TypeError(
f"benchmark_values for metric '{name}' must be a list, "
f"but got {type(benchmark_values).__name__}"
)
record = {
"benchmark": {
"name": "vLLM benchmark",
"extra_info": {
"args": vars(args),
"compilation_config.mode": extract_field(
args, extra_info, "compilation_config.mode"
),
"optimization_level": extract_field(
args, extra_info, "optimization_level"
),
                    # A boolean field used by the vLLM benchmark HUD dashboard
"use_compile": use_compile(args, extra_info),
},
},
"model": {
"name": args.model,
},
"metric": {
"name": name,
"benchmark_values": benchmark_values,
"extra_info": extra_info,
},
}
        # Backfill tensor_parallel_size into the recorded args from extra_info
        # when it was not set (or was falsy) in the parsed args
        tp = record["benchmark"]["extra_info"]["args"].get("tensor_parallel_size")
        if not tp and "tensor_parallel_size" in extra_info:
record["benchmark"]["extra_info"]["args"]["tensor_parallel_size"] = (
extra_info["tensor_parallel_size"]
)
records.append(record)
return records
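

# A minimal usage sketch, for illustration only. It relies on this module's
# existing argparse/os imports and assumes the module-level helpers
# extract_field and use_compile (defined elsewhere in this file) tolerate
# fields that are absent from args/extra_info. The flag names, model name, and
# metric values below are made up for the example and are not vLLM's real CLI.
if __name__ == "__main__":
    import json

    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument("--model", default="example-org/example-model")
    demo_parser.add_argument("--tensor-parallel-size", type=int, default=None)
    demo_args = demo_parser.parse_args([])

    # Records are only produced when the gating env var is set
    os.environ["SAVE_TO_PYTORCH_BENCHMARK_FORMAT"] = "1"

    demo_records = convert_to_pytorch_benchmark_format(
        demo_args,
        metrics={"request_throughput": [10.0, 11.2]},
        # tensor_parallel_size is backfilled into the recorded args from here
        extra_info={"tensor_parallel_size": 2},
    )
    print(json.dumps(demo_records, indent=2))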