Skip to content
40 changes: 31 additions & 9 deletions graph_net/fault_locator/torch/compiler_evaluator.py
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

不要改这个文件。你新写你的代码 (Translation: Do not modify this file — write your code in a new one.)

Original file line number Diff line number Diff line change
Expand Up @@ -51,21 +51,43 @@ def _prepare_workspace(self, tmp_dir: Path, rel_model_path: str) -> Path:

def _execute_benchmark(self, allow_list_path: Path, log_file: Path):
"""
Invokes the torch.test_compiler module and redirects output to a log file.
Invokes the torch.eval_backend_diff module and redirects output to a log file.
Uses sys.executable to ensure the same Python environment is used.
"""
import base64
import json

ref_config = {
"backend_config": {
"compiler": "nope",
"device": self.config["device"],
"model_path_prefix": self.config["model_path_prefix"],
}
}

target_config = {
"backend_config": {
"compiler": self.config["compiler"],
"device": self.config["device"],
"model_path_prefix": self.config["model_path_prefix"],
}
}

ref_config_b64 = base64.b64encode(json.dumps(ref_config).encode()).decode()
target_config_b64 = base64.b64encode(
json.dumps(target_config).encode()
).decode()

cmd = [
sys.executable,
"-m",
"graph_net_bench.torch.test_compiler",
"--model-path-prefix",
f"{self.config['model_path_prefix']}/",
"--allow-list",
"graph_net_bench.torch.eval_backend_diff",
"--model-path-list",
str(allow_list_path),
"--compiler",
self.config["compiler"],
"--device",
self.config["device"],
"--reference-config",
ref_config_b64,
"--target-config",
target_config_b64,
]
print(" ".join(cmd))
with log_file.open("w") as f:
Expand Down
44 changes: 31 additions & 13 deletions graph_net_bench/torch/eval_backend_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def eval_multi_models(args, model_path_prefix=None, use_model_list=False):
failed_samples = []
for sample_idx, model_path in enumerate(model_paths):
print(
f"[{sample_idx}] {module_name}, model_path: {model_path}",
f"[{sample_idx}][Processing] {module_name}, model_path: {model_path}",
file=sys.stderr,
flush=True,
)
Expand Down Expand Up @@ -189,20 +189,37 @@ def eval_multi_models(args, model_path_prefix=None, use_model_list=False):
print(f"- {model_path}", file=sys.stderr, flush=True)


def _build_args_for_perf(
config: dict, model_path: str, output_path: str
) -> types.SimpleNamespace:
"""
TODO: Remove or modify this function for args building
when finish refactoring eval_backend_perf to new runner-backend structure.
"""
backend_config = config.get("backend_config", {})
return types.SimpleNamespace(
model_path=model_path,
output_path=output_path,
compiler=backend_config.get("compiler", ""),
device=backend_config.get("device", "cuda"),
seed=backend_config.get("seed", 123),
warmup=config.get("warmup", 3),
trials=config.get("trials", 5),
log_prompt=backend_config.get("log_prompt", "graph-net-bench-log"),
model_path_prefix=backend_config.get("model_path_prefix"),
backend_config=backend_config.get("backend_config"),
)


def eval_single_model(args):
ref_config = test_compiler_util.convert_to_dict(args.reference_config)
target_config = test_compiler_util.convert_to_dict(args.target_config)

ref_dir = "/tmp/eval_perf_diff/reference"
target_dir = "/tmp/eval_perf_diff/target"

ref_args = types.SimpleNamespace(
model_path=args.model_path,
output_path=ref_dir,
**test_compiler_util.convert_to_dict(args.reference_config),
)
target_args = types.SimpleNamespace(
model_path=args.model_path,
output_path=target_dir,
**test_compiler_util.convert_to_dict(args.target_config),
)
ref_args = _build_args_for_perf(ref_config, args.model_path, ref_dir)
target_args = _build_args_for_perf(target_config, args.model_path, target_dir)

eval_single_model_with_single_backend(ref_args)
eval_single_model_with_single_backend(target_args)
Expand Down Expand Up @@ -230,11 +247,12 @@ def eval_single_model(args):

def main(args):
ref_config = test_compiler_util.convert_to_dict(args.reference_config)
model_path_prefix = ref_config.get("model_path_prefix")
backend_config = ref_config.get("backend_config", {})
model_path_prefix = backend_config.get("model_path_prefix")

if args.model_path_list and model_path_prefix:
eval_multi_models(args, model_path_prefix, use_model_list=True)
elif os.path.isdir(args.model_path):
elif args.model_path and os.path.isdir(args.model_path):
if path_utils.is_single_model_dir(args.model_path):
eval_single_model(args)
else:
Expand Down
26 changes: 20 additions & 6 deletions test/eval_backend_diff_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,21 +10,35 @@ python3 -m graph_net_bench.torch.eval_backend_diff \
--model-path-list $model_list \
--reference-config $(base64 -w 0 <<EOF
{
"compiler": "nope",
"device": "cuda",
"runner_type": "local",
"backend_path": "graph_net_bench.torch.backend.nope_backend",
"backend_class": "NopeBackend",
"warmup": 1,
"trials": 1,
"model_path_prefix": "$AI4C_ROOT"
"backend_config": {
"compiler": "nope",
"device": "cuda",
"seed": 123,
"log_prompt": "graph-net-bench-log",
"model_path_prefix": "$AI4C_ROOT"
}
}
EOF
) \
--target-config $(base64 -w 0 <<EOF
{
"compiler": "nope",
"device": "cuda",
"runner_type": "local",
"backend_path": "graph_net_bench.torch.backend.nope_backend",
"backend_class": "NopeBackend",
"warmup": 1,
"trials": 1,
"model_path_prefix": "$AI4C_ROOT"
"backend_config": {
"compiler": "nope",
"device": "cuda",
"seed": 123,
"log_prompt": "graph-net-bench-log",
"model_path_prefix": "$AI4C_ROOT"
}
}
EOF
) 2>&1 | tee "$OUTPUT_PATH/validation.log"