1941 lines
66 KiB
Python
1941 lines
66 KiB
Python
#!/usr/bin/env python3
|
||
"""SLWChipVerify:面向小型 Verilog 工程的轻量级 RTL 验证 CLI。
|
||
|
||
当前支持两类规格:
|
||
1) truth_table:组合逻辑真值表验证
|
||
2) sequential:基于周期采样的时序验证(含 clk/rst)
|
||
"""
|
||
|
||
from __future__ import annotations
|
||
|
||
import argparse
|
||
import csv
|
||
import json
|
||
import re
|
||
import shutil
|
||
import subprocess
|
||
import sys
|
||
import tempfile
|
||
import xml.etree.ElementTree as ET
|
||
from dataclasses import dataclass
|
||
from datetime import datetime
|
||
from pathlib import Path
|
||
from typing import Any
|
||
|
||
# Tool identity used in user-facing output.
TOOL_NAME = "SLWChipVerify"
# Prefix prepended to console log lines.
LOG_PREFIX = f"[{TOOL_NAME}]"
# Module name of the generated testbench top (underscored to avoid DUT clashes).
TB_TOP = "__slwchipverify_tb"
# Signals wider than this skip exact per-value bin coverage (2**width bins).
MAX_EXACT_COVERAGE_BITS = 12
|
||
|
||
|
||
class ChipVerifyError(Exception):
    """Unified exception type raised for all SLWChipVerify errors."""
|
||
|
||
|
||
@dataclass
class Signal:
    """A logic signal definition: name and bit width."""

    # Signal identifier as it appears in the Verilog port list.
    name: str
    # Bit width; 1 means a scalar wire/reg.
    width: int = 1
|
||
|
||
|
||
@dataclass
class Case:
    """One combinational test case: input vector and expected outputs."""

    # Human-readable label echoed in simulation log lines.
    name: str
    # Input signal name -> stimulus value.
    input_values: dict[str, int]
    # Output signal name -> expected value.
    output_values: dict[str, int]
|
||
|
||
|
||
@dataclass
class ObserveSignal:
    """A sequential observe signal: name, Verilog expression, and width."""

    name: str
    # Verilog expression assigned to the generated observe wire.
    expr: str
    width: int = 1
|
||
|
||
|
||
@dataclass
class SeqExpectation:
    """A single expected condition inside an assertion."""

    signal: str
    # Comparison operator: one of eq/ne/gt/ge/lt/le.
    op: str
    value: int
    width: int
|
||
|
||
|
||
@dataclass
class SeqAssertion:
    """A sequential assertion; supports single-cycle and window modes."""

    name: str
    # Cycle at which the assertion (or its window) starts.
    cycle: int
    expectations: list[SeqExpectation]
    # One of: "cycle", "eventually", "always", "never", "until".
    mode: str = "cycle"
    # Window length in cycles for the non-"cycle" modes.
    within: int = 0
    # Termination conditions, used only when mode == "until".
    until_expectations: list[SeqExpectation] | None = None
|
||
|
||
|
||
@dataclass
class SeqStimulusEvent:
    """A stimulus event: write a group of signal values at a given cycle."""

    cycle: int
    # Writable signal name -> value to drive this cycle.
    values: dict[str, int]
|
||
|
||
|
||
@dataclass
class ClockConfig:
    """Clock configuration."""

    name: str
    # Full clock period in nanoseconds.
    period_ns: int
    # Clock level at time zero (0 or 1).
    initial: int
|
||
|
||
|
||
@dataclass
class ResetConfig:
    """Reset configuration."""

    name: str
    # Active level of the reset signal (0 or 1).
    active: int
    # Number of leading cycles to hold reset asserted.
    cycles: int
|
||
|
||
|
||
@dataclass
class AutoStimulusConfig:
    """Automatic stimulus configuration: enable flag, start cycle, mode, seed."""

    enabled: bool
    # First cycle at which auto stimulus starts driving inputs.
    start_cycle: int
    # Either "binary_count" or "random".
    mode: str
    # Seed fed to $random when mode == "random".
    seed: int
|
||
|
||
|
||
@dataclass
class SequentialSpec:
    """Complete data model for a sequential verification spec."""

    clock: ClockConfig
    reset: ResetConfig | None
    inputs: list[Signal]
    outputs: list[Signal]
    observes: list[ObserveSignal]
    init_values: dict[str, int]
    stimulus: list[SeqStimulusEvent]
    assertions: list[SeqAssertion]
    max_cycles: int
    auto_stimulus: AutoStimulusConfig

    def signal_widths(self) -> dict[str, int]:
        """Return a mapping from every sampleable signal name to its bit width.

        Insertion order is clock, reset (if any), then inputs, outputs,
        and observes, matching the order signals are sampled in the TB.
        """
        widths: dict[str, int] = {self.clock.name: 1}
        if self.reset is not None:
            widths[self.reset.name] = 1
        for group in (self.inputs, self.outputs, self.observes):
            for sig in group:
                widths[sig.name] = sig.width
        return widths

    def coverage_signal_widths(self) -> dict[str, int]:
        """Return the signal-width mapping used for coverage statistics."""
        return self.signal_widths()
|
||
|
||
|
||
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments.

    Defines a single required 'run' subcommand that drives verification
    from a JSON spec against one or more Verilog design files.
    """
    parser = argparse.ArgumentParser(
        prog="slwchipverify",
        description=(
            "SLWChipVerify:Verilog 组合/时序验证工具。"
        ),
    )
    sub = parser.add_subparsers(dest="command", required=True)

    run = sub.add_parser("run", help="Run verification from a JSON spec")
    # -d may repeat; action="append" collects all design files into a list.
    run.add_argument(
        "-d",
        "--design",
        action="append",
        required=True,
        help="Verilog source file (can be provided multiple times)",
    )
    run.add_argument("--top", required=True, help="DUT top module name")
    run.add_argument("--spec", required=True, help="Path to verification spec JSON")
    run.add_argument(
        "--workdir",
        default=".slwchipverify_build",
        help="Build directory used when --keep is set",
    )
    # Optional report outputs; each format is produced only when requested.
    run.add_argument(
        "--report",
        help="Optional output path for JSON report (default: no JSON file)",
    )
    run.add_argument(
        "--junit",
        help="Optional output path for JUnit XML report",
    )
    run.add_argument(
        "--csv",
        help="Optional output path for CSV report",
    )
    # Locations of the Icarus Verilog compile/simulate executables.
    run.add_argument("--iverilog", default="iverilog", help="iverilog executable path")
    run.add_argument("--vvp", default="vvp", help="vvp executable path")
    run.add_argument(
        "--timeout",
        type=int,
        default=30,
        help="Simulation timeout in seconds",
    )
    run.add_argument(
        "--dump-vcd",
        help="Optional waveform path relative to workdir, e.g. run.vcd",
    )
    run.add_argument(
        "--keep",
        action="store_true",
        help="Keep intermediate files in --workdir",
    )

    return parser.parse_args()
|
||
|
||
|
||
def _parse_signal_item(item: Any, field_name: str) -> Signal:
    """Parse one input/output signal entry.

    Accepts either a bare name string (width defaults to 1) or an object
    with 'name' and optional 'width' fields.  Raises ChipVerifyError on
    any malformed entry.
    """
    if isinstance(item, str):
        return Signal(name=item, width=1)

    if not isinstance(item, dict):
        raise ChipVerifyError(
            f"Invalid {field_name} signal entry: expected string or object, got {type(item)}"
        )

    name = item.get("name")
    if not isinstance(name, str) or not name:
        raise ChipVerifyError(f"{field_name} signal name must be a non-empty string")

    width = item.get("width", 1)
    if not isinstance(width, int) or width <= 0:
        raise ChipVerifyError(f"{field_name} signal {name!r} width must be positive int")

    return Signal(name=name, width=width)
|
||
|
||
|
||
def _parse_observe_item(item: Any) -> ObserveSignal:
    """Parse one entry of the 'observes' array into an ObserveSignal.

    Each entry must be an object with a non-empty 'name', a non-empty
    Verilog 'expr', and an optional positive integer 'width' (default 1).
    """
    if not isinstance(item, dict):
        raise ChipVerifyError("Observe entry must be an object")

    name = item.get("name")
    if not isinstance(name, str) or not name:
        raise ChipVerifyError("Observe signal name must be a non-empty string")

    expr = item.get("expr")
    if not isinstance(expr, str) or not expr:
        raise ChipVerifyError(f"Observe signal {name!r} must provide non-empty expr")

    width = item.get("width", 1)
    if not isinstance(width, int) or width <= 0:
        raise ChipVerifyError(f"Observe signal {name!r} width must be positive int")

    return ObserveSignal(name=name, expr=expr, width=width)
|
||
|
||
|
||
def _parse_verilog_literal(value: str) -> int | None:
|
||
"""解析 Verilog 字面量(如 8'h1f),失败时返回 None。"""
|
||
|
||
m = re.fullmatch(r"(\d+)'([bBdDhHoO])([0-9a-fA-F_]+)", value)
|
||
if not m:
|
||
return None
|
||
|
||
_width_txt, base_ch, digits = m.groups()
|
||
base_map = {"b": 2, "d": 10, "h": 16, "o": 8}
|
||
base = base_map[base_ch.lower()]
|
||
digits = digits.replace("_", "")
|
||
return int(digits, base)
|
||
|
||
|
||
def parse_value(raw_value: Any, width: int, signal_name: str) -> int:
    """Parse a spec value into an int and range-check it against *width*.

    Accepts bool, int, or string forms: sized Verilog literals ("8'h1f"),
    0b/0x/0o prefixed strings, or plain decimal — all with optional
    underscore separators.  Raises ChipVerifyError for unsupported types,
    malformed numeric text (the original code leaked a raw ValueError from
    int() on input like "abc"), negative values, or values that do not fit
    in *width* bits.
    """
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(raw_value, bool):
        value = int(raw_value)
    elif isinstance(raw_value, int):
        value = raw_value
    elif isinstance(raw_value, str):
        txt = raw_value.strip()
        verilog_val = _parse_verilog_literal(txt)
        if verilog_val is not None:
            value = verilog_val
        else:
            try:
                if txt.startswith(("0b", "0B")):
                    value = int(txt[2:].replace("_", ""), 2)
                elif txt.startswith(("0x", "0X")):
                    value = int(txt[2:].replace("_", ""), 16)
                elif txt.startswith(("0o", "0O")):
                    value = int(txt[2:].replace("_", ""), 8)
                else:
                    value = int(txt.replace("_", ""), 10)
            except ValueError as exc:
                raise ChipVerifyError(
                    f"Signal {signal_name!r} has malformed numeric value {raw_value!r}"
                ) from exc
    else:
        raise ChipVerifyError(
            f"Signal {signal_name!r} value must be int/str/bool, got {type(raw_value)}"
        )

    if value < 0:
        raise ChipVerifyError(f"Signal {signal_name!r} value cannot be negative")

    max_value = (1 << width) - 1
    if value > max_value:
        raise ChipVerifyError(
            f"Signal {signal_name!r} value {value} exceeds width {width} (max {max_value})"
        )

    return value
|
||
|
||
|
||
def load_raw_spec(spec_path: Path) -> dict[str, Any]:
    """Load the verification spec JSON file and validate its root structure.

    Raises ChipVerifyError when the file is missing, is not valid JSON,
    or its root is not a JSON object.
    """
    if not spec_path.exists():
        raise ChipVerifyError(f"Spec file not found: {spec_path}")

    raw_text = spec_path.read_text(encoding="utf-8")
    try:
        parsed = json.loads(raw_text)
    except json.JSONDecodeError as exc:
        raise ChipVerifyError(f"Invalid JSON in spec file: {exc}") from exc

    if isinstance(parsed, dict):
        return parsed
    raise ChipVerifyError("Spec root must be a JSON object")
|
||
|
||
|
||
def parse_truth_table_spec(spec: dict[str, Any]) -> tuple[list[Signal], list[Signal], list[Case]]:
    """Parse a truth_table spec into internal (inputs, outputs, cases).

    Validates that each case supplies exactly the declared input and
    output signals (no missing, no extras) and that every value fits the
    declared signal width.  Raises ChipVerifyError on any violation.
    """
    raw_inputs = spec.get("inputs")
    raw_outputs = spec.get("outputs")
    raw_cases = spec.get("cases")

    if not isinstance(raw_inputs, list) or not raw_inputs:
        raise ChipVerifyError("Spec field 'inputs' must be a non-empty array")
    if not isinstance(raw_outputs, list) or not raw_outputs:
        raise ChipVerifyError("Spec field 'outputs' must be a non-empty array")
    if not isinstance(raw_cases, list) or not raw_cases:
        raise ChipVerifyError("Spec field 'cases' must be a non-empty array")

    inputs = [_parse_signal_item(item, "input") for item in raw_inputs]
    outputs = [_parse_signal_item(item, "output") for item in raw_outputs]

    input_names = {s.name for s in inputs}
    output_names = {s.name for s in outputs}

    # Set size vs list size detects duplicate names within each group.
    if len(input_names) != len(inputs):
        raise ChipVerifyError("Duplicate names found in input signals")
    if len(output_names) != len(outputs):
        raise ChipVerifyError("Duplicate names found in output signals")

    overlap = input_names & output_names
    if overlap:
        joined = ", ".join(sorted(overlap))
        raise ChipVerifyError(f"Signal names cannot be both input and output: {joined}")

    # Name -> Signal lookup, used below for per-value width validation.
    input_map = {s.name: s for s in inputs}
    output_map = {s.name: s for s in outputs}

    cases: list[Case] = []
    for idx, raw_case in enumerate(raw_cases):
        if not isinstance(raw_case, dict):
            raise ChipVerifyError(f"Case #{idx} must be an object")

        in_obj = raw_case.get("in")
        out_obj = raw_case.get("out")
        if not isinstance(in_obj, dict) or not isinstance(out_obj, dict):
            raise ChipVerifyError(f"Case #{idx} must include object fields 'in' and 'out'")

        # Each case must cover the declared signals exactly.
        missing_inputs = input_names - set(in_obj.keys())
        extra_inputs = set(in_obj.keys()) - input_names
        missing_outputs = output_names - set(out_obj.keys())
        extra_outputs = set(out_obj.keys()) - output_names

        if missing_inputs:
            raise ChipVerifyError(f"Case #{idx} missing input fields: {sorted(missing_inputs)}")
        if extra_inputs:
            raise ChipVerifyError(f"Case #{idx} has unknown input fields: {sorted(extra_inputs)}")
        if missing_outputs:
            raise ChipVerifyError(f"Case #{idx} missing output fields: {sorted(missing_outputs)}")
        if extra_outputs:
            raise ChipVerifyError(f"Case #{idx} has unknown output fields: {sorted(extra_outputs)}")

        in_vals = {
            name: parse_value(raw, input_map[name].width, name) for name, raw in in_obj.items()
        }
        out_vals = {
            name: parse_value(raw, output_map[name].width, name)
            for name, raw in out_obj.items()
        }

        case_name = str(raw_case.get("name", f"case_{idx}"))
        cases.append(Case(name=case_name, input_values=in_vals, output_values=out_vals))

    return inputs, outputs, cases
|
||
|
||
|
||
def _expectation_operator(raw_op: str) -> str:
|
||
"""标准化并校验断言比较运算符。"""
|
||
|
||
op = raw_op.lower()
|
||
allowed = {"eq", "ne", "gt", "ge", "lt", "le"}
|
||
if op not in allowed:
|
||
raise ChipVerifyError(f"Unsupported expectation op: {raw_op}")
|
||
return op
|
||
|
||
|
||
def parse_sequential_spec(spec: dict[str, Any]) -> SequentialSpec:
    """Parse a 'sequential' spec object into an internal SequentialSpec.

    Validates clock/reset configuration, signal declarations and name
    uniqueness, init values, auto-stimulus settings, stimulus events, and
    assertions.  Raises ChipVerifyError on any malformed field.
    """
    # --- clock -----------------------------------------------------------
    raw_clock = spec.get("clock", {})
    if not isinstance(raw_clock, dict):
        raise ChipVerifyError("Spec field 'clock' must be an object")

    clock_name = str(raw_clock.get("name", "clk"))
    clock_period = raw_clock.get("period_ns", 10)
    clock_initial = raw_clock.get("initial", 0)

    if not isinstance(clock_period, int) or clock_period <= 0:
        raise ChipVerifyError("clock.period_ns must be a positive integer")
    if clock_initial not in (0, 1):
        raise ChipVerifyError("clock.initial must be 0 or 1")

    clock = ClockConfig(name=clock_name, period_ns=clock_period, initial=clock_initial)

    # --- reset (optional) ------------------------------------------------
    reset: ResetConfig | None = None
    raw_reset = spec.get("reset")
    if raw_reset is not None:
        if not isinstance(raw_reset, dict):
            raise ChipVerifyError("Spec field 'reset' must be an object when provided")
        reset_name = str(raw_reset.get("name", "rst"))
        reset_active = raw_reset.get("active", 1)
        reset_cycles = raw_reset.get("cycles", 2)
        if reset_active not in (0, 1):
            raise ChipVerifyError("reset.active must be 0 or 1")
        if not isinstance(reset_cycles, int) or reset_cycles < 0:
            raise ChipVerifyError("reset.cycles must be a non-negative integer")
        reset = ResetConfig(name=reset_name, active=reset_active, cycles=reset_cycles)

    # --- signal declarations ---------------------------------------------
    raw_inputs = spec.get("inputs", [])
    raw_outputs = spec.get("outputs", [])
    raw_observes = spec.get("observes", [])

    if not isinstance(raw_inputs, list):
        raise ChipVerifyError("Spec field 'inputs' must be an array")
    if not isinstance(raw_outputs, list):
        raise ChipVerifyError("Spec field 'outputs' must be an array")
    if not isinstance(raw_observes, list):
        raise ChipVerifyError("Spec field 'observes' must be an array")

    inputs = [_parse_signal_item(item, "input") for item in raw_inputs]
    outputs = [_parse_signal_item(item, "output") for item in raw_outputs]
    observes = [_parse_observe_item(item) for item in raw_observes]

    input_names = {s.name for s in inputs}
    output_names = {s.name for s in outputs}
    observe_names = {s.name for s in observes}

    # Set size vs list size detects duplicates within each group.
    if len(input_names) != len(inputs):
        raise ChipVerifyError("Duplicate names found in input signals")
    if len(output_names) != len(outputs):
        raise ChipVerifyError("Duplicate names found in output signals")
    if len(observe_names) != len(observes):
        raise ChipVerifyError("Duplicate names found in observe signals")

    # Cross-group collisions are rejected pairwise.
    overlaps = [
        ("input/output", input_names & output_names),
        ("input/observe", input_names & observe_names),
        ("output/observe", output_names & observe_names),
    ]
    for label, names in overlaps:
        if names:
            raise ChipVerifyError(f"Signal name collision in {label}: {sorted(names)}")

    if clock.name in input_names or clock.name in output_names or clock.name in observe_names:
        raise ChipVerifyError("clock.name cannot overlap with input/output/observe signal names")
    if reset is not None and (
        reset.name in input_names or reset.name in output_names or reset.name in observe_names
    ):
        raise ChipVerifyError("reset.name cannot overlap with input/output/observe signal names")

    if not inputs and not outputs and not observes:
        raise ChipVerifyError(
            "Sequential spec requires at least one signal in inputs/outputs/observes"
        )

    # Name -> width mapping used to validate every value referenced below.
    signal_widths: dict[str, int] = {clock.name: 1}
    if reset is not None:
        signal_widths[reset.name] = 1
    for sig in inputs:
        signal_widths[sig.name] = sig.width
    for sig in outputs:
        signal_widths[sig.name] = sig.width
    for sig in observes:
        signal_widths[sig.name] = sig.width

    # --- init values (only inputs and reset are writable) ----------------
    raw_init = spec.get("init", {})
    if not isinstance(raw_init, dict):
        raise ChipVerifyError("Spec field 'init' must be an object")

    init_values: dict[str, int] = {}
    writable_init_names = set(input_names)
    if reset is not None:
        writable_init_names.add(reset.name)

    for name, raw in raw_init.items():
        if name not in writable_init_names:
            raise ChipVerifyError(
                f"init field {name!r} is not writable (allowed: inputs and reset only)"
            )
        init_values[name] = parse_value(raw, signal_widths[name], name)

    # --- auto stimulus: bool shorthand or full object ---------------------
    raw_auto = spec.get("auto_stimulus", {"enabled": True})
    if isinstance(raw_auto, bool):
        auto_stimulus = AutoStimulusConfig(
            enabled=raw_auto,
            start_cycle=0,
            mode="binary_count",
            seed=1,
        )
    elif isinstance(raw_auto, dict):
        enabled = bool(raw_auto.get("enabled", True))
        start_cycle = raw_auto.get("start_cycle", 0)
        mode = str(raw_auto.get("mode", "binary_count"))
        seed = raw_auto.get("seed", 1)

        if not isinstance(start_cycle, int) or start_cycle < 0:
            raise ChipVerifyError("auto_stimulus.start_cycle must be a non-negative integer")
        if mode not in {"binary_count", "random"}:
            raise ChipVerifyError(
                "auto_stimulus.mode currently supports 'binary_count' or 'random'"
            )
        if not isinstance(seed, int):
            raise ChipVerifyError("auto_stimulus.seed must be an integer")

        auto_stimulus = AutoStimulusConfig(
            enabled=enabled,
            start_cycle=start_cycle,
            mode=mode,
            seed=seed,
        )
    else:
        raise ChipVerifyError("Spec field 'auto_stimulus' must be bool or object")

    # --- explicit stimulus events ----------------------------------------
    raw_stimulus = spec.get("stimulus", [])
    if not isinstance(raw_stimulus, list):
        raise ChipVerifyError("Spec field 'stimulus' must be an array")

    stimulus: list[SeqStimulusEvent] = []
    writable_stimulus_names = set(input_names)
    if reset is not None:
        writable_stimulus_names.add(reset.name)

    for idx, item in enumerate(raw_stimulus):
        if not isinstance(item, dict):
            raise ChipVerifyError(f"Stimulus #{idx} must be an object")

        cycle = item.get("cycle")
        set_obj = item.get("set")
        if not isinstance(cycle, int) or cycle < 0:
            raise ChipVerifyError(f"Stimulus #{idx} cycle must be a non-negative integer")
        if not isinstance(set_obj, dict) or not set_obj:
            raise ChipVerifyError(f"Stimulus #{idx} set must be a non-empty object")

        values: dict[str, int] = {}
        for name, raw in set_obj.items():
            if name not in writable_stimulus_names:
                raise ChipVerifyError(
                    f"Stimulus #{idx} writes unknown/non-writable signal {name!r}"
                )
            values[name] = parse_value(raw, signal_widths[name], name)

        stimulus.append(SeqStimulusEvent(cycle=cycle, values=values))

    # --- assertions -------------------------------------------------------
    raw_assertions = spec.get("assertions", [])
    if not isinstance(raw_assertions, list):
        raise ChipVerifyError("Spec field 'assertions' must be an array")

    def _parse_assert_expectations(
        raw_expect_obj: dict[str, Any],
        assertion_idx: int,
        field_label: str,
    ) -> list[SeqExpectation]:
        # Turn an expect/until_expect object into SeqExpectation entries.
        # Each value is either a bare value (op defaults to "eq") or an
        # object {"op": ..., "value": ...}.
        parsed: list[SeqExpectation] = []

        for signal_name, raw_exp in raw_expect_obj.items():
            if signal_name not in signal_widths:
                raise ChipVerifyError(
                    f"Assertion #{assertion_idx} references unknown signal {signal_name!r}"
                )

            width = signal_widths[signal_name]
            op = "eq"
            raw_value: Any

            if isinstance(raw_exp, dict):
                op = _expectation_operator(str(raw_exp.get("op", "eq")))
                if "value" not in raw_exp:
                    raise ChipVerifyError(
                        f"Assertion #{assertion_idx} signal {signal_name!r} in {field_label} missing field 'value'"
                    )
                raw_value = raw_exp["value"]
            else:
                raw_value = raw_exp

            value = parse_value(raw_value, width, signal_name)
            parsed.append(
                SeqExpectation(signal=signal_name, op=op, value=value, width=width)
            )

        return parsed

    assertions: list[SeqAssertion] = []
    for idx, item in enumerate(raw_assertions):
        if not isinstance(item, dict):
            raise ChipVerifyError(f"Assertion #{idx} must be an object")

        cycle = item.get("cycle")
        raw_expect = item.get("expect")

        if not isinstance(cycle, int) or cycle < 0:
            raise ChipVerifyError(f"Assertion #{idx} cycle must be a non-negative integer")
        if not isinstance(raw_expect, dict) or not raw_expect:
            raise ChipVerifyError(f"Assertion #{idx} expect must be a non-empty object")

        name = str(item.get("name", f"assert_{idx}"))

        mode = str(item.get("mode", "cycle")).lower()
        within = 0

        # A 'window' object takes precedence over top-level mode/within.
        raw_window = item.get("window")
        if raw_window is not None:
            if not isinstance(raw_window, dict):
                raise ChipVerifyError(f"Assertion #{idx} window must be an object")
            window_type = str(raw_window.get("type", "eventually")).lower()
            window_within = raw_window.get("within")
            if window_type not in {"eventually", "always", "never", "until"}:
                raise ChipVerifyError(
                    f"Assertion #{idx} window.type currently supports only 'eventually', 'always', 'never', 'until'"
                )
            if not isinstance(window_within, int) or window_within < 0:
                raise ChipVerifyError(
                    f"Assertion #{idx} window.within must be a non-negative integer"
                )
            mode = window_type
            within = window_within
        elif mode in {"eventually", "always", "never", "until"}:
            raw_within = item.get("within")
            if not isinstance(raw_within, int) or raw_within < 0:
                raise ChipVerifyError(
                    f"Assertion #{idx} with mode={mode} requires non-negative within"
                )
            within = raw_within
        elif mode == "cycle":
            within = 0
        else:
            raise ChipVerifyError(
                f"Assertion #{idx} mode must be 'cycle', 'eventually', 'always', 'never', or 'until'"
            )

        expectations = _parse_assert_expectations(raw_expect, idx, "expect")
        until_expectations: list[SeqExpectation] | None = None

        # 'until' mode additionally requires a termination condition set.
        if mode == "until":
            raw_until_expect = item.get("until_expect")
            if not isinstance(raw_until_expect, dict) or not raw_until_expect:
                raise ChipVerifyError(
                    f"Assertion #{idx} with mode=until requires non-empty 'until_expect' object"
                )
            until_expectations = _parse_assert_expectations(
                raw_until_expect,
                idx,
                "until_expect",
            )

        assertions.append(
            SeqAssertion(
                name=name,
                cycle=cycle,
                expectations=expectations,
                mode=mode,
                within=within,
                until_expectations=until_expectations,
            )
        )

    max_cycles = spec.get("max_cycles", 16)
    if not isinstance(max_cycles, int) or max_cycles <= 0:
        raise ChipVerifyError("Spec field 'max_cycles' must be a positive integer")

    return SequentialSpec(
        clock=clock,
        reset=reset,
        inputs=inputs,
        outputs=outputs,
        observes=observes,
        init_values=init_values,
        stimulus=stimulus,
        assertions=assertions,
        max_cycles=max_cycles,
        auto_stimulus=auto_stimulus,
    )
|
||
|
||
|
||
def verilog_decl(keyword: str, sig: Signal) -> str:
    """Render a Verilog declaration for *sig* using *keyword* (reg/wire)."""
    is_scalar = sig.width == 1
    if is_scalar:
        return f"{keyword} {sig.name};"
    return f"{keyword} [{sig.width - 1}:0] {sig.name};"
|
||
|
||
|
||
def verilog_const(width: int, value: int) -> str:
    """Format *value* as a sized Verilog decimal constant, e.g. ``8'd255``."""
    return "{}'d{}".format(width, value)
|
||
|
||
|
||
def _format_delay_number(value: float) -> str:
|
||
"""把延时值格式化为简洁文本,避免多余尾零。"""
|
||
|
||
txt = f"{value:.6f}".rstrip("0").rstrip(".")
|
||
return txt or "0"
|
||
|
||
|
||
def generate_truth_table_testbench(
    top_module: str,
    inputs: list[Signal],
    outputs: list[Signal],
    cases: list[Case],
    dump_vcd: str | None,
) -> str:
    """Generate a ready-to-simulate testbench for a combinational spec.

    For every case, drives the inputs, waits #1 for settling, then
    compares each output with !== (catches x/z) and emits CASE_PASS /
    CASE_FAIL / MISMATCH / SUMMARY $display lines that the result
    parsers match on.
    """
    lines: list[str] = []

    lines.append("`timescale 1ns/1ps")
    lines.append(f"module {TB_TOP};")
    lines.append("")

    # TB drives inputs (reg) and samples DUT outputs (wire).
    for sig in inputs:
        lines.append(f" {verilog_decl('reg', sig)}")
    for sig in outputs:
        lines.append(f" {verilog_decl('wire', sig)}")

    lines.append("")
    lines.append(" integer pass_count;")
    lines.append(" integer fail_count;")
    lines.append(" integer case_fail;")
    lines.append("")

    # Named-port DUT instantiation covering all declared signals.
    conn = ", ".join([f".{s.name}({s.name})" for s in (inputs + outputs)])
    lines.append(f" {top_module} dut ({conn});")
    lines.append("")
    lines.append(" initial begin")
    lines.append(" pass_count = 0;")
    lines.append(" fail_count = 0;")

    if dump_vcd:
        lines.append(f" $dumpfile(\"{dump_vcd}\");")
        lines.append(f" $dumpvars(0, {TB_TOP});")

    for idx, case in enumerate(cases):
        lines.append("")
        lines.append(f" // {case.name}")
        for sig in inputs:
            value = case.input_values[sig.name]
            lines.append(f" {sig.name} = {verilog_const(sig.width, value)};")
        # Small delay so combinational logic settles before checking.
        lines.append(" #1;")
        lines.append(" case_fail = 0;")

        for sig in outputs:
            expected = case.output_values[sig.name]
            exp_v = verilog_const(sig.width, expected)
            # !== is the case-inequality operator: x/z also count as mismatch.
            lines.append(f" if ({sig.name} !== {exp_v}) begin")
            lines.append(" case_fail = 1;")
            lines.append(
                f" $display(\"MISMATCH case={idx} name={case.name} signal={sig.name} expected=%0d got=%0d\", {exp_v}, {sig.name});"
            )
            lines.append(" end")

        lines.append(" if (case_fail == 0) begin")
        lines.append(" pass_count = pass_count + 1;")
        lines.append(
            f" $display(\"CASE_PASS case={idx} name={case.name}\");"
        )
        lines.append(" end else begin")
        lines.append(" fail_count = fail_count + 1;")
        lines.append(
            f" $display(\"CASE_FAIL case={idx} name={case.name}\");"
        )
        lines.append(" end")

    lines.append("")
    # SUMMARY line is parsed by parse_truth_table_summary().
    lines.append(
        f" $display(\"SUMMARY total={len(cases)} pass=%0d fail=%0d\", pass_count, fail_count);"
    )
    lines.append(" $finish;")
    lines.append(" end")
    lines.append("endmodule")

    return "\n".join(lines) + "\n"
|
||
|
||
|
||
def generate_sequential_testbench(
    top_module: str,
    seq_spec: SequentialSpec,
    dump_vcd: str | None,
) -> str:
    """Generate a testbench with per-cycle sampling and coverage output
    for a sequential spec.

    Drives the clock in one initial block and, in a second initial
    block, applies reset/auto/explicit stimulus each cycle, waits for
    the posedge plus #1, and emits COV_SAMPLE lines for every sampleable
    signal followed by a final SEQ_DONE line.
    """
    lines: list[str] = []

    lines.append("`timescale 1ns/1ps")
    lines.append(f"module {TB_TOP};")
    lines.append("")

    # Clock/reset are TB-driven, hence reg.
    lines.append(f" reg {seq_spec.clock.name};")
    if seq_spec.reset is not None:
        lines.append(f" reg {seq_spec.reset.name};")

    for sig in seq_spec.inputs:
        lines.append(f" {verilog_decl('reg', sig)}")
    for sig in seq_spec.outputs:
        lines.append(f" {verilog_decl('wire', sig)}")

    # Observe signals become named wires assigned from user expressions.
    if seq_spec.observes:
        lines.append("")
        for obs in seq_spec.observes:
            if obs.width == 1:
                lines.append(f" wire {obs.name};")
            else:
                lines.append(f" wire [{obs.width - 1}:0] {obs.name};")
            lines.append(f" assign {obs.name} = {obs.expr};")

    lines.append("")
    lines.append(" integer cycle;")

    # rand_seed is only needed when random auto-stimulus is active.
    if seq_spec.auto_stimulus.enabled and seq_spec.auto_stimulus.mode == "random" and seq_spec.inputs:
        lines.append(" integer rand_seed;")

    # Observe signals are NOT DUT ports; only clock/reset/inputs/outputs are.
    port_signals: list[str] = [seq_spec.clock.name]
    if seq_spec.reset is not None:
        port_signals.append(seq_spec.reset.name)
    port_signals.extend(sig.name for sig in seq_spec.inputs)
    port_signals.extend(sig.name for sig in seq_spec.outputs)

    conn = ", ".join([f".{name}({name})" for name in port_signals])
    lines.append("")
    lines.append(f" {top_module} dut ({conn});")

    # Free-running clock: toggle every half period forever.
    half_period = _format_delay_number(seq_spec.clock.period_ns / 2.0)
    lines.append("")
    lines.append(" initial begin")
    lines.append(f" {seq_spec.clock.name} = 1'd{seq_spec.clock.initial};")
    lines.append(f" forever #{half_period} {seq_spec.clock.name} = ~{seq_spec.clock.name};")
    lines.append(" end")

    lines.append("")
    lines.append(" initial begin")

    if seq_spec.auto_stimulus.enabled and seq_spec.auto_stimulus.mode == "random" and seq_spec.inputs:
        lines.append(f" rand_seed = {seq_spec.auto_stimulus.seed};")

    # Reset starts at: explicit init value if given, else asserted when
    # reset cycles are configured, else deasserted.
    if seq_spec.reset is not None:
        if seq_spec.reset.name in seq_spec.init_values:
            init_rst = seq_spec.init_values[seq_spec.reset.name]
        elif seq_spec.reset.cycles > 0:
            init_rst = seq_spec.reset.active
        else:
            init_rst = 1 - seq_spec.reset.active
        lines.append(f" {seq_spec.reset.name} = 1'd{init_rst};")

    for sig in seq_spec.inputs:
        init_value = seq_spec.init_values.get(sig.name, 0)
        lines.append(f" {sig.name} = {verilog_const(sig.width, init_value)};")

    if dump_vcd:
        lines.append(f" $dumpfile(\"{dump_vcd}\");")
        lines.append(f" $dumpvars(0, {TB_TOP});")

    lines.append("")
    lines.append(f" for (cycle = 0; cycle < {seq_spec.max_cycles}; cycle = cycle + 1) begin")

    # Hold reset asserted for the first reset.cycles cycles.
    if seq_spec.reset is not None:
        inactive = 1 - seq_spec.reset.active
        lines.append(f" if (cycle < {seq_spec.reset.cycles}) begin")
        lines.append(f" {seq_spec.reset.name} = 1'd{seq_spec.reset.active};")
        lines.append(" end else begin")
        lines.append(f" {seq_spec.reset.name} = 1'd{inactive};")
        lines.append(" end")

    # Auto stimulus: binary_count packs the cycle counter across inputs
    # (each input takes its own bit slice); random uses $random per input.
    if seq_spec.auto_stimulus.enabled and seq_spec.inputs:
        lines.append(f" if (cycle >= {seq_spec.auto_stimulus.start_cycle}) begin")

        if seq_spec.auto_stimulus.mode == "binary_count":
            offset = 0
            for sig in seq_spec.inputs:
                mask = (1 << sig.width) - 1
                if offset == 0:
                    expr = f"((cycle - {seq_spec.auto_stimulus.start_cycle}) & {mask})"
                else:
                    expr = (
                        f"(((cycle - {seq_spec.auto_stimulus.start_cycle}) >> {offset}) & {mask})"
                    )
                lines.append(
                    f" {sig.name} = {verilog_const(sig.width, 0)} + {expr};"
                )
                offset += sig.width
        else:
            for sig in seq_spec.inputs:
                mask = (1 << sig.width) - 1
                lines.append(
                    f" {sig.name} = $random(rand_seed) & {mask};"
                )

        lines.append(" end")

    # Explicit stimulus events override auto stimulus for their cycle
    # (emitted after, so the later assignment wins).
    for event in seq_spec.stimulus:
        lines.append(f" if (cycle == {event.cycle}) begin")
        for name, value in event.values.items():
            # Look up the declared width; reset is always 1 bit.
            width = 1
            if seq_spec.reset is not None and name == seq_spec.reset.name:
                width = 1
            else:
                for sig in seq_spec.inputs:
                    if sig.name == name:
                        width = sig.width
                        break
            lines.append(f" {name} = {verilog_const(width, value)};")
        lines.append(" end")

    # Sample just after the active edge (+#1 lets NBA updates settle).
    lines.append(f" @(posedge {seq_spec.clock.name});")
    lines.append(" #1;")

    coverage_signal_names = [seq_spec.clock.name]
    if seq_spec.reset is not None:
        coverage_signal_names.append(seq_spec.reset.name)
    coverage_signal_names.extend(sig.name for sig in seq_spec.inputs)
    coverage_signal_names.extend(sig.name for sig in seq_spec.outputs)
    coverage_signal_names.extend(obs.name for obs in seq_spec.observes)

    # COV_SAMPLE lines are parsed back by parse_cov_samples().
    for name in coverage_signal_names:
        lines.append(
            f" $display(\"COV_SAMPLE cycle=%0d signal={name} value=%0d\", cycle, {name});"
        )

    lines.append(" end")
    lines.append("")
    # SEQ_DONE is parsed back by parse_sequential_done().
    lines.append(f" $display(\"SEQ_DONE cycles=%0d\", {seq_spec.max_cycles});")
    lines.append(" $finish;")
    lines.append(" end")
    lines.append("endmodule")

    return "\n".join(lines) + "\n"
|
||
|
||
|
||
def run_command(cmd: list[str], timeout: int) -> subprocess.CompletedProcess[str]:
    """Run an external command with captured text output and a timeout.

    check=False: a non-zero exit status is reported via returncode
    instead of raising; subprocess.TimeoutExpired still propagates.
    """
    completed = subprocess.run(
        cmd,
        check=False,
        text=True,
        capture_output=True,
        timeout=timeout,
    )
    return completed
|
||
|
||
|
||
def parse_truth_table_summary(stdout: str) -> tuple[int, int, int]:
    """Extract (total, pass, fail) from the SUMMARY line of simulation output.

    Raises ChipVerifyError when no SUMMARY line is present.
    """
    match = re.search(r"SUMMARY total=(\d+) pass=(\d+) fail=(\d+)", stdout)
    if match is None:
        raise ChipVerifyError("Simulation finished without SUMMARY line")

    total, passed, failed = (int(group) for group in match.groups())
    return total, passed, failed
|
||
|
||
|
||
def parse_sequential_done(stdout: str) -> int:
    """Extract the simulated cycle count from the SEQ_DONE line.

    Raises ChipVerifyError when no SEQ_DONE line is present.
    """
    match = re.search(r"SEQ_DONE cycles=(\d+)", stdout)
    if match is None:
        raise ChipVerifyError("Simulation finished without SEQ_DONE line")
    return int(match.group(1))
|
||
|
||
|
||
def parse_cov_samples(stdout: str) -> dict[int, dict[str, int]]:
    """Parse COV_SAMPLE log lines and group signal values by cycle.

    Non-matching lines are ignored, so arbitrary simulator chatter may be
    interleaved with the coverage samples.
    """

    sample_re = re.compile(
        r"^COV_SAMPLE cycle=(\d+) signal=([A-Za-z_][A-Za-z0-9_]*) value=(\d+)$"
    )
    by_cycle: dict[int, dict[str, int]] = {}

    for raw_line in stdout.splitlines():
        match = sample_re.match(raw_line.strip())
        if match is None:
            continue
        cycle = int(match.group(1))
        by_cycle.setdefault(cycle, {})[match.group(2)] = int(match.group(3))

    return by_cycle
def compute_coverage(
    samples: dict[int, dict[str, int]],
    signal_widths: dict[str, int],
) -> dict[str, dict[str, Any]]:
    """Derive unique-value and toggle coverage metrics from cycle samples.

    For each tracked signal, samples are walked in ascending cycle order;
    cycles where a signal was not sampled are simply skipped.  Exact value
    coverage is only reported for signals narrow enough to enumerate
    (width <= MAX_EXACT_COVERAGE_BITS).
    """

    ordered_cycles = sorted(samples)
    result: dict[str, dict[str, Any]] = {}

    for name, width in signal_widths.items():
        # Chronological value sequence for this signal only.
        history = [
            samples[cycle][name]
            for cycle in ordered_cycles
            if name in samples[cycle]
        ]
        seen = sorted(set(history))
        # A toggle is any change between consecutive sampled values.
        toggles = sum(1 for prev, cur in zip(history, history[1:]) if cur != prev)

        entry: dict[str, Any] = {
            "width": width,
            "samples": len(history),
            "unique_count": len(seen),
            "unique_values": seen,
            "toggle_count": toggles,
        }

        if width <= MAX_EXACT_COVERAGE_BITS:
            bins = 1 << width
            entry["total_bins"] = bins
            entry["value_coverage_pct"] = round(100.0 * len(seen) / bins, 2)
        else:
            # Too wide to enumerate value bins exactly.
            entry["total_bins"] = None
            entry["value_coverage_pct"] = None

        result[name] = entry

    return result
def _eval_relation(actual: int, op: str, expected: int) -> bool:
|
||
"""按比较操作符判断断言是否成立。"""
|
||
|
||
if op == "eq":
|
||
return actual == expected
|
||
if op == "ne":
|
||
return actual != expected
|
||
if op == "gt":
|
||
return actual > expected
|
||
if op == "ge":
|
||
return actual >= expected
|
||
if op == "lt":
|
||
return actual < expected
|
||
if op == "le":
|
||
return actual <= expected
|
||
raise ChipVerifyError(f"Unsupported assertion operator: {op}")
|
||
|
||
|
||
def _expectation_ok(sample: dict[str, int], exp: SeqExpectation) -> bool:
|
||
"""判断单个期望在当前采样点是否满足。"""
|
||
|
||
actual = sample.get(exp.signal)
|
||
if actual is None:
|
||
return False
|
||
return _eval_relation(actual, exp.op, exp.value)
|
||
|
||
|
||
def _expectations_all_ok(sample: dict[str, int], expectations: list[SeqExpectation]) -> bool:
|
||
"""判断一组期望在当前采样点是否全部满足。"""
|
||
|
||
return all(_expectation_ok(sample, exp) for exp in expectations)
|
||
|
||
|
||
def evaluate_sequential_assertions(
    assertions: list[SeqAssertion],
    samples: dict[int, dict[str, int]],
    cycles: int,
) -> tuple[list[dict[str, Any]], list[str]]:
    """Evaluate sequential assertions and produce both human-readable results
    and machine-parsable ``MISMATCH_ASSERT`` log lines.

    Args:
        assertions: Assertions to check; ``mode`` is one of ``cycle``,
            ``eventually``, ``always``, ``never`` or ``until``.
        samples: Per-cycle signal values, as produced by ``parse_cov_samples``.
        cycles: Number of cycles that were actually simulated.

    Returns:
        A pair ``(results, mismatch_lines)``: one result dict per assertion,
        plus ``MISMATCH_ASSERT`` lines describing each failure.

    Raises:
        ChipVerifyError: For an unsupported assertion mode, or an ``until``
            assertion that has no ``until_expectations``.
    """

    results: list[dict[str, Any]] = []
    mismatch_lines: list[str] = []

    for assertion in assertions:
        # --- "cycle" mode: every expectation must hold at one exact cycle. ---
        if assertion.mode == "cycle":
            cycle_values = samples.get(assertion.cycle)
            failed_items: list[str] = []

            if cycle_values is None:
                # The target cycle never appeared in the sampled log at all.
                failed_items.append(
                    f"cycle {assertion.cycle} not sampled"
                )
            else:
                for exp in assertion.expectations:
                    if exp.signal not in cycle_values:
                        # Signal absent at the sampled cycle: report got=NA.
                        failed_items.append(
                            f"signal {exp.signal} missing at cycle {assertion.cycle}"
                        )
                        mismatch_lines.append(
                            "MISMATCH_ASSERT "
                            f"mode=cycle cycle={assertion.cycle} name={assertion.name} "
                            f"signal={exp.signal} op={exp.op} expected={exp.value} got=NA"
                        )
                        continue

                    actual = cycle_values[exp.signal]
                    if not _eval_relation(actual, exp.op, exp.value):
                        failed_items.append(
                            f"{exp.signal} {exp.op} {exp.value} (got {actual})"
                        )
                        mismatch_lines.append(
                            "MISMATCH_ASSERT "
                            f"mode=cycle cycle={assertion.cycle} name={assertion.name} "
                            f"signal={exp.signal} op={exp.op} expected={exp.value} got={actual}"
                        )

            if failed_items:
                if cycle_values is None:
                    # The missing-cycle case above produced no per-signal
                    # mismatch lines, so emit one synthetic wildcard line.
                    mismatch_lines.append(
                        "MISMATCH_ASSERT "
                        f"mode=cycle cycle={assertion.cycle} name={assertion.name} "
                        "signal=* op=eq expected=NA got=NA"
                    )
                results.append(
                    {
                        "name": assertion.name,
                        "status": "fail",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": None,
                        "message": f"cycle assertion failed at cycle {assertion.cycle}",
                        "details": "; ".join(failed_items),
                    }
                )
            else:
                results.append(
                    {
                        "name": assertion.name,
                        "status": "pass",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": assertion.cycle,
                        "message": f"cycle assertion passed at cycle {assertion.cycle}",
                        "details": "",
                    }
                )
            continue

        if assertion.mode not in {"eventually", "always", "never", "until"}:
            raise ChipVerifyError(f"Unsupported assertion mode: {assertion.mode}")

        # --- Windowed modes share a common observation window. ---
        window_start = assertion.cycle
        window_end = assertion.cycle + assertion.within
        # Clamp the window to what was actually simulated.
        effective_end = min(window_end, cycles - 1)

        observed_cycles: list[int] = []
        if effective_end >= window_start:
            observed_cycles = list(range(window_start, effective_end + 1))

        # Human-readable per-cycle dump of the signals under test, used as
        # failure details for all windowed modes.
        observed_details: list[str] = []
        for c in observed_cycles:
            cycle_values = samples.get(c, {})
            signal_pairs = []
            for exp in assertion.expectations:
                if exp.signal in cycle_values:
                    signal_pairs.append(f"{exp.signal}={cycle_values[exp.signal]}")
                else:
                    signal_pairs.append(f"{exp.signal}=NA")
            observed_details.append(f"{c}[{', '.join(signal_pairs)}]")

        # True when the requested window extends past the end of simulation.
        window_partially_outside = window_end >= cycles
        observed_detail_text = "observed: " + "; ".join(observed_details)

        # --- "eventually": expectations must hold at some cycle in window. ---
        if assertion.mode == "eventually":
            matched_cycle: int | None = None
            for c in observed_cycles:
                if _expectations_all_ok(samples.get(c, {}), assertion.expectations):
                    matched_cycle = c
                    break

            if matched_cycle is not None:
                results.append(
                    {
                        "name": assertion.name,
                        "status": "pass",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": matched_cycle,
                        "message": (
                            "eventually assertion satisfied "
                            f"at cycle {matched_cycle} in [{window_start}, {window_end}]"
                        ),
                        "details": "",
                    }
                )
                continue

            reason = "not_satisfied"
            if window_partially_outside:
                # The condition might have occurred past the simulated range.
                reason = "not_satisfied_in_observed_window"

            mismatch_lines.append(
                "MISMATCH_ASSERT "
                f"mode=eventually window={window_start}:{window_end} "
                f"name={assertion.name} reason={reason}"
            )

            results.append(
                {
                    "name": assertion.name,
                    "status": "fail",
                    "mode": assertion.mode,
                    "cycle": assertion.cycle,
                    "within": assertion.within,
                    "matched_cycle": None,
                    "message": (
                        "eventually assertion not satisfied "
                        f"in [{window_start}, {window_end}]"
                    ),
                    "details": observed_detail_text,
                }
            )
            continue

        # --- "always": expectations must hold at every cycle in window. ---
        if assertion.mode == "always":
            violating_cycle: int | None = None
            for c in observed_cycles:
                if not _expectations_all_ok(samples.get(c, {}), assertion.expectations):
                    violating_cycle = c
                    break

            if violating_cycle is not None:
                mismatch_lines.append(
                    "MISMATCH_ASSERT "
                    f"mode=always window={window_start}:{window_end} "
                    f"name={assertion.name} reason=violation cycle={violating_cycle}"
                )
                results.append(
                    {
                        "name": assertion.name,
                        "status": "fail",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": None,
                        "message": (
                            "always assertion violated "
                            f"at cycle {violating_cycle} in [{window_start}, {window_end}]"
                        ),
                        "details": observed_detail_text,
                    }
                )
                continue

            if window_partially_outside:
                # An "always" claim over unobserved cycles cannot be proven.
                mismatch_lines.append(
                    "MISMATCH_ASSERT "
                    f"mode=always window={window_start}:{window_end} "
                    f"name={assertion.name} reason=incomplete_window"
                )
                results.append(
                    {
                        "name": assertion.name,
                        "status": "fail",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": None,
                        "message": (
                            "always assertion window exceeds simulated cycles"
                        ),
                        "details": observed_detail_text,
                    }
                )
                continue

            results.append(
                {
                    "name": assertion.name,
                    "status": "pass",
                    "mode": assertion.mode,
                    "cycle": assertion.cycle,
                    "within": assertion.within,
                    "matched_cycle": window_end,
                    "message": (
                        "always assertion satisfied "
                        f"for [{window_start}, {window_end}]"
                    ),
                    "details": "",
                }
            )
            continue

        # --- "never": expectations must hold at no cycle in window. ---
        if assertion.mode == "never":
            hit_cycle: int | None = None
            for c in observed_cycles:
                if _expectations_all_ok(samples.get(c, {}), assertion.expectations):
                    hit_cycle = c
                    break

            if hit_cycle is not None:
                mismatch_lines.append(
                    "MISMATCH_ASSERT "
                    f"mode=never window={window_start}:{window_end} "
                    f"name={assertion.name} reason=hit cycle={hit_cycle}"
                )
                results.append(
                    {
                        "name": assertion.name,
                        "status": "fail",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": hit_cycle,
                        "message": (
                            "never assertion violated "
                            f"at cycle {hit_cycle} in [{window_start}, {window_end}]"
                        ),
                        "details": observed_detail_text,
                    }
                )
                continue

            if window_partially_outside:
                # Same as "always": unobserved cycles make the claim unprovable.
                mismatch_lines.append(
                    "MISMATCH_ASSERT "
                    f"mode=never window={window_start}:{window_end} "
                    f"name={assertion.name} reason=incomplete_window"
                )
                results.append(
                    {
                        "name": assertion.name,
                        "status": "fail",
                        "mode": assertion.mode,
                        "cycle": assertion.cycle,
                        "within": assertion.within,
                        "matched_cycle": None,
                        "message": "never assertion window exceeds simulated cycles",
                        "details": observed_detail_text,
                    }
                )
                continue

            results.append(
                {
                    "name": assertion.name,
                    "status": "pass",
                    "mode": assertion.mode,
                    "cycle": assertion.cycle,
                    "within": assertion.within,
                    "matched_cycle": None,
                    "message": (
                        "never assertion satisfied "
                        f"for [{window_start}, {window_end}]"
                    ),
                    "details": "",
                }
            )
            continue

        # --- "until": expectations must hold on every cycle before the
        # first cycle where until_expectations trigger. ---
        until_expectations = assertion.until_expectations or []
        if not until_expectations:
            raise ChipVerifyError(
                f"Assertion {assertion.name!r} with mode=until requires until_expectations"
            )

        trigger_cycle: int | None = None
        for c in observed_cycles:
            if _expectations_all_ok(samples.get(c, {}), until_expectations):
                trigger_cycle = c
                break

        if trigger_cycle is None:
            reason = "no_trigger"
            if window_partially_outside:
                reason = "no_trigger_in_observed_window"
            mismatch_lines.append(
                "MISMATCH_ASSERT "
                f"mode=until window={window_start}:{window_end} "
                f"name={assertion.name} reason={reason}"
            )
            results.append(
                {
                    "name": assertion.name,
                    "status": "fail",
                    "mode": assertion.mode,
                    "cycle": assertion.cycle,
                    "within": assertion.within,
                    "matched_cycle": None,
                    "message": (
                        "until assertion failed because trigger condition was not met "
                        f"in [{window_start}, {window_end}]"
                    ),
                    "details": observed_detail_text,
                }
            )
            continue

        # Check the precondition only on cycles strictly before the trigger.
        violating_cycle: int | None = None
        for c in observed_cycles:
            if c >= trigger_cycle:
                break
            if not _expectations_all_ok(samples.get(c, {}), assertion.expectations):
                violating_cycle = c
                break

        if violating_cycle is not None:
            mismatch_lines.append(
                "MISMATCH_ASSERT "
                f"mode=until window={window_start}:{window_end} "
                f"name={assertion.name} reason=precondition_violation cycle={violating_cycle}"
            )
            results.append(
                {
                    "name": assertion.name,
                    "status": "fail",
                    "mode": assertion.mode,
                    "cycle": assertion.cycle,
                    "within": assertion.within,
                    "matched_cycle": trigger_cycle,
                    "message": (
                        "until assertion violated before trigger "
                        f"at cycle {violating_cycle} (trigger at {trigger_cycle})"
                    ),
                    "details": observed_detail_text,
                }
            )
            continue

        results.append(
            {
                "name": assertion.name,
                "status": "pass",
                "mode": assertion.mode,
                "cycle": assertion.cycle,
                "within": assertion.within,
                "matched_cycle": trigger_cycle,
                "message": (
                    "until assertion satisfied "
                    f"with trigger at cycle {trigger_cycle} in [{window_start}, {window_end}]"
                ),
                "details": "",
            }
        )

    return results, mismatch_lines
def parse_truth_table_case_results(stdout: str) -> list[dict[str, Any]]:
    """Build per-case pass/fail records from the combinational simulator log."""

    verdict_patterns = (
        ("pass", re.compile(r"^CASE_PASS case=(\d+) name=(.+)$")),
        ("fail", re.compile(r"^CASE_FAIL case=(\d+) name=(.+)$")),
    )
    mismatch_re = re.compile(
        r"^MISMATCH case=(\d+) name=(.*?) signal=([A-Za-z_][A-Za-z0-9_]*) expected=(\d+) got=(\d+)$"
    )

    status_by_case: dict[int, tuple[str, str]] = {}
    mismatches_by_case: dict[int, list[str]] = {}

    for raw_line in stdout.splitlines():
        text = raw_line.strip()

        verdict_match = None
        for verdict, pattern in verdict_patterns:
            m = pattern.match(text)
            if m:
                status_by_case[int(m.group(1))] = (m.group(2), verdict)
                verdict_match = m
                break
        if verdict_match is not None:
            continue

        m = mismatch_re.match(text)
        if m:
            case_idx = int(m.group(1))
            mismatches_by_case.setdefault(case_idx, []).append(
                f"{m.group(3)}: expected {m.group(4)}, got {m.group(5)}"
            )

    results: list[dict[str, Any]] = []
    # A case seen only via MISMATCH lines defaults to a synthetic fail entry.
    for case_idx in sorted(set(status_by_case) | set(mismatches_by_case)):
        case_name, verdict = status_by_case.get(case_idx, (f"case_{case_idx}", "fail"))
        passed = verdict == "pass"
        results.append(
            {
                "name": case_name,
                "status": verdict,
                "case": case_idx,
                "message": "case passed" if passed else "case failed",
                "details": "" if passed else "; ".join(mismatches_by_case.get(case_idx, [])),
                "mode": "case",
                "cycle": None,
                "within": None,
                "matched_cycle": None,
            }
        )

    return results
def ensure_executable(name: str, path: str) -> None:
    """Fail fast when a required external tool cannot be located on PATH.

    Raises:
        ChipVerifyError: When *path* does not resolve to an executable.
    """

    resolved = shutil.which(path)
    if resolved is None:
        raise ChipVerifyError(f"Executable not found for {name}: {path}")
def _load_spec_kind(spec: dict[str, Any]) -> str:
|
||
"""读取并校验规格类型字段。"""
|
||
|
||
kind = str(spec.get("kind", "truth_table"))
|
||
if kind not in {"truth_table", "sequential"}:
|
||
raise ChipVerifyError("Spec field 'kind' must be 'truth_table' or 'sequential'")
|
||
return kind
|
||
|
||
|
||
def _emit_reports(
    args: argparse.Namespace,
    report: dict[str, Any],
    testcases: list[dict[str, Any]],
) -> None:
    """Emit the JSON, JUnit and CSV reports selected via CLI arguments."""

    # Each writer is a no-op when its destination path argument is unset.
    emitters = (
        (write_report, (args.report, report)),
        (write_junit_report, (args.junit, report, testcases)),
        (write_csv_report, (args.csv, report, testcases)),
    )
    for writer, writer_args in emitters:
        writer(*writer_args)
def verify_run(args: argparse.Namespace) -> int:
    """Run one full verification pass: compile, simulate, judge, emit reports.

    Returns:
        0 on pass, 1 when cases/assertions fail, 2 on compile or runtime
        errors from the external tools.

    Raises:
        ChipVerifyError: For missing tools/files or invalid specs.
    """

    # Fail fast if the required EDA tools are not available.
    ensure_executable("iverilog", args.iverilog)
    ensure_executable("vvp", args.vvp)

    design_files = [Path(p).resolve() for p in args.design]
    for vf in design_files:
        if not vf.exists():
            raise ChipVerifyError(f"Design file not found: {vf}")

    spec_path = Path(args.spec).resolve()
    raw_spec = load_raw_spec(spec_path)
    spec_kind = _load_spec_kind(raw_spec)

    # Report skeleton; "result" is overwritten as the run progresses.
    report: dict[str, Any] = {
        "timestamp": datetime.now().isoformat(timespec="seconds"),
        "tool": "SLWChipVerify",
        "top": args.top,
        "spec": str(spec_path),
        "kind": spec_kind,
        "design_files": [str(p) for p in design_files],
        "result": "error",
    }

    coverage_widths: dict[str, int] | None = None
    seq_spec: SequentialSpec | None = None

    # Generate the testbench text matching the spec kind.
    if spec_kind == "truth_table":
        inputs, outputs, cases = parse_truth_table_spec(raw_spec)
        tb_text = generate_truth_table_testbench(
            args.top,
            inputs,
            outputs,
            cases,
            args.dump_vcd,
        )
    else:
        seq_spec = parse_sequential_spec(raw_spec)
        tb_text = generate_sequential_testbench(
            args.top,
            seq_spec,
            args.dump_vcd,
        )
        coverage_widths = seq_spec.coverage_signal_widths()

    # Work directory: user-supplied when --keep is set, otherwise a
    # temporary directory cleaned up in the finally block.
    temp_dir_ctx: tempfile.TemporaryDirectory[str] | None = None
    if args.keep:
        workdir = Path(args.workdir).resolve()
        workdir.mkdir(parents=True, exist_ok=True)
    else:
        temp_dir_ctx = tempfile.TemporaryDirectory(prefix="slwchipverify_")
        workdir = Path(temp_dir_ctx.name).resolve()

    try:
        tb_path = workdir / f"{TB_TOP}.v"
        sim_bin = workdir / "slwchipverify_sim.out"

        tb_path.write_text(tb_text, encoding="utf-8")

        # Compile DUT sources plus the generated testbench with iverilog.
        compile_cmd = [
            args.iverilog,
            "-g2012",
            "-s",
            TB_TOP,
            "-o",
            str(sim_bin),
            *[str(p) for p in design_files],
            str(tb_path),
        ]
        compile_result = run_command(compile_cmd, args.timeout)
        report["compile"] = {
            "command": compile_cmd,
            "returncode": compile_result.returncode,
            "stdout": compile_result.stdout,
            "stderr": compile_result.stderr,
        }

        if compile_result.returncode != 0:
            report["result"] = "compile_error"
            error_text = compile_result.stderr.strip() or compile_result.stdout.strip()
            print(f"{LOG_PREFIX} Compile failed")
            print(error_text)
            # Emit one synthetic "compile" testcase so reports are not empty.
            _emit_reports(
                args,
                report,
                [
                    {
                        "name": "compile",
                        "status": "error",
                        "message": "compile_error",
                        "details": error_text,
                        "mode": "meta",
                        "cycle": None,
                        "within": None,
                        "matched_cycle": None,
                    }
                ],
            )
            return 2

        # Run the compiled simulation with vvp.
        sim_cmd = [args.vvp, str(sim_bin)]
        sim_result = run_command(sim_cmd, args.timeout)
        report["simulate"] = {
            "command": sim_cmd,
            "returncode": sim_result.returncode,
            "stdout": sim_result.stdout,
            "stderr": sim_result.stderr,
        }

        if sim_result.returncode != 0:
            report["result"] = "runtime_error"
            error_text = sim_result.stderr.strip() or sim_result.stdout.strip()
            print(f"{LOG_PREFIX} Simulation failed")
            print(error_text)
            _emit_reports(
                args,
                report,
                [
                    {
                        "name": "simulation",
                        "status": "error",
                        "message": "runtime_error",
                        "details": error_text,
                        "mode": "meta",
                        "cycle": None,
                        "within": None,
                        "matched_cycle": None,
                    }
                ],
            )
            return 2

        # --- Combinational (truth table) verdict path. ---
        if spec_kind == "truth_table":
            total, passed, failed = parse_truth_table_summary(sim_result.stdout)
            mismatch_lines = [
                line for line in sim_result.stdout.splitlines() if line.startswith("MISMATCH ")
            ]

            case_results = parse_truth_table_case_results(sim_result.stdout)
            report["summary"] = {"total": total, "passed": passed, "failed": failed}
            report["mismatches"] = mismatch_lines
            report["case_results"] = case_results

            if failed == 0:
                report["result"] = "pass"
                print(f"{LOG_PREFIX} PASS: {passed}/{total} cases")
                _emit_reports(args, report, case_results)
                return 0

            report["result"] = "fail"
            print(f"{LOG_PREFIX} FAIL: passed={passed}, failed={failed}, total={total}")
            for line in mismatch_lines:
                print(line)
            _emit_reports(args, report, case_results)
            return 1

        # --- Sequential verdict path. ---
        if seq_spec is None:
            raise ChipVerifyError("Internal error: sequential spec is missing")

        cycles = parse_sequential_done(sim_result.stdout)
        samples = parse_cov_samples(sim_result.stdout)
        coverage = compute_coverage(samples, coverage_widths or {})

        assertion_results, mismatch_lines = evaluate_sequential_assertions(
            seq_spec.assertions,
            samples,
            cycles,
        )
        passed = sum(1 for item in assertion_results if item["status"] == "pass")
        failed = sum(1 for item in assertion_results if item["status"] == "fail")

        report["summary"] = {
            "cycles": cycles,
            "assertions": {
                "total": len(assertion_results),
                "passed": passed,
                "failed": failed,
            },
        }
        report["mismatches"] = mismatch_lines
        report["assertion_results"] = assertion_results
        report["coverage"] = coverage

        if failed == 0:
            report["result"] = "pass"
            print(
                f"{LOG_PREFIX} PASS: "
                f"cycles={cycles}, assertions={passed}/{len(assertion_results)}, assert_fail=0"
            )
            _emit_reports(args, report, assertion_results)
            return 0

        report["result"] = "fail"
        print(
            f"{LOG_PREFIX} FAIL: "
            f"cycles={cycles}, assert_pass={passed}, assert_fail={failed}"
        )
        for line in mismatch_lines:
            print(line)
        _emit_reports(args, report, assertion_results)
        return 1

    finally:
        # Remove the workdir only when we created a temporary one ourselves.
        if temp_dir_ctx is not None:
            temp_dir_ctx.cleanup()
def write_report(path: str | None, report: dict[str, Any]) -> None:
    """Serialize the aggregate report as JSON when an output path is given."""

    if not path:
        return

    target = Path(path).resolve()
    target.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(report, indent=2, ensure_ascii=False)
    target.write_text(payload, encoding="utf-8")
    print(f"{LOG_PREFIX} Report written: {target}")
def write_junit_report(
    path: str | None,
    report: dict[str, Any],
    testcases: list[dict[str, Any]],
) -> None:
    """Write a JUnit XML report so CI platforms can render the results."""

    if not path:
        return

    # With no per-case data, synthesize one record from the overall verdict.
    rows = testcases or [
        {
            "name": "slwchipverify_run",
            "status": "pass" if report.get("result") == "pass" else "fail",
            "message": str(report.get("result", "unknown")),
            "details": "",
            "mode": "meta",
            "cycle": None,
            "within": None,
            "matched_cycle": None,
        }
    ]

    n_failures = sum(row.get("status") == "fail" for row in rows)
    n_errors = sum(row.get("status") == "error" for row in rows)

    suite = ET.Element(
        "testsuite",
        {
            "name": f"slwchipverify.{report.get('kind', 'unknown')}",
            "tests": str(len(rows)),
            "failures": str(n_failures),
            "errors": str(n_errors),
        },
    )

    classname = f"{report.get('top', 'dut')}.{report.get('kind', 'unknown')}"
    for row in rows:
        node = ET.SubElement(
            suite,
            "testcase",
            {
                "classname": classname,
                "name": str(row.get("name", "unnamed")),
            },
        )
        status = str(row.get("status", "pass"))
        if status in ("fail", "error"):
            # Failed cases carry a <failure>, errored ones an <error> child.
            tag = "failure" if status == "fail" else "error"
            child = ET.SubElement(node, tag, {"message": str(row.get("message", ""))})
            child.text = str(row.get("details", ""))

    target = Path(path).resolve()
    target.parent.mkdir(parents=True, exist_ok=True)
    ET.ElementTree(suite).write(target, encoding="utf-8", xml_declaration=True)
    print(f"{LOG_PREFIX} JUnit report written: {target}")
def write_csv_report(
    path: str | None,
    report: dict[str, Any],
    testcases: list[dict[str, Any]],
) -> None:
    """Write one CSV row per testcase for downstream post-processing."""

    if not path:
        return

    target = Path(path).resolve()
    target.parent.mkdir(parents=True, exist_ok=True)

    columns = [
        "kind",
        "top",
        "name",
        "status",
        "message",
        "details",
        "mode",
        "cycle",
        "within",
        "matched_cycle",
    ]

    # With no per-case data, synthesize one record from the overall verdict.
    rows = testcases or [
        {
            "name": "slwchipverify_run",
            "status": "pass" if report.get("result") == "pass" else "fail",
            "message": str(report.get("result", "unknown")),
            "details": "",
            "mode": "meta",
            "cycle": None,
            "within": None,
            "matched_cycle": None,
        }
    ]

    with target.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=columns)
        writer.writeheader()
        for row in rows:
            record: dict[str, Any] = {
                "kind": report.get("kind", "unknown"),
                "top": report.get("top", "dut"),
            }
            # Remaining columns come straight from the testcase record.
            for key in columns[2:]:
                record[key] = row.get(key, "")
            writer.writerow(record)

    print(f"{LOG_PREFIX} CSV report written: {target}")
def main() -> int:
    """CLI entry point: dispatch the parsed sub-command, map errors to exit codes."""

    args = parse_args()

    try:
        if args.command != "run":
            raise ChipVerifyError(f"Unknown command: {args.command}")
        return verify_run(args)
    except ChipVerifyError as exc:
        # Domain-level errors are reported on stderr with a stable exit code.
        print(f"{LOG_PREFIX} ERROR: {exc}", file=sys.stderr)
        return 2
    except subprocess.TimeoutExpired as exc:
        # An external tool exceeded the configured timeout budget.
        print(f"{LOG_PREFIX} ERROR: command timeout: {exc}", file=sys.stderr)
        return 2
if __name__ == "__main__":
    # SystemExit turns main()'s integer return value into the process exit code.
    raise SystemExit(main())