Skip to content

Reporting

The reporting module provides flexible report generation in multiple formats: JSON, Markdown, and HTML.

ReportGenerator

The ReportGenerator class is the main interface for converting evaluation results into human-readable reports.

from truthfulness_evaluator.reporting import ReportGenerator
from truthfulness_evaluator.models import TruthfulnessReport

# Create a generator with your evaluation report
generator = ReportGenerator(report)

# Generate report in different formats
json_output = generator.to_json()
markdown_output = generator.to_markdown()
html_output = generator.to_html()

# Save to file (format auto-detected from extension)
generator.save("report.md")
generator.save("report.json")
generator.save("report.html")

Output Formats

JSON

Generates a structured JSON representation of the entire report, including all claims, verifications, evidence, and statistics.

json_report = generator.to_json(indent=2)

Markdown

Creates a human-readable Markdown report with:

- Executive summary with grade and confidence
- Statistics table
- Detailed claims with verdicts and evidence
- Unvalidated claims list

Perfect for documentation, email sharing, or integration with knowledge bases.

markdown_report = generator.to_markdown()

HTML

Generates a styled, interactive HTML report with:

- Professional styling with responsive design
- Colored grade badges (A, B, C, D, F)
- Evidence summary with relevance scores
- Model vote breakdowns
- Unvalidated claims section

Great for web integration or email distribution.

html_report = generator.to_html()

Quick Functions

For simple use cases, use the standalone generate_report() function:

from truthfulness_evaluator.reporting import generate_report

# Generate report in desired format
json_str = generate_report(report, format="json")
md_str = generate_report(report, format="markdown")
html_str = generate_report(report, format="html")

API Reference

truthfulness_evaluator.reporting.ReportGenerator

Generate reports in various formats.

Source code in src/truthfulness_evaluator/reporting/generator.py
class ReportGenerator:
    """Render a TruthfulnessReport as JSON, Markdown, or HTML.

    The generator is a thin, stateless wrapper around a single report
    instance; each ``to_*`` method re-renders the report from scratch.
    """

    def __init__(self, report: TruthfulnessReport):
        self.report = report

    def to_json(self, indent: int = 2) -> str:
        """Serialize the full report to a JSON string.

        Args:
            indent: Number of spaces used to pretty-print the output.

        Returns:
            The report serialized via pydantic's ``model_dump_json``.
        """
        return self.report.model_dump_json(indent=indent)

    def to_markdown(self) -> str:
        """Generate a human-readable Markdown report.

        Returns:
            A Markdown document with a summary table, per-claim details
            (verdict, confidence, model votes, evidence, explanation),
            and a trailing list of unvalidated claims.
        """
        lines: list[str] = []

        # Header
        lines.append("# Truthfulness Evaluation Report")
        lines.append("")
        lines.append(f"**Document:** {self.report.source_document}")
        lines.append(f"**Date:** {datetime.now().strftime('%Y-%m-%d %H:%M')}")
        lines.append("")

        # Summary
        lines.append("## Summary")
        lines.append("")
        lines.append("| Metric | Value |")
        lines.append("|--------|-------|")
        lines.append(f"| **Grade** | {self.report.overall_grade or 'N/A'} |")
        lines.append(f"| **Confidence** | {self.report.overall_confidence:.1%} |")
        lines.append(f"| **Total Claims** | {self.report.statistics.total_claims} |")
        lines.append(f"| **Supported** | {self.report.statistics.supported} |")
        lines.append(f"| **Refuted** | {self.report.statistics.refuted} |")
        lines.append(f"| **Not Enough Info** | {self.report.statistics.not_enough_info} |")
        lines.append("")

        if self.report.summary:
            lines.append(f"**Summary:** {self.report.summary}")
            lines.append("")

        # Detailed Results
        lines.append("## Detailed Results")
        lines.append("")

        # Index claims once instead of a linear scan per verification.
        # Built from reversed() so that, should duplicate ids ever exist,
        # the FIRST claim wins — matching the previous next() behavior.
        claims_by_id = {c.id: c for c in reversed(self.report.claims)}

        for verification in self.report.verifications:
            claim = claims_by_id.get(verification.claim_id)
            if not claim:
                continue

            # Claim header with verdict emoji ("❓" for unknown verdicts)
            emoji = {"SUPPORTS": "✅", "REFUTES": "❌", "NOT_ENOUGH_INFO": "⚠️"}.get(
                verification.verdict, "❓"
            )

            lines.append(f"### {emoji} {claim.text}")
            lines.append("")

            # Verdict details
            lines.append(f"**Verdict:** {verification.verdict}")
            lines.append(f"**Confidence:** {verification.confidence:.1%}")
            lines.append("")

            # Model votes (model name shortened to its first dash-separated token)
            if verification.model_votes:
                lines.append("**Model Votes:**")
                for model, verdict in verification.model_votes.items():
                    model_short = model.split("-")[0]
                    lines.append(f"- {model_short}: {verdict}")
                lines.append("")

            # Evidence: at most the top 3 items, paths shortened to basenames
            if verification.evidence:
                lines.append("**Evidence:**")
                for ev in verification.evidence[:3]:
                    source = Path(ev.source).name if "/" in ev.source else ev.source
                    lines.append(f"- {source} (relevance: {ev.relevance_score:.0%})")
                lines.append("")

            # Explanation, truncated to 500 characters
            if verification.explanation:
                lines.append("**Explanation:**")
                lines.append(f"> {verification.explanation[:500]}")
                if len(verification.explanation) > 500:
                    lines.append("> ...")
                lines.append("")

            lines.append("---")
            lines.append("")

        # Unvalidated claims
        if self.report.unvalidated_claims:
            lines.append("## Unvalidated Claims")
            lines.append("")
            for claim in self.report.unvalidated_claims:
                lines.append(f"- {claim.text}")
            lines.append("")

        return "\n".join(lines)

    def to_html(self) -> str:
        """Generate a styled HTML report via the Jinja2 template.

        Returns:
            The rendered ``report.html.j2`` template.
        """
        env = _setup_jinja_env()
        template = env.get_template("report.html.j2")

        context = {
            "report": self.report,
            "generated_date": datetime.now().strftime("%Y-%m-%d %H:%M"),
        }

        return template.render(context)

    def save(self, path: str, format: str = "auto") -> None:
        """Save the report to a file.

        Args:
            path: Destination file path.
            format: "json", "markdown", "html", or "auto" to detect the
                format from the file extension (case-insensitive; unknown
                extensions fall back to Markdown).
        """
        output_path = Path(path)

        # Auto-detect format from extension. Compared case-insensitively
        # so that e.g. "report.JSON" is not silently written as Markdown.
        if format == "auto":
            suffix = output_path.suffix.lower()
            if suffix == ".json":
                format = "json"
            elif suffix in (".md", ".markdown"):
                format = "markdown"
            elif suffix == ".html":
                format = "html"
            else:
                format = "markdown"  # Default

        # Generate content
        if format == "json":
            content = self.to_json()
        elif format == "html":
            content = self.to_html()
        else:  # markdown
            content = self.to_markdown()

        # Write file
        output_path.write_text(content, encoding="utf-8")

save(path, format='auto')

Save report to file.

Source code in src/truthfulness_evaluator/reporting/generator.py
def save(self, path: str, format: str = "auto") -> None:
    """Save the report to a file.

    Args:
        path: Destination file path.
        format: "json", "markdown", "html", or "auto" to detect the format
            from the file extension (case-insensitive; unknown extensions
            fall back to Markdown).
    """
    output_path = Path(path)

    # Auto-detect format from extension. Compared case-insensitively so
    # that e.g. "report.JSON" is not silently written as Markdown.
    if format == "auto":
        suffix = output_path.suffix.lower()
        if suffix == ".json":
            format = "json"
        elif suffix in (".md", ".markdown"):
            format = "markdown"
        elif suffix == ".html":
            format = "html"
        else:
            format = "markdown"  # Default

    # Generate content
    if format == "json":
        content = self.to_json()
    elif format == "html":
        content = self.to_html()
    else:  # markdown
        content = self.to_markdown()

    # Write file
    output_path.write_text(content, encoding="utf-8")

to_html()

Generate HTML report with proper styling.

Source code in src/truthfulness_evaluator/reporting/generator.py
def to_html(self) -> str:
    """Render the report as a styled HTML page via the Jinja2 template."""
    environment = _setup_jinja_env()
    page = environment.get_template("report.html.j2")

    # Template receives the raw report plus a render timestamp.
    return page.render(
        {
            "report": self.report,
            "generated_date": datetime.now().strftime("%Y-%m-%d %H:%M"),
        }
    )

to_json(indent=2)

Generate JSON report.

Source code in src/truthfulness_evaluator/reporting/generator.py
def to_json(self, indent: int = 2) -> str:
    """Serialize the report to pretty-printed JSON."""
    # Serialization is delegated entirely to the pydantic model.
    report = self.report
    return report.model_dump_json(indent=indent)

to_markdown()

Generate Markdown report.

Source code in src/truthfulness_evaluator/reporting/generator.py
def to_markdown(self) -> str:
    """Render the report as a human-readable Markdown document."""
    report = self.report
    out: list = []
    emit = out.append

    # --- Header ------------------------------------------------------
    emit("# Truthfulness Evaluation Report")
    emit("")
    emit(f"**Document:** {report.source_document}")
    emit(f"**Date:** {datetime.now().strftime('%Y-%m-%d %H:%M')}")
    emit("")

    # --- Summary table -----------------------------------------------
    stats = report.statistics
    emit("## Summary")
    emit("")
    emit("| Metric | Value |")
    emit("|--------|-------|")
    emit(f"| **Grade** | {report.overall_grade or 'N/A'} |")
    emit(f"| **Confidence** | {report.overall_confidence:.1%} |")
    emit(f"| **Total Claims** | {stats.total_claims} |")
    emit(f"| **Supported** | {stats.supported} |")
    emit(f"| **Refuted** | {stats.refuted} |")
    emit(f"| **Not Enough Info** | {stats.not_enough_info} |")
    emit("")

    if report.summary:
        emit(f"**Summary:** {report.summary}")
        emit("")

    # --- Per-claim details -------------------------------------------
    emit("## Detailed Results")
    emit("")

    verdict_icons = {"SUPPORTS": "✅", "REFUTES": "❌", "NOT_ENOUGH_INFO": "⚠️"}

    for verification in report.verifications:
        # Verifications whose claim id has no matching claim are skipped.
        claim = next((c for c in report.claims if c.id == verification.claim_id), None)
        if not claim:
            continue

        icon = verdict_icons.get(verification.verdict, "❓")
        emit(f"### {icon} {claim.text}")
        emit("")

        emit(f"**Verdict:** {verification.verdict}")
        emit(f"**Confidence:** {verification.confidence:.1%}")
        emit("")

        if verification.model_votes:
            emit("**Model Votes:**")
            for model_name, vote in verification.model_votes.items():
                # Shorten model names to their first dash-separated token.
                emit(f"- {model_name.split('-')[0]}: {vote}")
            emit("")

        if verification.evidence:
            emit("**Evidence:**")
            for ev in verification.evidence[:3]:  # only the top three
                shown = Path(ev.source).name if "/" in ev.source else ev.source
                emit(f"- {shown} (relevance: {ev.relevance_score:.0%})")
            emit("")

        if verification.explanation:
            text = verification.explanation
            emit("**Explanation:**")
            emit(f"> {text[:500]}")
            if len(text) > 500:
                emit("> ...")
            emit("")

        emit("---")
        emit("")

    # --- Claims that could not be validated --------------------------
    if report.unvalidated_claims:
        emit("## Unvalidated Claims")
        emit("")
        for claim in report.unvalidated_claims:
            emit(f"- {claim.text}")
        emit("")

    return "\n".join(out)

truthfulness_evaluator.reporting.generate_report(report, format='markdown')

Quick report generation.

Source code in src/truthfulness_evaluator/reporting/generator.py
def generate_report(report: TruthfulnessReport, format: str = "markdown") -> str:
    """One-shot helper: build a ReportGenerator and render in *format*."""
    generator = ReportGenerator(report)

    # Guard clauses for the two explicit formats; anything else is Markdown.
    if format == "json":
        return generator.to_json()
    if format == "html":
        return generator.to_html()
    return generator.to_markdown()