Skip to content

Analyzer

provide.foundation.file.quality.analyzer

Quality analyzer for file operation detection.

Classes

QualityAnalyzer

QualityAnalyzer(detector: OperationDetector | None = None)

Analyzer for measuring file operation detection quality.

Initialize the quality analyzer.

Parameters:

Name Type Description Default
detector OperationDetector | None

Operation detector to analyze. If None, creates default.

None
Source code in provide/foundation/file/quality/analyzer.py
def __init__(self, detector: OperationDetector | None = None) -> None:
    """Set up the analyzer around an operation detector.

    Args:
        detector: Detector whose output quality will be measured.
            When None, a default OperationDetector is constructed.

    Raises:
        ImportError: If the file operations module is unavailable.
    """
    # Fail fast: nothing below is meaningful without the operations module.
    if not HAS_OPERATIONS_MODULE:
        raise ImportError("File operations module not available")

    self.detector = detector or OperationDetector()
    # Scenarios queued for analysis and the accumulated per-metric results.
    self.scenarios: list[OperationScenario] = []
    self.results: list[QualityResult] = []
Functions
add_scenario
add_scenario(scenario: OperationScenario) -> None

Add a scenario for analysis.

Parameters:

Name Type Description Default
scenario OperationScenario

Scenario to add

required
Source code in provide/foundation/file/quality/analyzer.py
def add_scenario(self, scenario: OperationScenario) -> None:
    """Queue a scenario so subsequent analysis runs include it.

    Args:
        scenario: The operation scenario to record.
    """
    self.scenarios.append(scenario)
generate_report
generate_report(
    results: (
        dict[AnalysisMetric, QualityResult] | None
    ) = None,
) -> str

Generate a quality analysis report.

Parameters:

Name Type Description Default
results dict[AnalysisMetric, QualityResult] | None

Results to include in report. If None, uses latest results.

None

Returns:

Type Description
str

Formatted report string

Source code in provide/foundation/file/quality/analyzer.py
def generate_report(self, results: dict[AnalysisMetric, QualityResult] | None = None) -> str:
    """Generate a quality analysis report.

    Args:
        results: Results to include in the report. When None, the most
            recent result recorded for each metric is used instead.

    Returns:
        Formatted report string
    """
    if results is None:
        # Walk newest-to-oldest so the first entry seen per metric is
        # the latest one; setdefault keeps that first entry.
        latest: dict = {}
        for past in reversed(self.results):
            latest.setdefault(past.metric, past)
        results = latest

    if not results:
        return "No analysis results available."

    lines = [
        "File Operation Detection Quality Report",
        "=" * 45,
        f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        f"Scenarios: {len(self.scenarios)}",
        "",
        "Metrics:",
    ]

    for metric, result in results.items():
        lines.append(f"  {metric.value.replace('_', ' ').title()}: {result.value:.3f}")

        # Selected metrics get an extra detail line beneath the headline value.
        details = result.details
        if metric == AnalysisMetric.ACCURACY and "percentage" in details:
            lines.append(f"    ({details['percentage']:.1f}%)")
        elif metric == AnalysisMetric.DETECTION_TIME and "average_ms" in details:
            lines.append(
                f"    (avg: {details['average_ms']:.2f}ms, p95: {details.get('p95_ms', 0):.2f}ms)"
            )
        elif metric == AnalysisMetric.CONFIDENCE_DISTRIBUTION and "by_type" in details:
            lines.append("    By operation type:")
            for op_type, stats in details["by_type"].items():
                lines.append(f"      {op_type}: {stats['average']:.3f} (count: {stats['count']})")

    return "\n".join(lines)
run_analysis
run_analysis(
    metrics: list[AnalysisMetric] | None = None,
) -> dict[AnalysisMetric, QualityResult]

Run quality analysis on all scenarios.

Parameters:

Name Type Description Default
metrics list[AnalysisMetric] | None

Metrics to analyze. If None, runs all metrics.

None

Returns:

Type Description
dict[AnalysisMetric, QualityResult]

Dictionary mapping metrics to their results

Source code in provide/foundation/file/quality/analyzer.py
def run_analysis(self, metrics: list[AnalysisMetric] | None = None) -> dict[AnalysisMetric, QualityResult]:
    """Run quality analysis on all scenarios.

    Args:
        metrics: Metrics to analyze. When None, every member of
            AnalysisMetric is evaluated.

    Returns:
        Dictionary mapping metrics to their results

    Raises:
        ValueError: If no scenarios have been added yet.
    """
    if not self.scenarios:
        raise ValueError("No scenarios available for analysis")

    chosen = list(AnalysisMetric) if metrics is None else metrics

    # Gather per-scenario detection outcomes and timings once, then derive
    # every requested metric from that shared data.
    detections, timings = self._collect_detection_data()
    outcome = self._calculate_metrics(chosen, detections, timings)

    # Keep a running history so generate_report() can pick latest results.
    self.results.extend(outcome.values())
    return outcome