API reference for the `contraqctor.qc` module.

qc

ContextExportableObj

ContextExportableObj(obj: TExportable)

Bases: Generic[TExportable]

Container for exportable objects in test contexts.

Provides a standardized way to include exportable objects (like figures or reports) in test result contexts, allowing them to be properly handled by reporting tools.

Attributes:

Name Type Description
_obj

The exportable object being wrapped.

Examples:

import matplotlib.pyplot as plt
from contraqctor.qc._context_extensions import ContextExportableObj
from contraqctor.qc.base import Suite

class VisualizationTestSuite(Suite):
    def test_create_plot(self):
        # Create a matplotlib figure
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3], [4, 5, 6])
        ax.set_title("Test Plot")

        # Add the figure to the test context
        context = ContextExportableObj.as_context(fig)

        # Return test result with the figure in context
        return self.pass_test(True, "Plot created successfully", context=context)

Initialize the context exportable object container.

Parameters:

Name Type Description Default
obj TExportable

The object to wrap for export.

required
Source code in src/contraqctor/qc/_context_extensions.py
39
40
41
42
43
44
45
def __init__(self, obj: TExportable) -> None:
    """Wrap *obj* so it can be exported from a test context.

    Args:
        obj: The exportable object to store on the container.
    """
    # Stored privately; exposed read-only via the ``asset`` property.
    self._obj = obj

asset property

asset: TExportable

Get the wrapped exportable object.

Returns:

Name Type Description
TExportable TExportable

The wrapped object.

asset_type property

asset_type: Type

Get the type of the wrapped asset.

Returns:

Name Type Description
Type Type

Type of the wrapped object.

as_context classmethod

as_context(
    asset: TExportable,
) -> Dict[str, ContextExportableObj[TExportable]]

Create a standardized context dictionary for the exportable object.

This method wraps the provided asset in a ContextExportableObj and includes it in a dictionary under a reserved keyword. This allows for consistent handling of exportable objects in test result contexts.

Parameters:

Name Type Description Default
asset TExportable

The object to wrap and include in the context.

required

Returns:

Type Description
Dict[str, ContextExportableObj[TExportable]]

A dictionary containing the wrapped asset under the reserved key.

Examples:

import matplotlib.pyplot as plt
from contraqctor.qc._context_extensions import ContextExportableObj

# Create a visualization
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [4, 5, 6])

# Create a context dictionary with the figure
context = ContextExportableObj.as_context(fig)

# The context can now be passed to test result methods
return self.pass_test(True, "Analysis succeeded", context=context)
Source code in src/contraqctor/qc/_context_extensions.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
@classmethod
def as_context(cls, asset: TExportable) -> t.Dict[str, "ContextExportableObj[TExportable]"]:
    """Create a standardized context dictionary for the exportable object.

    This method wraps the provided asset in a `ContextExportableObj` (or the
    calling subclass) and includes it in a dictionary under a reserved
    keyword. This allows for consistent handling of exportable objects in
    test result contexts.

    Args:
        asset: The object to wrap and include in the context.

    Returns:
        Dict[str, ContextExportableObj]: A dictionary containing the wrapped
        asset under the reserved key.

    Examples:
        ```python
        import matplotlib.pyplot as plt
        from contraqctor.qc._context_extensions import ContextExportableObj

        # Create a visualization
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3], [4, 5, 6])

        # Create a context dictionary with the figure
        context = ContextExportableObj.as_context(fig)

        # The context can now be passed to test result methods
        return self.pass_test(True, "Analysis succeeded", context=context)
        ```
    """
    # FIX: the first parameter of a classmethod receives the class, so it is
    # named ``cls`` (it was previously misleadingly named ``self``). Using
    # ``cls(asset)`` instead of hard-coding ContextExportableObj also lets
    # subclasses wrap assets in instances of themselves.
    return {ASSET_RESERVED_KEYWORD: cls(asset)}

Result dataclass

Result(
    status: Status,
    result: TResult,
    test_name: str,
    suite_name: str,
    message: Optional[str] = None,
    context: Optional[Any] = None,
    description: Optional[str] = None,
    exception: Optional[Exception] = None,
    traceback: Optional[str] = None,
    test_reference: Optional[ITest] = None,
    suite_reference: Optional[Suite] = None,
)

Bases: Generic[TResult]

Container for test execution results.

Stores the outcome of a test execution including status, returned value, contextual information, and any exception details.

Attributes:

Name Type Description
status Status

The status of the test execution.

result TResult

The value returned by the test.

test_name str

Name of the test that generated this result.

suite_name str

Name of the test suite containing the test.

message Optional[str]

Optional message describing the test outcome.

context Optional[Any]

Optional contextual data for the test result.

description Optional[str]

Optional description of the test.

exception Optional[Exception]

Optional exception that occurred during test execution.

traceback Optional[str]

Optional traceback string if an exception occurred.

test_reference Optional[ITest]

Optional reference to the test function.

suite_reference Optional[Suite]

Optional reference to the suite that ran this test.

ResultsStatistics dataclass

ResultsStatistics(
    passed: int,
    failed: int,
    error: int,
    skipped: int,
    warnings: int,
)

Statistics about test results.

Aggregates counts of test results by status and provides methods for calculating statistics like pass rate.

Attributes:

Name Type Description
passed int

Number of passed tests.

failed int

Number of failed tests.

error int

Number of tests that produced errors.

skipped int

Number of skipped tests.

warnings int

Number of tests with warnings.

total property

total: int

Get the total number of tests.

Returns:

Name Type Description
int int

Sum of all test result counts.

pass_rate property

pass_rate: float

Calculate the pass rate.

Returns:

Name Type Description
float float

Ratio of passed tests to total tests, or 0 if no tests.

get_status_summary

get_status_summary() -> str

Generate a compact string summary of result counts.

Returns:

Name Type Description
str str

Summary string with counts for each status type.

Source code in src/contraqctor/qc/base.py
840
841
842
843
844
845
846
def get_status_summary(self) -> str:
    """Generate a compact string summary of result counts.

    Returns:
        str: Summary string with counts for each status type,
        e.g. ``P:3 F:0 E:0 S:1 W:2``.
    """
    # One (label, status) pair per tracked status, in display order.
    labelled = (
        ("P", Status.PASSED),
        ("F", Status.FAILED),
        ("E", Status.ERROR),
        ("S", Status.SKIPPED),
        ("W", Status.WARNING),
    )
    return " ".join(f"{label}:{self[status]}" for label, status in labelled)

from_results classmethod

from_results(results: List[Result]) -> ResultsStatistics

Create statistics from a list of test results.

Parameters:

Name Type Description Default
results List[Result]

List of test results to analyze.

required

Returns:

Name Type Description
ResultsStatistics ResultsStatistics

Statistics object summarizing the results.

Source code in src/contraqctor/qc/base.py
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
@classmethod
def from_results(cls, results: t.List[Result]) -> "ResultsStatistics":
    """Build a statistics summary from a list of test results.

    Args:
        results: List of test results to analyze.

    Returns:
        ResultsStatistics: Statistics object summarizing the results.
    """

    def count_of(status: Status) -> int:
        # bool is an int subclass, so summing comparisons counts matches.
        return sum(r.status == status for r in results)

    return cls(
        passed=count_of(Status.PASSED),
        failed=count_of(Status.FAILED),
        error=count_of(Status.ERROR),
        skipped=count_of(Status.SKIPPED),
        warnings=count_of(Status.WARNING),
    )

Runner

Runner(console: Optional[Console] = None)

Test runner for executing suites and reporting results.

Handles executing test suites, collecting results, and generating reports.

Attributes:

Name Type Description
suites Dict[Optional[str], List[Suite]]

Dictionary mapping group names to lists of test suites.

_results Optional[List[_TaggedResult]]

Optional list of tagged test results collected by the most recent run.

Examples:

from contraqctor.qc.base import Runner

# Create test suites
suite1 = MyTestSuite(component1)
suite2 = AnotherTestSuite(component2)
suite3 = YetAnotherTestSuite(component2)

# Create runner and add suites with group names
runner = Runner()
runner.add_suite(suite1, "Component Tests")
runner.add_suite(suite2, "Integration Tests")
runner.add_suite(suite3, "Integration Tests")

# Run all tests with progress display
results = runner.run_all_with_progress()

# Access results by group
component_results = results["Component Tests"]

Initialize the test runner.

Parameters:

Name Type Description Default
console Optional[Console]

Optional rich Console instance for progress display.

None
Source code in src/contraqctor/qc/base.py
1043
1044
1045
1046
1047
1048
1049
1050
1051
def __init__(self, console: t.Optional[Console] = None):
    """Initialize the test runner.

    Args:
        console: Optional rich Console instance for progress display;
            a fresh Console is created when none is provided.
    """
    self._console = console or Console()
    # Group name (or None for ungrouped) -> suites registered under it.
    self.suites: t.Dict[t.Optional[str], t.List[Suite]] = {}
    # Populated by run_all / run_all_with_progress; None until a run happens.
    self._results: t.Optional[t.List[_TaggedResult]] = None

add_suite

add_suite(suite: Suite) -> Self
add_suite(suite: Suite, group: str) -> Self
add_suite(
    suite: Suite, group: Optional[str] = None
) -> Self

Add a test suite to the runner.

Parameters:

Name Type Description Default
suite Suite

Test suite to add.

required
group Optional[str]

Optional group name for organizing suites. Defaults to None.

None

Returns:

Name Type Description
Runner Self

Self for method chaining.

Examples:

runner = Runner()

# Add a suite without a group
runner.add_suite(BasicSuite())

# Add suites with named groups for organization
runner.add_suite(DataSuite(), "Data Validation")
runner.add_suite(VisualizationSuite(), "Data Validation")
runner.add_suite(ApiSuite(), "API Tests")
Source code in src/contraqctor/qc/base.py
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
def add_suite(self, suite: Suite, group: t.Optional[str] = None) -> t.Self:
    """Register a test suite with the runner.

    Args:
        suite: Test suite to add.
        group: Optional group name used to organize suites. Defaults to None.

    Returns:
        Runner: Self, enabling method chaining.

    Examples:
        ```python
        runner = Runner()

        # Add a suite without a group
        runner.add_suite(BasicSuite())

        # Add suites with named groups for organization
        runner.add_suite(DataSuite(), "Data Validation")
        runner.add_suite(VisualizationSuite(), "Data Validation")
        runner.add_suite(ApiSuite(), "API Tests")
        ```
    """
    # Registration bookkeeping is delegated to the internal helper.
    self._update_suites(suite, group)
    return self

run_all

run_all() -> Dict[Optional[str], List[Result]]

Run all tests in all suites without progress display.

Executes all tests and collects results without visual progress reporting.

Returns:

Type Description
Dict[Optional[str], List[Result]]

Dict[Optional[str], List[Result]]: Results grouped by test group name.

Source code in src/contraqctor/qc/base.py
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
def run_all(self) -> t.Dict[t.Optional[str], t.List[Result]]:
    """Run all tests in all suites without progress display.

    Executes every collected test, stores the tagged results on the
    runner, and returns the plain results grouped by test group name.

    Returns:
        Dict[Optional[str], List[Result]]: Results grouped by test group name.
    """
    tagged_tests = self._collect_tests()
    tagged_results: t.List[_TaggedResult] = []

    for group, group_tests in _TaggedTest.group_by_group(tagged_tests):
        for suite, suite_tests in _TaggedTest.group_by_suite(group_tests):
            for tagged_test in suite_tests:
                # run_test may yield several Result objects per test method.
                for result in suite.run_test(tagged_test.test):
                    tagged_results.append(
                        _TaggedResult(suite=suite, group=group, result=result, test=result.test_reference)
                    )

    self._results = tagged_results

    # Strip the tags and hand back {group: [Result, ...]}.
    return {
        group: [tagged.result for tagged in grouped]
        for group, grouped in _TaggedResult.group_by_group(tagged_results)
    }

run_all_with_progress

run_all_with_progress(
    *,
    reporter: Optional[Reporter] = None,
    **reporter_kwargs: Any,
) -> Dict[Optional[str], List[Result]]

Run all tests in all suites with a rich progress display.

Executes all tests with a visual progress bar and detailed reporting of test outcomes.

Parameters:

Name Type Description Default
reporter Optional[Reporter]

Optional reporter to use for output. If None, uses ConsoleReporter.

None

Returns:

Type Description
Dict[Optional[str], List[Result]]

Dict[Optional[str], List[Result]]: Results grouped by test group name.

Examples:

from contraqctor.qc.base import Runner
from contraqctor.qc.reporters import ConsoleReporter, HtmlReporter

runner = Runner()
runner.add_suite(DataValidationSuite(), "Validation")
runner.add_suite(PerformanceSuite(), "Performance")

# Run with default console reporter
results = runner.run_all_with_progress()

# Run with HTML reporter
html_reporter = HtmlReporter("test_report.html")
results = runner.run_all_with_progress(reporter=html_reporter)

# Run with simplified output (no context or traceback)
results = runner.run_all_with_progress(
    render_context=False,
    render_traceback=False
)

# Check if any tests failed
all_passed = all(
    result.status == Status.PASSED
    for group_results in results.values()
    for result in group_results
)
Source code in src/contraqctor/qc/base.py
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
def run_all_with_progress(
    self,
    *,
    reporter: t.Optional["Reporter"] = None,
    **reporter_kwargs: t.Any,
) -> t.Dict[t.Optional[str], t.List[Result]]:
    """Run all tests in all suites with a rich progress display.

    Executes all tests with a visual progress bar and detailed reporting
    of test outcomes.

    Args:
        reporter: Optional reporter to use for output. If None, uses ConsoleReporter.

    Returns:
        Dict[Optional[str], List[Result]]: Results grouped by test group name.

    Examples:
        ```python
        from contraqctor.qc.base import Runner
        from contraqctor.qc.reporters import ConsoleReporter, HtmlReporter

        runner = Runner()
        runner.add_suite(DataValidationSuite(), "Validation")
        runner.add_suite(PerformanceSuite(), "Performance")

        # Run with default console reporter
        results = runner.run_all_with_progress()

        # Run with HTML reporter
        html_reporter = HtmlReporter("test_report.html")
        results = runner.run_all_with_progress(reporter=html_reporter)

        # Run with simplified output (no context or traceback)
        results = runner.run_all_with_progress(
            render_context=False,
            render_traceback=False
        )

        # Check if any tests failed
        all_passed = all(
            result.status == Status.PASSED
            for group_results in results.values()
            for result in group_results
        )
        ```
    """
    # NOTE(review): local import — presumably avoids a circular import
    # between base and reporters; confirm before hoisting to module level.
    from contraqctor.qc.reporters import ConsoleReporter

    if reporter is None:
        reporter = ConsoleReporter(console=self._console)

    collected_tests = self._collect_tests()
    total_test_count = len(collected_tests)

    # Column sizing: the widest suite or group label drives the fixed-width
    # description column so the progress bars line up.
    suite_name_lengths = [len(suite.name) for suite, _ in _TaggedTest.group_by_suite(collected_tests)]
    # "+ 2" accounts for the square brackets added around group titles below.
    group_lengths = [
        len(group) + 2 for group, _ in _TaggedTest.group_by_group(collected_tests) if group is not None
    ]
    full_name_width = max(suite_name_lengths + group_lengths) if suite_name_lengths else 10
    test_name_width = 20
    bar_width = 20

    progress_format = [
        f"[progress.description]{{task.description:<{full_name_width + test_name_width + 5}}}",
        rich.progress.BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        "•",
        rich.progress.TimeElapsedColumn(),
    ]

    with rich.progress.Progress(*progress_format, console=self._console) as progress:
        total_task = progress.add_task(
            "[bold green]TOTAL PROGRESS".ljust(full_name_width + test_name_width + 5), total=total_test_count
        )

        collected_results: t.List[_TaggedResult] = []
        for group, tests_in_group in _TaggedTest.group_by_group(collected_tests):
            # Escape so literal brackets in the title are not parsed as rich markup.
            _title = (
                rich.markup.escape(f"[{group}]") if group else rich.markup.escape(f"[{self._DEFAULT_TEST_GROUP}]")
            )
            group_task = progress.add_task(
                f"[honeydew2]{_title}".ljust(full_name_width + test_name_width + 5),
                total=len(tests_in_group),
            )
            for suite, tests_in_suite in _TaggedTest.group_by_suite(tests_in_group):
                # NOTE(review): the comprehension variable ``t`` below shadows
                # the module's typing alias ``t`` within this expression only.
                results = self._run_suite_tests(
                    progress,
                    suite,
                    [t.test for t in tests_in_suite],
                    full_name_width,
                    test_name_width,
                    total_task,
                    group_task,
                )
                for result in results:
                    collected_results.append(
                        _TaggedResult(suite=suite, group=group, result=result, test=result.test_reference)
                    )

            # After a group finishes, replace its task line with a summary bar.
            if len(_TaggedResult.get_by_group(collected_results, group)) > 0:
                group_results = [
                    tagged_result.result for tagged_result in _TaggedResult.get_by_group(collected_results, group)
                ]
                group_stats = ResultsStatistics.from_results(group_results)
                group_status_bar = self._render_status_bar(group_stats, bar_width)
                # _rich_unscape presumably strips rich markup so the padding is
                # computed from the displayed width — confirm against its impl.
                padding_width = max(0, full_name_width - len(self._rich_unscape(_title)))
                group_line = f"[honeydew2]{_title}{' ' * padding_width} | {group_status_bar} | {group_stats.get_status_summary()}"
                progress.update(group_task, description=group_line)

        # Overall summary line replaces the TOTAL PROGRESS task description.
        if total_test_count > 0:
            total_stats = ResultsStatistics.from_results(
                [tagged_result.result for tagged_result in collected_results]
            )
            total_status_bar = self._render_status_bar(total_stats, bar_width)

            _title = "TOTAL PROGRESS"
            padding_width = max(0, full_name_width - len(_title))
            total_line = f"[bold green]{_title}{' ' * padding_width} | {total_status_bar} | {total_stats.get_status_summary()}"
            progress.update(total_task, description=total_line)

    # Persist tagged results, then hand them to the reporter (if any results).
    self._results = collected_results
    if self._results:
        reporter.report_results(
            self._results,
            **reporter_kwargs,
        )

    # Strip tags back to {group: [Result, ...]} for the caller.
    out: t.Dict[t.Optional[str], t.List[Result]] = {}
    for group, grouped_results in _TaggedResult.group_by_group(collected_results):
        out[group] = [tagged_result.result for tagged_result in grouped_results]
    return out

Status

Bases: Enum

Enum representing possible test result statuses.

Defines the different states a test can be in after execution.

Suite

Bases: ABC

Base class for test suites.

Provides the core functionality for defining, running, and reporting on tests. All test suites should inherit from this class and implement test methods that start with 'test'.

Examples:

from contraqctor.qc.base import Suite

class MyTestSuite(Suite):
    """Test suite for validating my component."""

    def __init__(self, component):
        self.component = component

    def test_has_required_property(self):
        if hasattr(self.component, "required_property"):
            return self.pass_test(True, "Component has required property")
        else:
            return self.fail_test(False, "Component is missing required property")

    def test_performs_calculation(self):
        try:
            result = self.component.calculate(10)
            if result == 20:
                return self.pass_test(result, "Calculation correct")
            else:
                return self.fail_test(result, f"Expected 20 but got {result}")
        except Exception as e:
            return self.fail_test(None, f"Calculation failed: {str(e)}")

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

Type Description
Optional[str]

Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

Name Type Description
str str

The name of the test suite class.

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

Name Type Description
ITest ITest

Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
323
324
325
326
327
328
329
330
331
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Yield every bound method on the suite whose name starts with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    members = inspect.getmembers(self, predicate=inspect.ismethod)
    yield from (method for method_name, method in members if method_name.startswith("test"))

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test passed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a passing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    # Inspect the caller so the result records the test method's name and docstring.
    test_name, test_description = self._get_caller_info()

    return Result(
        status=Status.PASSED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing the warning.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a warning test result.

    Produces a WARNING result unless warnings have been elevated,
    in which case the result is FAILED instead.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    test_name, test_description = self._get_caller_info()

    # Elevation turns warnings into failures (e.g. for strict runs).
    status = Status.FAILED if _elevate_warning.get() else Status.WARNING
    return Result(
        status=status,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

Name Type Description Default
result Optional[Any]

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test failed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a failing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    # Inspect the caller so the result records the test method's name and docstring.
    test_name, test_description = self._get_caller_info()

    return Result(
        status=Status.FAILED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

Name Type Description Default
message Optional[str]

Optional message explaining why the test was skipped.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Create a skipped test result.

    Produces a SKIPPED result unless skips have been elevated,
    in which case the result is FAILED instead.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation turns skips into failures (e.g. for strict runs).
    status = Status.FAILED if _elevate_skippable.get() else Status.SKIPPED
    return Result(
        status=status,
        result=None,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
687
688
689
690
691
692
693
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
695
696
697
698
699
700
701
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    pass

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

Name Type Description Default
test_method ITest

The test method to run.

required

Yields:

Name Type Description
Result Result

Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    # The method's docstring doubles as the test description in reports.
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        if inspect.isgenerator(result):
            # Generator-based tests may yield multiple results; normalize each.
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        # Any exception from setup, the test body, or result processing is
        # converted into an ERROR result rather than propagating to the runner.
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        # Teardown always runs. NOTE(review): since this is itself a generator,
        # the finally block executes only once the yielded results have been
        # consumed or the generator is closed — confirm callers drain it.
        self.teardown()

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

Name Type Description
Result Result

Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
787
788
789
790
791
792
793
794
795
796
797
def run_all(self) -> t.Generator[Result, None, None]:
    """Run every test method registered in this suite.

    Discovers the suite's test methods and executes them in order.

    Yields:
        Result: Result objects produced by each test method.
    """
    for test_method in self.get_tests():
        yield from self.run_test(test_method)

ConsoleReporter

ConsoleReporter(
    console: Optional[Console] = None,
    include_status: Set[Status] = frozenset(
        {FAILED, ERROR, WARNING}
    ),
    default_group_name: str = "Ungrouped",
)

Bases: Reporter

Reporter that outputs test results to a rich console.

Parameters:

Name Type Description Default
console Optional[Console]

Optional rich Console instance. If not provided, creates a new one.

None
include_status Set[Status]

Set of statuses to include in detailed output.

frozenset({FAILED, ERROR, WARNING})
default_group_name str

Name to use for ungrouped tests.

'Ungrouped'
Source code in src/contraqctor/qc/reporters.py
72
73
74
75
76
77
78
79
80
def __init__(
    self,
    console: t.Optional[Console] = None,
    include_status: t.Set[Status] = frozenset({Status.FAILED, Status.ERROR, Status.WARNING}),
    default_group_name: str = "Ungrouped",
):
    """Initialize the console reporter.

    Args:
        console: Rich Console to print to; a new one is created when omitted.
        include_status: Statuses to include in detailed output.
        default_group_name: Label used for results without a group.
    """
    self.console = console if console else Console()
    self.include_status = include_status
    self.default_group_name = default_group_name

report_results

report_results(
    results: dict[str | None, list[Result]]
    | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = False,
    asset_output_dir: str | Path = Path("./report/assets"),
    **kwargs,
) -> None

Print detailed test results to the console.

Parameters:

Name Type Description Default
results dict[str | None, list[Result]] | list[_TaggedResult]

List of tagged test results.

required
statistics

Not a parameter of this method (the signature above does not accept it); overall statistics are computed internally from results.

—
render_context bool

Whether to include test context.

True
render_description bool

Whether to include test descriptions.

True
render_traceback bool

Whether to include tracebacks for errors.

True
render_message bool

Whether to include test result messages.

True
serialize_context_exportable_obj bool

Whether to serialize ContextExportableObj instances.

False
asset_output_dir str | Path

Directory for saving serialized assets. Defaults to "./report/assets".

Path('./report/assets')
Source code in src/contraqctor/qc/reporters.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def report_results(
    self,
    results: dict[str | None, list[Result]] | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = False,
    asset_output_dir: str | Path = Path("./report/assets"),
    **kwargs,
) -> None:
    """Print detailed test results to the console.

    Args:
        results: Results grouped by name, or a flat list of tagged results.
        render_context: Whether to include test context.
        render_description: Whether to include test descriptions.
        render_traceback: Whether to include tracebacks for errors.
        render_message: Whether to include test result messages.
        serialize_context_exportable_obj: Whether to serialize ContextExportableObj instances.
        asset_output_dir: Directory for saving serialized assets. Defaults to "./report/assets".
        **kwargs: Ignored; accepted for interface compatibility.
    """
    if not results:
        return

    results = _normalize_results(results)

    # Serialize ALL result contexts up front (not just the displayed ones)
    # so assets exist on disk even for statuses filtered out below.
    serialized_contexts: dict[int, t.Any] = {}
    if serialize_context_exportable_obj:
        serializer = ContextExportableObjSerializer()
        output_dir = Path(asset_output_dir) if asset_output_dir is not None else Path("./report/assets")
        for idx, tagged_result in enumerate(results):
            if tagged_result.result.context is not None:
                serialized_contexts[idx] = serializer.serialize_as_file(
                    tagged_result.result.context, output_dir, f"test_{idx}"
                )

    all_included_results = [
        tagged_result for tagged_result in results if tagged_result.result.status in self.include_status
    ]

    if not all_included_results:
        return

    # Map each tagged result to its original position by identity.
    # The previous `results.index(...)` lookup was O(n) per displayed
    # result and, because list.index compares by equality, could map
    # duplicate results to the wrong serialized asset.
    index_by_id = {id(tagged_result): i for i, tagged_result in enumerate(results)}

    self.console.print()
    self.console.print(f"[bold]contraqctor v{__version__}[/bold]")
    self.console.print(f"[dim]Test run: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}[/dim]")
    self.console.print()
    self._print_status_header(self.include_status)
    self.console.print()

    idx = 0
    for group, test_results in _TaggedResult.group_by_group(all_included_results):
        group_name = group or self.default_group_name
        for tagged in test_results:
            # Identity lookup first; fall back to equality search in case
            # grouping ever returns re-wrapped objects.
            original_idx = index_by_id.get(id(tagged))
            if original_idx is None:
                original_idx = results.index(tagged)
            context = serialized_contexts.get(original_idx, tagged.result.context)

            self._print_test_result(
                tagged.result,
                group_name,
                idx,
                render_message,
                render_description,
                render_traceback,
                render_context,
                context,
            )
            self.console.print()
            idx += 1

HtmlReporter

HtmlReporter(
    output_path: Union[str, Path] = "report.html",
    template_dir: Optional[Union[str, Path]] = None,
    default_group_name: str = "Ungrouped",
    serializer: Optional[
        ContextExportableObjSerializer
    ] = None,
)

Bases: Reporter

Reporter that generates HTML output for test results.

Parameters:

Name Type Description Default
output_path Union[str, Path]

Path where the HTML report should be written.

'report.html'
template_dir Optional[Union[str, Path]]

Optional directory containing custom Jinja2 templates.

None
default_group_name str

Name to use for ungrouped tests.

'Ungrouped'
serializer Optional[ContextExportableObjSerializer]

Optional custom ContextExportableObjSerializer instance.

None
Source code in src/contraqctor/qc/reporters.py
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
def __init__(
    self,
    output_path: t.Union[str, Path] = "report.html",
    template_dir: t.Optional[t.Union[str, Path]] = None,
    default_group_name: str = "Ungrouped",
    serializer: t.Optional[ContextExportableObjSerializer] = None,
):
    """Initialize the HTML reporter.

    Args:
        output_path: Destination file for the rendered report.
        template_dir: Directory holding custom Jinja2 templates; falls back
            to the package's bundled ``templates`` directory.
        default_group_name: Label used for results without a group.
        serializer: Serializer for exportable context objects; a default
            instance is created when omitted.
    """
    self.output_path = Path(output_path)
    self.default_group_name = default_group_name
    self.serializer = serializer if serializer else ContextExportableObjSerializer()

    if template_dir:
        self.template_dir = Path(template_dir)
    else:
        # Default to the templates shipped alongside this module.
        self.template_dir = Path(__file__).parent / "templates"

    self.env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(str(self.template_dir)),
        autoescape=jinja2.select_autoescape(["html", "xml"]),
    )

report_results

report_results(
    results: dict[str | None, list[Result]]
    | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = True,
    **kwargs,
) -> None

Generate HTML report of test results.

Parameters:

Name Type Description Default
results dict[str | None, list[Result]] | list[_TaggedResult]

List of tagged test results.

required
statistics

Not a parameter of this method (the signature above does not accept it); overall statistics are computed internally from results.

—
render_context bool

Whether to include test context.

True
render_description bool

Whether to include test descriptions.

True
render_traceback bool

Whether to include tracebacks for errors.

True
render_message bool

Whether to include test result messages.

True
serialize_context_exportable_obj bool

Whether to serialize ContextExportableObj instances.

True
asset_output_dir

Not a parameter of this method; HTML reports embed assets as base64 rather than writing files (any value passed via **kwargs is ignored).

—
Source code in src/contraqctor/qc/reporters.py
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
def report_results(
    self,
    results: dict[str | None, list[Result]] | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = True,
    **kwargs,
) -> None:
    """Generate an HTML report of test results.

    Args:
        results: Results grouped by name, or a flat list of tagged results.
        render_context: Whether to include test context.
        render_description: Whether to include test descriptions.
        render_traceback: Whether to include tracebacks for errors.
        render_message: Whether to include test result messages.
        serialize_context_exportable_obj: Whether to serialize
            ContextExportableObj instances (embedded as base64 bytes in the
            HTML; no asset files are written).
        **kwargs: Ignored; accepted for interface compatibility.
    """
    template = self.env.get_template("report.html")

    results = _normalize_results(results)

    grouped_results = []
    for group, test_results in _TaggedResult.group_by_group(results):
        group_name = group or self.default_group_name
        group_stats = ResultsStatistics.from_results([tr.result for tr in test_results])

        # Serialize each context exactly once. Previously the same context
        # was serialized twice per result: once for the per-suite mapping
        # and once again for the flat `results` list.
        entries: list[dict] = []
        for tr in test_results:
            context = tr.result.context
            if serialize_context_exportable_obj and context is not None:
                context = self.serializer.serialize_as_bytes(context)
            entries.append(
                {
                    "result": tr.result,
                    "suite_name": tr.suite.name,
                    "serialized_context": context,
                }
            )

        # Group the pre-built entries by suite within the group.
        suites: dict[str, list[dict]] = {}
        for entry in entries:
            suites.setdefault(entry["suite_name"], []).append(entry)

        grouped_results.append(
            {
                "name": group_name,
                "statistics": group_stats,
                "suites": suites,
                "results": entries,
            }
        )

    html_content = template.render(
        groups=grouped_results,
        statistics=ResultsStatistics.from_results([tr.result for tr in results]),
        status_color=STATUS_COLOR,
        render_context=render_context,
        render_description=render_description,
        render_traceback=render_traceback,
        render_message=render_message,
        serialize_context_exportable_obj=serialize_context_exportable_obj,
        version=__version__,
        timestamp=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC"),
    )

    self.output_path.write_text(html_content, encoding="utf-8")

Reporter

Bases: ABC

Base class for test result reporters.

Reporters handle the presentation of test results in different formats such as console output or HTML files.

report_results abstractmethod

report_results(
    results: dict[str | None, list[Result]]
    | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = False,
    **kwargs,
) -> None

Report test results.

Parameters:

Name Type Description Default
results dict[str | None, list[Result]] | list[_TaggedResult]

List of tagged test results.

required
statistics

Not a parameter of this method (the signature above does not accept it); overall statistics are computed internally from results.

—
render_context bool

Whether to include test context.

True
render_description bool

Whether to include test descriptions.

True
render_traceback bool

Whether to include tracebacks for errors.

True
render_message bool

Whether to include test result messages.

True
serialize_context_exportable_obj bool

Whether to serialize ContextExportableObj instances.

False
Source code in src/contraqctor/qc/reporters.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
@abc.abstractmethod
def report_results(
    self,
    results: dict[str | None, list[Result]] | list[_TaggedResult],
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
    serialize_context_exportable_obj: bool = False,
    **kwargs,
) -> None:
    """Report test results.

    Subclasses implement the actual presentation (console output, HTML
    file, etc.).

    Args:
        results: Results grouped by name, or a flat list of tagged results.
        render_context: Whether to include test context.
        render_description: Whether to include test descriptions.
        render_traceback: Whether to include tracebacks for errors.
        render_message: Whether to include test result messages.
        serialize_context_exportable_obj: Whether to serialize ContextExportableObj instances.
        **kwargs: Reporter-specific options; see concrete subclasses.
    """
    pass

allow_null_as_pass

allow_null_as_pass(value: bool = True)

Context manager to control whether null results are allowed as pass.

When enabled, tests that return None will be treated as passing tests rather than producing errors.

Parameters:

Name Type Description Default
value bool

True to allow null results as passing, False otherwise.

True

Examples:

from contraqctor.qc.base import allow_null_as_pass, Runner

# Create a test suite with methods that return None
class SimpleTestSuite(Suite):
    def test_basic_check(self):
        # This method returns None, which would normally be an error
        print("Performing a check")
        # No explicit return

# Run with allow_null_as_pass to treat None returns as passing
suite = SimpleTestSuite()
runner = Runner().add_suite(suite)

with allow_null_as_pass():
    # None returns will be treated as passing tests
    results = runner.run_all_with_progress()

# Outside the context manager, None returns would cause errors
Source code in src/contraqctor/qc/base.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
@contextmanager
def allow_null_as_pass(value: bool = True):
    """Temporarily treat ``None`` test returns as passing results.

    While this context is active, test methods that return ``None`` are
    recorded as passes instead of producing errors.

    Args:
        value: True to allow null results as passing, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import allow_null_as_pass, Runner

        class SimpleTestSuite(Suite):
            def test_basic_check(self):
                print("Performing a check")
                # No explicit return -> returns None

        suite = SimpleTestSuite()
        runner = Runner().add_suite(suite)

        with allow_null_as_pass():
            # None returns will be treated as passing tests
            results = runner.run_all_with_progress()

        # Outside the context manager, None returns would cause errors
        ```
    """
    ctx_token = _allow_null_as_pass_ctx.set(value)
    try:
        yield
    finally:
        # Restore the previous setting even if the body raised.
        _allow_null_as_pass_ctx.reset(ctx_token)

elevated_skips

elevated_skips(value: bool = True)

Context manager to control whether skipped tests are treated as failures.

When enabled, skipped tests will be treated as failing tests rather than being merely marked as skipped.

Parameters:

Name Type Description Default
value bool

True to elevate skipped tests to failures, False otherwise.

True

Examples:

from contraqctor.qc.base import elevated_skips, Runner

# Create a test suite with some skipped tests
class FeatureTestSuite(Suite):
    def test_implemented_feature(self):
        return self.pass_test(True, "Feature works")

    def test_unimplemented_feature(self):
        return self.skip_test("Feature not yet implemented")

# Run with elevated_skips to fail when tests are skipped
suite = FeatureTestSuite()
runner = Runner().add_suite(suite)

with elevated_skips():
    # Skipped tests will be treated as failures
    results = runner.run_all_with_progress()

# Without the context manager, skips are just marked as skipped
Source code in src/contraqctor/qc/base.py
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
@contextmanager
def elevated_skips(value: bool = True):
    """Temporarily treat skipped tests as failures.

    While this context is active, skipped tests are reported as failing
    instead of merely being marked as skipped.

    Args:
        value: True to elevate skipped tests to failures, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import elevated_skips, Runner

        class FeatureTestSuite(Suite):
            def test_implemented_feature(self):
                return self.pass_test(True, "Feature works")

            def test_unimplemented_feature(self):
                return self.skip_test("Feature not yet implemented")

        suite = FeatureTestSuite()
        runner = Runner().add_suite(suite)

        with elevated_skips():
            # Skipped tests will be treated as failures
            results = runner.run_all_with_progress()

        # Without the context manager, skips are just marked as skipped
        ```
    """
    ctx_token = _elevate_skippable.set(value)
    try:
        yield
    finally:
        # Restore the previous setting even if the body raised.
        _elevate_skippable.reset(ctx_token)

elevated_warnings

elevated_warnings(value: bool = True)

Context manager to control whether warnings are treated as failures.

When enabled, warning results will be treated as failing tests rather than just being marked as warnings.

Parameters:

Name Type Description Default
value bool

True to elevate warnings to failures, False otherwise.

True

Examples:

from contraqctor.qc.base import elevated_warnings, Runner

# Create a test suite with warning conditions
class PerformanceTestSuite(Suite):
    def test_response_time(self):
        response_time = measure_response()

        if response_time < 100:
            return self.pass_test(response_time, "Response time acceptable")
        elif response_time < 200:
            # This would normally be a warning
            return self.warn_test(response_time, "Response time degraded")
        else:
            return self.fail_test(response_time, "Response time unacceptable")

# Run with elevated_warnings to fail on warnings
suite = PerformanceTestSuite()
runner = Runner().add_suite(suite)

with elevated_warnings():
    # Warning results will be treated as failures
    # Useful in CI/CD pipelines where warnings should trigger failures
    results = runner.run_all_with_progress()
Source code in src/contraqctor/qc/base.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
@contextmanager
def elevated_warnings(value: bool = True):
    """Temporarily treat warning results as failures.

    While this context is active, warning results are reported as failing
    instead of merely being marked as warnings.

    Args:
        value: True to elevate warnings to failures, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import elevated_warnings, Runner

        class PerformanceTestSuite(Suite):
            def test_response_time(self):
                response_time = measure_response()

                if response_time < 100:
                    return self.pass_test(response_time, "Response time acceptable")
                elif response_time < 200:
                    # This would normally be a warning
                    return self.warn_test(response_time, "Response time degraded")
                else:
                    return self.fail_test(response_time, "Response time unacceptable")

        suite = PerformanceTestSuite()
        runner = Runner().add_suite(suite)

        with elevated_warnings():
            # Warning results will be treated as failures
            # Useful in CI/CD pipelines where warnings should trigger failures
            results = runner.run_all_with_progress()
        ```
    """
    ctx_token = _elevate_warning.set(value)
    try:
        yield
    finally:
        # Restore the previous setting even if the body raised.
        _elevate_warning.reset(ctx_token)