
qc

ContextExportableObj

ContextExportableObj(obj: TExportable)

Bases: Generic[TExportable]

Container for exportable objects in test contexts.

Provides a standardized way to include exportable objects (like figures or reports) in test result contexts, allowing them to be properly handled by reporting tools.

Attributes:

    _obj: The exportable object being wrapped.

Examples:

import matplotlib.pyplot as plt
from contraqctor.qc._context_extensions import ContextExportableObj
from contraqctor.qc.base import Suite

class VisualizationTestSuite(Suite):
    def test_create_plot(self):
        # Create a matplotlib figure
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3], [4, 5, 6])
        ax.set_title("Test Plot")

        # Add the figure to the test context
        context = ContextExportableObj.as_context(fig)

        # Return test result with the figure in context
        return self.pass_test(True, "Plot created successfully", context=context)

Initialize the context exportable object container.

Parameters:

    obj (TExportable): The object to wrap for export. Required.
Source code in src/contraqctor/qc/_context_extensions.py
def __init__(self, obj: TExportable) -> None:
    """Initialize the context exportable object container.

    Args:
        obj: The object to wrap for export.
    """
    self._obj = obj

asset property

asset: TExportable

Get the wrapped exportable object.

Returns:

    TExportable: The wrapped object.

asset_type property

asset_type: Type

Get the type of the wrapped asset.

Returns:

    Type: Type of the wrapped object.

as_context classmethod

as_context(
    asset: TExportable,
) -> Dict[str, ContextExportableObj[TExportable]]

Create a standardized context dictionary for the exportable object.

This method wraps the provided asset in a ContextExportableObj and includes it in a dictionary under a reserved keyword. This allows for consistent handling of exportable objects in test result contexts.

Parameters:

    asset (TExportable): The object to wrap and include in the context. Required.

Returns:

    Dict[str, ContextExportableObj[TExportable]]: A dictionary containing the wrapped asset under the reserved key.

Examples:

import matplotlib.pyplot as plt
from contraqctor.qc._context_extensions import ContextExportableObj

# Create a visualization
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [4, 5, 6])

# Create a context dictionary with the figure
context = ContextExportableObj.as_context(fig)

# The context can now be passed to test result methods
return self.pass_test(True, "Analysis succeeded", context=context)
Source code in src/contraqctor/qc/_context_extensions.py
@classmethod
def as_context(cls, asset: TExportable) -> t.Dict[str, "ContextExportableObj[TExportable]"]:
    """Create a standardized context dictionary for the exportable object.

    This method wraps the provided asset in a `ContextExportableObj` and
    includes it in a dictionary under a reserved keyword. This allows for
    consistent handling of exportable objects in test result contexts.

    Args:
        asset: The object to wrap and include in the context.

    Returns:
        Dict[str, ContextExportableObj]: A dictionary containing the wrapped
        asset under the reserved key.

    Examples:
        ```python
        import matplotlib.pyplot as plt
        from contraqctor.qc._context_extensions import ContextExportableObj

        # Create a visualization
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3], [4, 5, 6])

        # Create a context dictionary with the figure
        context = ContextExportableObj.as_context(fig)

        # The context can now be passed to test result methods
        return self.pass_test(True, "Analysis succeeded", context=context)
        ```
    """
    return {ASSET_RESERVED_KEYWORD: ContextExportableObj(asset)}

Result dataclass

Result(
    status: Status,
    result: TResult,
    test_name: str,
    suite_name: str,
    message: Optional[str] = None,
    context: Optional[Any] = None,
    description: Optional[str] = None,
    exception: Optional[Exception] = None,
    traceback: Optional[str] = None,
    test_reference: Optional[ITest] = None,
    suite_reference: Optional[Suite] = None,
)

Bases: Generic[TResult]

Container for test execution results.

Stores the outcome of a test execution including status, returned value, contextual information, and any exception details.

Attributes:

    status (Status): The status of the test execution.
    result (TResult): The value returned by the test.
    test_name (str): Name of the test that generated this result.
    suite_name (str): Name of the test suite containing the test.
    message (Optional[str]): Optional message describing the test outcome.
    context (Optional[Any]): Optional contextual data for the test result.
    description (Optional[str]): Optional description of the test.
    exception (Optional[Exception]): Optional exception that occurred during test execution.
    traceback (Optional[str]): Optional traceback string if an exception occurred.
    test_reference (Optional[ITest]): Optional reference to the test function.
    suite_reference (Optional[Suite]): Optional reference to the suite that ran this test.
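A minimal sketch of inspecting a Result. The object is built by hand purely for illustration and assumes Result and Status are importable from contraqctor.qc.base like the other items on this page; in practice Result objects come from the Suite helpers (pass_test, fail_test, etc.) or from a Runner.

from contraqctor.qc.base import Result, Status

# Hand-built result for illustration only; real results are produced by Suite helpers.
r = Result(
    status=Status.FAILED,
    result=None,
    test_name="test_example",
    suite_name="MySuite",
    message="Expected 20 but got 19",
)
if r.status is Status.FAILED:
    print(f"{r.suite_name}.{r.test_name}: {r.message}")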

ResultsStatistics dataclass

ResultsStatistics(
    passed: int,
    failed: int,
    error: int,
    skipped: int,
    warnings: int,
)

Statistics about test results.

Aggregates counts of test results by status and provides methods for calculating statistics like pass rate.

Attributes:

    passed (int): Number of passed tests.
    failed (int): Number of failed tests.
    error (int): Number of tests that produced errors.
    skipped (int): Number of skipped tests.
    warnings (int): Number of tests with warnings.

total property

total: int

Get the total number of tests.

Returns:

    int: Sum of all test result counts.

pass_rate property

pass_rate: float

Calculate the pass rate.

Returns:

    float: Ratio of passed tests to total tests, or 0 if no tests.
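A small sketch of these properties, assuming pass_rate is simply passed divided by total (consistent with the description above) and that ResultsStatistics is importable from contraqctor.qc.base:

from contraqctor.qc.base import ResultsStatistics

# Hand-built counts for illustration; normally created via ResultsStatistics.from_results(...)
stats = ResultsStatistics(passed=8, failed=1, error=0, skipped=1, warnings=0)
print(stats.total)      # expected: 10
print(stats.pass_rate)  # expected: 0.8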

get_status_summary

get_status_summary() -> str

Generate a compact string summary of result counts.

Returns:

    str: Summary string with counts for each status type.

Source code in src/contraqctor/qc/base.py
def get_status_summary(self) -> str:
    """Generate a compact string summary of result counts.

    Returns:
        str: Summary string with counts for each status type.
    """
    return f"P:{self[Status.PASSED]} F:{self[Status.FAILED]} E:{self[Status.ERROR]} S:{self[Status.SKIPPED]} W:{self[Status.WARNING]}"

from_results classmethod

from_results(results: List[Result]) -> ResultsStatistics

Create statistics from a list of test results.

Parameters:

    results (List[Result]): List of test results to analyze. Required.

Returns:

    ResultsStatistics: Statistics object summarizing the results.

Source code in src/contraqctor/qc/base.py
@classmethod
def from_results(cls, results: t.List[Result]) -> "ResultsStatistics":
    """Create statistics from a list of test results.

    Args:
        results: List of test results to analyze.

    Returns:
        ResultsStatistics: Statistics object summarizing the results.
    """
    stats = {status: sum(1 for r in results if r.status == status) for status in Status}
    return cls(
        passed=stats[Status.PASSED],
        failed=stats[Status.FAILED],
        error=stats[Status.ERROR],
        skipped=stats[Status.SKIPPED],
        warnings=stats[Status.WARNING],
    )

Runner

Runner()

Test runner for executing suites and reporting results.

Handles executing test suites, collecting results, and generating reports.

Attributes:

    suites (Dict[Optional[str], List[Suite]]): Dictionary mapping group names to lists of test suites.
    _results (Optional[List[_TaggedResult]]): Optional list of collected test results, tagged with their suite and group.

Examples:

from contraqctor.qc.base import Runner

# Create test suites
suite1 = MyTestSuite(component1)
suite2 = AnotherTestSuite(component2)
suite3 = YetAnotherTestSuite(component2)

# Create runner and add suites with group names
runner = Runner()
runner.add_suite(suite1, "Component Tests")
runner.add_suite(suite2, "Integration Tests")
runner.add_suite(suite3, "Integration Tests")

# Run all tests with progress display
results = runner.run_all_with_progress()

# Access results by group
component_results = results["Component Tests"]

Initialize the test runner.

Source code in src/contraqctor/qc/base.py
def __init__(self):
    """Initialize the test runner."""
    self.suites: t.Dict[t.Optional[str], t.List[Suite]] = {}
    self._results: t.Optional[t.List[_TaggedResult]] = None

add_suite

add_suite(suite: Suite) -> Self
add_suite(suite: Suite, group: str) -> Self
add_suite(
    suite: Suite, group: Optional[str] = None
) -> Self

Add a test suite to the runner.

Parameters:

    suite (Suite): Test suite to add. Required.
    group (Optional[str]): Optional group name for organizing suites. Defaults to None.

Returns:

    Self: The runner instance, for method chaining.

Examples:

runner = Runner()

# Add a suite without a group
runner.add_suite(BasicSuite())

# Add suites with named groups for organization
runner.add_suite(DataSuite(), "Data Validation")
runner.add_suite(VisualizationSuite(), "Data Validation")
runner.add_suite(ApiSuite(), "API Tests")
Source code in src/contraqctor/qc/base.py
def add_suite(self, suite: Suite, group: t.Optional[str] = None) -> t.Self:
    """Add a test suite to the runner.

    Args:
        suite: Test suite to add.
        group: Optional group name for organizing suites. Defaults to None.

    Returns:
        Runner: Self for method chaining.

    Examples:
        ```python
        runner = Runner()

        # Add a suite without a group
        runner.add_suite(BasicSuite())

        # Add suites with named groups for organization
        runner.add_suite(DataSuite(), "Data Validation")
        runner.add_suite(VisualizationSuite(), "Data Validation")
        runner.add_suite(ApiSuite(), "API Tests")
        ```
    """
    self._update_suites(suite, group)
    return self

run_all_with_progress

run_all_with_progress(
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
) -> Dict[Optional[str], List[Result]]

Run all tests in all suites with a rich progress display.

Executes all tests with a visual progress bar and detailed reporting of test outcomes.

Parameters:

    render_context (bool): Whether to render test context in result output. Defaults to True.
    render_description (bool): Whether to render test descriptions in result output. Defaults to True.
    render_traceback (bool): Whether to render tracebacks for errors in result output. Defaults to True.
    render_message (bool): Whether to render test result messages in result output. Defaults to True.

Returns:

    Dict[Optional[str], List[Result]]: Results grouped by test group name.

Examples:

runner = Runner()
runner.add_suite(DataValidationSuite(), "Validation")
runner.add_suite(PerformanceSuite(), "Performance")

# Run all tests with progress display and complete output
results = runner.run_all_with_progress()

# Run with simplified output (no context or traceback)
results = runner.run_all_with_progress(
    render_context=False,
    render_traceback=False
)

# Check if any tests failed
all_passed = all(
    result.status == Status.PASSED
    for group_results in results.values()
    for result in group_results
)
Source code in src/contraqctor/qc/base.py
def run_all_with_progress(
    self,
    *,
    render_context: bool = True,
    render_description: bool = True,
    render_traceback: bool = True,
    render_message: bool = True,
) -> t.Dict[t.Optional[str], t.List[Result]]:
    """Run all tests in all suites with a rich progress display.

    Executes all tests with a visual progress bar and detailed reporting
    of test outcomes.

    Args:
        render_context: Whether to render test context in result output.
        render_description: Whether to render test descriptions in result output.
        render_traceback: Whether to render tracebacks for errors in result output.
        render_message: Whether to render test result messages in result output.

    Returns:
        Dict[Optional[str], List[Result]]: Results grouped by test group name.

    Examples:
        ```python
        runner = Runner()
        runner.add_suite(DataValidationSuite(), "Validation")
        runner.add_suite(PerformanceSuite(), "Performance")

        # Run all tests with progress display and complete output
        results = runner.run_all_with_progress()

        # Run with simplified output (no context or traceback)
        results = runner.run_all_with_progress(
            render_context=False,
            render_traceback=False
        )

        # Check if any tests failed
        all_passed = all(
            result.status == Status.PASSED
            for group_results in results.values()
            for result in group_results
        )
        ```
    """

    collected_tests = self._collect_tests()
    total_test_count = len(collected_tests)

    suite_name_lengths = [len(suite.name) for suite, _ in _TaggedTest.group_by_suite(collected_tests)]
    # we sum 2 to account for brackets
    group_lengths = [
        len(group) + 2 for group, _ in _TaggedTest.group_by_group(collected_tests) if group is not None
    ]
    full_name_width = max(suite_name_lengths + group_lengths) if suite_name_lengths else 10
    test_name_width = 20  # To render the test name during progress
    bar_width = 20

    progress_format = [
        f"[progress.description]{{task.description:<{full_name_width + test_name_width + 5}}}",
        rich.progress.BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        "•",
        rich.progress.TimeElapsedColumn(),
    ]

    with rich.progress.Progress(*progress_format) as progress:
        total_task = progress.add_task(
            "[bold green]TOTAL PROGRESS".ljust(full_name_width + test_name_width + 5), total=total_test_count
        )

        collected_results: t.List[_TaggedResult] = []
        for group, tests_in_group in _TaggedTest.group_by_group(collected_tests):
            _title = (
                rich.markup.escape(f"[{group}]") if group else rich.markup.escape(f"[{self._DEFAULT_TEST_GROUP}]")
            )
            group_task = progress.add_task(
                f"[honeydew2]{_title}".ljust(full_name_width + test_name_width + 5),
                total=len(tests_in_group),
            )
            for suite, tests_in_suite in _TaggedTest.group_by_suite(tests_in_group):
                results = self._run_suite_tests(
                    progress,
                    suite,
                    [t.test for t in tests_in_suite],
                    full_name_width,
                    test_name_width,
                    total_task,
                    group_task,
                )
                for result in results:
                    collected_results.append(
                        _TaggedResult(suite=suite, group=group, result=result, test=result.test_reference)
                    )

            if len(_TaggedResult.get_by_group(collected_results, group)) > 0:
                group_results = [
                    tagged_result.result for tagged_result in _TaggedResult.get_by_group(collected_results, group)
                ]
                group_stats = ResultsStatistics.from_results(group_results)
                group_status_bar = self._render_status_bar(group_stats, bar_width)
                padding_width = max(0, full_name_width - len(self._rich_unscape(_title)))
                group_line = f"[honeydew2]{_title}{' ' * padding_width} | {group_status_bar} | {group_stats.get_status_summary()}"
                progress.update(group_task, description=group_line)

        if total_test_count > 0:
            total_stats = ResultsStatistics.from_results(
                [tagged_result.result for tagged_result in collected_results]
            )
            total_status_bar = self._render_status_bar(total_stats, bar_width)

            _title = "TOTAL PROGRESS"
            # Fix: Use max() to ensure padding width is never negative
            padding_width = max(0, full_name_width - len(_title))
            total_line = f"[bold green]{_title}{' ' * padding_width} | {total_status_bar} | {total_stats.get_status_summary()}"
            progress.update(total_task, description=total_line)

    self._results = collected_results
    if self._results:
        self._print_results(
            self._results,
            render_description=render_description,
            render_traceback=render_traceback,
            render_message=render_message,
            render_context=render_context,
        )

    out: t.Dict[t.Optional[str], t.List[Result]] = {}
    for group, grouped_results in _TaggedResult.group_by_group(collected_results):
        out[group] = [tagged_result.result for tagged_result in grouped_results]
    return out

Status

Bases: Enum

Enum representing possible test result statuses.

Defines the different states a test can be in after execution.
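The members referenced throughout the source excerpts on this page are PASSED, FAILED, ERROR, SKIPPED, and WARNING. A minimal sketch of filtering results by status, assuming Status is importable from contraqctor.qc.base; the results list is a placeholder so the snippet runs standalone:

from contraqctor.qc.base import Status

# results: placeholder list of Result objects, e.g. one group's entries from
# Runner.run_all_with_progress(); empty here for illustration.
results = []
problems = [r for r in results if r.status in (Status.FAILED, Status.ERROR)]
print(f"{len(problems)} failing or errored tests")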

Suite

Bases: ABC

Base class for test suites.

Provides the core functionality for defining, running, and reporting on tests. All test suites should inherit from this class and implement test methods that start with 'test'.

Examples:

from contraqctor.qc.base import Suite

class MyTestSuite(Suite):
    """Test suite for validating my component."""

    def __init__(self, component):
        self.component = component

    def test_has_required_property(self):
        if hasattr(self.component, "required_property"):
            return self.pass_test(True, "Component has required property")
        else:
            return self.fail_test(False, "Component is missing required property")

    def test_performs_calculation(self):
        try:
            result = self.component.calculate(10)
            if result == 20:
                return self.pass_test(result, "Calculation correct")
            else:
                return self.fail_test(result, f"Expected 20 but got {result}")
        except Exception as e:
            return self.fail_test(None, f"Calculation failed: {str(e)}")

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

    Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

    str: The name of the test suite class.

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

    ITest: Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Find all methods starting with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        if name.startswith("test"):
            yield method
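A quick sketch of listing the tests a suite exposes; MyTestSuite and component are the placeholder names from the Suite example above:

suite = MyTestSuite(component)
for test in suite.get_tests():
    print(test.__name__)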

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

    result (Any): The value to include in the test result. Defaults to None.
    message (Optional[str]): Optional message describing why the test passed. Defaults to None.
    context (Optional[Any]): Optional contextual data for the test result. Defaults to None.

Returns:

    Result: A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a passing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.PASSED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

    result (Any): The value to include in the test result. Defaults to None.
    message (Optional[str]): Optional message describing the warning. Defaults to None.
    context (Optional[Any]): Optional contextual data for the test result. Defaults to None.

Returns:

    Result: A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a warning test result.

    Creates a result with WARNING status, or FAILED if warnings are elevated.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.WARNING if not _elevate_warning.get() else Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

    result (Optional[Any]): The value to include in the test result. Defaults to None.
    message (Optional[str]): Optional message describing why the test failed. Defaults to None.
    context (Optional[Any]): Optional contextual data for the test result. Defaults to None.

Returns:

    Result: A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a failing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

    message (Optional[str]): Optional message explaining why the test was skipped. Defaults to None.
    context (Optional[Any]): Optional contextual data for the test result. Defaults to None.

Returns:

    Result: A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Create a skipped test result.

    Creates a result with SKIPPED status, or FAILED if skips are elevated.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    calling_func_name, description = self._get_caller_info()
    return Result(
        status=Status.SKIPPED if not _elevate_skippable.get() else Status.FAILED,
        result=None,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )
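The four result helpers (pass_test, warn_test, fail_test, skip_test) have no standalone example on this page. A minimal sketch of a suite that uses all of them; the data, names, and thresholds are illustrative placeholders:

from contraqctor.qc.base import Suite

class HelperDemoSuite(Suite):
    """Sketch only: values and thresholds are illustrative."""

    def test_latency(self):
        latency_ms = 120  # placeholder measurement
        if latency_ms < 100:
            return self.pass_test(latency_ms, "Latency acceptable")
        if latency_ms < 200:
            return self.warn_test(latency_ms, "Latency degraded")
        return self.fail_test(latency_ms, "Latency unacceptable")

    def test_optional_feature(self):
        feature_enabled = False  # placeholder flag
        if not feature_enabled:
            return self.skip_test("Feature disabled in this configuration")
        return self.pass_test(True, "Feature works")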

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    pass
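A minimal sketch of overriding both hooks; the scratch-file resource is purely illustrative. Note that run_test calls teardown in a finally block, so it runs even when a test raises:

import tempfile

from contraqctor.qc.base import Suite

class FileBackedSuite(Suite):
    """Sketch: open and close a scratch file around every test."""

    def setup(self) -> None:
        # Runs before each test method.
        self._fp = tempfile.TemporaryFile(mode="w+")

    def teardown(self) -> None:
        # Runs after each test method, even when the test raised.
        self._fp.close()

    def test_writes(self):
        self._fp.write("hello")
        return self.pass_test(True, "Write succeeded")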

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

    test_method (ITest): The test method to run. Required.

Yields:

    Result: Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        if inspect.isgenerator(result):
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        self.teardown()
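Because run_test iterates generator results (see the source above), a test method may yield several Result objects instead of returning a single one. A minimal sketch with placeholder data:

from contraqctor.qc.base import Suite

class BatchSuite(Suite):
    """Sketch: one test method yielding a result per item."""

    def test_each_item(self):
        items = {"a": 1, "b": 2, "c": 0}  # placeholder data
        for name, value in items.items():
            if value > 0:
                yield self.pass_test(value, f"{name} is positive")
            else:
                yield self.fail_test(value, f"{name} is not positive")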

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

    Result: Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
def run_all(self) -> t.Generator[Result, None, None]:
    """Run all test methods in the suite.

    Finds all test methods and runs them in sequence.

    Yields:
        Result: Result objects produced by all test methods.
    """
    for test in self.get_tests():
        yield from self.run_test(test)
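For quick scripts that do not need the Runner's progress display, a suite can be iterated directly. A small sketch reusing the placeholder suite from the Suite example above:

suite = MyTestSuite(component)
for result in suite.run_all():
    print(result.status.name, result.test_name, result.message or "")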

allow_null_as_pass

allow_null_as_pass(value: bool = True)

Context manager to control whether null results are allowed as pass.

When enabled, tests that return None will be treated as passing tests rather than producing errors.

Parameters:

    value (bool): True to allow null results as passing, False otherwise. Defaults to True.

Examples:

from contraqctor.qc.base import allow_null_as_pass, Runner, Suite

# Create a test suite with methods that return None
class SimpleTestSuite(Suite):
    def test_basic_check(self):
        # This method returns None, which would normally be an error
        print("Performing a check")
        # No explicit return

# Run with allow_null_as_pass to treat None returns as passing
suite = SimpleTestSuite()
runner = Runner().add_suite(suite)

with allow_null_as_pass():
    # None returns will be treated as passing tests
    results = runner.run_all_with_progress()

# Outside the context manager, None returns would cause errors
Source code in src/contraqctor/qc/base.py
@contextmanager
def allow_null_as_pass(value: bool = True):
    """Context manager to control whether null results are allowed as pass.

    When enabled, tests that return None will be treated as passing tests
    rather than producing errors.

    Args:
        value: True to allow null results as passing, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import allow_null_as_pass, Runner

        # Create a test suite with methods that return None
        class SimpleTestSuite(Suite):
            def test_basic_check(self):
                # This method returns None, which would normally be an error
                print("Performing a check")
                # No explicit return

        # Run with allow_null_as_pass to treat None returns as passing
        suite = SimpleTestSuite()
        runner = Runner().add_suite(suite)

        with allow_null_as_pass():
            # None returns will be treated as passing tests
            results = runner.run_all_with_progress()

        # Outside the context manager, None returns would cause errors
        ```
    """
    token = _allow_null_as_pass_ctx.set(value)
    try:
        yield
    finally:
        _allow_null_as_pass_ctx.reset(token)

elevated_skips

elevated_skips(value: bool = True)

Context manager to control whether skipped tests are treated as failures.

When enabled, skipped tests will be treated as failing tests rather than being merely marked as skipped.

Parameters:

    value (bool): True to elevate skipped tests to failures, False otherwise. Defaults to True.

Examples:

from contraqctor.qc.base import elevated_skips, Runner, Suite

# Create a test suite with some skipped tests
class FeatureTestSuite(Suite):
    def test_implemented_feature(self):
        return self.pass_test(True, "Feature works")

    def test_unimplemented_feature(self):
        return self.skip_test("Feature not yet implemented")

# Run with elevated_skips to fail when tests are skipped
suite = FeatureTestSuite()
runner = Runner().add_suite(suite)

with elevated_skips():
    # Skipped tests will be treated as failures
    results = runner.run_all_with_progress()

# Without the context manager, skips are just marked as skipped
Source code in src/contraqctor/qc/base.py
@contextmanager
def elevated_skips(value: bool = True):
    """Context manager to control whether skipped tests are treated as failures.

    When enabled, skipped tests will be treated as failing tests rather than
    being merely marked as skipped.

    Args:
        value: True to elevate skipped tests to failures, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import elevated_skips, Runner

        # Create a test suite with some skipped tests
        class FeatureTestSuite(Suite):
            def test_implemented_feature(self):
                return self.pass_test(True, "Feature works")

            def test_unimplemented_feature(self):
                return self.skip_test("Feature not yet implemented")

        # Run with elevated_skips to fail when tests are skipped
        suite = FeatureTestSuite()
        runner = Runner().add_suite(suite)

        with elevated_skips():
            # Skipped tests will be treated as failures
            results = runner.run_all_with_progress()

        # Without the context manager, skips are just marked as skipped
        ```
    """
    token = _elevate_skippable.set(value)
    try:
        yield
    finally:
        _elevate_skippable.reset(token)

elevated_warnings

elevated_warnings(value: bool = True)

Context manager to control whether warnings are treated as failures.

When enabled, warning results will be treated as failing tests rather than just being marked as warnings.

Parameters:

    value (bool): True to elevate warnings to failures, False otherwise. Defaults to True.

Examples:

from contraqctor.qc.base import elevated_warnings, Runner, Suite

# Create a test suite with warning conditions
class PerformanceTestSuite(Suite):
    def test_response_time(self):
        response_time = measure_response()

        if response_time < 100:
            return self.pass_test(response_time, "Response time acceptable")
        elif response_time < 200:
            # This would normally be a warning
            return self.warn_test(response_time, "Response time degraded")
        else:
            return self.fail_test(response_time, "Response time unacceptable")

# Run with elevated_warnings to fail on warnings
suite = PerformanceTestSuite()
runner = Runner().add_suite(suite)

with elevated_warnings():
    # Warning results will be treated as failures
    # Useful in CI/CD pipelines where warnings should trigger failures
    results = runner.run_all_with_progress()
Source code in src/contraqctor/qc/base.py
@contextmanager
def elevated_warnings(value: bool = True):
    """Context manager to control whether warnings are treated as failures.

    When enabled, warning results will be treated as failing tests rather than
    just being marked as warnings.

    Args:
        value: True to elevate warnings to failures, False otherwise.

    Examples:
        ```python
        from contraqctor.qc.base import elevated_warnings, Runner

        # Create a test suite with warning conditions
        class PerformanceTestSuite(Suite):
            def test_response_time(self):
                response_time = measure_response()

                if response_time < 100:
                    return self.pass_test(response_time, "Response time acceptable")
                elif response_time < 200:
                    # This would normally be a warning
                    return self.warn_test(response_time, "Response time degraded")
                else:
                    return self.fail_test(response_time, "Response time unacceptable")

        # Run with elevated_warnings to fail on warnings
        suite = PerformanceTestSuite()
        runner = Runner().add_suite(suite)

        with elevated_warnings():
            # Warning results will be treated as failures
            # Useful in CI/CD pipelines where warnings should trigger failures
            results = runner.run_all_with_progress()
        ```
    """
    token = _elevate_warning.set(value)
    try:
        yield
    finally:
        _elevate_warning.reset(token)