Skip to content

qc.harp

HarpDeviceTestSuite

HarpDeviceTestSuite(
    harp_device: HarpDevice,
    harp_device_commands: Optional[HarpDevice] = None,
    *,
    min_core_version: Optional[str] = None,
)

Bases: Suite

Test suite for generic HARP devices.

Provides a set of standard tests that all HARP devices are expected to pass, checking basic functionality and data integrity.

Attributes:

Name Type Description
harp_device

The HarpDevice data stream to test.

harp_device_commands

Optional HarpDevice data stream with device commands.

min_core_version

Optional minimum required core version.

Examples:

from contraqctor.contract.harp import HarpDevice
from contraqctor.qc.harp import HarpDeviceTestSuite
from contraqctor.qc.base import Runner

# Create HarpDevice streams
device = HarpDevice("behavior", reader_params=params).load()
commands = HarpDevice("behavior_commands", reader_params=command_params).load()

# Create and run test suite
suite = HarpDeviceTestSuite(device, commands, min_core_version="1.2.0")
runner = Runner().add_suite(suite)
results = runner.run_all_with_progress()

Initialize the HARP device test suite.

Parameters:

Name Type Description Default
harp_device HarpDevice

The HarpDevice data stream to test.

required
harp_device_commands Optional[HarpDevice]

Optional HarpDevice data stream with command history. If None, tests requiring the commands will be skipped.

None
min_core_version Optional[str]

Optional minimum required core version for validation.

None
Source code in src/contraqctor/qc/harp.py
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def __init__(
    self,
    harp_device: HarpDevice,
    harp_device_commands: t.Optional[HarpDevice] = None,
    *,
    min_core_version: t.Optional[str] = None,
):
    """Set up the test suite for a single HARP device.

    Args:
        harp_device: The HarpDevice data stream to test.
        harp_device_commands: Optional HarpDevice data stream with command history.
            If None, tests requiring the commands will be skipped.
        min_core_version: Optional minimum required core version for validation.
    """
    # The suite only stores its inputs; all validation happens in the test methods.
    self.min_core_version = min_core_version
    self.harp_device_commands = harp_device_commands
    self.harp_device = harp_device

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

Type Description
Optional[str]

Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

Name Type Description
str str

The name of the test suite class.

test_has_whoami

test_has_whoami()

Check if the WhoAmI data stream of the HARP device is present and return its value

Source code in src/contraqctor/qc/harp.py
 98
 99
100
101
102
103
104
105
106
107
108
def test_has_whoami(self):
    """Check that the WhoAmI data stream is present, non-empty, and in the valid range."""
    who_am_i_reg: HarpRegister = self.harp_device["WhoAmI"]
    if not who_am_i_reg.has_data:
        return self.fail_test(None, "WhoAmI does not have loaded data")
    if len(who_am_i_reg.data) == 0:
        return self.fail_test(None, "WhoAmI file is empty")
    who_am_i = self._get_whoami(self.harp_device)
    # HARP WhoAmI identifiers are 4-digit values; the redundant bool()/0000
    # literal of the original made this harder to read than it is.
    if not (0 <= who_am_i <= 9999):
        return self.fail_test(who_am_i, "WhoAmI value is not in the range 0000-9999")
    return self.pass_test(int(who_am_i))

test_match_whoami_to_yml

test_match_whoami_to_yml()

Check if the WhoAmI value matches the device's WhoAmI

Source code in src/contraqctor/qc/harp.py
110
111
112
113
114
115
def test_match_whoami_to_yml(self):
    """Check if the WhoAmI value matches the device's WhoAmI"""
    observed = self._get_whoami(self.harp_device)
    expected = self.harp_device.device_reader.device.whoAmI
    if observed != expected:
        return self.fail_test(False, "WhoAmI value does not match the device's WhoAmI")
    return self.pass_test(True, "WhoAmI value matches the device's WhoAmI")

test_read_dump_is_complete

test_read_dump_is_complete()

Check if the read dump from a HARP device is complete

Source code in src/contraqctor/qc/harp.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
def test_read_dump_is_complete(self):
    """Check if the read dump from a HARP device is complete.

    Verifies that every register declared by the device reader is present as a
    data stream, and that each such stream has a last-read value.
    """
    expected_regs = self.harp_device.device_reader.device.registers.keys()
    streams = list(self.harp_device)
    # Hoist the stream names into a set once; the original rebuilt
    # [r.name for r in ds] inside the comprehension, making this O(n^2).
    stream_names = {stream.name for stream in streams}
    missing_regs = [reg_name for reg_name in expected_regs if reg_name not in stream_names]
    if len(missing_regs) > 0:
        return self.fail_test(
            False,
            "Read dump is not complete. Some registers are missing.",
            context={"missing_registers": missing_regs},
        )
    missing_read_dump = [
        r.name for r in streams if not (r.name in expected_regs and (self._get_last_read(r) is not None))
    ]
    return (
        self.pass_test(True, "Read dump is complete")
        if len(missing_read_dump) == 0
        else self.fail_test(False, "Read dump is not complete", context={"missing_registers": missing_read_dump})
    )

test_request_response

test_request_response()

Check that each request to the device has a corresponding response

Source code in src/contraqctor/qc/harp.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
def test_request_response(self):
    """Check that each request to the device has a corresponding response"""
    if self.harp_device_commands is None:
        return self.skip_test("No harp device commands provided")

    # Reference timestamp: the first WRITE in OperationControl
    # (presumably marks the start of the session — verify with caller).
    op_ctrl_data: pd.DataFrame = self.harp_device_commands["OperationControl"].data
    first_write_time = op_ctrl_data[op_ctrl_data["MessageType"] == "WRITE"].index.values[0]

    mismatched = []
    for command_stream in self.harp_device_commands:
        if not command_stream.has_data:  # Only data streams with data can be checked
            continue
        # Only "Writes" will be considered, but in theory we could also check "Reads"
        requests: pd.DataFrame = command_stream.data[command_stream.data["MessageType"] == "WRITE"]
        reply_stream = self.harp_device[command_stream.name]
        replies: pd.DataFrame = reply_stream.data[reply_stream.data["MessageType"] == "WRITE"]

        if len(requests) == 0:
            continue
        # All responses must, by definition, be timestamped AFTER the request
        requests = requests[requests.index >= first_write_time]
        replies = replies[replies.index >= first_write_time]
        if len(requests) != len(replies):
            mismatched.append(
                {"register": command_stream.name, "requests": len(requests), "responses": len(replies)}
            )

    if len(mismatched) == 0:
        return self.pass_test(
            None,
            "Request/Response check passed. All requests have a corresponding response.",
        )
    return self.fail_test(
        None,
        "Request/Response check failed. Some requests do not have a corresponding response.",
        context={"register_errors": mismatched},
    )

test_registers_are_monotonicity

test_registers_are_monotonicity()

Check that all the HARP device registers' timestamps are monotonic

Source code in src/contraqctor/qc/harp.py
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
def test_registers_are_monotonicity(self):
    """Check that all the HARP device registers' timestamps are monotonic.

    Monotonicity is evaluated separately for each MessageType group within
    every register's data stream.
    """
    reg_errors = []
    reg: HarpRegister
    for reg in self.harp_device:
        for message_type, reg_type_data in reg.data.groupby("MessageType", observed=True):
            if not reg_type_data.index.is_monotonic_increasing:
                reg_errors.append(
                    {
                        "register": reg.name,
                        "message_type": message_type,
                        # Bug fix: report the monotonicity of the offending
                        # message-type slice, not of the whole register
                        # (previously used reg.data, which could mask which
                        # slice actually failed).
                        "is_monotonic": reg_type_data.index.is_monotonic_increasing,
                    }
                )
    if len(reg_errors) == 0:
        return self.pass_test(
            None,
            "Monotonicity check passed. All registers are monotonic.",
        )
    else:
        return self.fail_test(
            None,
            "Monotonicity check failed. Some registers are not monotonic.",
            context={"register_errors": reg_errors},
        )

test_fw_version_matches_reader

test_fw_version_matches_reader()

Check if the firmware version of the device matches the one in the reader

Source code in src/contraqctor/qc/harp.py
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
def test_fw_version_matches_reader(self):
    """Check if the firmware version of the device matches the one in the reader"""
    reader = self.harp_device.device_reader
    expected_fw = self._try_parse_semver(reader.device.firmwareVersion)

    # The device reports its firmware version split across two registers: <High>.<Low>.
    reported_high = self._get_last_read(self.harp_device["FirmwareVersionHigh"]).iloc[0]
    reported_low = self._get_last_read(self.harp_device["FirmwareVersionLow"]).iloc[0]
    device_fw = self._try_parse_semver(f"{reported_high}.{reported_low}")

    fw = expected_fw
    if (fw is None) or (device_fw is None):
        return self.fail_test(
            None, f"Firmware version is not a valid semver version. Expected {fw} and got {device_fw}"
        )
    if fw == device_fw:
        return self.pass_test(True, f"Expected version {fw} matches the device's version {device_fw}")
    if fw > device_fw:
        return self.fail_test(
            False,
            f"Expected version {fw} is greater than the device's version {device_fw}. Consider updating the device firmware.",
        )
    return self.warn_test(
        False,
        f"Expected version {fw} is less than the device's version {device_fw}. Consider updating interface package.",
    )

test_core_version

test_core_version()

Check if the core version of the device matches the one provided

Source code in src/contraqctor/qc/harp.py
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
def test_core_version(self):
    """Check if the core version of the device meets the minimum one provided."""
    core = self._try_parse_semver(self.min_core_version) if self.min_core_version else None
    device_core = self._try_parse_semver(
        f"{self._get_last_read(self.harp_device['CoreVersionHigh']).iloc[0]}.{self._get_last_read(self.harp_device['CoreVersionLow']).iloc[0]}"
    )

    if core is None:
        return self.skip_test("Core version not specified, skipping test.")
    if device_core is None:
        # Bug fix: the message was previously passed as the first positional
        # argument, which fail_test interprets as `result`, not `message`.
        return self.fail_test(None, "Core version is not a valid semver version.")

    if core > device_core:
        return self.fail_test(
            False,
            f"Core version {core} is greater than the device's version {device_core}. Consider updating the device firmware.",
        )
    elif core == device_core:
        return self.pass_test(True, f"Core version {core} matches the device's version {device_core}")
    else:
        return self.warn_test(False, f"Core version {core} is less than the device's version {device_core}")

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

Name Type Description
ITest ITest

Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
321
322
323
324
325
326
327
328
329
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Yield every bound method of the suite whose name starts with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    members = inspect.getmembers(self, predicate=inspect.ismethod)
    yield from (method for name, method in members if name.startswith("test"))

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test passed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a Result with PASSED status for the calling test.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    # Test name and description are recovered from the caller's frame.
    test_name, test_description = self._get_caller_info()
    return Result(
        status=Status.PASSED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing the warning.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a warning Result for the calling test.

    The status is WARNING, unless warnings are elevated, in which case it
    becomes FAILED.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation (context-var controlled) promotes warnings to hard failures.
    status = Status.FAILED if _elevate_warning.get() else Status.WARNING
    return Result(
        status=status,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

Name Type Description Default
result Optional[Any]

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test failed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a Result with FAILED status for the calling test.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    # Test name and description are recovered from the caller's frame.
    test_name, test_description = self._get_caller_info()
    return Result(
        status=Status.FAILED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

Name Type Description Default
message Optional[str]

Optional message explaining why the test was skipped.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Build a skipped Result for the calling test.

    The status is SKIPPED, unless skips are elevated, in which case it
    becomes FAILED.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation (context-var controlled) promotes skips to hard failures.
    status = Status.FAILED if _elevate_skippable.get() else Status.SKIPPED
    return Result(
        status=status,
        result=None,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
685
686
687
688
689
690
691
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
693
694
695
696
697
698
699
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    pass

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

Name Type Description Default
test_method ITest

The test method to run.

required

Yields:

Name Type Description
Result Result

Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        # Generator-based tests yield multiple Results; plain tests return one.
        if inspect.isgenerator(result):
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        # Any uncaught exception (from setup, the test, or result processing)
        # is converted into an ERROR Result instead of aborting the suite run.
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        # Teardown always runs; note that because this is itself a generator,
        # it only executes once the caller has consumed (or closed) the results.
        self.teardown()

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

Name Type Description
Result Result

Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
785
786
787
788
789
790
791
792
793
794
def run_all(self) -> t.Generator[Result, None, None]:
    """Run every test method discovered in the suite, in sequence.

    Yields:
        Result: Result objects produced by all test methods.
    """
    for test_method in self.get_tests():
        yield from self.run_test(test_method)

HarpHubTestSuite

HarpHubTestSuite(
    clock_generator_device: HarpDevice,
    devices: List[HarpDevice],
    *,
    read_dump_jitter_threshold_s: Optional[float] = 0.05,
)

Bases: Suite

Test suite for a hub of HARP devices.

Tests a collection of HARP devices that share the same clock generator source, verifying proper synchronization and configuration.

Attributes:

Name Type Description
clock_generator_device

The HARP device acting as the clock generator.

devices

List of subordinate HARP devices to test.

read_dump_jitter_threshold_s

Maximum allowed time difference for read dumps.

Examples:

from contraqctor.contract.harp import HarpDevice
from contraqctor.qc.harp import HarpHubTestSuite
from contraqctor.qc.base import Runner

# Create HarpDevice streams
clock_gen = HarpDevice("clock_gen", reader_params=clock_params).load()
device1 = HarpDevice("device1", reader_params=params1).load()
device2 = HarpDevice("device2", reader_params=params2).load()

# Create and run hub test suite
suite = HarpHubTestSuite(clock_gen, [device1, device2])
runner = Runner().add_suite(suite)
results = runner.run_all_with_progress()

Initialize the HARP hub test suite.

Parameters:

Name Type Description Default
clock_generator_device HarpDevice

The HARP device acting as the clock generator.

required
devices List[HarpDevice]

List of HARP devices to test as part of the hub.

required
read_dump_jitter_threshold_s Optional[float]

Maximum allowed time difference (in seconds) between devices' read dumps. Defaults to 0.05.

0.05
Source code in src/contraqctor/qc/harp.py
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
def __init__(
    self,
    clock_generator_device: HarpDevice,
    devices: t.List[HarpDevice],
    *,
    read_dump_jitter_threshold_s: t.Optional[float] = 0.05,
):
    """Set up the test suite for a hub of HARP devices.

    Args:
        clock_generator_device: The HARP device acting as the clock generator.
        devices: List of HARP devices to test as part of the hub.
        read_dump_jitter_threshold_s: Maximum allowed time difference (in seconds)
            between devices' read dumps. Defaults to 0.05.
    """
    self.clock_generator_device = clock_generator_device
    # Keep only the subordinate devices: the clock generator itself is
    # excluded by identity, even if the caller included it in the list.
    self.devices = [candidate for candidate in devices if candidate is not clock_generator_device]
    self.read_dump_jitter_threshold_s = read_dump_jitter_threshold_s

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

Type Description
Optional[str]

Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

Name Type Description
str str

The name of the test suite class.

test_clock_generator_reg

test_clock_generator_reg()

Checks if the clock generator device is actually a clock generator

Source code in src/contraqctor/qc/harp.py
319
320
321
322
323
324
325
326
327
def test_clock_generator_reg(self):
    """Checks if the clock generator device is actually a clock generator"""
    stream_names = [stream.name for stream in self.clock_generator_device]
    if "ClockConfiguration" not in stream_names:
        return self.fail_test(None, "ClockConfiguration data stream is not present")
    # Inspect the most recent ClockConfiguration sample for the generator flag.
    latest_clock_config = self.clock_generator_device["ClockConfiguration"].data.iloc[-1]
    if not latest_clock_config["ClockGenerator"]:
        return self.fail_test(False, "Clock generator is not a clock generator")
    return self.pass_test(True, "Clock generator is a clock generator")

test_devices_are_subordinate

test_devices_are_subordinate()

Checks if the devices are subordinate to the clock generator

Source code in src/contraqctor/qc/harp.py
329
330
331
332
333
334
335
336
337
def test_devices_are_subordinate(self):
    """Checks if the devices are subordinate to the clock generator"""
    for device in self.devices:
        stream_names = [stream.name for stream in device]
        if "ClockConfiguration" not in stream_names:
            yield self.fail_test(None, f"ClockConfiguration data stream is not present in {device.name}")
            continue
        # The most recent ClockConfiguration sample carries the generator flag.
        is_generator = device["ClockConfiguration"].data.iloc[-1]["ClockGenerator"]
        if is_generator:
            yield self.fail_test(False, f"Device {device.name} is not subordinate to the clock generator")
        else:
            yield self.pass_test(True, f"Device {device.name} is subordinate to the clock generator")

test_is_read_dump_synchronized

test_is_read_dump_synchronized()

Check if the read dumps from the devices arrive at roughly the same time

Source code in src/contraqctor/qc/harp.py
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
def test_is_read_dump_synchronized(self):
    """Check if the read dumps from the devices arrive at roughly the same time.

    Compares each subordinate device's read-dump timestamp against the clock
    generator's; any difference above ``read_dump_jitter_threshold_s`` fails.
    """
    if self.read_dump_jitter_threshold_s is None:
        # Bug fix: this is a generator function (it contains yields), so the
        # previous `return self.skip_test(...)` only set StopIteration.value,
        # which run_test's `for` loop silently discards. Yield the Result.
        yield self.skip_test("No read dump jitter threshold provided, skipping test.")
        return
    clock_dump_time = self._get_read_dump_time(self.clock_generator_device)
    if clock_dump_time is None:
        # Robustness: without a reference time, abs(t_dump - None) below
        # would raise a TypeError instead of producing a test result.
        yield self.fail_test(None, "Clock generator device does not have a requested read dump")
        return
    for device in self.devices:
        t_dump = self._get_read_dump_time(device)
        if t_dump is None:
            yield self.fail_test(None, f"Device {device.name} does not have a requested read dump")
        elif (dt := abs(t_dump - clock_dump_time)) > self.read_dump_jitter_threshold_s:
            yield self.fail_test(
                False,
                f"Device {device.name} read dump is not synchronized with the clock generator's. dt = {dt:.3f} s vs threshold {self.read_dump_jitter_threshold_s:.3f} s",
            )
        else:
            yield self.pass_test(True, f"Device {device.name} read dump is synchronized with the clock generator's")

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

Name Type Description
ITest ITest

Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
321
322
323
324
325
326
327
328
329
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Yield every bound method of the suite whose name starts with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    members = inspect.getmembers(self, predicate=inspect.ismethod)
    yield from (method for name, method in members if name.startswith("test"))

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test passed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a Result with PASSED status for the calling test.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    # Test name and description are recovered from the caller's frame.
    test_name, test_description = self._get_caller_info()
    return Result(
        status=Status.PASSED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing the warning.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a warning Result for the calling test.

    The status is WARNING, unless warnings are elevated, in which case it
    becomes FAILED.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation (context-var controlled) promotes warnings to hard failures.
    status = Status.FAILED if _elevate_warning.get() else Status.WARNING
    return Result(
        status=status,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

Name Type Description Default
result Optional[Any]

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test failed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a failing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()

    return Result(
        # Failures are never downgraded; status is unconditionally FAILED.
        status=Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

Name Type Description Default
message Optional[str]

Optional message explaining why the test was skipped.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Create a skipped test result.

    Creates a result with SKIPPED status, or FAILED if skips are elevated.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()
    return Result(
        # _elevate_skippable (presumably a ContextVar — confirm) promotes skips to failures when set.
        status=Status.SKIPPED if not _elevate_skippable.get() else Status.FAILED,
        # Skipped tests carry no payload, unlike pass/warn/fail.
        result=None,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
685
686
687
688
689
690
691
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    # Intentionally a no-op hook; subclasses override as needed.
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
693
694
695
696
697
698
699
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    # Intentionally a no-op hook; subclasses override as needed.
    pass

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

Name Type Description Default
test_method ITest

The test method to run.

required

Yields:

Name Type Description
Result Result

Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        # A test may be a generator yielding several Results, or a plain
        # callable returning a single Result.
        if inspect.isgenerator(result):
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        # Any uncaught exception from setup or the test body is converted
        # into an ERROR result instead of propagating to the runner.
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        # Teardown always runs, even when the test raised.
        self.teardown()

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

Name Type Description
Result Result

Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
785
786
787
788
789
790
791
792
793
794
def run_all(self) -> t.Generator[Result, None, None]:
    """Run all test methods in the suite.

    Finds all test methods and runs them in sequence.

    Yields:
        Result: Result objects produced by all test methods.
    """
    # Delegate discovery to get_tests() and flatten each test's results.
    for test in self.get_tests():
        yield from self.run_test(test)

HarpDeviceTypeTestSuite

HarpDeviceTypeTestSuite(harp_device: HarpDevice)

Bases: Suite, ABC

Base test suite for specific types of HARP devices.

Abstract base class providing common functionality for testing specific HARP device types with known WhoAmI identifiers.

Attributes:

Name Type Description
harp_device

The HARP device to test.

_WHOAMI int

Class variable defining the expected WhoAmI value for this device type.

Initialize the device type test suite.

Parameters:

Name Type Description Default
harp_device HarpDevice

The HARP device to test.

required
Source code in src/contraqctor/qc/harp.py
389
390
391
392
393
394
395
def __init__(self, harp_device: HarpDevice):
    """Initialize the device type test suite.

    Args:
        harp_device: The HARP device to test.
    """
    # Device under test; subclasses supply the expected WhoAmI via `whoami`.
    self.harp_device = harp_device

whoami property

whoami: int

Get the expected WhoAmI value for this device type.

Returns:

Name Type Description
int int

The expected WhoAmI identifier.

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

Type Description
Optional[str]

Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

Name Type Description
str str

The name of the test suite class.

test_whoami

test_whoami()

Check if the WhoAmI value is correct

Source code in src/contraqctor/qc/harp.py
406
407
408
409
410
411
412
413
414
def test_whoami(self):
    """Check if the WhoAmI value is correct"""
    try:
        # Use the last reported WhoAmI message in the stream.
        who_am_i = self.harp_device["WhoAmI"].data["WhoAmI"].iloc[-1]
    except KeyError:
        # Missing stream is a hard failure, not a skip.
        return self.fail_test(None, "WhoAmI data stream is not present")
    if who_am_i != self.whoami:
        return self.fail_test(False, f"Expected WhoAmI value {self.whoami} but got {who_am_i}")
    return self.pass_test(True, f"WhoAmI value is {who_am_i} as expected")

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

Name Type Description
ITest ITest

Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
321
322
323
324
325
326
327
328
329
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Find all methods starting with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    # Discover tests by naming convention, mirroring common test frameworks.
    for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        if name.startswith("test"):
            yield method

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test passed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a passing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.PASSED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing the warning.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a warning test result.

    Creates a result with WARNING status, or FAILED if warnings are elevated.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()

    return Result(
        # _elevate_warning (presumably a ContextVar — confirm) promotes warnings to failures when set.
        status=Status.WARNING if not _elevate_warning.get() else Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

Name Type Description Default
result Optional[Any]

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test failed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a failing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()

    return Result(
        # Failures are never downgraded; status is unconditionally FAILED.
        status=Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

Name Type Description Default
message Optional[str]

Optional message explaining why the test was skipped.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Create a skipped test result.

    Creates a result with SKIPPED status, or FAILED if skips are elevated.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()
    return Result(
        # _elevate_skippable (presumably a ContextVar — confirm) promotes skips to failures when set.
        status=Status.SKIPPED if not _elevate_skippable.get() else Status.FAILED,
        # Skipped tests carry no payload, unlike pass/warn/fail.
        result=None,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
685
686
687
688
689
690
691
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    # Intentionally a no-op hook; subclasses override as needed.
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
693
694
695
696
697
698
699
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    # Intentionally a no-op hook; subclasses override as needed.
    pass

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

Name Type Description Default
test_method ITest

The test method to run.

required

Yields:

Name Type Description
Result Result

Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        # A test may be a generator yielding several Results, or a plain
        # callable returning a single Result.
        if inspect.isgenerator(result):
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        # Any uncaught exception from setup or the test body is converted
        # into an ERROR result instead of propagating to the runner.
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        # Teardown always runs, even when the test raised.
        self.teardown()

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

Name Type Description
Result Result

Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
785
786
787
788
789
790
791
792
793
794
def run_all(self) -> t.Generator[Result, None, None]:
    """Run all test methods in the suite.

    Finds all test methods and runs them in sequence.

    Yields:
        Result: Result objects produced by all test methods.
    """
    # Delegate discovery to get_tests() and flatten each test's results.
    for test in self.get_tests():
        yield from self.run_test(test)

HarpSniffDetectorTestSuite

HarpSniffDetectorTestSuite(
    harp_device: HarpDevice,
    quantization_ratio_thr: float = 0.1,
    clustering_thr: float = 0.05,
    clipping_thr: float = 0.05,
    sudden_jumps_thr: float = 0.001,
    notch_filter_freq: float = 50,
)

Bases: HarpDeviceTypeTestSuite

Test suite for HARP Sniff Detector devices.

Provides tests specific to the Sniff Detector device, including signal quality analysis and breathing rate detection.

Attributes:

Name Type Description
harp_device

The HARP Sniff Detector device to test.

data DataFrame

The raw voltage data from the device.

fs float

The sampling frequency of the device.

quantization_ratio_thr

Threshold for the quantization ratio test.

clustering_thr

Threshold for the clustering ratio test.

clipping_thr

Threshold for the clipping detection test.

sudden_jumps_thr

Threshold for the sudden jumps detection test.

notch_filter_freq

Frequency (Hz) for the notch filter.

Examples:

from contraqctor.contract.harp import HarpDevice
from contraqctor.qc.harp import HarpSniffDetectorTestSuite
from contraqctor.qc.base import Runner

# Create and load the sniff detector device
device = HarpDevice("sniff", reader_params=params).load()

# Create the test suite with custom thresholds
suite = HarpSniffDetectorTestSuite(
    device,
    quantization_ratio_thr=0.1,
    clustering_thr=0.05,
    notch_filter_freq=60  # For 60Hz power
)

# Run tests
runner = Runner().add_suite(suite)
results = runner.run_all_with_progress()

Initialize the Sniff Detector test suite.

Parameters:

Name Type Description Default
harp_device HarpDevice

The HARP Sniff Detector device to test.

required
quantization_ratio_thr float

Threshold for the quantization ratio test. Defaults to 0.1.

0.1
clustering_thr float

Threshold for the clustering ratio test. Defaults to 0.05.

0.05
clipping_thr float

Threshold for the clipping detection test. Defaults to 0.05.

0.05
sudden_jumps_thr float

Threshold for the sudden jumps detection test. Defaults to 0.001.

0.001
notch_filter_freq float

Frequency (Hz) for the notch filter. Defaults to 50.

50
Source code in src/contraqctor/qc/harp.py
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
@override
def __init__(
    self,
    harp_device: HarpDevice,
    quantization_ratio_thr: float = 0.1,
    clustering_thr: float = 0.05,
    clipping_thr: float = 0.05,
    sudden_jumps_thr: float = 0.001,
    notch_filter_freq: float = 50,
):
    """Initialize the Sniff Detector test suite.

    Args:
        harp_device: The HARP Sniff Detector device to test.
        quantization_ratio_thr: Threshold for the quantization ratio test. Defaults to 0.1.
        clustering_thr: Threshold for the clustering ratio test. Defaults to 0.05.
        clipping_thr: Threshold for the clipping detection test. Defaults to 0.05.
        sudden_jumps_thr: Threshold for the sudden jumps detection test. Defaults to 0.001.
        notch_filter_freq: Frequency (Hz) for the notch filter. Defaults to 50.
    """
    super().__init__(harp_device)
    # NOTE(review): redundant if super().__init__ already assigns harp_device — confirm against base class.
    self.harp_device = harp_device
    # Keep only EVENT messages (the device's periodic samples), reducing to the RawVoltage column.
    self.data: pd.DataFrame = self.harp_device["RawVoltage"].data
    self.data = self.data[self.data["MessageType"] == "EVENT"]["RawVoltage"]
    # Nominal sampling rate taken from the last RawVoltageDispatchRate message.
    self.fs: float = self.harp_device["RawVoltageDispatchRate"].data.iloc[-1].values[0]
    self.quantization_ratio_thr = quantization_ratio_thr
    self.clustering_thr = clustering_thr
    self.clipping_thr = clipping_thr
    self.sudden_jumps_thr = sudden_jumps_thr
    self.notch_filter_freq = notch_filter_freq

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

Type Description
Optional[str]

Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

Name Type Description
str str

The name of the test suite class.

whoami property

whoami: int

Get the expected WhoAmI value for this device type.

Returns:

Name Type Description
int int

The expected WhoAmI identifier.

test_sniff_detector_sampling_rate

test_sniff_detector_sampling_rate()

Tests if the sampling rate of the sniff detector is within nominal values

Source code in src/contraqctor/qc/harp.py
490
491
492
493
494
495
496
497
498
499
def test_sniff_detector_sampling_rate(self):
    """Tests if the sampling rate of the sniff detector is within nominal values"""
    # Empirical sampling rate from inter-sample timestamp differences.
    period = self.data.index.diff().dropna()
    mean_period = np.mean(period)
    # Allow 0.1 Hz of deviation around the device-reported rate.
    if abs((dfps := (1.0 / mean_period)) - self.fs) > 0.1:
        return self.fail_test(
            dfps,
            f"Sampling rate is not within nominal values. Expected {self.fs} Hz but got {1.0 / mean_period:.2f} Hz",
        )
    return self.pass_test(dfps, f"Sampling rate is {dfps:.2f} Hz. Expected {self.fs} Hz")

test_sniff_detector_signal_quality

test_sniff_detector_signal_quality()

Tests the quality of the sniff detector signal by analyzing quantization, clustering, clipping, and sudden jumps.

Source code in src/contraqctor/qc/harp.py
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
def test_sniff_detector_signal_quality(self):
    """Tests the quality of the sniff detector signal by analyzing quantization, clustering, clipping, and sudden jumps."""
    metrics = {}
    TOTAL_SAMPLES = len(self.data)

    # Fraction of the ADC's full bit depth actually exercised by the signal.
    metrics["quantization_ratio"] = len(np.unique(self.data.values)) / self._FULL_BIT_DEPTH

    # Fraction of samples falling into the single most-populated histogram bin.
    hist, _ = np.histogram(self.data.values, bins=self._FULL_BIT_DEPTH)
    metrics["clustering_ratio"] = np.max(hist) / TOTAL_SAMPLES

    # Check for clipping:
    # Samples within 1% of the observed range of either extreme count as clipped.
    tol = (np.max(self.data) - np.min(self.data)) * 0.01

    metrics["min_clipping"] = np.sum(np.abs(self.data - np.min(self.data)) < tol) / TOTAL_SAMPLES
    metrics["max_clipping"] = np.sum(np.abs(self.data - np.max(self.data)) < tol) / TOTAL_SAMPLES

    # Check for weird discontinuities
    # NOTE(review): derivative has N-1 entries but is normalized by N; negligible for long recordings.
    derivative = np.diff(self.data.values) / np.diff(self.data.index)
    sudden_jumps_ratio = (np.sum(np.abs(derivative) > 3 * np.std(derivative))) / TOTAL_SAMPLES
    metrics["sudden_jumps_ratio"] = sudden_jumps_ratio

    # All thresholds must hold simultaneously for the signal to pass.
    is_ok = (
        metrics["quantization_ratio"] > self.quantization_ratio_thr
        and metrics["clustering_ratio"] < self.clustering_thr
        and metrics["min_clipping"] < self.clipping_thr
        and metrics["max_clipping"] < self.clipping_thr
        and metrics["sudden_jumps_ratio"] < self.sudden_jumps_thr
    )

    if is_ok:
        return self.pass_test(True, "Signal quality is good", context=metrics)
    else:
        return self.fail_test(
            False,
            "Signal quality is not good",
            context=metrics,
        )

test_sniff_detector_physiology

test_sniff_detector_physiology()

Tests if the sniff detector is actually detecting sniffs by analyzing peaks in the signal.

Source code in src/contraqctor/qc/harp.py
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
def test_sniff_detector_physiology(self):
    """Tests if the sniff detector is actually detecting sniffs by analyzing peaks in the signal.

    Resamples the raw voltage trace onto a uniform grid, filters it to the
    breathing band, detects peaks, and checks that the implied breathing rate
    is physiologically plausible.

    Returns:
        Result: PASSED if the breathing rate is within range, WARNING if it is
        out of range, FAILED if too few peaks were detected.
    """
    t = self.data.index.values
    signal = self.data.values
    dt = 1.0 / self.fs
    t_uniform = np.arange(t[0], t[-1], dt)

    # Resample onto a uniform grid so the IIR filters see evenly spaced samples.
    interp_func = interp1d(t, signal, kind="linear", bounds_error=False, fill_value="extrapolate")
    y_uniform = interp_func(t_uniform)

    # Notch out mains interference, then band-limit to 0.2-15 Hz.
    Q = 30.0
    b_notch, a_notch = iirnotch(self.notch_filter_freq, Q, self.fs)
    y_notch = filtfilt(b_notch, a_notch, y_uniform)

    b_high, a_high = butter(2, 0.2, "highpass", fs=self.fs)
    y_filtered = filtfilt(b_high, a_high, y_notch)

    b_low, a_low = butter(2, 15, "lowpass", fs=self.fs)
    y_filtered = filtfilt(b_low, a_low, y_filtered)

    peaks, _ = find_peaks(y_filtered, height=0.5 * np.std(y_filtered), prominence=2.5)

    # Create the asset and pass it in the context
    fig, axes = plt.subplots(2, 1, figsize=(10, 8))

    axes[0].plot(t_uniform, y_uniform, "b-")
    axes[0].plot(t_uniform[peaks], y_uniform[peaks], "ro")
    axes[0].set_title("Filtered Breathing Signal with Detected Peaks")
    axes[0].set_xlabel("Time (s)")
    axes[0].set_ylabel("Amplitude")
    middle_time = (t_uniform[0] + t_uniform[-1]) / 2
    axes[0].set_xlim(middle_time - 30 / 2, middle_time + 30 / 2)

    # Build the context before branching so the failure path can also export
    # the figure. Previously `context` was only assigned inside the success
    # branch, so the `else` below raised NameError instead of failing cleanly.
    context = ContextExportableObj.as_context(fig)

    if len(peaks) >= 2:
        ipi = np.diff(peaks) * dt
        breathing_rate = 1.0 / np.mean(ipi)
        metrics = {
            "num_peaks": len(peaks),
            "mean_ipi": np.mean(ipi),
            "std_ipi": np.std(ipi),
            "breathing_rate_hz": breathing_rate,
            # np.percentile takes q in [0, 100]; the previous 0.99/0.01 made
            # both bounds collapse onto (roughly) the shortest interval.
            # NOTE(review): since rate = 1/ipi, 1/percentile(ipi, 99) is the
            # *low*-rate bound — confirm the intended perc99/perc01 labeling.
            "perc99": 1.0 / np.percentile(ipi, 99),
            "perc01": 1.0 / np.percentile(ipi, 1),
        }

        axes[1].hist(ipi, bins=np.arange(0, 1, 0.025), alpha=0.7)
        axes[1].axvline(
            np.mean(ipi),
            color="r",
            linestyle="--",
            label=f"Mean: {np.mean(ipi):.3f}s ({breathing_rate * 60:.1f} BPM)",
        )
        axes[1].set_title("Histogram of Inter-Peak Intervals")
        axes[1].set_xlabel("Interval (s)")
        axes[1].set_ylabel("Count")
        axes[1].legend()

        fig.tight_layout()

        context.update(metrics)
        min_max_breathing_rate = (2, 10)  # in Hz
        if min_max_breathing_rate[0] <= breathing_rate <= min_max_breathing_rate[1]:
            return self.pass_test(metrics, f"Breathing rate is {breathing_rate} Hz", context=context)
        else:
            return self.warn_test(
                metrics,
                f"Breathing rate is {breathing_rate} Hz. Expected between {min_max_breathing_rate[0]} and {min_max_breathing_rate[1]} Hz",
                context=context,
            )

    else:
        return self.fail_test(
            {"num_peaks": len(peaks)}, "Failed to detect sufficient peaks in the breathing signal", context=context
        )

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

Name Type Description
ITest ITest

Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
321
322
323
324
325
326
327
328
329
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Find all methods starting with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    # Discover tests by naming convention, mirroring common test frameworks.
    for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        if name.startswith("test"):
            yield method

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test passed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a passing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    # Recover the calling test method's name and docstring from the call stack.
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.PASSED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

Name Type Description Default
result Any

The value to include in the test result.

None
message Optional[str]

Optional message describing the warning.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a warning test result.

    Produces a WARNING result, unless warnings are currently elevated
    (via the `_elevate_warning` context variable), in which case the
    result is FAILED instead.

    Args:
        result: Value to attach to the test result.
        message: Optional note describing the warning.
        context: Optional extra data to carry alongside the result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation promotes warnings to hard failures for strict runs.
    status = Status.FAILED if _elevate_warning.get() else Status.WARNING
    return Result(
        status=status,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

Name Type Description Default
result Optional[Any]

The value to include in the test result.

None
message Optional[str]

Optional message describing why the test failed.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Build a Result that marks the calling test as FAILED.

    Args:
        result: Value to attach to the test result.
        message: Optional note explaining why the test failed.
        context: Optional extra data to carry alongside the result.

    Returns:
        Result: A Result object with FAILED status.
    """
    # Attribute the result to the test method that called this helper.
    test_name, test_description = self._get_caller_info()
    return Result(
        status=Status.FAILED,
        result=result,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

Name Type Description Default
message Optional[str]

Optional message explaining why the test was skipped.

None
context Optional[Any]

Optional contextual data for the test result.

None

Returns:

Name Type Description
Result Result

A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Build a skipped test result.

    Produces a SKIPPED result, unless skips are currently elevated
    (via the `_elevate_skippable` context variable), in which case the
    result is FAILED instead.

    Args:
        message: Optional note explaining why the test was skipped.
        context: Optional extra data to carry alongside the result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    test_name, test_description = self._get_caller_info()
    # Elevation turns skips into failures for strict runs.
    status = Status.FAILED if _elevate_skippable.get() else Status.SKIPPED
    return Result(
        status=status,
        result=None,
        test_name=test_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=test_description,
    )

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
685
686
687
688
689
690
691
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
693
694
695
696
697
698
699
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    pass

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

Name Type Description Default
test_method ITest

The test method to run.

required

Yields:

Name Type Description
Result Result

Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Execute one test method, yielding each Result it produces.

    Wraps the call with `setup()`/`teardown()`, normalizes both plain
    and generator-style test methods, and converts any raised exception
    into a single ERROR Result.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        outcome = test_method()
        # A test may return one value or yield several; treat both
        # uniformly as an iterable of raw results.
        raw_results = outcome if inspect.isgenerator(outcome) else (outcome,)
        for raw in raw_results:
            yield self._process_test_result(raw, test_method, test_name, test_description)
    except Exception as e:
        # Any exception (from setup, the test, or result processing)
        # becomes an ERROR result carrying the traceback for debugging.
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        # Teardown always runs, even when the test errored.
        self.teardown()

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

Name Type Description
Result Result

Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
785
786
787
788
789
790
791
792
793
794
def run_all(self) -> t.Generator[Result, None, None]:
    """Execute every test method discovered in this suite.

    Yields:
        Result: Result objects produced by each test method, in
            discovery order.
    """
    for test_method in self.get_tests():
        yield from self.run_test(test_method)

test_whoami

test_whoami()

Check if the WhoAmI value is correct

Source code in src/contraqctor/qc/harp.py
406
407
408
409
410
411
412
413
414
def test_whoami(self):
    """Check if the WhoAmI value is correct"""
    # EAFP: a missing "WhoAmI" stream (or column) surfaces as KeyError.
    try:
        who_am_i = self.harp_device["WhoAmI"].data["WhoAmI"].iloc[-1]
    except KeyError:
        return self.fail_test(None, "WhoAmI data stream is not present")
    if who_am_i == self.whoami:
        return self.pass_test(True, f"WhoAmI value is {who_am_i} as expected")
    return self.fail_test(False, f"Expected WhoAmI value {self.whoami} but got {who_am_i}")