qc.camera

CameraTestSuite

CameraTestSuite(
    data_stream: Camera,
    *,
    expected_fps: Optional[int] = None,
    clock_jitter_s: float = 0.0001,
    start_time_s: Optional[float] = None,
    stop_time_s: Optional[float] = None,
)

Bases: Suite

Test suite for validating camera data integrity.

Provides tests for validating video and metadata integrity according to the AIND file format specification for behavior videos.

For more details, see: https://github.com/AllenNeuralDynamics/aind-file-standards/blob/ce0aa517a40064d1ac9764d42c9efe4ae5c61f7b/file_formats/behavior_videos.md

Attributes:

    data_stream (Camera): The Camera data stream to test.
    expected_fps (Optional[int]): Optional expected frames per second for validation.
    clock_jitter_s (float): Maximum allowed time difference between frame timestamps, in seconds.
    start_time_s (Optional[float]): Optional expected start time for validation, in seconds.
    stop_time_s (Optional[float]): Optional expected stop time for validation, in seconds.

Examples:

from contraqctor.contract.camera import Camera, CameraParams
from contraqctor.qc.camera import CameraTestSuite
from contraqctor.qc.base import Runner

# Create and load a camera data stream
params = CameraParams(path="recordings/session1/")
camera_stream = Camera("front_camera", reader_params=params).load()

# Create test suite with validation parameters
suite = CameraTestSuite(
    camera_stream,
    expected_fps=30,
    start_time_s=10.0,
    stop_time_s=310.0
)

# Run tests
runner = Runner().add_suite(suite)
results = runner.run_all_with_progress()
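
The suite can also be consumed without a Runner by iterating run_all() directly; a minimal sketch, using the suite built above:

# Iterate results lazily instead of using a Runner
for result in suite.run_all():
    print(result.suite_name, result.test_name, result.status, result.message)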

Initialize the camera test suite.

Parameters:

    data_stream (Camera): The Camera data stream to test. Required.
    expected_fps (Optional[int]): Optional expected frames per second for validation. Default: None.
    clock_jitter_s (float): Maximum allowed time difference between frame timestamps, in seconds. Default: 0.0001.
    start_time_s (Optional[float]): Optional expected start time for validation, in seconds. Default: None.
    stop_time_s (Optional[float]): Optional expected stop time for validation, in seconds. Default: None.
Source code in src/contraqctor/qc/camera.py
def __init__(
    self,
    data_stream: Camera,
    *,
    expected_fps: t.Optional[int] = None,
    clock_jitter_s: float = 1e-4,
    start_time_s: t.Optional[float] = None,
    stop_time_s: t.Optional[float] = None,
):
    """Initialize the camera test suite.

    Args:
        data_stream: The Camera data stream to test.
        expected_fps: Optional expected frames per second for validation.
        clock_jitter_s: Maximum allowed time difference between frame timestamps, in seconds.
        start_time_s: Optional expected start time for validation, in seconds.
        stop_time_s: Optional expected stop time for validation, in seconds.
    """
    self.data_stream: Camera = data_stream
    self.expected_fps = expected_fps
    self.clock_jitter_s = clock_jitter_s
    self.start_time_s = start_time_s
    self.stop_time_s = stop_time_s

description property

description: Optional[str]

Get the description of the test suite from its docstring.

Returns:

    Optional[str]: The docstring of the class, or None if not available.

name property

name: str

Get the name of the test suite.

Returns:

    str: The name of the test suite class.

test_metadata_shape

test_metadata_shape()

Checks if the metadata DataFrame has the expected shape, including headers.

Source code in src/contraqctor/qc/camera.py
def test_metadata_shape(self):
    """
    Checks if the metadata DataFrame has the expected shape. Including headers.
    """
    if not self.data_stream.has_data:
        return self.fail_test(None, "Data stream does not have loaded data")
    metadata = self.data_stream.data.metadata
    if not isinstance(metadata, pd.DataFrame):
        return self.fail_test(None, "Metadata is not a pandas DataFrame")

    metadata_cols = list(metadata.columns) + [metadata.index.name]
    if not all(col in metadata_cols for col in self._expected_columns):
        missing_columns = self._expected_columns - set(metadata_cols)
        return self.fail_test(None, f"Metadata columns do not match expected columns. Missing: {missing_columns}")
    if metadata.empty:
        return self.fail_test(None, "Metadata DataFrame is empty")
    return self.pass_test(None, "Metadata DataFrame has expected shape and columns")
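
Individual tests can also be invoked directly on the suite; each returns a Result. A quick sketch, reusing the suite from the example at the top:

# Run a single test and inspect its outcome
result = suite.test_metadata_shape()
print(result.status, result.message)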

test_check_dropped_frames

test_check_dropped_frames()

Check if there are dropped frames in the metadata DataFrame.

Source code in src/contraqctor/qc/camera.py
def test_check_dropped_frames(self):
    """
    Check if there are dropped frames in the metadata DataFrame.
    """
    metadata = (self.data_stream.data.metadata[list(self._expected_columns - {"ReferenceTime"})]).copy()
    metadata.loc[:, "ReferenceTime"] = metadata.index.values
    diff_metadata = metadata.diff()
    # Convert CameraFrameTime to seconds
    diff_metadata["CameraFrameTime"] = diff_metadata["CameraFrameTime"] * 1e-9

    if not all(diff_metadata["CameraFrameNumber"].dropna() == 1):
        return self.fail_test(
            None, f"Detected {sum(diff_metadata['CameraFrameNumber'].dropna() - 1)} dropped frames metadata."
        )

    inter_clock_diff = diff_metadata["CameraFrameTime"] - diff_metadata["ReferenceTime"]
    if not all(inter_clock_diff.dropna() < self.clock_jitter_s):
        return self.fail_test(
            None,
            f"Detected a difference between CameraFrameTime and ReferenceTime greater than the expected threshold: {self.clock_jitter_s} s.",
        )
    return self.pass_test(None, "No dropped frames detected in metadata.")
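
The dropped-frame detection reduces to a first difference over CameraFrameNumber: any gap larger than 1 indicates missing frames. A standalone sketch of the same arithmetic on toy data:

import pandas as pd

# Frame 3 is missing from the sequence
frame_numbers = pd.Series([0, 1, 2, 4, 5])
diffs = frame_numbers.diff().dropna()
n_dropped = int((diffs - 1).sum())  # 1 dropped frame
assert not all(diffs == 1)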

test_match_expected_fps

test_match_expected_fps()

Check if the frames per second (FPS) of the video metadata matches the expected FPS.

Source code in src/contraqctor/qc/camera.py
def test_match_expected_fps(self):
    """
    Check if the frames per second (FPS) of the video metadata matches the expected FPS."""
    if self.expected_fps is None:
        return self.skip_test("No expected FPS provided, skipping test.")
    period = np.diff(self.data_stream.data.metadata.index.values)
    if np.std(period) > 1e-4:
        return self.fail_test(None, f"High std in frame period detected: {np.std(period)}")
    _mean = np.mean(period)
    _expected = 1.0 / self.expected_fps
    if abs(_mean - _expected) > (_expected * 0.01):
        return self.fail_test(None, f"Mean frame period ({_mean}) is different than expected: {_expected}")

    return self.pass_test(None, f"Mean frame period ({_mean}) is within expected range: {_expected}")
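
The comparison tolerates a 1% deviation from the expected frame period. Worked numbers for expected_fps=30:

expected_period = 1.0 / 30          # ~0.0333 s per frame
tolerance = expected_period * 0.01  # ~0.33 ms allowed deviation of the mean period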

test_is_start_bounded

test_is_start_bounded()

Check if the start time of the video is bounded by the provided start time.

Source code in src/contraqctor/qc/camera.py
def test_is_start_bounded(self):
    """
    Check if the start time of the video is bounded by the provided start time."""
    metadata = self.data_stream.data.metadata
    if self.start_time_s is not None:
        if metadata.index[0] < self.start_time_s:
            return self.fail_test(
                None,
                f"Start time is not bounded. First frame time: {metadata.index[0]}, expected start time: {self.start_time_s}",
            )
        else:
            return self.pass_test(
                None,
                f"Start time is bounded. First frame time: {metadata.index[0]}, expected start time: {self.start_time_s}",
            )
    else:
        return self.skip_test("No start time provided, skipping test.")

test_is_stop_bounded

test_is_stop_bounded()

Check if the stop time of the video is bounded by the provided stop time.

Source code in src/contraqctor/qc/camera.py
def test_is_stop_bounded(self):
    """
    Check if the stop time of the video is bounded by the provided stop time."""
    metadata = self.data_stream.data.metadata
    if self.stop_time_s is not None:
        if metadata.index[-1] > self.stop_time_s:
            return self.fail_test(
                None,
                f"Stop time is not bounded. Last frame time: {metadata.index[-1]}, expected stop time: {self.stop_time_s}",
            )
        else:
            return self.pass_test(
                None,
                f"Stop time is bounded. Last frame time: {metadata.index[-1]}, expected stop time: {self.stop_time_s}",
            )
    else:
        return self.skip_test("No stop time provided, skipping test.")
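
Together, the two bound tests assert that every frame timestamp falls inside [start_time_s, stop_time_s]; equivalently, as a sketch:

# Both bound tests pass iff this holds for the metadata index
metadata = suite.data_stream.data.metadata
in_bounds = (suite.start_time_s <= metadata.index[0]) and (metadata.index[-1] <= suite.stop_time_s)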

test_video_frame_count

test_video_frame_count()

Check if the number of frames in the video matches the number of rows in the metadata DataFrame.

Source code in src/contraqctor/qc/camera.py
def test_video_frame_count(self):
    """
    Check if the number of frames in the video matches the number of rows in the metadata DataFrame.
    """
    data = self.data_stream.data
    if not data.has_video:
        return self.skip_test("No video data available. Skipping test.")

    if (n_frames := data.video_frame_count) != len(data.metadata):
        return self.fail_test(
            None,
            f"Number of frames in video ({n_frames}) does not match number of rows in metadata ({len(data.metadata)})",
        )
    else:
        return self.pass_test(
            None,
            f"Number of frames in video ({n_frames}) matches number of rows in metadata ({len(data.metadata)})",
        )

test_histogram_and_create_asset

test_histogram_and_create_asset()

Checks the histogram of the video and ensures color is well distributed. It also saves an asset containing a single frame of the video and its color histogram.

Source code in src/contraqctor/qc/camera.py
def test_histogram_and_create_asset(self):
    """Checks the histogram of the video and ensures color is well distributed.
    It also saves an asset with a single frame of the video and color histogram."""

    data = self.data_stream.data
    if not data.has_video:
        return self.skip_test("No video data available. Skipping test.")

    with data.as_video_capture() as video:
        video.set(cv2.CAP_PROP_POS_FRAMES, video.get(cv2.CAP_PROP_FRAME_COUNT) // 2)
        ret, frame = video.read()

        if not ret:
            return self.fail_test(None, "Failed to read a frame from the video")
        max_d = 2 ** (frame.dtype.itemsize * 8)

        if frame.shape[2] == 1:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        elif frame.shape[2] == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            return self.fail_test(None, f"Frame has unexpected number of channels({frame.shape[2]}).")

        hist_r = cv2.calcHist([frame], [0], None, [max_d], [0, max_d])
        hist_g = cv2.calcHist([frame], [1], None, [max_d], [0, max_d])
        hist_b = cv2.calcHist([frame], [2], None, [max_d], [0, max_d])

        hist_r /= hist_r.sum()
        hist_g /= hist_g.sum()
        hist_b /= hist_b.sum()

        fig, ax = plt.subplots(1, 2, figsize=(15, 5))

        ax[0].imshow(frame)
        ax[0].axis("off")
        ax[0].set_title("Frame from video")
        ax[1].plot(hist_r, color="red", label="Red")
        ax[1].plot(hist_g, color="green", label="Green")
        ax[1].plot(hist_b, color="blue", label="Blue")
        ax[1].set_xlim([0, max_d])
        ax[1].set_xlabel("Pixel Value")
        ax[1].set_ylabel("Normalized Frequency")
        ax[1].set_title("Color Histogram")
        ax[1].legend()
        fig.tight_layout()

        return self.pass_test(
            None, "Histogram and asset created successfully.", context=ContextExportableObj.as_context(fig)
        )
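
The histogram resolution follows the frame's bit depth: for a standard 8-bit frame, frame.dtype.itemsize is 1, so max_d is 256 bins spanning pixel values [0, 256). A quick check of that arithmetic:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # typical 8-bit BGR frame
max_d = 2 ** (frame.dtype.itemsize * 8)          # 256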

get_tests

get_tests() -> Generator[ITest, None, None]

Find all methods starting with 'test'.

Yields:

    ITest: Test methods found in the suite.

Source code in src/contraqctor/qc/base.py
def get_tests(self) -> t.Generator[ITest, None, None]:
    """Find all methods starting with 'test'.

    Yields:
        ITest: Test methods found in the suite.
    """
    for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
        if name.startswith("test"):
            yield method
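
Because discovery is purely name-based, any bound method whose name starts with "test" is collected. A sketch listing what would run, reusing the suite from the example at the top:

# Enumerate the tests that run_all() will execute
for test in suite.get_tests():
    print(test.__name__)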

pass_test

pass_test() -> Result
pass_test(result: Any) -> Result
pass_test(result: Any, message: str) -> Result
pass_test(result: Any, *, context: Any) -> Result
pass_test(
    result: Any, message: str, *, context: Any
) -> Result
pass_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a passing test result.

Parameters:

    result (Any): The value to include in the test result. Default: None.
    message (Optional[str]): Optional message describing why the test passed. Default: None.
    context (Optional[Any]): Optional contextual data for the test result. Default: None.

Returns:

    Result: A Result object with PASSED status.

Source code in src/contraqctor/qc/base.py
def pass_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a passing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test passed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with PASSED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.PASSED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )
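
The pass/fail helpers are the usual way to conclude a test in a custom suite; a minimal sketch, assuming Suite is importable from contraqctor.qc.base as in the example at the top:

from contraqctor.qc.base import Suite

class MySuite(Suite):
    def test_answer(self):
        value = 42
        if value == 42:
            return self.pass_test(value, "The answer checks out")
        return self.fail_test(value, "Unexpected answer")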

warn_test

warn_test() -> Result
warn_test(result: Any) -> Result
warn_test(result: Any, message: str) -> Result
warn_test(result: Any, *, context: Any) -> Result
warn_test(
    result: Any, message: str, *, context: Any
) -> Result
warn_test(
    result: Any = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a warning test result.

Creates a result with WARNING status, or FAILED if warnings are elevated.

Parameters:

    result (Any): The value to include in the test result. Default: None.
    message (Optional[str]): Optional message describing the warning. Default: None.
    context (Optional[Any]): Optional contextual data for the test result. Default: None.

Returns:

    Result: A Result object with WARNING or FAILED status.

Source code in src/contraqctor/qc/base.py
def warn_test(
    self, result: t.Any = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a warning test result.

    Creates a result with WARNING status, or FAILED if warnings are elevated.

    Args:
        result: The value to include in the test result.
        message: Optional message describing the warning.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with WARNING or FAILED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.WARNING if not _elevate_warning.get() else Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

fail_test

fail_test() -> Result
fail_test(result: Any) -> Result
fail_test(result: Any, message: str) -> Result
fail_test(
    result: Any, message: str, *, context: Any
) -> Result
fail_test(
    result: Optional[Any] = None,
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a failing test result.

Parameters:

    result (Optional[Any]): The value to include in the test result. Default: None.
    message (Optional[str]): Optional message describing why the test failed. Default: None.
    context (Optional[Any]): Optional contextual data for the test result. Default: None.

Returns:

    Result: A Result object with FAILED status.

Source code in src/contraqctor/qc/base.py
def fail_test(
    self, result: t.Optional[t.Any] = None, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None
) -> Result:
    """Create a failing test result.

    Args:
        result: The value to include in the test result.
        message: Optional message describing why the test failed.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with FAILED status.
    """
    calling_func_name, description = self._get_caller_info()

    return Result(
        status=Status.FAILED,
        result=result,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )

skip_test

skip_test() -> Result
skip_test(message: str) -> Result
skip_test(message: str, *, context: Any) -> Result
skip_test(
    message: Optional[str] = None,
    *,
    context: Optional[Any] = None,
) -> Result

Create a skipped test result.

Creates a result with SKIPPED status, or FAILED if skips are elevated.

Parameters:

    message (Optional[str]): Optional message explaining why the test was skipped. Default: None.
    context (Optional[Any]): Optional contextual data for the test result. Default: None.

Returns:

    Result: A Result object with SKIPPED or FAILED status.

Source code in src/contraqctor/qc/base.py
def skip_test(self, message: t.Optional[str] = None, *, context: t.Optional[t.Any] = None) -> Result:
    """Create a skipped test result.

    Creates a result with SKIPPED status, or FAILED if skips are elevated.

    Args:
        message: Optional message explaining why the test was skipped.
        context: Optional contextual data for the test result.

    Returns:
        Result: A Result object with SKIPPED or FAILED status.
    """
    calling_func_name, description = self._get_caller_info()
    return Result(
        status=Status.SKIPPED if not _elevate_skippable.get() else Status.FAILED,
        result=None,
        test_name=calling_func_name,
        suite_name=self.name,
        message=message,
        context=context,
        description=description,
    )
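
skip_test is typically used as a guard clause when a precondition for the test is absent, mirroring the camera tests above; a sketch:

def test_needs_fps(self):
    if self.expected_fps is None:
        return self.skip_test("No expected FPS provided, skipping test.")
    ...  # proceed with FPS-dependent checks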

setup

setup() -> None

Run before each test method.

This method can be overridden by subclasses to implement setup logic that runs before each test.

Source code in src/contraqctor/qc/base.py
def setup(self) -> None:
    """Run before each test method.

    This method can be overridden by subclasses to implement
    setup logic that runs before each test.
    """
    pass

teardown

teardown() -> None

Run after each test method.

This method can be overridden by subclasses to implement teardown logic that runs after each test.

Source code in src/contraqctor/qc/base.py
def teardown(self) -> None:
    """Run after each test method.

    This method can be overridden by subclasses to implement
    teardown logic that runs after each test.
    """
    pass
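
The two hooks pair naturally for per-test resources; a hypothetical sketch of a subclass that opens and closes a file around every test (the resource and path are illustrative, not part of the library):

class FileBackedSuite(Suite):
    def setup(self) -> None:
        self._fh = open("data.bin", "rb")  # hypothetical per-test resource

    def teardown(self) -> None:
        self._fh.close()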

run_test

run_test(
    test_method: ITest,
) -> Generator[Result, None, None]

Run a single test method and yield its results.

Handles setup, test execution, result processing, and teardown.

Parameters:

    test_method (ITest): The test method to run. Required.

Yields:

    Result: Result objects produced by the test method.

Source code in src/contraqctor/qc/base.py
def run_test(self, test_method: ITest) -> t.Generator[Result, None, None]:
    """Run a single test method and yield its results.

    Handles setup, test execution, result processing, and teardown.

    Args:
        test_method: The test method to run.

    Yields:
        Result: Result objects produced by the test method.
    """
    test_name = test_method.__name__
    suite_name = self.name
    test_description = getattr(test_method, "__doc__", None)

    try:
        self.setup()
        result = test_method()
        if inspect.isgenerator(result):
            for sub_result in result:
                yield self._process_test_result(sub_result, test_method, test_name, test_description)
        else:
            yield self._process_test_result(result, test_method, test_name, test_description)
    except Exception as e:
        tb = traceback.format_exc()
        yield Result(
            status=Status.ERROR,
            result=None,
            test_name=test_name,
            suite_name=suite_name,
            description=test_description,
            message=f"Error during test execution: {str(e)}",
            exception=e,
            traceback=tb,
            test_reference=test_method,
            suite_reference=self,
        )
    finally:
        self.teardown()
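
Because run_test handles generator results, a single test method may yield several Results, each processed independently; a sketch:

def test_each_channel(self):
    for channel in ("red", "green", "blue"):
        yield self.pass_test(channel, f"{channel} channel OK")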

run_all

run_all() -> Generator[Result, None, None]

Run all test methods in the suite.

Finds all test methods and runs them in sequence.

Yields:

    Result: Result objects produced by all test methods.

Source code in src/contraqctor/qc/base.py
def run_all(self) -> t.Generator[Result, None, None]:
    """Run all test methods in the suite.

    Finds all test methods and runs them in sequence.

    Yields:
        Result: Result objects produced by all test methods.
    """
    for test in self.get_tests():
        yield from self.run_test(test)
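
run_all also makes it easy to aggregate outcomes without a Runner; a sketch summarizing statuses for the suite from the example at the top:

from collections import Counter

status_counts = Counter(result.status for result in suite.run_all())
print(status_counts)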