rig

pydantic model aind_behavior_services.rig.AindBehaviorRigModel[source]

Bases: SchemaVersionedModel

Fields:
Validators:

field computer_name: str [Optional][source]

Computer name

field rig_name: str [Required][source]

Rig name
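
A minimal sketch of defining a concrete rig schema, assuming (as in other aind-behavior-services schemas) that SchemaVersionedModel contributes a pinned semantic version field; the device field and port value below are hypothetical:

    from typing import Literal

    from aind_behavior_services.rig import AindBehaviorRigModel, HarpBehavior

    class MyRig(AindBehaviorRigModel):
        # Assumption: subclasses pin the schema version as a Literal.
        version: Literal["0.1.0"] = "0.1.0"
        behavior_board: HarpBehavior

    rig = MyRig(rig_name="rig-01", behavior_board=HarpBehavior(port_name="COM4"))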

pydantic model aind_behavior_services.rig.CameraController[source]

Bases: Device, Generic[TCamera]

Fields:
field cameras: Dict[str, TCamera] [Required][source]

Cameras to be instantiated

field device_type: Literal['CameraController'] = 'CameraController'[source]
field frame_rate: int | None = 30[source]

Frame rate of the trigger sent to all cameras

Constraints:
  • ge = 0
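
Since CameraController is generic over the camera type, it is parameterized at the point of use; a sketch with hypothetical serial numbers:

    from aind_behavior_services.rig import CameraController, SpinnakerCamera

    controller = CameraController[SpinnakerCamera](
        frame_rate=60,  # one trigger rate shared by all cameras
        cameras={
            "face": SpinnakerCamera(serial_number="23181234"),
            "body": SpinnakerCamera(serial_number="23185678"),
        },
    )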

pydantic model aind_behavior_services.rig.ConnectedClockOutput[source]

Bases: BaseModel

Fields:
field output_channel: int [Required][source]

Output channel

Constraints:
  • ge = 0

field target_device: str | None = None[source]

Optional device name, to provide the user with additional information

pydantic model aind_behavior_services.rig.Device[source]

Bases: BaseModel

Fields:
field additional_settings: BaseModel | None = None[source]

Additional settings

field calibration: BaseModel | None = None[source]

Calibration

field device_type: str [Required][source]

Device type
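
New device types are typically defined by subclassing Device and narrowing device_type to a Literal discriminator, as the models below do; a sketch with a purely hypothetical device:

    from typing import Literal

    from aind_behavior_services.rig import Device

    class UsbRelay(Device):  # hypothetical device, for illustration only
        device_type: Literal["UsbRelay"] = "UsbRelay"
        channel_count: int = 8  # hypothetical extra setting

    relay = UsbRelay()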

pydantic model aind_behavior_services.rig.DisplayCalibration[source]

Bases: BaseModel

Fields:
field extrinsics: DisplayExtrinsics = DisplayExtrinsics(rotation=Vector3(x=0.0, y=0.0, z=0.0), translation=Vector3(x=0.0, y=1.309016, z=-13.27))[source]

Extrinsics

field intrinsics: DisplayIntrinsics = DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15)[source]

Intrinsics

pydantic model aind_behavior_services.rig.DisplayExtrinsics[source]

Bases: BaseModel

Fields:
field rotation: Vector3 = Vector3(x=0.0, y=0.0, z=0.0)[source]

Rotation vector (radians)

field translation: Vector3 = Vector3(x=0.0, y=1.309016, z=-13.27)[source]

Translation (in cm)

pydantic model aind_behavior_services.rig.DisplayIntrinsics[source]

Bases: BaseModel

Fields:
field display_height: float = 15[source]

Display height (cm)

Constraints:
  • ge = 0

field display_width: float = 20[source]

Display width (cm)

Constraints:
  • ge = 0

field frame_height: int = 1080[source]

Frame height (px)

Constraints:
  • ge = 0

field frame_width: int = 1920[source]

Frame width (px)

Constraints:
  • ge = 0

pydantic model aind_behavior_services.rig.DisplaysCalibration[source]

Bases: BaseModel

Fields:
field center: DisplayCalibration = DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=0.0, z=0.0), translation=Vector3(x=0.0, y=1.309016, z=-13.27)))[source]

Center display calibration

field left: DisplayCalibration = DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=1.0472, z=0.0), translation=Vector3(x=-16.6917756, y=1.309016, z=-3.575264)))[source]

Left display calibration

field right: DisplayCalibration = DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=-1.0472, z=0.0), translation=Vector3(x=16.6917756, y=1.309016, z=-3.575264)))[source]

Right display calibration
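
All three displays default to the geometry shown above, so only deviations need to be specified; a sketch that overrides just the center screen's translation (values illustrative):

    from aind_behavior_services.rig import (
        DisplayCalibration,
        DisplayExtrinsics,
        DisplaysCalibration,
        Vector3,
    )

    calibration = DisplaysCalibration(
        center=DisplayCalibration(
            extrinsics=DisplayExtrinsics(
                translation=Vector3(x=0.0, y=1.309016, z=-15.0),
            )
        )
    )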

aind_behavior_services.rig.FFMPEG_INPUT = '-colorspace bt709 -color_primaries bt709 -color_range full -color_trc linear'[source]

Default input arguments

aind_behavior_services.rig.FFMPEG_OUTPUT_16BIT = '-vf "scale=out_color_matrix=bt709:out_range=full,format=rgb48le,scale=out_range=full" -c:v hevc_nvenc -pix_fmt p010le -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p4 -rc vbr -cq 12 -b:v 0M -metadata author="Allen Institute for Neural Dynamics" -maxrate 700M -bufsize 350M'[source]

Default output arguments for 16-bit video encoding

aind_behavior_services.rig.FFMPEG_OUTPUT_8BIT = '-vf "scale=out_color_matrix=bt709:out_range=full,format=bgr24,scale=out_range=full" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p4 -rc vbr -cq 12 -b:v 0M -metadata author="Allen Institute for Neural Dynamics" -maxrate 700M -bufsize 350M'[source]

Default output arguments for 8-bit video encoding
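
These constants are plain strings mirroring the VideoWriterFfmpeg defaults, so they can be passed or adapted directly; for example:

    from aind_behavior_services.rig import FFMPEG_OUTPUT_16BIT, VideoWriterFfmpeg

    # Switch a writer to the 16-bit HEVC pipeline.
    writer = VideoWriterFfmpeg(output_arguments=FFMPEG_OUTPUT_16BIT)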

pydantic model aind_behavior_services.rig.HarpAnalogInput[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.ANALOGINPUT] = HarpDeviceType.ANALOGINPUT[source]
field who_am_i: Literal[1236] = 1236[source]
pydantic model aind_behavior_services.rig.HarpBehavior[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.BEHAVIOR] = HarpDeviceType.BEHAVIOR[source]
field who_am_i: Literal[1216] = 1216[source]
pydantic model aind_behavior_services.rig.HarpClockGenerator[source]

Bases: HarpDeviceGeneric

Fields:
Validators:
  • validate_connected_clock_outputs » connected_clock_outputs
field connected_clock_outputs: List[ConnectedClockOutput] = [][source]

Connected clock outputs

Validated by:
  • validate_connected_clock_outputs
field device_type: Literal[HarpDeviceType.CLOCKGENERATOR] = HarpDeviceType.CLOCKGENERATOR[source]
field who_am_i: Literal[1158] = 1158[source]
validator validate_connected_clock_outputs  »  connected_clock_outputs[source]
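
A sketch of wiring clock outputs, pairing this model with ConnectedClockOutput (port and channel values hypothetical; the validator above presumably checks the consistency of the output list):

    from aind_behavior_services.rig import ConnectedClockOutput, HarpClockGenerator

    clock_generator = HarpClockGenerator(
        port_name="COM7",
        connected_clock_outputs=[
            ConnectedClockOutput(output_channel=0, target_device="behavior_board"),
            ConnectedClockOutput(output_channel=1, target_device="sound_card"),
        ],
    )
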
pydantic model aind_behavior_services.rig.HarpClockSynchronizer[source]

Bases: HarpDeviceGeneric

Fields:
Validators:
  • validate_connected_clock_outputs » connected_clock_outputs
field connected_clock_outputs: List[ConnectedClockOutput] = [][source]

Connected clock outputs

Validated by:
  • validate_connected_clock_outputs
field device_type: Literal[HarpDeviceType.CLOCKSYNCHRONIZER] = HarpDeviceType.CLOCKSYNCHRONIZER[source]
field who_am_i: Literal[1152] = 1152[source]
validator validate_connected_clock_outputs  »  connected_clock_outputs[source]
pydantic model aind_behavior_services.rig.HarpCuttlefish[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.CUTTLEFISH] = HarpDeviceType.CUTTLEFISH[source]
field who_am_i: Literal[1403] = 1403[source]
pydantic model aind_behavior_services.rig.HarpDeviceGeneric[source]

Bases: Device

Fields:
field device_type: Literal[HarpDeviceType.GENERIC] = HarpDeviceType.GENERIC[source]
field port_name: str [Required][source]

Device port name

field serial_number: str | None = None[source]

Device serial number

field who_am_i: int | None = None[source]

Device WhoAmI

Constraints:
  • ge = 0

  • le = 9999
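
The subclasses below each pin device_type and who_am_i; the generic base can also be used directly when only the port matters (values hypothetical):

    from aind_behavior_services.rig import HarpDeviceGeneric

    device = HarpDeviceGeneric(port_name="COM3", serial_number="SN-0001")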

class aind_behavior_services.rig.HarpDeviceType(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]

Bases: str, Enum

ANALOGINPUT = 'analoginput'[source]
BEHAVIOR = 'behavior'[source]
CLOCKGENERATOR = 'clockgenerator'[source]
CLOCKSYNCHRONIZER = 'clocksynchronizer'[source]
CUTTLEFISH = 'cuttlefish'[source]
ENVIRONMENTSENSOR = 'environmentsensor'[source]
GENERIC = 'generic'[source]
LICKOMETER = 'lickometer'[source]
LOADCELLS = 'loadcells'[source]
OLFACTOMETER = 'olfactometer'[source]
SNIFFDETECTOR = 'sniffdetector'[source]
SOUNDCARD = 'soundcard'[source]
STEPPERDRIVER = 'stepperdriver'[source]
TREADMILL = 'treadmill'[source]
WHITERABBIT = 'whiterabbit'[source]
pydantic model aind_behavior_services.rig.HarpEnvironmentSensor[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.ENVIRONMENTSENSOR] = HarpDeviceType.ENVIRONMENTSENSOR[source]
field who_am_i: Literal[1405] = 1405[source]
pydantic model aind_behavior_services.rig.HarpLickometer[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.LICKOMETER] = HarpDeviceType.LICKOMETER[source]
field who_am_i: Literal[1400] = 1400[source]
pydantic model aind_behavior_services.rig.HarpLoadCells[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.LOADCELLS] = HarpDeviceType.LOADCELLS[source]
field who_am_i: Literal[1232] = 1232[source]
pydantic model aind_behavior_services.rig.HarpOlfactometer[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.OLFACTOMETER] = HarpDeviceType.OLFACTOMETER[source]
field who_am_i: Literal[1140] = 1140[source]
pydantic model aind_behavior_services.rig.HarpSniffDetector[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.SNIFFDETECTOR] = HarpDeviceType.SNIFFDETECTOR[source]
field who_am_i: Literal[1401] = 1401[source]
pydantic model aind_behavior_services.rig.HarpSoundCard[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.SOUNDCARD] = HarpDeviceType.SOUNDCARD[source]
field who_am_i: Literal[1280] = 1280[source]
pydantic model aind_behavior_services.rig.HarpStepperDriver[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.STEPPERDRIVER] = HarpDeviceType.STEPPERDRIVER[source]
field who_am_i: Literal[1130] = 1130[source]
pydantic model aind_behavior_services.rig.HarpTreadmill[source]

Bases: HarpDeviceGeneric

Fields:
field device_type: Literal[HarpDeviceType.TREADMILL] = HarpDeviceType.TREADMILL[source]
field who_am_i: Literal[1402] = 1402[source]
pydantic model aind_behavior_services.rig.HarpWhiteRabbit[source]

Bases: HarpDeviceGeneric

Fields:
Validators:
  • validate_connected_clock_outputs » connected_clock_outputs
field connected_clock_outputs: List[ConnectedClockOutput] = [][source]

Connected clock outputs

Validated by:
  • validate_connected_clock_outputs
field device_type: Literal[HarpDeviceType.WHITERABBIT] = HarpDeviceType.WHITERABBIT[source]
field who_am_i: Literal[1404] = 1404[source]
validator validate_connected_clock_outputs  »  connected_clock_outputs[source]
pydantic model aind_behavior_services.rig.Rect[source]

Bases: BaseModel

Fields:
field height: int = 0[source]

Height of the rectangle

Constraints:
  • ge = 0

field width: int = 0[source]

Width of the rectangle

Constraints:
  • ge = 0

field x: int = 0[source]

X coordinate of the top-left corner

Constraints:
  • ge = 0

field y: int = 0[source]

Y coordinate of the top-left corner

Constraints:
  • ge = 0

pydantic model aind_behavior_services.rig.Screen[source]

Bases: Device

Fields:
field brightness: float = 0[source]

Brightness

Constraints:
  • ge = -1

  • le = 1

field calibration: DisplaysCalibration = DisplaysCalibration(left=DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=1.0472, z=0.0), translation=Vector3(x=-16.6917756, y=1.309016, z=-3.575264))), center=DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=0.0, z=0.0), translation=Vector3(x=0.0, y=1.309016, z=-13.27))), right=DisplayCalibration(intrinsics=DisplayIntrinsics(frame_width=1920, frame_height=1080, display_width=20, display_height=15), extrinsics=DisplayExtrinsics(rotation=Vector3(x=0.0, y=-1.0472, z=0.0), translation=Vector3(x=16.6917756, y=1.309016, z=-3.575264))))[source]

Screen calibration

field contrast: float = 1[source]

Contrast

Constraints:
  • ge = -1

  • le = 1

field device_type: Literal['Screen'] = 'Screen'[source]

Device type

field display_index: int = 1[source]

Display index

field target_render_frequency: float = 60[source]

Target render frequency

field target_update_frequency: float = 120[source]

Target update frequency

field texture_assets_directory: str = 'Textures'[source]

Texture assets directory
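
A sketch of a typical Screen configuration; the default calibration above is kept and only scalar settings are overridden (values illustrative):

    from aind_behavior_services.rig import Screen

    screen = Screen(
        display_index=2,
        brightness=-0.2,  # within the [-1, 1] constraint
        contrast=1.0,
    )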

pydantic model aind_behavior_services.rig.SpinnakerCamera[source]

Bases: Device

Fields:
Validators:
  • validate_roi » region_of_interest
field adc_bit_depth: SpinnakerCameraAdcBitDepth | None = SpinnakerCameraAdcBitDepth.ADC8BIT[source]

ADC bit depth. If None, the camera's default is used.

field binning: int = 1[source]

Binning

Constraints:
  • ge = 1

field color_processing: Literal['Default', 'NoColorProcessing'] = 'Default'[source]

Color processing

field device_type: Literal['SpinnakerCamera'] = 'SpinnakerCamera'[source]

Device type

field exposure: int = 1000[source]

Exposure time

Constraints:
  • ge = 100

field gain: float = 0[source]

Gain

Constraints:
  • ge = 0

field gamma: float | None = None[source]

Gamma. If None, gamma correction is disabled.

Constraints:
  • ge = 0

field pixel_format: SpinnakerCameraPixelFormat | None = SpinnakerCameraPixelFormat.MONO8[source]

Pixel format. If None, the camera's default is used.

field region_of_interest: Rect = Rect(x=0, y=0, width=0, height=0)[source]

Region of interest

Validated by:
  • validate_roi
field serial_number: str [Required][source]

Camera serial number

field video_writer: VideoWriter | None = None[source]

Video writer. If not provided, no video will be saved.

validator validate_roi  »  region_of_interest[source]
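
A sketch of a fully specified camera; only serial_number is required, and the ROI and writer values here are illustrative:

    from aind_behavior_services.rig import (
        Rect,
        SpinnakerCamera,
        SpinnakerCameraAdcBitDepth,
        VideoWriterFfmpeg,
    )

    camera = SpinnakerCamera(
        serial_number="23181234",  # hypothetical serial
        exposure=2000,             # must be >= 100
        gain=5.0,
        gamma=None,                # None disables gamma correction
        adc_bit_depth=SpinnakerCameraAdcBitDepth.ADC8BIT,
        region_of_interest=Rect(x=0, y=0, width=640, height=480),
        video_writer=VideoWriterFfmpeg(frame_rate=120),
    )
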
class aind_behavior_services.rig.SpinnakerCameraAdcBitDepth(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]

Bases: IntEnum

ADC10BIT = 1[source]
ADC12BIT = 2[source]
ADC8BIT = 0[source]
class aind_behavior_services.rig.SpinnakerCameraPixelFormat(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]

Bases: IntEnum

B10 = 101[source]
B12 = 102[source]
B16 = 103[source]
B8 = 100[source]
BAYERBG10 = 48[source]
BAYERBG10P = 38[source]
BAYERBG10PACKED = 33[source]
BAYERBG12 = 49[source]
BAYERBG12P = 23[source]
BAYERBG12PACKED = 15[source]
BAYERBG16 = 10[source]
BAYERBG8 = 6[source]
BAYERGB10 = 50[source]
BAYERGB10P = 37[source]
BAYERGB10PACKED = 32[source]
BAYERGB12 = 51[source]
BAYERGB12P = 22[source]
BAYERGB12PACKED = 14[source]
BAYERGB16 = 9[source]
BAYERGB8 = 5[source]
BAYERGR10 = 52[source]
BAYERGR10P = 35[source]
BAYERGR10PACKED = 30[source]
BAYERGR12 = 53[source]
BAYERGR12P = 20[source]
BAYERGR12PACKED = 12[source]
BAYERGR16 = 7[source]
BAYERGR8 = 3[source]
BAYERRG10 = 54[source]
BAYERRG10P = 36[source]
BAYERRG10PACKED = 31[source]
BAYERRG12 = 55[source]
BAYERRG12P = 21[source]
BAYERRG12PACKED = 13[source]
BAYERRG16 = 8[source]
BAYERRG8 = 4[source]
BGR10 = 85[source]
BGR10P = 86[source]
BGR12 = 87[source]
BGR12P = 88[source]
BGR14 = 89[source]
BGR16 = 90[source]
BGR565P = 91[source]
BGR8 = 27[source]
BGRA10 = 78[source]
BGRA10P = 79[source]
BGRA12 = 80[source]
BGRA12P = 81[source]
BGRA14 = 82[source]
BGRA16 = 83[source]
BGRA8 = 28[source]
CONFIDENCE1 = 139[source]
CONFIDENCE16 = 142[source]
CONFIDENCE1P = 140[source]
CONFIDENCE32F = 143[source]
CONFIDENCE8 = 141[source]
COORD3D_A10P = 125[source]
COORD3D_A12P = 126[source]
COORD3D_A16 = 127[source]
COORD3D_A32F = 128[source]
COORD3D_A8 = 124[source]
COORD3D_ABC10P = 106[source]
COORD3D_ABC10P_PLANAR = 107[source]
COORD3D_ABC12P = 108[source]
COORD3D_ABC12P_PLANAR = 109[source]
COORD3D_ABC16 = 110[source]
COORD3D_ABC16_PLANAR = 111[source]
COORD3D_ABC32F = 112[source]
COORD3D_ABC32F_PLANAR = 113[source]
COORD3D_ABC8 = 104[source]
COORD3D_ABC8_PLANAR = 105[source]
COORD3D_AC10P = 116[source]
COORD3D_AC10P_PLANAR = 117[source]
COORD3D_AC12P = 118[source]
COORD3D_AC12P_PLANAR = 119[source]
COORD3D_AC16 = 120[source]
COORD3D_AC16_PLANAR = 121[source]
COORD3D_AC32F = 122[source]
COORD3D_AC32F_PLANAR = 123[source]
COORD3D_AC8 = 114[source]
COORD3D_AC8_PLANAR = 115[source]
COORD3D_B10P = 130[source]
COORD3D_B12P = 131[source]
COORD3D_B16 = 132[source]
COORD3D_B32F = 133[source]
COORD3D_B8 = 129[source]
COORD3D_C10P = 135[source]
COORD3D_C12P = 136[source]
COORD3D_C16 = 137[source]
COORD3D_C32F = 138[source]
COORD3D_C8 = 134[source]
G10 = 97[source]
G12 = 98[source]
G16 = 99[source]
G8 = 96[source]
MONO10 = 43[source]
MONO10P = 34[source]
MONO10PACKED = 29[source]
MONO12 = 44[source]
MONO12P = 19[source]
MONO12PACKED = 11[source]
MONO14 = 45[source]
MONO16 = 1[source]
MONO16S = 46[source]
MONO1P = 39[source]
MONO2P = 40[source]
MONO32F = 47[source]
MONO4P = 41[source]
MONO8 = 0[source]
MONO8S = 42[source]
R10 = 93[source]
R12 = 94[source]
R16 = 95[source]
R8 = 92[source]
RGB10 = 65[source]
RGB10P = 67[source]
RGB10P32 = 68[source]
RGB10_PLANAR = 66[source]
RGB12 = 69[source]
RGB12P = 71[source]
RGB12_PLANAR = 70[source]
RGB14 = 72[source]
RGB16 = 73[source]
RGB16S = 74[source]
RGB16_PLANAR = 76[source]
RGB32F = 75[source]
RGB565P = 77[source]
RGB8 = 63[source]
RGB8PACKED = 2[source]
RGB8_PLANAR = 64[source]
RGBA10 = 57[source]
RGBA10P = 58[source]
RGBA12 = 59[source]
RGBA12P = 60[source]
RGBA14 = 61[source]
RGBA16 = 62[source]
RGBA32F = 84[source]
RGBA8 = 56[source]
YCBCR411_8 = 26[source]
YCBCR422_8 = 25[source]
YCBCR8 = 24[source]
YUV411PACKED = 16[source]
YUV422PACKED = 17[source]
YUV444PACKED = 18[source]
pydantic model aind_behavior_services.rig.Vector3[source]

Bases: BaseModel

Fields:
field x: float = 0[source]

X coordinate of the point

field y: float = 0[source]

Y coordinate of the point

field z: float = 0[source]

Z coordinate of the point

pydantic model aind_behavior_services.rig.VideoWriterFfmpeg[source]

Bases: BaseModel

Fields:
field container_extension: str = 'mp4'[source]

Container extension

field frame_rate: int = 30[source]

Encoding frame rate

Constraints:
  • ge = 0

field input_arguments: str = '-colorspace bt709 -color_primaries bt709 -color_range full -color_trc linear'[source]

Input arguments

field output_arguments: str = '-vf "scale=out_color_matrix=bt709:out_range=full,format=bgr24,scale=out_range=full" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p4 -rc vbr -cq 12 -b:v 0M -metadata author="Allen Institute for Neural Dynamics" -maxrate 700M -bufsize 350M'[source]

Output arguments

field video_writer_type: Literal['FFMPEG'] = 'FFMPEG'[source]
class aind_behavior_services.rig.VideoWriterFfmpegFactory(bit_depth: Literal[8, 16] = 8, video_writer_ffmpeg_kwargs: Dict[str, Any] = None)[source]

Bases: object

construct_video_writer_ffmpeg() → VideoWriterFfmpeg[source]
update_video_writer_ffmpeg_kwargs(video_writer: VideoWriterFfmpeg)[source]
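
A sketch of the factory's likely use, judging from its documented signature; the assumption that the kwargs are forwarded to the constructed writer is not confirmed by the source:

    from aind_behavior_services.rig import VideoWriterFfmpegFactory

    factory = VideoWriterFfmpegFactory(
        bit_depth=16,
        video_writer_ffmpeg_kwargs={"frame_rate": 120},
    )
    writer = factory.construct_video_writer_ffmpeg()
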
pydantic model aind_behavior_services.rig.VideoWriterOpenCv[source]

Bases: BaseModel

Fields:
field container_extension: str = 'avi'[source]

Container extension

field four_cc: str = 'FMP4'[source]

Four character code

field frame_rate: int = 30[source]

Encoding frame rate

Constraints:
  • ge = 0

field video_writer_type: Literal['OPENCV'] = 'OPENCV'[source]
pydantic model aind_behavior_services.rig.WebCamera[source]

Bases: Device

Fields:
field device_type: Literal['WebCamera'] = 'WebCamera'[source]

Device type

field index: int = 0[source]

Camera index

Constraints:
  • ge = 0

field video_writer: VideoWriter | None = None[source]

Video writer. If not provided, no video will be saved.
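
A sketch pairing a webcam with the OpenCV writer (codec choice illustrative):

    from aind_behavior_services.rig import VideoWriterOpenCv, WebCamera

    webcam = WebCamera(
        index=0,
        video_writer=VideoWriterOpenCv(four_cc="MJPG", frame_rate=30),
    )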

aind_behavior_services.rig.validate_harp_clock_output(rig: TRig) → TRig[source]
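
No description is rendered for this helper. Judging from its name and the connected_clock_outputs validators above, it plausibly cross-checks a rig's declared Harp clock outputs against the rig's devices; a hedged sketch of the call:

    # `rig` is an instance of an AindBehaviorRigModel subclass.
    rig = validate_harp_clock_output(rig)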