API Reference
This section provides detailed API documentation for all public classes and functions in the OpenScope Experimental Launcher.
Core Classes
BaseExperiment
- class openscope_experimental_launcher.BaseExperiment[source]
Bases: object
Base class for OpenScope experimental launchers.
Provides core functionality for:
- Parameter loading and management
- Bonsai process management
- Repository setup and version control
- Process monitoring and memory management
- Basic output file generation
Key Methods:
- collect_runtime_information()[source]
Collect key information from the user at runtime.
This method can be extended in derived classes to collect rig-specific information.
- post_experiment_processing()[source]
Perform post-experiment processing specific to each rig type. This method should be overridden in each rig-specific launcher.
The default implementation does nothing; each rig should implement its own data reformatting logic here.
- Return type: bool
- Returns: True if successful, False otherwise
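For example, a rig-specific launcher might override it along these lines (a minimal sketch; the MyRigExperiment class and raw_output.bin filename are illustrative only, and session_directory is assumed to have been populated by the run):

import os

from openscope_experimental_launcher import BaseExperiment

class MyRigExperiment(BaseExperiment):
    """Hypothetical rig-specific launcher."""

    def post_experiment_processing(self) -> bool:
        # Reformat this rig's raw output; the filename is illustrative only.
        raw_file = os.path.join(self.session_directory, "raw_output.bin")
        if not os.path.exists(raw_file):
            return False
        # ... rig-specific reformatting of raw_file would go here ...
        return True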
- determine_session_directory()[source]
Determine or generate the output directory path using AIND data schema standards.
- save_experiment_metadata(output_directory, param_file=None)[source]
Save experiment metadata to the output directory.
This includes:
- Original parameter JSON file
- Command line arguments used to run the experiment
- Runtime information and system details
- Experiment logs (if available)
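A sketch of a direct call, matching the documented signature and assuming session_directory has already been populated:

experiment = BaseExperiment()
# ... after experiment.run(...) has populated the session directory ...
experiment.save_experiment_metadata(
    experiment.session_directory,
    param_file="parameters.json",
)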
- classmethod create_argument_parser(description=None)[source]
Create a standard argument parser for experiment launchers.
- Parameters: description (str) – Description for the argument parser
- Return type: argparse.ArgumentParser
- Returns: Configured ArgumentParser instance
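For example (a sketch; the specific flags the parser defines are not listed here, so only the documented classmethod call and standard argparse usage are assumed):

from openscope_experimental_launcher import BaseExperiment

# Build the standard launcher parser and parse command line arguments
parser = BaseExperiment.create_argument_parser(description="Run my experiment")
args = parser.parse_args()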
Slap2Experiment
Data Models
Session Schema
The session data model follows the AIND data schema format:
from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class SessionData:
    """Session data structure based on AIND schema."""
    describedBy: str = "https://raw.githubusercontent.com/AllenNeuralDynamics/aind-data-schema/main/src/aind_data_schema/core/session.py"
    schema_version: str = "1.1.2"
    protocol_id: List[str] = field(default_factory=list)
    experimenter_full_name: List[str] = field(default_factory=list)
    session_start_time: str = ""
    session_end_time: str = ""
    session_type: str = ""
    iacuc_protocol: Optional[str] = None
    rig_id: str = ""
    calibrations: List = field(default_factory=list)
    maintenance: List = field(default_factory=list)
    subject_id: str = ""
    animal_weight_prior: Optional[float] = None
    animal_weight_post: Optional[float] = None
    weight_unit: str = "gram"
    anaesthesia: Optional[str] = None
    data_streams: List = field(default_factory=list)
    stimulus_epochs: List[Dict] = field(default_factory=list)
    mouse_platform_name: str = "Fixed platform"
    active_mouse_platform: bool = False
    headframe_registration: Optional[Dict] = None
    reward_delivery: Optional[Dict] = None
    reward_consumed_total: Optional[float] = None
    reward_consumed_unit: str = "milliliter"
    notes: str = ""
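Because the model is a plain dataclass in the sketch above, instances serialize cleanly with dataclasses.asdict. The field values here are illustrative:

import json
from dataclasses import asdict

# Build a session record and dump it to JSON
session = SessionData(subject_id="123456", rig_id="behavior_rig_1")
with open("session.json", "w") as f:
    json.dump(asdict(session), f, indent=2)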
Stimulus Epoch Schema
from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class StimulusEpoch:
    """Stimulus epoch data structure."""
    stimulus_start_time: str = ""
    stimulus_end_time: str = ""
    stimulus_name: str = ""
    session_number: Optional[int] = None
    software: List[Dict] = field(default_factory=list)
    script: Dict = field(default_factory=dict)
    stimulus_modalities: List[str] = field(default_factory=list)
    stimulus_parameters: Optional[Dict] = None
    stimulus_device_names: List[str] = field(default_factory=list)
    speaker_config: Optional[Dict] = None
    light_source_config: List[Dict] = field(default_factory=list)
    objects_in_arena: Optional[Dict] = None
    output_parameters: Dict = field(default_factory=dict)
    reward_consumed_during_epoch: Optional[float] = None
    reward_consumed_unit: str = "microliter"
    trials_total: Optional[int] = None
    trials_finished: Optional[int] = None
    trials_rewarded: Optional[int] = None
    notes: str = ""
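Since SessionData.stimulus_epochs is typed List[Dict], epochs are stored on the session as plain dicts. A sketch of attaching one to the session object from the previous snippet (the stimulus name and trial count are illustrative):

from dataclasses import asdict

epoch = StimulusEpoch(stimulus_name="drifting_gratings", trials_total=120)
session.stimulus_epochs.append(asdict(epoch))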
Parameter Schema
from dataclasses import dataclass
from typing import Optional

@dataclass
class ExperimentParameters:
    """Experiment parameter schema."""
    # Required parameters
    subject_id: str
    user_id: str
    repository_url: str
    bonsai_path: str
    # Optional parameters with defaults
    repository_commit_hash: str = "main"
    local_repository_path: Optional[str] = None
    bonsai_exe_path: Optional[str] = None
    bonsai_setup_script: Optional[str] = None
    output_directory: Optional[str] = None
    # Experiment-specific parameters
    session_type: str = "Behavior"
    rig_id: str = "default_rig"
    stimulus_name: str = "Default Stimulus"
    trials_total: Optional[int] = None
    notes: str = ""
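A sketch of constructing the schema from a parameter file, assuming the JSON keys match the field names exactly:

import json

with open("parameters.json") as f:
    params = ExperimentParameters(**json.load(f))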
Utility Functions
File Management
Git Operations
Process Management
Validation Functions
Exceptions
BaseExperimentError
ParameterValidationError
RepositoryError
BonsaiProcessError
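The package does not document the hierarchy here, but the error-handling example below assumes the concrete errors derive from BaseExperimentError. A minimal sketch of that assumed hierarchy:

class BaseExperimentError(Exception):
    """Base class for launcher errors (assumed hierarchy, not confirmed by the source)."""

class ParameterValidationError(BaseExperimentError):
    """Raised when parameter loading or validation fails."""

class RepositoryError(BaseExperimentError):
    """Raised when repository setup or version control fails."""

class BonsaiProcessError(BaseExperimentError):
    """Raised when the Bonsai process fails to start or exits abnormally."""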
Constants
Default Values
File Extensions
Example Usage Patterns
Basic Experiment Run
from openscope_experimental_launcher import BaseExperiment

# Create and run experiment
experiment = BaseExperiment()
success = experiment.run("parameters.json")

if success:
    print("Experiment completed successfully")
    print(f"Output saved to: {experiment.session_directory}")
else:
    print("Experiment failed")
Advanced Parameter Validation
from openscope_experimental_launcher import BaseExperiment
from openscope_experimental_launcher.validation import validate_parameters
import json

# Load and validate parameters before running
with open("parameters.json") as f:
    params = json.load(f)

is_valid, errors = validate_parameters(params)

if not is_valid:
    print("Parameter validation failed:")
    for error in errors:
        print(f"  - {error}")
else:
    experiment = BaseExperiment()
    success = experiment.run("parameters.json")
Custom Experiment Subclass
import json
import os

from openscope_experimental_launcher import BaseExperiment

class CustomExperiment(BaseExperiment):
    """Custom experiment with additional functionality."""

    def __init__(self):
        super().__init__()
        self.custom_parameter = None

    def load_parameters(self, param_file):
        """Load parameters with custom validation."""
        super().load_parameters(param_file)
        # Add custom parameter handling
        if 'custom_parameter' in self.params:
            self.custom_parameter = self.params['custom_parameter']
        self.validate_custom_parameter()

    def validate_custom_parameter(self):
        """Validate custom parameter."""
        if self.custom_parameter is None:
            raise ValueError("Custom parameter is required")

    def create_custom_output(self):
        """Create custom output files."""
        custom_data = {
            'custom_parameter': self.custom_parameter,
            'timestamp': self.session_start_time.isoformat()
        }
        custom_file = os.path.join(self.session_directory, f"{self.session_uuid}_custom.json")
        with open(custom_file, 'w') as f:
            json.dump(custom_data, f, indent=2)

# Usage
experiment = CustomExperiment()
success = experiment.run("custom_parameters.json")
Asynchronous Experiment Execution
import asyncio

from openscope_experimental_launcher import BaseExperiment

async def run_experiment_async(param_file):
    """Run experiment asynchronously."""
    experiment = BaseExperiment()
    # Run in an executor to avoid blocking the event loop
    loop = asyncio.get_running_loop()
    success = await loop.run_in_executor(
        None, experiment.run, param_file
    )
    return success, experiment

async def run_multiple_experiments(param_files):
    """Run multiple experiments concurrently."""
    tasks = [run_experiment_async(pf) for pf in param_files]
    results = await asyncio.gather(*tasks)

    for i, (success, experiment) in enumerate(results):
        param_file = param_files[i]
        if success:
            print(f"✅ {param_file}: {experiment.session_directory}")
        else:
            print(f"❌ {param_file}: Failed")

# Usage
param_files = ["exp1.json", "exp2.json", "exp3.json"]
asyncio.run(run_multiple_experiments(param_files))
Error Handling and Logging
import logging

from openscope_experimental_launcher import BaseExperiment
from openscope_experimental_launcher.exceptions import (
    ParameterValidationError,
    RepositoryError,
    BonsaiProcessError,
)

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def run_experiment_with_error_handling(param_file):
    """Run experiment with comprehensive error handling."""
    try:
        experiment = BaseExperiment()
        success = experiment.run(param_file)
        if success:
            logger.info(f"Experiment completed: {experiment.session_directory}")
            return True
        else:
            logger.error("Experiment failed for unknown reasons")
            return False
    except ParameterValidationError as e:
        logger.error(f"Parameter validation failed: {e}")
        return False
    except RepositoryError as e:
        logger.error(f"Repository operation failed: {e}")
        return False
    except BonsaiProcessError as e:
        logger.error(f"Bonsai process failed: {e}")
        return False
    except Exception as e:
        logger.exception(f"Unexpected error: {e}")
        return False

# Usage
success = run_experiment_with_error_handling("parameters.json")