samyolo_on_segments/config.yaml

# YOLO + SAM2 Video Processing Configuration
input:
  video_path: "/path/to/input/video.mp4"

output:
  directory: "/path/to/output/"
  filename: "processed_video.mp4"

processing:
  # Duration of each video segment in seconds
  segment_duration: 5
  # Scale factor for SAM2 inference (0.5 = half resolution)
  inference_scale: 0.5
  # YOLO detection confidence threshold
  yolo_confidence: 0.6
  # Which segments to run YOLO detection on
  # Options: "all", a list of segment indices (e.g. [0, 5, 10]), or [] for the default (all)
  detect_segments: "all"

models:
  # YOLO model path - can be pretrained (yolov8n.pt) or a custom path
  yolo_model: "models/yolo/yolov8n.pt"
  # SAM2 model configuration
  sam2_checkpoint: "models/sam2/checkpoints/sam2.1_hiera_large.pt"
  sam2_config: "models/sam2/configs/sam2.1/sam2.1_hiera_l.yaml"

video:
  # Use NVIDIA hardware encoding (requires an NVENC-capable GPU)
  use_nvenc: true
  # Output video bitrate
  output_bitrate: "50M"
  # Preserve original audio track
  preserve_audio: true
  # Force keyframes for cleaner segment boundaries
  force_keyframes: true
advanced:
  # Green screen color (BGR order, as used by OpenCV)
  green_color: [0, 255, 0]
  # Blue screen color for second object (BGR order; [255, 0, 0] is blue in BGR)
  blue_color: [255, 0, 0]
  # YOLO class ID for humans (0 is the COCO "person" class)
  human_class_id: 0
  # Remove intermediate segment files after processing
  cleanup_intermediate_files: true
  # Logging level (DEBUG, INFO, WARNING, ERROR)
  log_level: "INFO"
  # Save debug frames with YOLO detections visualized
  save_yolo_debug_frames: true
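
For reference, here is a minimal sketch of how a pipeline script might load and read this config in Python. The key names mirror the file above; the loader function, the PyYAML usage, and the detect_segments helper are assumptions for illustration, not the project's actual code.

# Minimal sketch: load config.yaml and read the main settings.
# Assumes PyYAML is installed; key names follow the config above,
# everything else here is illustrative.
from pathlib import Path
import yaml


def load_config(path: str = "samyolo_on_segments/config.yaml") -> dict:
    """Parse the YAML config into a plain dict."""
    with open(path, "r") as f:
        return yaml.safe_load(f)


def segments_to_detect(cfg: dict, total_segments: int) -> list[int]:
    """Resolve detect_segments: "all" or [] means every segment, otherwise the listed indices."""
    value = cfg["processing"].get("detect_segments", "all")
    if value == "all" or value == []:
        return list(range(total_segments))
    return [i for i in value if 0 <= i < total_segments]


if __name__ == "__main__":
    cfg = load_config()
    out_path = Path(cfg["output"]["directory"]) / cfg["output"]["filename"]
    print("Input video:   ", cfg["input"]["video_path"])
    print("Output file:   ", out_path)
    print("Segment length:", cfg["processing"]["segment_duration"], "s")
    print("YOLO model:    ", cfg["models"]["yolo_model"])
    print("Detect on:     ", segments_to_detect(cfg, total_segments=12))

The segments_to_detect helper only illustrates the three forms the detect_segments option accepts ("all", an explicit index list, or [] as the default); the real pipeline may resolve it differently.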