streaming part1

2025-07-27 08:01:08 -07:00
parent 277d554ecc
commit 4b058c2405
17 changed files with 3072 additions and 683 deletions


@@ -1,113 +1,284 @@
#!/bin/bash
# VR180 Matting Unified Setup Script for RunPod
# Supports both chunked and streaming implementations
echo "🚀 Setting up VR180 Matting on RunPod..."
set -e # Exit on error
echo "🚀 VR180 Matting Setup for RunPod"
echo "=================================="
echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader)"
echo "VRAM: $(nvidia-smi --query-gpu=memory.total --format=csv,noheader)"
echo ""
# Function to print colored output
print_status() {
    echo -e "\n\033[1;34m$1\033[0m"
}

print_success() {
    echo -e "\033[1;32m✅ $1\033[0m"
}

print_error() {
    echo -e "\033[1;31m❌ $1\033[0m"
}

# Check if running on RunPod
if [ -d "/workspace" ]; then
    print_status "Detected RunPod environment"
    WORKSPACE="/workspace"
else
    print_status "Not on RunPod - using current directory"
    WORKSPACE="$(pwd)"
fi
# Update system
echo "📦 Installing system dependencies..."
apt-get update && apt-get install -y ffmpeg git wget nano
print_status "Installing system dependencies..."
apt-get update && apt-get install -y \
ffmpeg \
git \
wget \
nano \
vim \
htop \
nvtop \
libgl1-mesa-glx \
libglib2.0-0 \
libsm6 \
libxext6 \
libxrender-dev \
libgomp1 || print_error "Failed to install some packages"
# Install Python dependencies
echo "🐍 Installing Python dependencies..."
print_status "Installing Python dependencies..."
pip install --upgrade pip
pip install -r requirements.txt
print_status "Installing video processing libraries..."
pip install decord ffmpeg-python
# Install CuPy for GPU acceleration of stereo validation
echo "🚀 Installing CuPy for GPU acceleration..."
print_status "Installing CuPy for GPU acceleration..."
# Auto-detect CUDA version and install appropriate CuPy
python -c "
import torch
if torch.cuda.is_available():
cuda_version = torch.version.cuda
print(f'CUDA version detected: {cuda_version}')
if cuda_version.startswith('11.'):
import subprocess
subprocess.run(['pip', 'install', 'cupy-cuda11x>=12.0.0'])
print('Installed CuPy for CUDA 11.x')
elif cuda_version.startswith('12.'):
import subprocess
subprocess.run(['pip', 'install', 'cupy-cuda12x>=12.0.0'])
print('Installed CuPy for CUDA 12.x')
else:
print(f'Unsupported CUDA version: {cuda_version}')
else:
print('CUDA not available, skipping CuPy installation')
"
# Install SAM2 separately (not on PyPI)
echo "🎯 Installing SAM2..."
pip install git+https://github.com/facebookresearch/segment-anything-2.git
# Install project
echo "📦 Installing VR180 matting package..."
pip install -e .
# Download models
echo "📥 Downloading models..."
mkdir -p models
# Download YOLOv8 models
python -c "from ultralytics import YOLO; YOLO('yolov8n.pt'); YOLO('yolov8m.pt')"
# Clone SAM2 repo for checkpoints
echo "📥 Cloning SAM2 for model checkpoints..."
if [ ! -d "segment-anything-2" ]; then
git clone https://github.com/facebookresearch/segment-anything-2.git
if command -v nvidia-smi &> /dev/null; then
    CUDA_VERSION=$(nvidia-smi | grep "CUDA Version" | awk '{print $9}' | cut -d. -f1-2)
    echo "Detected CUDA version: $CUDA_VERSION"
    if [[ "$CUDA_VERSION" == "11."* ]]; then
        pip install "cupy-cuda11x>=12.0.0" && print_success "Installed CuPy for CUDA 11.x"
    elif [[ "$CUDA_VERSION" == "12."* ]]; then
        pip install "cupy-cuda12x>=12.0.0" && print_success "Installed CuPy for CUDA 12.x"
    else
        print_error "Unknown CUDA version, skipping CuPy installation"
    fi
else
    print_error "NVIDIA GPU not detected, skipping CuPy installation"
fi
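# Optional sanity check (a minimal sketch added here, not part of the upstream script):
# confirm that CuPy imports and can see a CUDA device. Failure is treated as non-fatal.
python -c "
try:
    import cupy
    print(f'CuPy {cupy.__version__} sees {cupy.cuda.runtime.getDeviceCount()} CUDA device(s)')
except Exception as exc:
    print(f'CuPy check skipped: {exc}')
" || true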
# Clone and install SAM2
print_status "Installing Segment Anything 2..."
if [ ! -d "segment-anything-2" ]; then
    git clone https://github.com/facebookresearch/segment-anything-2.git
    cd segment-anything-2
    pip install -e .
    cd ..
else
    print_status "SAM2 already cloned, updating..."
    cd segment-anything-2
    git pull
    pip install -e . --upgrade
    cd ..
fi
# Download SAM2 checkpoints
print_status "Downloading SAM2 checkpoints..."
cd segment-anything-2/checkpoints
if [ ! -f "sam2.1_hiera_large.pt" ]; then
    chmod +x download_ckpts.sh
    bash download_ckpts.sh || {
        print_error "Automatic download failed, trying manual download..."
        wget https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt
    }
fi
cd ../..
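# Quick verification (illustrative addition, not from the original script): make sure the
# large checkpoint actually landed before continuing; the path matches the download step above.
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_large.pt" ]; then
    ls -lh segment-anything-2/checkpoints/sam2.1_hiera_large.pt
else
    print_error "sam2.1_hiera_large.pt is missing - re-run the checkpoint download before processing"
fi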
# Download YOLOv8 models
print_status "Downloading YOLO models..."
python -c "from ultralytics import YOLO; YOLO('yolov8n.pt'); print('✅ YOLOv8n downloaded')"
python -c "from ultralytics import YOLO; YOLO('yolov8m.pt'); print('✅ YOLOv8m downloaded')"
# Create working directories
print_status "Creating directory structure..."
mkdir -p $WORKSPACE/sam2e/{input,output,checkpoints}
mkdir -p /workspace/data /workspace/output # RunPod standard dirs
cd $WORKSPACE/sam2e
# Create example configs if they don't exist
print_status "Creating example configuration files..."
# Chunked approach config
if [ ! -f "config-chunked-runpod.yaml" ]; then
print_status "Creating chunked approach config..."
cat > config-chunked-runpod.yaml << 'EOF'
# VR180 Matting - Chunked Approach (Original)
input:
video_path: "/workspace/data/input_video.mp4"
processing:
scale_factor: 0.5 # 0.5 for 8K input = 4K processing
chunk_size: 600 # Larger chunks for cloud GPU
overlap_frames: 60 # Overlap between chunks
detection:
confidence_threshold: 0.7
model: "yolov8n"
matting:
use_disparity_mapping: true
memory_offload: true
fp16: true
sam2_model_cfg: "sam2.1_hiera_l"
sam2_checkpoint: "segment-anything-2/checkpoints/sam2.1_hiera_large.pt"
output:
path: "/workspace/output/output_video.mp4"
format: "greenscreen" # or "alpha"
background_color: [0, 255, 0]
maintain_sbs: true
hardware:
device: "cuda"
max_vram_gb: 40 # Conservative for 48GB GPU
EOF
print_success "Created config-chunked-runpod.yaml"
fi
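# Illustrative only (hypothetical filename): point the chunked config at your own upload
# without opening an editor, e.g.
#   sed -i 's|/workspace/data/input_video.mp4|/workspace/data/my_video.mp4|' config-chunked-runpod.yaml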
# Streaming approach config already exists
if [ ! -f "config-streaming-runpod.yaml" ]; then
print_error "config-streaming-runpod.yaml not found - please check the repository"
fi
# Create convenience run scripts
print_status "Creating run scripts..."
# Chunked approach
cat > run_chunked.sh << 'EOF'
#!/bin/bash
# Run VR180 matting with chunked approach (original)
echo "🎬 Running VR180 matting - Chunked Approach"
echo "==========================================="
python -m vr180_matting.main config-chunked-runpod.yaml "$@"
EOF
chmod +x run_chunked.sh
# Streaming approach
cat > run_streaming.sh << 'EOF'
#!/bin/bash
# Run VR180 matting with streaming approach (optimized)
echo "🎬 Running VR180 matting - Streaming Approach"
echo "============================================="
python -m vr180_streaming.main config-streaming-runpod.yaml "$@"
EOF
chmod +x run_streaming.sh
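# Both wrappers forward extra flags to the Python entry points via "$@", so the overrides shown
# in the usage notes below (scale, output path, frame range) can be appended directly, e.g.
#   ./run_streaming.sh --scale 0.5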
# Test installation
echo ""
echo "🧪 Testing installation..."
python test_installation.py
print_status "Testing installation..."
python -c "
import sys
print('Python:', sys.version)
try:
import torch
print(f'✅ PyTorch: {torch.__version__}')
print(f' CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
print(f' GPU: {torch.cuda.get_device_name(0)}')
print(f' VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB')
except: print('❌ PyTorch not available')
try:
import cv2
print(f'✅ OpenCV: {cv2.__version__}')
except: print('❌ OpenCV not available')
try:
from ultralytics import YOLO
print('✅ YOLO available')
except: print('❌ YOLO not available')
try:
import yaml, numpy, psutil
print('✅ Other dependencies available')
except: print('❌ Some dependencies missing')
"
# Run streaming test if available
if [ -f "test_streaming.py" ]; then
print_status "Running streaming implementation test..."
python test_streaming.py || print_error "Streaming test failed"
fi
# Check which SAM2 models are available
print_status "SAM2 Models available:"
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_large.pt" ]; then
    print_success "sam2.1_hiera_large.pt (recommended for quality)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_l'"
    echo "   Checkpoint: sam2_checkpoint: 'segment-anything-2/checkpoints/sam2.1_hiera_large.pt'"
fi

if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_base_plus.pt" ]; then
    print_success "sam2.1_hiera_base_plus.pt (balanced)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_b+'"
fi

if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_small.pt" ]; then
    print_success "sam2.1_hiera_small.pt (fast)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_s'"
fi
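# To switch models, update both matting keys in the config together, e.g. for the balanced model
# (values taken from the listing above):
#   sam2_model_cfg: "sam2.1_hiera_b+"
#   sam2_checkpoint: "segment-anything-2/checkpoints/sam2.1_hiera_base_plus.pt"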
echo ""
echo "Setup complete!"
echo ""
echo "📝 Quick start:"
echo "1. Upload your VR180 video to /workspace/data/"
echo " wget -O /workspace/data/video.mp4 'your-video-url'"
echo ""
echo "2. Use the RunPod optimized config:"
echo " cp config_runpod.yaml config.yaml"
echo " nano config.yaml # Update video path"
echo ""
echo "3. Run the matting:"
echo " vr180-matting config.yaml"
echo ""
echo "💡 For A40 GPU, you can use higher quality settings:"
echo " vr180-matting config.yaml --scale 0.75"
# Print usage instructions
print_success "Setup complete!"
echo
echo "📋 Usage Instructions:"
echo "====================="
echo
echo "1. Upload your VR180 video:"
echo " wget -O /workspace/data/input_video.mp4 'your-video-url'"
echo " # Or use RunPod's file upload feature"
echo
echo "2. Choose your processing approach:"
echo
echo " a) STREAMING (Recommended - 2-3x faster, constant memory):"
echo " ./run_streaming.sh"
echo " # Or: python -m vr180_streaming config-streaming-runpod.yaml"
echo
echo " b) CHUNKED (Original - more stable, higher memory):"
echo " ./run_chunked.sh"
echo " # Or: python -m vr180_matting config-chunked-runpod.yaml"
echo
echo "3. Optional: Edit configs first:"
echo " nano config-streaming-runpod.yaml # For streaming"
echo " nano config-chunked-runpod.yaml # For chunked"
echo
echo "4. Monitor progress:"
echo " - GPU usage: nvtop"
echo " - System resources: htop"
echo " - Output directory: ls -la /workspace/output/"
echo
echo "📊 Performance Tips:"
echo "==================="
echo "- Streaming: Best for long videos, uses ~50GB RAM constant"
echo "- Chunked: More stable but uses 100GB+ RAM in spikes"
echo "- Scale factor: 0.25 (fast) → 0.5 (balanced) → 1.0 (quality)"
echo "- A6000/A100: Can handle 0.5-0.75 scale easily"
echo "- Monitor VRAM with: nvidia-smi -l 1"
echo
echo "🎯 Example Commands:"
echo "==================="
echo "# Process with custom output path:"
echo "./run_streaming.sh --output /workspace/output/my_video.mp4"
echo
echo "# Process specific frame range:"
echo "./run_streaming.sh --start-frame 1000 --max-frames 5000"
echo
echo "# Override scale for quality:"
echo "./run_streaming.sh --scale 0.75"
echo
echo "Happy matting! 🎬"