#!/bin/bash
# VR180 Matting Unified Setup Script for RunPod
# Supports both chunked and streaming implementations

set -e  # Exit on error

echo "🚀 VR180 Matting Setup for RunPod"
echo "=================================="
echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader)"
echo "VRAM: $(nvidia-smi --query-gpu=memory.total --format=csv,noheader)"
echo ""

# Functions to print colored output
print_status() {
    echo -e "\n\033[1;34m$1\033[0m"
}

print_success() {
    echo -e "\033[1;32m✅ $1\033[0m"
}

print_error() {
    echo -e "\033[1;31m❌ $1\033[0m"
}

# Check if running on RunPod
if [ -d "/workspace" ]; then
    print_status "Detected RunPod environment"
    WORKSPACE="/workspace"
else
    print_status "Not on RunPod - using current directory"
    WORKSPACE="$(pwd)"
fi

# Update system and install dependencies
print_status "Installing system dependencies..."
apt-get update && apt-get install -y \
    ffmpeg \
    git \
    wget \
    nano \
    vim \
    htop \
    nvtop \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgomp1 || print_error "Failed to install some packages"

# Install Python dependencies
print_status "Installing Python dependencies..."
pip install --upgrade pip
pip install -r requirements.txt

# Install decord for SAM2 video loading
print_status "Installing video processing libraries..."
pip install decord ffmpeg-python

# Install CuPy for GPU acceleration (CUDA 12 is standard on modern RunPod)
print_status "Installing CuPy for GPU acceleration..."
if command -v nvidia-smi &> /dev/null; then
    print_status "Installing CuPy for CUDA 12.x (standard on RunPod)..."
    # Quote the requirement so ">=12.0.0" is not treated as a shell redirection
    pip install "cupy-cuda12x>=12.0.0" && print_success "Installed CuPy for CUDA 12.x"
else
    print_error "NVIDIA GPU not detected, skipping CuPy installation"
fi
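
# Optional sanity check (a minimal sketch): confirm CuPy can reach the CUDA runtime.
# Guarded with || so a failed check does not abort the whole setup under `set -e`.
if command -v nvidia-smi &> /dev/null; then
    python -c "import cupy; print('CuPy CUDA runtime:', cupy.cuda.runtime.runtimeGetVersion())" \
        || print_error "CuPy import check failed"
fi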

# Clone and install SAM2
print_status "Installing Segment Anything 2..."
if [ ! -d "segment-anything-2" ]; then
    git clone https://github.com/facebookresearch/segment-anything-2.git
    cd segment-anything-2
    pip install -e .
    cd ..
else
    print_status "SAM2 already cloned, updating..."
    cd segment-anything-2
    git pull
    pip install -e . --upgrade
    cd ..
fi
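
# Optional sanity check (a sketch): verify the editable SAM2 install actually imports.
# build_sam2_video_predictor is the video predictor entry point exposed by the SAM2 package.
python -c "from sam2.build_sam import build_sam2_video_predictor; print('✅ SAM2 import OK')" \
    || print_error "SAM2 import check failed"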

# Download SAM2 checkpoints
print_status "Downloading SAM2 checkpoints..."
cd segment-anything-2/checkpoints
if [ ! -f "sam2.1_hiera_large.pt" ]; then
    chmod +x download_ckpts.sh
    bash download_ckpts.sh || {
        print_error "Automatic download failed, trying manual download..."
        wget https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt
    }
fi
cd ../..
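
# Sanity check (a sketch): confirm the large checkpoint actually landed before continuing.
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_large.pt" ]; then
    print_success "Checkpoint present: $(du -h segment-anything-2/checkpoints/sam2.1_hiera_large.pt | cut -f1)"
else
    print_error "sam2.1_hiera_large.pt is still missing - download it manually before running the pipeline"
fi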

# Download YOLOv8 models
print_status "Downloading YOLO models..."
python -c "from ultralytics import YOLO; YOLO('yolov8n.pt'); print('✅ YOLOv8n downloaded')"
python -c "from ultralytics import YOLO; YOLO('yolov8m.pt'); print('✅ YOLOv8m downloaded')"

# Create working directories
print_status "Creating directory structure..."
mkdir -p "$WORKSPACE"/sam2e/{input,output,checkpoints}
mkdir -p "$WORKSPACE"/data "$WORKSPACE"/output  # RunPod standard dirs (local dirs when not on RunPod)
cd "$WORKSPACE"/sam2e

# Create example configs if they don't exist
print_status "Creating example configuration files..."

# Chunked approach config
if [ ! -f "config-chunked-runpod.yaml" ]; then
    print_status "Creating chunked approach config..."
    cat > config-chunked-runpod.yaml << 'EOF'
# VR180 Matting - Chunked Approach (Original)
input:
  video_path: "/workspace/data/input_video.mp4"

processing:
  scale_factor: 0.5      # 0.5 for 8K input = 4K processing
  chunk_size: 600        # Larger chunks for cloud GPU
  overlap_frames: 60     # Overlap between chunks

detection:
  confidence_threshold: 0.7
  model: "yolov8n"

matting:
  use_disparity_mapping: true
  memory_offload: true
  fp16: true
  sam2_model_cfg: "sam2.1_hiera_l"
  sam2_checkpoint: "segment-anything-2/checkpoints/sam2.1_hiera_large.pt"

output:
  path: "/workspace/output/output_video.mp4"
  format: "greenscreen"  # or "alpha"
  background_color: [0, 255, 0]
  maintain_sbs: true

hardware:
  device: "cuda"
  max_vram_gb: 40        # Conservative for 48GB GPU
EOF
    print_success "Created config-chunked-runpod.yaml"
fi

# Streaming approach config should already ship with the repository
if [ ! -f "config-streaming-runpod.yaml" ]; then
    print_error "config-streaming-runpod.yaml not found - please check the repository"
fi
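
# Quick syntax check (a sketch; assumes PyYAML is available via requirements.txt):
# parse whichever configs exist so typos surface now rather than mid-run.
for cfg in config-chunked-runpod.yaml config-streaming-runpod.yaml; do
    if [ -f "$cfg" ]; then
        python -c "import sys, yaml; yaml.safe_load(open(sys.argv[1]))" "$cfg" \
            && print_success "$cfg parses cleanly" \
            || print_error "$cfg has YAML syntax errors"
    fi
done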

# Skip creating convenience scripts - use Python directly

# Test installation
print_status "Testing installation..."
python -c "
import sys
print('Python:', sys.version)
try:
    import torch
    print(f'✅ PyTorch: {torch.__version__}')
    print(f'   CUDA available: {torch.cuda.is_available()}')
    if torch.cuda.is_available():
        print(f'   GPU: {torch.cuda.get_device_name(0)}')
        print(f'   VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB')
except Exception:
    print('❌ PyTorch not available')

try:
    import cv2
    print(f'✅ OpenCV: {cv2.__version__}')
except Exception:
    print('❌ OpenCV not available')

try:
    from ultralytics import YOLO
    print('✅ YOLO available')
except Exception:
    print('❌ YOLO not available')

try:
    import yaml, numpy, psutil
    print('✅ Other dependencies available')
except Exception:
    print('❌ Some dependencies missing')
"

# Run streaming test if available
if [ -f "test_streaming.py" ]; then
    print_status "Running streaming implementation test..."
    python test_streaming.py || print_error "Streaming test failed"
fi

# Check which SAM2 models are available
print_status "SAM2 Models available:"
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_large.pt" ]; then
    print_success "sam2.1_hiera_large.pt (recommended for quality)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_l'"
fi
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_base_plus.pt" ]; then
    print_success "sam2.1_hiera_base_plus.pt (balanced)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_b+'"
fi
if [ -f "segment-anything-2/checkpoints/sam2.1_hiera_small.pt" ]; then
    print_success "sam2.1_hiera_small.pt (fast)"
    echo "   Config: sam2_model_cfg: 'sam2.1_hiera_s'"
fi

# Print usage instructions
print_success "Setup complete!"
echo
echo "📋 Usage Instructions:"
echo "====================="
echo
echo "1. Upload your VR180 video:"
echo "   wget -O /workspace/data/input_video.mp4 'your-video-url'"
echo "   # Or use RunPod's file upload feature"
echo
echo "2. Choose your processing approach:"
echo
echo "   a) STREAMING (Recommended - 2-3x faster, constant memory):"
echo "      python -m vr180_streaming config-streaming-runpod.yaml"
echo
echo "   b) CHUNKED (Original - more stable, higher memory):"
echo "      python -m vr180_matting config-chunked-runpod.yaml"
echo
echo "3. Optional: Edit configs first:"
echo "   nano config-streaming-runpod.yaml   # For streaming"
echo "   nano config-chunked-runpod.yaml     # For chunked"
echo
echo "4. Monitor progress:"
echo "   - GPU usage: nvtop"
echo "   - System resources: htop"
echo "   - Output directory: ls -la /workspace/output/"
echo
echo "📊 Performance Tips:"
echo "==================="
echo "- Streaming: Best for long videos, uses ~50GB RAM constant"
echo "- Chunked: More stable but uses 100GB+ RAM in spikes"
echo "- Scale factor: 0.25 (fast) → 0.5 (balanced) → 1.0 (quality)"
echo "- A6000/A100: Can handle 0.5-0.75 scale easily"
echo "- Monitor VRAM with: nvidia-smi -l 1"
echo
echo "🎯 Example Commands:"
echo "==================="
echo "# Process with custom output path:"
echo "python -m vr180_streaming config-streaming-runpod.yaml --output /workspace/output/my_video.mp4"
echo
echo "# Process specific frame range:"
echo "python -m vr180_streaming config-streaming-runpod.yaml --start-frame 1000 --max-frames 5000"
echo
echo "# Override scale for quality:"
echo "python -m vr180_streaming config-streaming-runpod.yaml --scale 0.75"
echo
echo "Happy matting! 🎬"