51 hours to 31 hours

2024-10-19 22:21:50 -07:00
parent 999c6660e9
commit fecb7f5f04
5 changed files with 389 additions and 72 deletions

notebooks/create_full_matted.sh (new executable file, +102 lines)

@@ -0,0 +1,102 @@
#!/bin/bash
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to display informational messages in blue
echo_info() {
echo -e "\033[1;34m[INFO]\033[0m $1"
}
# Function to display error messages in red
echo_error() {
echo -e "\033[1;31m[ERROR]\033[0m $1"
}
# Function to display usage instructions
usage() {
echo "Usage: $0 <segments_directory>"
echo "Example: $0 606-short_segments"
exit 1
}
# Check if exactly one argument is provided
if [ "$#" -ne 1 ]; then
echo_error "Incorrect number of arguments."
usage
fi
# Assign the first argument to SEGMENTS_DIR
SEGMENTS_DIR="$1"
# Resolve the absolute path of SEGMENTS_DIR
SEGMENTS_DIR=$(realpath "$SEGMENTS_DIR")
# Check if the provided argument is a directory
if [ ! -d "$SEGMENTS_DIR" ]; then
echo_error "The provided path '$SEGMENTS_DIR' is not a directory or does not exist."
exit 1
fi
echo_info "Segments directory set to: $SEGMENTS_DIR"
# Check if ffmpeg is installed
if ! command -v ffmpeg &> /dev/null; then
echo_error "ffmpeg could not be found. Please install ffmpeg and ensure it's in your PATH."
exit 1
fi
# Create a temporary file to store the list of videos to concatenate
concat_list=$(mktemp)
# Ensure the temporary file is deleted on script exit
trap "rm -f $concat_list" EXIT
# Iterate over each segment directory within SEGMENTS_DIR in numeric order.
# Segments are assumed to be named segment_1, segment_2, etc. A bare glob
# expands in lexicographic order (segment_10 before segment_2), so run the
# paths through a version-aware sort first. This assumes GNU sort and no
# whitespace in the segment paths.
for segment_dir in $(printf '%s\n' "$SEGMENTS_DIR"/segment_* | sort -V); do
if [ -d "$segment_dir" ]; then
segment_name=$(basename "$segment_dir")
echo_info "Processing segment: $segment_name"
# Define the path to output-matted.mp4
output_matted="$segment_dir/output-matted.mp4"
# Check if output-matted.mp4 exists
if [ ! -f "$output_matted" ]; then
echo_error " '$output_matted' does not exist. Skipping this segment."
continue
fi
# Append the file path to the concat list in the required format
echo "file '$output_matted'" >> "$concat_list"
echo_info " Added '$output_matted' to the concatenation list."
else
echo_error "'$segment_dir' is not a directory. Skipping."
fi
done
# Check if the concat list has at least one file
if [ ! -s "$concat_list" ]; then
echo_error "No 'output-matted.mp4' files found in the provided segments directory."
exit 1
fi
echo_info "Starting concatenation of videos..."
# Define the output file path
output_full="full_output_matted.mp4"
# Execute ffmpeg to concatenate videos using the concat demuxer
ffmpeg -f concat -safe 0 -i "$concat_list" -c copy "$output_full" \
&& echo_info "Successfully created '$output_full' in $(pwd)"
echo_info "Concatenation process completed."

notebooks/create_output_matted.sh (new executable file, +140 lines)

@@ -0,0 +1,140 @@
#!/bin/bash
# Exit immediately if a command exits with a non-zero status.
set -e
# Enable debugging (optional)
# Uncomment the following line to enable debug mode
# set -x
# Function to display informational messages in blue
echo_info() {
echo -e "\033[1;34m[INFO]\033[0m $1"
}
# Function to display error messages in red
echo_error() {
echo -e "\033[1;31m[ERROR]\033[0m $1"
}
# Function to display usage instructions
usage() {
echo "Usage: $0 <segments_directory>"
echo "Example: $0 606-short_segments"
exit 1
}
# Check if exactly one argument is provided
if [ "$#" -ne 1 ]; then
echo_error "Incorrect number of arguments."
usage
fi
# Assign the first argument to SEGMENTS_DIR
SEGMENTS_DIR="$1"
# Resolve the absolute path of SEGMENTS_DIR
if command -v realpath &> /dev/null; then
SEGMENTS_DIR=$(realpath "$SEGMENTS_DIR")
else
# If realpath is not available, use a fallback
SEGMENTS_DIR="$(cd "$SEGMENTS_DIR" && pwd)"
fi
# Check if the provided argument is a directory
if [ ! -d "$SEGMENTS_DIR" ]; then
echo_error "The provided path '$SEGMENTS_DIR' is not a directory or does not exist."
exit 1
fi
echo_info "Segments directory set to: $SEGMENTS_DIR"
# Check if ffmpeg is installed
if ! command -v ffmpeg &> /dev/null; then
echo_error "ffmpeg could not be found. Please install ffmpeg and ensure it's in your PATH."
exit 1
fi
# Iterate over each segment directory within SEGMENTS_DIR
for segment_dir in "$SEGMENTS_DIR"/segment_*; do
if [ -d "$segment_dir" ]; then
segment_name=$(basename "$segment_dir")
echo_info "Processing directory: $segment_name"
# Define paths
frames_dir="$segment_dir/output_frames" # Using output_frames for video frames
# Check if output_frames directory exists
if [ ! -d "$frames_dir" ]; then
echo_error " Directory 'output_frames' does not exist in '$segment_name'. Skipping."
continue
fi
# Find the audio source file (output_*.mp4)
# Assuming only one output_*.mp4 per segment; adjust if multiple exist
audio_file=$(find "$segment_dir" -maxdepth 1 -type f -iname "output_*.mp4" | head -n 1)
if [ -z "$audio_file" ]; then
echo_error " No 'output_*.mp4' file found in '$segment_name'. Skipping."
continue
fi
echo_info " Found audio source: $(basename "$audio_file")"
# Define output video path
output_video="$segment_dir/output-matted.mp4"
# Check if output video already exists
if [ -f "$output_video" ]; then
echo_info " '$output_video' already exists. Overwriting..."
fi
# Execute ffmpeg command
echo_info " Starting video creation with ffmpeg..."
# Determine the number of digits in frame filenames
# Example: 0001.jpg has 4 digits
        # Use find so an empty frames directory does not abort the script under set -e
        first_frame=$(find "$frames_dir" -maxdepth 1 -name '*.jpg' | sort | head -n 1)
if [[ "$first_frame" =~ ([0-9]+)\.jpg$ ]]; then
num_digits=${#BASH_REMATCH[1]}
else
echo_error " Unable to determine frame numbering in '$frames_dir'. Skipping."
continue
fi
# Create a pattern for ffmpeg based on the number of digits
frame_pattern=$(printf "%%0%dd.jpg" "$num_digits")
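        # Worked example: for a first frame named 0001.jpg, BASH_REMATCH[1] is
        # "0001", num_digits is 4, and frame_pattern expands to "%04d.jpg",
        # so ffmpeg reads 0001.jpg, 0002.jpg, ... in sequence.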
# Run ffmpeg to create the video with audio
ffmpeg -y \
-framerate 59.94 \
-i "$frames_dir/$frame_pattern" \
-i "$audio_file" \
-c:v hevc_nvenc \
-preset slow \
-b:v 50M \
-s 3840x1920 \
-r 59.94 \
-c:a copy \
-map 0:v:0 \
-map 1:a:0 \
"$output_video" \
&& echo_info " Successfully created 'output-matted.mp4' in '$segment_name'"
else
echo_error "'$segment_dir' is not a directory. Skipping."
fi
done
echo_info "All segments processed."

(modified file, path not shown: Python SAM2 matting script)

@@ -14,14 +14,19 @@
# to determine an input mask and use add_new_mask() instead of selecting
# points.
#
# Each segment has 2 versions of each frame: one high quality, used for
# final rendering, and one low quality, used to speed up inference.
#
# When the script finishes, each segment should have an output directory
# with the same object tracked throughout every frame in all the segment
# directories.
#
# I will then turn these back into a video using ffmpeg, but that is outside
# the scope of this program.
import os
import cv2
import numpy as np
from concurrent.futures import ThreadPoolExecutor
import torch
import sys
from sam2.build_sam import build_sam2_video_predictor
@@ -32,67 +37,19 @@ SAM2_CHECKPOINT = "../checkpoints/sam2.1_hiera_large.pt"
MODEL_CFG = "configs/sam2.1/sam2.1_hiera_l.yaml"
def load_previous_segment_mask(prev_segment_dir):
    mask_path = os.path.join(prev_segment_dir, "mask.jpg")
mask_image = cv2.imread(mask_path)
# Extract Object A and Object B masks
mask_a = (mask_image[:, :, 1] == 255) # Green channel
mask_b = (mask_image[:, :, 0] == 254) # Blue channel
    # Debug preview (disabled): show the mask image in a window resized to
    # 500 px tall.
    # cv2.namedWindow('Mask A', cv2.WINDOW_NORMAL)
    # cv2.resizeWindow('Mask A', int(mask_image.shape[1] * (500 / mask_image.shape[0])), 500)
    # cv2.imshow('Mask A', mask_image)
per_obj_input_mask = {1: mask_a, 2: mask_b}
input_palette = None # No palette needed for binary mask
return per_obj_input_mask, input_palette
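# Because mask.jpg is JPEG-encoded, compression can nudge channel values,
# which is presumably why the blue channel is tested against 254 above. A
# threshold-based variant is more tolerant of that noise; this is a
# hypothetical sketch, not the behavior the pipeline currently relies on.
def load_previous_segment_mask_thresholded(prev_segment_dir):
    mask_path = os.path.join(prev_segment_dir, "mask.jpg")
    mask_image = cv2.imread(mask_path)  # BGR
    mask_a = mask_image[:, :, 1] > 200  # strong green channel -> Object A
    mask_b = mask_image[:, :, 0] > 200  # strong blue channel -> Object B
    return {1: mask_a, 2: mask_b}, None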
def convert_green_screen_to_mask(frame):
    # Match only exact pure-green pixels (BGR == (0, 255, 0)); the lower and
    # upper bounds are identical, so any compression noise breaks the match.
    lower_green = np.array([0, 255, 0])
    upper_green = np.array([0, 255, 0])
    mask = cv2.inRange(frame, lower_green, upper_green)
    mask = cv2.bitwise_not(mask)  # foreground is everything that is not green
    return mask > 0
def get_per_obj_mask(mask):
object_ids = np.unique(mask)
object_ids = object_ids[object_ids > 0].tolist()
per_obj_mask = {object_id: (mask == object_id) for object_id in object_ids}
return per_obj_mask
def load_masks_from_dir(input_mask_dir, video_name, frame_name, per_obj_png_file, allow_missing=False):
if not per_obj_png_file:
input_mask_path = os.path.join(input_mask_dir, video_name, f"{frame_name}.png")
if allow_missing and not os.path.exists(input_mask_path):
return {}, None
input_mask, input_palette = load_ann_png(input_mask_path)
per_obj_input_mask = get_per_obj_mask(input_mask)
else:
per_obj_input_mask = {}
input_palette = None
for object_name in os.listdir(os.path.join(input_mask_dir, video_name)):
object_id = int(object_name)
input_mask_path = os.path.join(input_mask_dir, video_name, object_name, f"{frame_name}.png")
if allow_missing and not os.path.exists(input_mask_path):
continue
input_mask, input_palette = load_ann_png(input_mask_path)
per_obj_input_mask[object_id] = input_mask > 0
if not per_obj_input_mask:
frame_path = os.path.join(input_mask_dir, video_name, f"{frame_name}.jpg")
if os.path.exists(frame_path):
frame = cv2.imread(frame_path)
mask = convert_green_screen_to_mask(frame)
per_obj_input_mask = {1: mask}
return per_obj_input_mask, input_palette
def apply_green_mask(frame, masks):
def apply_green_mask_oldest(frame, masks):
green_mask = np.zeros_like(frame)
green_mask[:, :] = [0, 255, 0]
@@ -104,9 +61,96 @@ def apply_green_mask(frame, masks):
combined_mask = np.logical_or(combined_mask, mask)
inverted_mask = np.logical_not(combined_mask)
frame[inverted_mask] = green_mask[inverted_mask]
def apply_mask_part(start_row, end_row):
frame[start_row:end_row][inverted_mask[start_row:end_row]] = green_mask[start_row:end_row][inverted_mask[start_row:end_row]]
    num_threads = 4
    rows_per_thread = frame.shape[0] // num_threads
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = [
            # The last chunk runs to the final row so frame heights that are
            # not divisible by num_threads are still fully covered.
            executor.submit(
                apply_mask_part,
                i * rows_per_thread,
                frame.shape[0] if i == num_threads - 1 else (i + 1) * rows_per_thread,
            )
            for i in range(num_threads)
        ]
for future in futures:
future.result()
return frame
def apply_green_mask_old_good(frame, masks):
# Initialize combined mask as a boolean array
combined_mask = np.zeros(frame.shape[:2], dtype=bool)
for mask in masks:
mask = mask.squeeze()
# Resize mask if necessary
if mask.shape != frame.shape[:2]:
mask = cv2.resize(mask.astype(np.uint8), (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
# Ensure mask is boolean
mask = mask.astype(bool)
# Combine masks using in-place logical OR
combined_mask |= mask
# Invert the combined mask to get background regions
inverted_mask = ~combined_mask
# Apply green color to background regions directly
frame[inverted_mask] = [0, 255, 0]
return frame
def apply_green_mask(frame, masks):
"""
Applies masks to the frame, replacing the background with green.
Parameters:
- frame: numpy array representing the image frame.
- masks: list of numpy arrays representing the masks.
Returns:
- result_frame: numpy array with the green background applied.
"""
# Initialize combined mask as a boolean array
combined_mask = np.zeros(frame.shape[:2], dtype=bool)
for mask in masks:
mask = mask.squeeze()
# Resize the mask if necessary
if mask.shape != frame.shape[:2]:
# Resize the mask using bilinear interpolation
# and convert it to float32 for accurate interpolation
resized_mask = cv2.resize(
mask.astype(np.float32),
(frame.shape[1], frame.shape[0]),
interpolation=cv2.INTER_LINEAR
)
# Threshold the resized mask to obtain a boolean mask
mask = resized_mask > 0.5
else:
# Ensure mask is boolean
mask = mask.astype(bool)
# Combine masks using logical OR
combined_mask |= mask # Now both arrays are bool
# Create a green background image
green_background = np.full_like(frame, [0, 255, 0])
# Use combined mask to overlay the original frame onto the green background
result_frame = np.where(
combined_mask[..., None],
frame,
green_background
)
return result_frame
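# Minimal usage sketch with synthetic data (not part of the pipeline): a gray
# 4x4 BGR frame and one half-resolution mask covering the top-left corner,
# exercising the resize-and-threshold path above.
def _demo_apply_green_mask():
    demo_frame = np.full((4, 4, 3), 128, dtype=np.uint8)
    demo_mask = np.zeros((2, 2), dtype=bool)
    demo_mask[0, 0] = True  # upscaled to the frame size inside the function
    matted = apply_green_mask(demo_frame, [demo_mask])
    assert (matted[3, 3] == [0, 255, 0]).all()      # uncovered pixel turns green
    assert (matted[0, 0] == [128, 128, 128]).all()  # masked pixel is preserved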
def initialize_predictor():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
predictor = build_sam2_video_predictor(MODEL_CFG, SAM2_CHECKPOINT, device=device)
@@ -155,7 +199,6 @@ def select_points(first_frame):
cv2.destroyAllWindows()
return np.array(points_a, dtype=np.float32), np.array(points_b, dtype=np.float32)
def add_points_to_predictor(predictor, inference_state, points, obj_id):
    points = np.array(points, dtype=np.float32)  # ensure the points have shape (N, 2)
    labels = np.ones(len(points), dtype=np.int32)  # one positive (foreground) label per point
@@ -215,33 +258,57 @@ def apply_colored_mask(frame, masks_a, masks_b):
mask = mask.squeeze()
if mask.shape != frame.shape[:2]:
mask = cv2.resize(mask, (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
        colored_mask[mask] = [0, 255, 0]  # Green for Object A
for mask in masks_b:
mask = mask.squeeze()
if mask.shape != frame.shape[:2]:
mask = cv2.resize(mask, (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
        colored_mask[mask] = [255, 0, 0]  # Blue for Object B
return colored_mask
def process_and_save_frames(input_frames_dir, output_frames_dir, frame_names, video_segments, segment_dir):
def process_and_save_frames(input_frames_dir, fullres_frames_dir, output_frames_dir, frame_names, video_segments, segment_dir):
def upscale_masks(masks, frame_shape):
upscaled_masks = []
for mask in masks:
mask = mask.squeeze()
upscaled_mask = cv2.resize(mask.astype(np.float32), (frame_shape[1], frame_shape[0]), interpolation=cv2.INTER_LINEAR)
            upscaled_mask = upscaled_mask > 0.5  # threshold back to a boolean mask
upscaled_masks.append(upscaled_mask)
return upscaled_masks
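    # For example, a mask inferred on half-resolution frames (e.g. 1920x960)
    # is resized up to the full-resolution frame (e.g. 3840x1920), then
    # thresholded at 0.5 back to a boolean mask.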
for out_frame_idx, frame_name in enumerate(frame_names):
frame_path = os.path.join(input_frames_dir, frame_name)
frame_path = os.path.join(fullres_frames_dir, frame_name)
frame = cv2.imread(frame_path)
masks = [video_segments[out_frame_idx][out_obj_id] for out_obj_id in video_segments[out_frame_idx]]
frame = apply_green_mask(frame, masks)
# Upscale masks to match the full-resolution frame
upscaled_masks = []
for mask in masks:
mask = mask.squeeze()
upscaled_mask = cv2.resize(mask.astype(np.uint8), (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
upscaled_masks.append(upscaled_mask)
frame = apply_green_mask(frame, upscaled_masks)
output_path = os.path.join(output_frames_dir, frame_name)
cv2.imwrite(output_path, frame)
# Create and save mask.jpg
final_frame_path = os.path.join(input_frames_dir, frame_names[-1])
final_frame_path = os.path.join(fullres_frames_dir, frame_names[-1])
final_frame = cv2.imread(final_frame_path)
masks_a = [video_segments[len(frame_names) - 1][1]]
masks_b = [video_segments[len(frame_names) - 1][2]]
upscaled_masks_a = upscale_masks(masks_a, final_frame.shape)
upscaled_masks_b = upscale_masks(masks_b, final_frame.shape)
# Apply colored mask
mask_image = apply_colored_mask(final_frame, masks_a, masks_b)
mask_image = apply_colored_mask(final_frame, upscaled_masks_a, upscaled_masks_b)
mask_output_path = os.path.join(segment_dir, "mask.jpg")
cv2.imwrite(mask_output_path, mask_image)
@@ -253,10 +320,11 @@ def main():
parser.add_argument("--segments-collect-points", nargs='+', type=int, help="Segments for which to collect points.")
args = parser.parse_args()
base_dir = "./spirit_2min_segments"
base_dir = "./606-short_segments"
segments = [d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d)) and d.startswith("segment_")]
segments.sort(key=lambda x: int(x.split("_")[1]))
scaled_frames_dir_name = "frames_scaled"
    fullres_frames_dir_name = "frames"  # I want to render the final video with these frames
collect_points_segments = args.segments_collect_points if args.segments_collect_points else []
@@ -266,7 +334,7 @@ def main():
segment_dir = os.path.join(base_dir, segment)
points_file = os.path.join(segment_dir, "segment_points")
if segment_index in collect_points_segments and not os.path.exists(points_file):
input_frames_dir = os.path.join(segment_dir, "frames")
            input_frames_dir = os.path.join(segment_dir, scaled_frames_dir_name)
first_frame, _ = load_first_frame(input_frames_dir)
points_a, points_b = select_points(first_frame)
with open(points_file, 'w') as f:
@@ -285,7 +353,8 @@ def main():
points = np.loadtxt(points_file, comments="#")
points_a = points[:4]
points_b = points[4:]
input_frames_dir = os.path.join(segment_dir, "frames")
        input_frames_dir = os.path.join(segment_dir, scaled_frames_dir_name)
        fullres_frames_dir = os.path.join(segment_dir, fullres_frames_dir_name)
output_frames_dir = os.path.join(segment_dir, "output_frames")
os.makedirs(output_frames_dir, exist_ok=True)
first_frame, frame_names = load_first_frame(input_frames_dir)
@@ -306,7 +375,7 @@ def main():
masks_b = [(out_mask_logits_b[i] > 0.0).cpu().numpy() for i in range(len(out_mask_logits_b))]
video_segments = propagate_masks(predictor, inference_state)
predictor.reset_state(inference_state)
process_and_save_frames(input_frames_dir, output_frames_dir, frame_names, video_segments, segment_dir)
process_and_save_frames(input_frames_dir, fullres_frames_dir, output_frames_dir, frame_names, video_segments, segment_dir)
del inference_state
del video_segments
del predictor
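        # Hypothetical follow-up: del alone leaves PyTorch's CUDA cache warm;
        # calling torch.cuda.empty_cache() here would hand freed blocks back
        # to the driver if per-segment GPU memory pressure becomes an issue.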

(modified file, path not shown: segment frame-extraction script)

@@ -18,10 +18,16 @@ for segment_dir in "$output_folder"/segment_*; do
if [ -n "$segment_file" ]; then
# Create the frames directory
frames_dir="$segment_dir/frames"
frames_scaled_dir="$segment_dir/frames_scaled"
mkdir -p "$frames_dir"
mkdir -p "$frames_scaled_dir"
# Extract frames using ffmpeg with CUDA acceleration and save as JPEGs
ffmpeg -hwaccel cuda -i "$segment_file" -q:v 2 "$frames_dir/%04d.jpg"
ffmpeg -hwaccel cuda -i "$segment_file" -q:v 1 "$frames_dir/%04d.jpg"
        #ffmpeg -hwaccel cuda -i "$segment_file" "$frames_dir/%04d.png"

        # Extract scaled-down frames (50% of original size)
ffmpeg -hwaccel cuda -i "$segment_file" -q:v 1 -vf "scale=iw/2:ih/2" "$frames_scaled_dir/%04d.jpg"
echo "Frames extracted for $segment_file and stored in $frames_dir"
else

(modified file, path not shown: Jupyter notebook)

@@ -168,7 +168,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "f5f3245e-b4d6-418b-a42a-a67e0b3b5aec",
"metadata": {},
"outputs": [],
@@ -183,7 +183,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "1a5320fe-06d7-45b8-b888-ae00799d07fa",
"metadata": {},
"outputs": [],
@@ -237,17 +237,17 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 5,
"id": "b94c87ca-fd1a-4011-9609-e8be1cbe3230",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<matplotlib.image.AxesImage at 0x7f31800bed40>"
"<matplotlib.image.AxesImage at 0x7ff9b40164a0>"
]
},
"execution_count": 8,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
},
@@ -300,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 6,
"id": "8967aed3-eb82-4866-b8df-0f4743255c2c",
"metadata": {},
"outputs": [
@@ -308,7 +308,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"frame loading (JPEG): 100%|█████████████████████████████████████████████████████████████████| 200/200 [00:03<00:00, 59.37it/s]\n"
"frame loading (JPEG): 100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 200/200 [00:03<00:00, 51.02it/s]\n"
]
}
],
@@ -366,7 +366,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 7,
"id": "3e749bab-0f36-4173-bf8d-0c20cd5214b3",
"metadata": {},
"outputs": [
@@ -427,7 +427,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 8,
"id": "e1ab3ec7-2537-4158-bf98-3d0977d8908d",
"metadata": {},
"outputs": [