actually fix streaming save
@@ -411,34 +411,47 @@ class VideoProcessor:
         try:
             print(f"📁 Using temp frames dir: {temp_frames_dir}")
-            # Process each chunk and save frames directly to disk
+            # Process each chunk frame-by-frame (true streaming)
             for i, chunk_file in enumerate(chunk_files):
                 print(f"📼 Processing chunk {i+1}/{len(chunk_files)}: {chunk_file.name}")

-                # Load chunk (this is the only copy in memory)
+                # Load chunk metadata without loading frames array
                 chunk_data = np.load(str(chunk_file))
-                frames = chunk_data['frames'].tolist()  # Convert to list of arrays
-                chunk_data.close()
+                frames_array = chunk_data['frames']  # This is still mmap'd, not loaded
+                total_frames_in_chunk = frames_array.shape[0]

-                # Handle overlap blending (simple approach - skip overlapping frames from previous chunks)
-                if i > 0 and overlap_frames > 0:
-                    # Skip the first overlap_frames from this chunk (they overlap with previous)
-                    frames = frames[overlap_frames:]
-                    print(f"   ✂️ Skipped {overlap_frames} overlapping frames")
+                # Determine which frames to skip for overlap
+                start_frame_idx = overlap_frames if i > 0 and overlap_frames > 0 else 0
+                frames_to_process = total_frames_in_chunk - start_frame_idx

-                # Save frames directly to disk (no accumulation in memory)
-                for frame in frames:
+                if start_frame_idx > 0:
+                    print(f"   ✂️ Skipping first {start_frame_idx} overlapping frames")
+
+                print(f"   🔄 Processing {frames_to_process} frames one-by-one...")
+
+                # Process frames ONE AT A TIME (true streaming)
+                for frame_idx in range(start_frame_idx, total_frames_in_chunk):
+                    # Load only ONE frame at a time
+                    frame = frames_array[frame_idx]  # Load single frame
+
                     # Save frame directly to disk
                     frame_path = temp_frames_dir / f"frame_{frame_counter:06d}.jpg"
                     # Use high quality JPEG to minimize compression artifacts
                     success = cv2.imwrite(str(frame_path), frame, [cv2.IMWRITE_JPEG_QUALITY, 95])
                     if not success:
                         raise RuntimeError(f"Failed to save frame {frame_counter}")

                     frame_counter += 1

-                print(f"   ✅ Saved {len(frames)} frames to disk (total: {frame_counter})")
-                # Immediately free chunk memory
-                del frames, chunk_data
+                    # Periodic progress and cleanup
+                    if frame_counter % 100 == 0:
+                        print(f"   💾 Saved {frame_counter} frames...")
+                        gc.collect()  # Periodic cleanup
+
+                print(f"   ✅ Saved {frames_to_process} frames to disk (total: {frame_counter})")
+
+                # Close chunk file and cleanup
+                chunk_data.close()
+                del chunk_data, frames_array

                 # Delete chunk file to free disk space
                 try:
@@ -447,8 +460,17 @@ class VideoProcessor:
                 except Exception as e:
                     print(f"   ⚠️ Could not delete {chunk_file.name}: {e}")

-                # Aggressive cleanup every chunk
-                self._aggressive_memory_cleanup(f"After processing chunk {i}")
+                # Aggressive cleanup and memory monitoring after each chunk
+                self._aggressive_memory_cleanup(f"After streaming merge chunk {i}")
+
+                # Memory safety check
+                memory_info = self._get_process_memory_info()
+                if memory_info['rss_gb'] > 35:  # Warning if approaching 46GB limit
+                    print(f"⚠️ High memory usage: {memory_info['rss_gb']:.1f}GB - forcing cleanup")
+                    gc.collect()
+                    import torch
+                    if torch.cuda.is_available():
+                        torch.cuda.empty_cache()

             # Create final video directly from frame images using ffmpeg
             print(f"📹 Creating final video from {frame_counter} frames...")
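
The safety check calls self._get_process_memory_info(), which is defined outside this diff. A plausible helper, assuming psutil is available (the method name matches the call site; the body is an assumption, not the repository's implementation):

    import psutil

    def _get_process_memory_info(self):
        """Return the current process's resident set size in GB (sketch)."""
        rss_bytes = psutil.Process().memory_info().rss  # resident set size in bytes
        return {'rss_gb': rss_bytes / (1024 ** 3)}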
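
The final encode from the numbered JPEGs happens after this hunk and is not shown. A typical ffmpeg invocation for the frame_%06d.jpg naming used above (frame rate, codec, and function name are assumptions, not values from this commit):

    import subprocess
    from pathlib import Path

    def encode_frames_to_video(temp_frames_dir, output_path, fps=30):
        """Encode frame_%06d.jpg files into an H.264 MP4 via the ffmpeg CLI (sketch)."""
        cmd = [
            "ffmpeg", "-y",                # overwrite output if it already exists
            "-framerate", str(fps),        # frame rate of the input image sequence
            "-i", str(Path(temp_frames_dir) / "frame_%06d.jpg"),
            "-c:v", "libx264",             # H.264 encoding
            "-pix_fmt", "yuv420p",         # broad player compatibility
            str(output_path),
        ]
        subprocess.run(cmd, check=True)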