more memory fixes hopefully
@@ -387,19 +387,83 @@ class VideoProcessor:
         # Green screen background
         return np.full_like(frame, self.config.output.background_color, dtype=np.uint8)
 
+    def merge_chunks_streaming(self, chunk_files: List[Path], output_path: str,
+                               overlap_frames: int = 0, audio_source: str = None) -> None:
+        """
+        Merge processed chunks using a streaming approach (no memory accumulation)
+
+        Args:
+            chunk_files: List of chunk result files (.npz)
+            output_path: Final output video path
+            overlap_frames: Number of overlapping frames
+            audio_source: Audio source file for the final video
+        """
+        from .streaming_video_writer import StreamingVideoWriter
+
+        if not chunk_files:
+            raise ValueError("No chunk files to merge")
+
+        print(f"🎬 Streaming merge: {len(chunk_files)} chunks → {output_path}")
+
+        # Initialize streaming writer
+        writer = StreamingVideoWriter(
+            output_path=output_path,
+            fps=self.video_info['fps'],
+            audio_source=audio_source
+        )
+
+        try:
+            # Process each chunk without accumulation
+            for i, chunk_file in enumerate(chunk_files):
+                print(f"📼 Processing chunk {i+1}/{len(chunk_files)}: {chunk_file.name}")
+
+                # Load chunk (this is the only copy in memory)
+                chunk_data = np.load(str(chunk_file))
+                frames = list(chunk_data['frames'])  # Convert to a list of frame arrays
+                chunk_data.close()
+
+                # Write chunk with streaming writer
+                writer.write_chunk(
+                    frames=frames,
+                    chunk_index=i,
+                    overlap_frames=overlap_frames if i > 0 else 0,
+                    blend_with_previous=(i > 0 and overlap_frames > 0)
+                )
+
+                # Immediately free memory
+                del frames, chunk_data
+
+                # Delete chunk file to free disk space
+                try:
+                    chunk_file.unlink()
+                    print(f"   🗑️ Deleted {chunk_file.name}")
+                except Exception as e:
+                    print(f"   ⚠️ Could not delete {chunk_file.name}: {e}")
+
+                # Aggressive cleanup after every chunk
+                self._aggressive_memory_cleanup(f"After processing chunk {i}")
+
+            # Finalize the video
+            writer.finalize()
+
+        except Exception as e:
+            print(f"❌ Streaming merge failed: {e}")
+            writer.cleanup()
+            raise
+
+        print(f"✅ Streaming merge complete: {output_path}")
+
     def merge_overlapping_chunks(self,
                                  chunk_results: List[List[np.ndarray]],
                                  overlap_frames: int) -> List[np.ndarray]:
         """
         Merge overlapping chunks with blending in overlap regions
 
         Args:
            chunk_results: List of chunk results
            overlap_frames: Number of overlapping frames
 
         Returns:
            Merged frame sequence
+
+        Legacy merge method - DEPRECATED due to memory accumulation.
+        Use merge_chunks_streaming() instead for memory efficiency.
         """
+        import warnings
+        warnings.warn("merge_overlapping_chunks() is deprecated due to memory accumulation. Use merge_chunks_streaming()",
+                      DeprecationWarning, stacklevel=2)
+
         if len(chunk_results) == 1:
             return chunk_results[0]
 
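A hypothetical usage sketch of the new method (not part of this commit): the chunk directory, glob pattern, VideoProcessor construction, and output paths below are assumptions; only the merge_chunks_streaming() signature and the 'frames' key of the .npz chunks come from the diff above.

    from pathlib import Path

    chunk_dir = Path("temp_chunks")                      # assumed location of saved chunk files
    chunk_files = sorted(chunk_dir.glob("chunk_*.npz"))  # each .npz holds a 'frames' array

    processor = VideoProcessor(config)                   # config assumed to be built elsewhere
    processor.merge_chunks_streaming(
        chunk_files=chunk_files,
        output_path="output/matted.mp4",
        overlap_frames=5,
        audio_source="input/source.mp4",                 # optional; muxed into the final video
    )

Because each chunk is loaded, written, and deleted inside the loop, peak memory stays at roughly one chunk no matter how many chunks there are.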
@@ -640,36 +704,23 @@ class VideoProcessor:
             if self.memory_manager.should_emergency_cleanup():
                 self.memory_manager.emergency_cleanup()
 
-        # Load and merge chunks from disk
-        print("\nLoading and merging chunks...")
-        chunk_results = []
-        for i, chunk_file in enumerate(chunk_files):
-            print(f"Loading {chunk_file.name}...")
-            chunk_data = np.load(str(chunk_file))
-            chunk_results.append(chunk_data['frames'])
-            chunk_data.close()  # Close the file
-
-            # Delete chunk file immediately after loading to free disk space
-            try:
-                chunk_file.unlink()
-                print(f"  Deleted chunk file {chunk_file.name}")
-            except Exception as e:
-                print(f"  Warning: Could not delete chunk file: {e}")
-
-            # Aggressive cleanup every few chunks to prevent accumulation
-            if i % 3 == 0 and i > 0:
-                self._aggressive_memory_cleanup(f"after loading chunk {i}")
+        # Use streaming merge to avoid memory accumulation (fixes OOM)
+        print("\n🎬 Using streaming merge (no memory accumulation)...")
 
-        # Merge chunks
-        final_frames = self.merge_overlapping_chunks(chunk_results, overlap_frames)
+        # Determine audio source for final video
+        audio_source = None
+        if self.config.output.preserve_audio and Path(self.config.input.video_path).exists():
+            audio_source = self.config.input.video_path
 
-        # Free chunk results after merging - this is critical!
-        del chunk_results
-        self._aggressive_memory_cleanup("after merging chunks")
+        # Stream merge chunks directly to output (no memory accumulation)
+        self.merge_chunks_streaming(
+            chunk_files=chunk_files,
+            output_path=self.config.output.path,
+            overlap_frames=overlap_frames,
+            audio_source=audio_source
+        )
 
-        # Save results
-        print(f"Saving {len(final_frames)} processed frames...")
-        self.save_video(final_frames, self.config.output.path)
+        print("✅ Streaming merge complete - no memory accumulation!")
 
         # Calculate final statistics
         self.processing_stats['end_time'] = time.time()
 
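For context, a minimal sketch of the chunk round trip this hunk assumes; the saving side is not shown in this commit, so np.savez_compressed and the file naming here are illustrative, while the 'frames' key and the np.load()/close() pattern match the code above.

    import numpy as np
    from pathlib import Path

    def save_chunk(frames, chunk_index, chunk_dir=Path("temp_chunks")):
        # Persist one processed chunk so it can be merged later without holding it in RAM.
        chunk_dir.mkdir(parents=True, exist_ok=True)
        path = chunk_dir / f"chunk_{chunk_index:04d}.npz"
        np.savez_compressed(path, frames=np.asarray(frames, dtype=np.uint8))
        return path

    def load_chunk(path):
        # Load one chunk's frames; the caller should del them as soon as they are written out.
        data = np.load(str(path))
        frames = list(data["frames"])  # list of (H, W, C) uint8 frames
        data.close()
        return frames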
@@ -398,44 +398,50 @@ class VR180Processor(VideoProcessor):
 
             self._print_memory_step(f"After SAM2 propagation ({eye_name} eye)")
 
-            # Apply masks - need to reload frames from temp video since we freed the original frames
-            self._print_memory_step(f"Before reloading frames for mask application ({eye_name} eye)")
+            # Apply masks with streaming approach (no frame accumulation)
+            self._print_memory_step(f"Before streaming mask application ({eye_name} eye)")
 
-            # Read frames back from the temp video for mask application
+            # Process frames one at a time without accumulation
             cap = cv2.VideoCapture(str(temp_video_path))
-            reloaded_frames = []
-
-            for frame_idx in range(num_frames):
-                ret, frame = cap.read()
-                if not ret:
-                    break
-                reloaded_frames.append(frame)
-            cap.release()
-
-            self._print_memory_step(f"Reloaded {len(reloaded_frames)} frames for mask application")
-
-            # Apply masks
             matted_frames = []
-            for frame_idx, frame in enumerate(reloaded_frames):
-                if frame_idx in video_segments:
-                    frame_masks = video_segments[frame_idx]
-                    combined_mask = self.sam2_model.get_combined_mask(frame_masks)
 
+            try:
+                for frame_idx in range(num_frames):
+                    ret, frame = cap.read()
+                    if not ret:
+                        break
 
-                    matted_frame = self.sam2_model.apply_mask_to_frame(
-                        frame, combined_mask,
-                        output_format=self.config.output.format,
-                        background_color=self.config.output.background_color
-                    )
-                else:
-                    matted_frame = self._create_empty_mask_frame(frame)
-
-                matted_frames.append(matted_frame)
+                    # Apply mask to this single frame
+                    if frame_idx in video_segments:
+                        frame_masks = video_segments[frame_idx]
+                        combined_mask = self.sam2_model.get_combined_mask(frame_masks)
+
+                        matted_frame = self.sam2_model.apply_mask_to_frame(
+                            frame, combined_mask,
+                            output_format=self.config.output.format,
+                            background_color=self.config.output.background_color
+                        )
+                    else:
+                        matted_frame = self._create_empty_mask_frame(frame)
+
+                    matted_frames.append(matted_frame)
+
+                    # Free the original frame immediately (no accumulation)
+                    del frame
+
+                    # Periodic cleanup during processing
+                    if frame_idx % 100 == 0 and frame_idx > 0:
+                        import gc
+                        gc.collect()
+
+            finally:
+                cap.release()
 
-            # Free reloaded frames and video segments completely
-            del reloaded_frames
+            # Free video segments completely
             del video_segments  # This holds processed masks from SAM2
-            self._aggressive_memory_cleanup(f"After mask application ({eye_name} eye)")
+            self._aggressive_memory_cleanup(f"After streaming mask application ({eye_name} eye)")
 
+            self._print_memory_step(f"Completed streaming mask application ({eye_name} eye)")
             return matted_frames
 
         finally:
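The pattern in the new VR180Processor code (read one frame, mat it, append only the result, free the source frame) can be summarized with a small generator; iter_frames and its arguments are hypothetical helpers for illustration, not part of this commit.

    import cv2

    def iter_frames(video_path, max_frames=None):
        # Yield BGR frames one at a time so only a single source frame is ever held in memory.
        cap = cv2.VideoCapture(str(video_path))
        try:
            count = 0
            while max_frames is None or count < max_frames:
                ret, frame = cap.read()
                if not ret:
                    break
                yield frame
                count += 1
        finally:
            cap.release()

    # e.g. matted = [apply_mask(f) for f in iter_frames("left_eye_temp.mp4", max_frames=100)]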