fix temp file handling

Write SAM2 temp videos as uniquely named files in the current working directory instead of a fixed name inside a tempfile.mkdtemp() directory, and guarantee cleanup (model state and the temp file) in a finally block.
@@ -89,9 +89,10 @@ class SAM2VideoMatting:
         if video_frames is None or len(video_frames) == 0:
             raise ValueError("Either video_path or video_frames must be provided")
 
-        # Create temporary video file
-        temp_dir = tempfile.mkdtemp()
-        temp_video_path = Path(temp_dir) / "temp_video.mp4"
+        # Create temporary video file in current directory
+        import uuid
+        temp_video_name = f"temp_sam2_{uuid.uuid4().hex[:8]}.mp4"
+        temp_video_path = Path.cwd() / temp_video_name
 
         # Write frames to temporary video
         height, width = video_frames[0].shape[:2]
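Note on the hunk above: the old code wrote a fixed-name temp_video.mp4 into a fresh tempfile.mkdtemp() directory; the new code instead drops a uniquely named file into the current working directory, with a short uuid4 suffix to avoid collisions between concurrent runs. A minimal standalone sketch of that naming scheme (the helper name make_temp_video_path is illustrative, not from this repo):

import uuid
from pathlib import Path

def make_temp_video_path(prefix: str = "temp_sam2") -> Path:
    # uuid4().hex[:8] gives ~4 billion possible suffixes, making name
    # collisions between concurrent runs in the same directory unlikely.
    return Path.cwd() / f"{prefix}_{uuid.uuid4().hex[:8]}.mp4"

# Example output: temp_sam2_3fa85f64.mp4 in the current working directory.
print(make_temp_video_path().name)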
@@ -266,8 +267,8 @@ class SAM2VideoMatting:
         if self.temp_video_path is not None:
             try:
                 if self.temp_video_path.exists():
-                    # Remove the temporary directory
-                    shutil.rmtree(self.temp_video_path.parent)
+                    # Remove the temporary video file
+                    self.temp_video_path.unlink()
                 self.temp_video_path = None
             except Exception as e:
                 warnings.warn(f"Failed to cleanup temp video: {e}")
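This cleanup change follows from the first hunk: with the temp video now living in Path.cwd() rather than its own throwaway directory, the old shutil.rmtree(self.temp_video_path.parent) would have deleted the entire working directory, so only the file itself is unlinked. A hedged sketch of the resulting best-effort cleanup (cleanup_temp_video is an illustrative name, not the repo's API):

import warnings
from pathlib import Path
from typing import Optional

def cleanup_temp_video(path: Optional[Path]) -> None:
    """Best-effort removal of a single temp file; warns instead of raising."""
    if path is None:
        return
    try:
        if path.exists():
            path.unlink()  # remove only this file, never its parent directory
    except Exception as e:
        warnings.warn(f"Failed to cleanup temp video: {e}")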
@@ -150,52 +150,75 @@ class VR180Processor(VideoProcessor):
         if not eye_frames:
             return []
 
-        # Initialize SAM2 with eye frames
-        self.sam2_model.init_video_state(eye_frames)
-
-        # Detect persons in first frame
-        first_frame = eye_frames[0]
-        detections = self.detector.detect_persons(first_frame)
-
-        if not detections:
-            warnings.warn(f"No persons detected in {eye_name} eye, chunk {chunk_idx}")
-            return self._create_empty_masks(eye_frames)
-
-        print(f"Detected {len(detections)} persons in {eye_name} eye first frame")
-
-        # Convert to SAM2 prompts
-        box_prompts, labels = self.detector.convert_to_sam_prompts(detections)
-
-        # Add prompts
-        object_ids = self.sam2_model.add_person_prompts(0, box_prompts, labels)
-
-        # Propagate masks
-        video_segments = self.sam2_model.propagate_masks(
-            start_frame=0,
-            max_frames=len(eye_frames)
-        )
-
-        # Apply masks
-        matted_frames = []
-        for frame_idx, frame in enumerate(eye_frames):
-            if frame_idx in video_segments:
-                frame_masks = video_segments[frame_idx]
-                combined_mask = self.sam2_model.get_combined_mask(frame_masks)
-
-                matted_frame = self.sam2_model.apply_mask_to_frame(
-                    frame, combined_mask,
-                    output_format=self.config.output.format,
-                    background_color=self.config.output.background_color
-                )
-            else:
-                matted_frame = self._create_empty_mask_frame(frame)
-
-            matted_frames.append(matted_frame)
-
-        # Cleanup
-        self.sam2_model.cleanup()
-
-        return matted_frames
+        # Create a unique temporary video for this eye processing
+        import uuid
+        temp_video_name = f"temp_sam2_{eye_name}_chunk{chunk_idx}_{uuid.uuid4().hex[:8]}.mp4"
+        temp_video_path = Path.cwd() / temp_video_name
+
+        try:
+            # Write frames to temporary video
+            height, width = eye_frames[0].shape[:2]
+            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+            writer = cv2.VideoWriter(str(temp_video_path), fourcc, 30.0, (width, height))
+
+            for frame in eye_frames:
+                writer.write(frame)
+            writer.release()
+
+            # Initialize SAM2 with video path
+            self.sam2_model.init_video_state(video_path=str(temp_video_path))
+
+            # Detect persons in first frame
+            first_frame = eye_frames[0]
+            detections = self.detector.detect_persons(first_frame)
+
+            if not detections:
+                warnings.warn(f"No persons detected in {eye_name} eye, chunk {chunk_idx}")
+                return self._create_empty_masks(eye_frames)
+
+            print(f"Detected {len(detections)} persons in {eye_name} eye first frame")
+
+            # Convert to SAM2 prompts
+            box_prompts, labels = self.detector.convert_to_sam_prompts(detections)
+
+            # Add prompts
+            object_ids = self.sam2_model.add_person_prompts(0, box_prompts, labels)
+
+            # Propagate masks
+            video_segments = self.sam2_model.propagate_masks(
+                start_frame=0,
+                max_frames=len(eye_frames)
+            )
+
+            # Apply masks
+            matted_frames = []
+            for frame_idx, frame in enumerate(eye_frames):
+                if frame_idx in video_segments:
+                    frame_masks = video_segments[frame_idx]
+                    combined_mask = self.sam2_model.get_combined_mask(frame_masks)
+
+                    matted_frame = self.sam2_model.apply_mask_to_frame(
+                        frame, combined_mask,
+                        output_format=self.config.output.format,
+                        background_color=self.config.output.background_color
+                    )
+                else:
+                    matted_frame = self._create_empty_mask_frame(frame)
+
+                matted_frames.append(matted_frame)
+
+            return matted_frames
+
+        finally:
+            # Always cleanup
+            self.sam2_model.cleanup()
+
+            # Remove temporary video file
+            try:
+                if temp_video_path.exists():
+                    temp_video_path.unlink()
+            except Exception as e:
+                warnings.warn(f"Failed to cleanup temp video {temp_video_path}: {e}")
 
     def _process_eye_sequence_with_validation(self,
                                               right_eye_frames: List[np.ndarray],
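The hunk above restructures the eye-processing body around a try/finally: frames are first materialized to a uniquely named video (SAM2's video state is now initialized from a path rather than an in-memory frame list), and both the model cleanup and the temp-file removal are guaranteed to run even on the early return when no persons are detected, or on an exception. A self-contained sketch of that pattern, with illustrative names (with_temp_video, consumer) that are not from this repo:

import uuid
import warnings
from pathlib import Path
from typing import Callable, List

import cv2
import numpy as np

def with_temp_video(frames: List[np.ndarray],
                    consumer: Callable[[str], object],
                    fps: float = 30.0) -> object:
    """Write frames to a uniquely named video, pass its path to `consumer`,
    and delete the file afterwards no matter how `consumer` exits."""
    temp_path = Path.cwd() / f"temp_sam2_{uuid.uuid4().hex[:8]}.mp4"
    try:
        height, width = frames[0].shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(str(temp_path), fourcc, fps, (width, height))
        for frame in frames:
            writer.write(frame)
        writer.release()
        return consumer(str(temp_path))
    finally:
        # Mirrors the diff: cleanup runs on success, early return, or error.
        try:
            if temp_path.exists():
                temp_path.unlink()
        except Exception as e:
            warnings.warn(f"Failed to cleanup temp video {temp_path}: {e}")

# Usage sketch: with_temp_video(eye_frames, lambda p: model.init_video_state(video_path=p))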