don't use predictor over and over

2025-07-26 13:40:47 -07:00
parent c368d6dc97
commit 6f93abcb08


@@ -33,6 +33,8 @@ class SAM2VideoMatting:
         self.device = device
         self.memory_offload = memory_offload
         self.fp16 = fp16
+        self.model_cfg = model_cfg
+        self.checkpoint_path = checkpoint_path
         self.predictor = None
         self.inference_state = None
         self.video_segments = {}
@@ -75,7 +77,8 @@ class SAM2VideoMatting:
     def init_video_state(self, video_frames: List[np.ndarray] = None, video_path: str = None) -> None:
         """Initialize video inference state"""
         if self.predictor is None:
-            raise RuntimeError("SAM2 model not loaded")
+            # Recreate predictor if it was cleaned up
+            self._load_model(self.model_cfg, self.checkpoint_path)
 
         if video_path is not None:
             # Use video path directly (SAM2's preferred method)
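Together with the first hunk, this is a lazy-reload pattern: the constructor now remembers model_cfg and checkpoint_path, so init_video_state can rebuild the predictor on demand instead of raising after a cleanup. Below is a minimal sketch of that pattern, assuming _load_model wraps SAM2's build_sam2_video_predictor helper; the helper usage and the class/method names here are assumptions for illustration, not shown in this diff.

from sam2.build_sam import build_sam2_video_predictor  # assumed SAM2 builder

class LazyPredictorHolder:
    """Illustrative sketch of the lazy-reload pattern; names are hypothetical."""

    def __init__(self, model_cfg: str, checkpoint_path: str, device: str = "cuda"):
        # Keep everything needed to rebuild the predictor after it is deleted.
        self.model_cfg = model_cfg
        self.checkpoint_path = checkpoint_path
        self.device = device
        self.predictor = None

    def _load_model(self, model_cfg: str, checkpoint_path: str) -> None:
        # Build (or rebuild) the heavy predictor object on demand.
        self.predictor = build_sam2_video_predictor(model_cfg, checkpoint_path, device=self.device)

    def ensure_predictor(self):
        # Mirror of the check in init_video_state: recreate if it was cleaned up.
        if self.predictor is None:
            self._load_model(self.model_cfg, self.checkpoint_path)
        return self.predictor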
@@ -290,11 +293,17 @@ class SAM2VideoMatting:
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
 
+        # Explicitly delete predictor for fresh creation next time
+        if self.predictor is not None:
+            try:
+                del self.predictor
+            except Exception as e:
+                warnings.warn(f"Failed to delete predictor: {e}")
+            finally:
+                self.predictor = None
+
         # Force garbage collection (critical for memory leak prevention)
         gc.collect()
 
-        # Clear predictor reference (but don't delete the object itself)
-        self.predictor = None
-
     def __del__(self):
         """Destructor to ensure cleanup"""