diff --git a/processors/visualisation/video_hasher.py b/processors/visualisation/video_hasher.py index 9f1e9553c..c9f290280 100644 --- a/processors/visualisation/video_hasher.py +++ b/processors/visualisation/video_hasher.py @@ -10,7 +10,7 @@ import zipfile from videohash import VideoHash -from videohash.exceptions import FFmpegNotFound +from videohash.exceptions import FFmpegNotFound, FFmpegFailedToExtractFrames from backend.lib.processor import BasicProcessor from backend.lib.preset import ProcessorPreset @@ -158,7 +158,10 @@ def process(self): self.dataset.finish(0) return except FileNotFoundError as e: - self.dataset.update_status(f"Unable to create hash for {str(path)}") + self.dataset.update_status(f"Unable to find file {str(path)}") + continue + except FFmpegFailedToExtractFrames as e: + self.dataset.update_status(f"Unable to extract frames from {str(path)}: {e}") continue video_hashes[path.name] = {'videohash': videohash} diff --git a/processors/visualisation/video_timelines.py b/processors/visualisation/video_timelines.py index c0bb155c5..08cd7a9e4 100644 --- a/processors/visualisation/video_timelines.py +++ b/processors/visualisation/video_timelines.py @@ -141,7 +141,7 @@ def process(self): if looping: # Only prep for new timeline if still looping - self.dataset.update_status(f"Rendering video timeline for collection {video}") + self.dataset.update_status(f"Rendering video timeline for collection {video} ({len(timeline_widths)}/{self.source_dataset.num_rows})") self.dataset.update_progress(len(timeline_widths) / self.source_dataset.num_rows) # reset and ready for the next timeline offset_y += base_height diff --git a/setup.py b/setup.py index 7eff86382..9c3c52247 100644 --- a/setup.py +++ b/setup.py @@ -59,7 +59,6 @@ "unidecode~=1.3", "Werkzeug~=2.2", "wordcloud~=1.8", - "videohash==3.0.1", "videohash @ https://github.com/dale-wahl/videohash/archive/refs/heads/main.zip", "vk_api", "yt-dlp",