-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathAVSync.py
More file actions
1997 lines (1714 loc) · 104 KB
/
AVSync.py
File metadata and controls
1997 lines (1714 loc) · 104 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import cv2
import os
import shutil
import subprocess
from tqdm import tqdm
import glob
import concurrent.futures
import numpy as np
import argparse
import re
import tempfile
import sys
# import math # Not strictly necessary anymore after removing older sync methods
from scipy.io import wavfile # For find_audio_start_end
import time
import json # For parsing ffprobe output
import logging
import csv # For CSV output
import platform # Needed for ColorfulFormatter
import ctypes # Needed for ColorfulFormatter on Windows
# --- START OF EMBEDDED colorful_logger ---
class ColorfulFormatter(logging.Formatter):
    """
    A custom formatter for colorful console output.

    Chooses between two colour tables: ANSI escape sequences on Unix-like
    systems (and Windows terminals where virtual-terminal processing can be
    enabled), or an all-empty table so messages degrade to plain text.
    """
    # Color codes for Windows CMD. All empty strings: used when ANSI cannot
    # be enabled, so output stays uncoloured instead of showing raw escapes.
    WIN_COLORS = {
        'RESET': '',
        'BLACK': '',
        'RED': '',
        'GREEN': '',
        'YELLOW': '',
        'BLUE': '',
        'MAGENTA': '',
        'CYAN': '',
        'WHITE': '',
        'BOLD': '',
        'UNDERLINE': ''
    }
    # ANSI color codes (SGR escape sequences) for Unix-like systems.
    ANSI_COLORS = {
        'RESET': '\033[0m',
        'BLACK': '\033[30m',
        'RED': '\033[31m',
        'GREEN': '\033[32m',
        'YELLOW': '\033[33m',
        'BLUE': '\033[34m',
        'MAGENTA': '\033[35m',
        'CYAN': '\033[36m',
        'WHITE': '\033[37m',
        'BOLD': '\033[1m',
        'UNDERLINE': '\033[4m'
    }
    def __init__(self):
        # Base formatter emits only the message text; colour is layered on top.
        super().__init__('%(message)s')
        # Determine if we can use colors
        self.use_colors = True
        # Windows Command Prompt doesn't support ANSI by default.
        # ANSICON / WT_SESSION env vars indicate a terminal that already does.
        if platform.system() == 'Windows' and 'ANSICON' not in os.environ and 'WT_SESSION' not in os.environ:
            # Try to enable ANSI in Windows 10+
            try:
                # import ctypes # Import moved to top level
                kernel32 = ctypes.windll.kernel32
                # Ensure GetStdHandle returns a valid handle before calling SetConsoleMode
                handle = kernel32.GetStdHandle(-11) # STD_OUTPUT_HANDLE = -11
                if handle and handle != -1: # Check for valid handle (not NULL or INVALID_HANDLE_VALUE)
                    # NOTE(review): mode 7 = ENABLE_PROCESSED_OUTPUT | ENABLE_WRAP_AT_EOL_OUTPUT
                    # | ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x0004) -- the inline comment
                    # below names only the last flag.
                    kernel32.SetConsoleMode(handle, 7) # ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
                    self.colors = self.ANSI_COLORS
                else:
                    # print("Debug: Could not get valid console handle.", file=sys.stderr) # Optional debug
                    self.use_colors = False
                    self.colors = self.WIN_COLORS
            except (AttributeError, OSError, TypeError) as e:
                # print(f"Debug: Error enabling ANSI on Windows: {e}", file=sys.stderr) # Optional debug
                self.use_colors = False
                self.colors = self.WIN_COLORS
        else:
            self.colors = self.ANSI_COLORS
    def format(self, record):
        """Return the record's message wrapped in level- and content-based colours."""
        # Get the original message
        msg = super().format(record)
        # Strip existing ANSI codes if any before applying new ones
        # This helps prevent nested colors if a subprocess outputs color
        # msg = re.sub(r'\x1b\[[0-9;]*m', '', msg)
        if not self.use_colors:
            return msg # Return potentially stripped message
        # Apply custom colors based on level and content
        level_prefix = ""
        level_color = self.colors['RESET']
        if record.levelno >= logging.ERROR:
            level_prefix = "❌ ERROR: "
            level_color = self.colors['RED'] + self.colors['BOLD']
        elif record.levelno >= logging.WARNING:
            # Distinguish between script warnings and FFmpeg warnings
            if "FFmpeg warnings" in msg or "FFmpeg Stderr" in msg:
                level_prefix = "⚠️ FFmpeg: "
                level_color = self.colors['YELLOW'] # Keep standard yellow for FFmpeg
            else:
                level_prefix = "⚠️ WARNING: "
                level_color = self.colors['YELLOW'] + self.colors['BOLD'] # Bold script warnings
        elif record.levelno >= logging.INFO:
            # Color different types of info messages based on patterns
            if "---===" in msg: # Stage headers
                level_color = self.colors['BOLD'] + self.colors['MAGENTA']
            elif msg.startswith("--- "): # Sub-stage headers
                level_color = self.colors['CYAN'] + self.colors['BOLD']
            elif "-> Running:" in msg: # Task starting
                msg = re.sub(r"(-> Running:)(.*)", f"\\1{self.colors['BLUE']}\\2{self.colors['RESET']}", msg) # Blue for task name
            elif "-> Completed:" in msg: # Task success
                msg = re.sub(r"(-> Completed:)(.*)(\(Time:.*\))", f"\\1{self.colors['GREEN']}\\2{self.colors['RESET']}\\3", msg) # Green for task name
            elif "✓" in msg: # Success checkmarks
                level_color = self.colors['GREEN']
            elif "»" in msg: # Progress/detail markers
                level_color = self.colors['BLUE']
            # Keep default color for other INFO messages like setup details
        elif record.levelno <= logging.DEBUG:
            level_prefix = "DEBUG: "
            level_color = self.colors['WHITE'] # Dim/default white for debug
        # Construct final message
        formatted_msg = f"{level_color}{level_prefix}{msg}{self.colors['RESET']}"
        return formatted_msg
def setup_colorful_logging(level=logging.INFO):
    """
    Configure the root logger with a colorful stdout handler and return it.

    Safe to call more than once: if the root logger already has handlers we
    assume it is configured and return it untouched (avoids duplicate output).
    """
    root = logging.getLogger()  # no name -> root logger
    root.setLevel(level)
    # Already configured (e.g. called twice)?  Reuse as-is rather than
    # stacking a second handler on top.
    if root.hasHandlers():
        return root
    # Fresh setup: one stdout handler wearing the colorful formatter.
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(ColorfulFormatter())
    root.addHandler(handler)
    return root
# --- END OF EMBEDDED colorful_logger ---
# --- Try Importing Image Similarity Libs ---
try:
import imagehash
from PIL import Image
similarity_libs_available = True
except ImportError:
print("Warning: 'imagehash' or 'Pillow' not found. Similarity filtering will be skipped.")
similarity_libs_available = False
# --- Constants ---
RESIZE_WIDTH = 1280   # Frames are resized to this width before template matching
RESIZE_HEIGHT = 720   # ...and this height, so comparisons use a uniform resolution
DEFAULT_DB_THRESHOLD = -40.0  # Default dB threshold (presumably for audio silence detection -- confirm against find_audio_start_end)
DEFAULT_SAMPLE_RATE = 48000   # Default audio sample rate in Hz
DEFAULT_CHANNELS = 2          # Default audio channel count
MIN_ATEMPO = 0.5    # Lower bound for the tempo-adjustment factor
MAX_ATEMPO = 100.0  # Upper bound for the tempo-adjustment factor
# MIN_ALLOWED_REF_DURATION_S = 5.0 # Replaced by argument --min_segment_duration
DEFAULT_MIN_SEGMENT_DURATION = 5.0 # Default for the --min_segment_duration argument
MAX_ALLOWED_DURATION_PERCENT_DIFF = 6.0 # Max % difference allowed between ref/foreign segment durations
MIN_DELAY_S = 0.001 # Minimum delay to apply padding
DEFAULT_REF_LANG = "eng"          # Default reference-audio language tag
DEFAULT_FOREIGN_LANG = "foreign"  # Changed from "hin"
DEFAULT_MUX_ACODEC = "aac"    # Default audio codec for muxing
DEFAULT_MUX_ABITRATE = "192k" # Default audio bitrate for muxing
QC_IMAGE_HEIGHT = 720  # Height of QC comparison images
FFMPEG_EXEC = None   # Resolved ffmpeg path; expected to be set at startup (see run_ffmpeg's global)
FFPROBE_EXEC = None  # Resolved ffprobe path; expected to be set at startup (see get_stream_info's global)
MATCH_WINDOW_PERCENT = 0.06 # Percentage of ref video duration for match search window
# Configure logging using the embedded setup function
logger = setup_colorful_logging(level=logging.INFO)
# --- Utility & FFmpeg/FFprobe Functions ---
def find_executable(name):
    """Locate *name* on the system PATH; return its full path, or None if absent."""
    path = shutil.which(name)
    if not path:
        logger.error(f"'{name}' command not found in system PATH.")
        return None
    logger.debug(f"Found executable '{name}' at: {path}")
    return path
def run_ffmpeg(cmd_list, desc="FFmpeg Task", verbose_success=False, capture_stderr=False):
    """Runs an FFmpeg command with logging and error handling.

    Args:
        cmd_list: Full ffmpeg argv; element 0 is overwritten with FFMPEG_EXEC.
        desc: Human-readable task name for log messages.
        verbose_success: If True, surface FFmpeg warning lines even on success.
        capture_stderr: If True, return (success, stderr_text); otherwise a bare bool.

    Returns:
        (bool, str) when capture_stderr is True, else bool.

    BUGFIX: the return statements were written as
    `return False, "" if capture_stderr else False`, which Python parses as
    `return (False, ("" if capture_stderr else False))` -- a 2-tuple in every
    case, and a *truthy* value even on failure for non-capturing callers.
    The conditional is now parenthesized so non-capturing callers receive a
    real bool and capturing callers an explicit (bool, str) pair.
    """
    global FFMPEG_EXEC
    if not FFMPEG_EXEC:
        logger.error("FFmpeg path not set.")
        return (False, "") if capture_stderr else False
    cmd_list[0] = FFMPEG_EXEC
    logger.info(f" -> Running: {desc}...")
    start_time = time.time()
    stderr_output = ""
    try:
        # Popen + communicate() handles large stderr without deadlocking.
        process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   text=True, encoding='utf-8', errors='ignore', startupinfo=None)
        stdout, stderr = process.communicate() # Wait for process to finish
        returncode = process.returncode
        elapsed_time = time.time() - start_time
        if capture_stderr:
            stderr_output = stderr
        if returncode != 0:
            logger.error(f"\nERROR: {desc} failed! (Exit code: {returncode}, Time: {elapsed_time:.2f}s)")
            logger.error(f" FFmpeg Command: {' '.join(cmd_list)}")
            # Log only the tail of stderr if it's huge.
            stderr_lines = stderr.strip().splitlines()
            max_lines = 20
            log_stderr = "\n".join(stderr_lines[-max_lines:])
            if len(stderr_lines) > max_lines:
                log_stderr = f"(Showing last {max_lines} lines)\n" + log_stderr
            logger.error(f" FFmpeg Stderr:\n{log_stderr}")
            return (False, stderr_output) if capture_stderr else False
        else:
            logger.info(f" -> Completed: {desc} (Time: {elapsed_time:.2f}s)")
            if verbose_success and stderr:
                warnings = [line for line in stderr.splitlines() if 'warning' in line.lower()]
                if warnings:
                    logger.warning(f"FFmpeg warnings during {desc}:\n" + "\n".join(warnings))
            return (True, stderr_output) if capture_stderr else True
    except FileNotFoundError:
        logger.error(f"'{cmd_list[0]}' not found. Check installation/PATH.")
        return (False, "") if capture_stderr else False
    except Exception as e:
        logger.error(f"ERROR: Unexpected error running {desc}: {e}", exc_info=True)
        return (False, "") if capture_stderr else False
def get_stream_info(video_path):
    """Return the list of stream dicts reported by ffprobe, or None on failure."""
    global FFPROBE_EXEC
    # Guard clauses: tool missing or input missing -> nothing to probe.
    if not FFPROBE_EXEC:
        logger.error("FFprobe path not set.")
        return None
    if not os.path.exists(video_path):
        logger.error(f"Video file not found: {video_path}")
        return None
    probe_cmd = [FFPROBE_EXEC, '-v', 'error', '-print_format', 'json', '-show_streams', video_path]
    try:
        # 60s timeout prevents hangs on corrupted files.
        probe = subprocess.run(probe_cmd,
                               check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               text=True, encoding='utf-8', timeout=60)
        return json.loads(probe.stdout).get('streams', [])
    except subprocess.TimeoutExpired:
        logger.error(f"ffprobe timed out processing {os.path.basename(video_path)}")
        return None
    except Exception as e:
        # Less verbose on failure: no traceback, just the message.
        logger.error(f"Failed to get stream info for {os.path.basename(video_path)}: {e}", exc_info=False)
        return None
def find_audio_stream_index_by_lang(streams, lang_code):
    """Return the absolute index of the first audio stream tagged *lang_code*.

    Falls back to the first audio stream of any language when no tag matches;
    returns None when there are no audio streams at all (or *streams* is empty).
    """
    if not streams:
        return None
    logger.debug(f"Searching for language '{lang_code}' in streams...")
    wanted = lang_code.lower()
    # Pass 1: exact language-tag match (first hit wins).
    for stream in streams:
        if stream.get('codec_type') != 'audio':
            continue
        idx = stream.get('index') # This is the ABSOLUTE index
        lang = stream.get('tags', {}).get('language', 'und') # 'und' for undetermined
        if idx is not None and lang.lower() == wanted:
            logger.debug(f" ^ Found matching language tag at index {idx}")
            return idx
    # Pass 2: no tag matched -- fall back to the first audio stream found.
    logger.warning(f"Language tag '{lang_code}' not found. Looking for first available audio stream.")
    for stream in streams:
        if stream.get('codec_type') == 'audio':
            idx = stream.get('index') # Absolute index
            if idx is not None:
                logger.warning(f"Falling back to first audio stream found (index: {idx}).")
                return idx
    logger.error("No audio streams found at all.")
    return None
def get_audio_stream_details(video_path):
    """Gets detailed information about audio streams in a video file.

    Returns:
        A list of dicts (one per audio stream) with keys: 'index' (absolute
        container stream index), 'codec', 'channels', 'sample_rate',
        'bit_rate', 'language' and 'title'.  Empty list when the file has no
        streams or ffprobe fails.
    """
    streams = get_stream_info(video_path)
    if not streams:
        return []
    audio_streams = []
    # Iterate directly: the previous enumerate() produced an index that was
    # never used.
    for stream in streams:
        if stream.get('codec_type') != 'audio':
            continue
        tags = stream.get('tags', {})
        audio_streams.append({
            'index': stream.get('index'), # Absolute index
            'codec': stream.get('codec_name', 'unknown'),
            'channels': stream.get('channels', 0),
            'sample_rate': stream.get('sample_rate', 'unknown'),
            'bit_rate': stream.get('bit_rate', 'unknown'),
            'language': tags.get('language', 'und'),
            'title': tags.get('title', ''),
        })
    return audio_streams
def prompt_user_for_audio_stream(video_path, stream_type="foreign"):
    """Interactively ask the user to pick an audio stream from *video_path*.

    Prints a table of available audio streams and loops on input() until the
    user enters a valid selection number or presses Enter.

    Returns:
        The chosen stream's absolute container index, or None when the user
        presses Enter (auto-detection) or the file has no audio streams.
    """
    candidates = get_audio_stream_details(video_path)
    if not candidates:
        logger.error(f"No audio streams found in {os.path.basename(video_path)}")
        return None
    # Render the selection table.  Sel# is our 0-based row number; Stream# is
    # the container's absolute stream index.
    row_fmt = "{:<5} {:<10} {:<8} {:<12} {:<10} {:<12}"
    print(f"\n--- Available {stream_type.capitalize()} Audio Streams in {os.path.basename(video_path)} ---")
    print(row_fmt.format(
        "Sel#", "Stream#", "Language", "Codec", "Channels", "Sample Rate"))
    print("-" * 70)
    for sel_no, info in enumerate(candidates):
        print(row_fmt.format(
            sel_no,
            info['index'],
            info['language'],
            info['codec'],
            info['channels'],
            info['sample_rate']))
    # Loop until we get an empty line (auto-detect) or a valid row number.
    while True:
        raw = input(f"\nSelect {stream_type} audio stream by Sel# (or press Enter for auto-detection): ")
        if not raw.strip():
            return None # Auto-detection
        try:
            choice = int(raw)
        except ValueError:
            print("Error: Please enter a valid number.")
            continue
        if 0 <= choice < len(candidates):
            # Return the actual (absolute) stream index, not the row number.
            return candidates[choice]['index']
        print(f"Error: Selection number {choice} is out of range. Please choose 0-{len(candidates)-1}.")
def format_time(seconds):
    """Format a duration in seconds as an HH:MM:SS:ms string.

    None or negative input yields the zero timestamp "00:00:00:000".
    """
    if seconds is None or seconds < 0:
        return "00:00:00:000"
    whole = int(seconds)
    ms = int((seconds - whole) * 1000)
    hrs, rem = divmod(whole, 3600)
    mins, secs = divmod(rem, 60)
    return f"{hrs:02d}:{mins:02d}:{secs:02d}:{ms:03d}"
def get_file_duration(file_path, media_type='audio'):
    """Return the container-level duration of a media file in seconds, or None.

    *media_type* is not referenced in the body -- ffprobe reads the
    format-level duration either way; the parameter is kept for interface
    compatibility with existing callers.
    """
    global FFPROBE_EXEC
    if not FFPROBE_EXEC:
        logger.error(f"FFprobe path not set for duration check of {os.path.basename(file_path)}.")
        return None
    if not os.path.exists(file_path):
        logger.error(f"File not found for duration check: {file_path}")
        return None
    probe_cmd = [FFPROBE_EXEC, "-v", "error", "-show_entries", "format=duration",
                 "-of", "default=noprint_wrappers=1:nokey=1", file_path]
    try:
        # 30s timeout guards against hangs on corrupt inputs.
        proc = subprocess.run(probe_cmd, capture_output=True, text=True, check=True, timeout=30)
        reported = proc.stdout.strip()
        if reported:
            return float(reported)
        logger.warning(f"ffprobe returned empty duration for {os.path.basename(file_path)}")
        return None
    except subprocess.TimeoutExpired:
        logger.error(f"ffprobe timed out getting duration for {os.path.basename(file_path)}")
        return None
    except subprocess.CalledProcessError as e:
        logger.error(f"ffprobe failed to get duration for {os.path.basename(file_path)}: {e.stderr}")
        return None
    except Exception as e:
        logger.error(f"Error getting duration for {os.path.basename(file_path)}: {e}", exc_info=False)
        return None
# --- Image Pairing Stage Functions ---
def extract_frames_ffmpeg(video_path, output_folder, scene_threshold):
    """Extract scene-change frames from a video using FFmpeg's scene filter.

    Frames whose scene score exceeds *scene_threshold* are written to
    *output_folder* as frame_NNNNNN.png; their presentation timestamps are
    recovered by parsing the showinfo filter's stderr output.

    Returns:
        (ok, frame_basenames, pts_times) -- on failure, (False, [], []).
    """
    os.makedirs(output_folder, exist_ok=True)
    vid_name = os.path.basename(video_path)
    # select=... keeps only scene-change frames; showinfo logs each frame's
    # pts_time to stderr so we can pair files with timestamps afterwards.
    ffmpeg_command = [
        "ffmpeg", "-hide_banner", "-nostats", "-loglevel", "info",
        "-i", video_path,
        "-vf", f"select='gt(scene,{scene_threshold})',showinfo",
        "-vsync", "vfr",
        "-q:v", "2",
        os.path.join(output_folder, "frame_%06d.png")
    ]
    logger.info(f"--- Extracting Scene Change Frames ({vid_name}, Threshold: {scene_threshold}) ---")
    ok, stderr_text = run_ffmpeg(ffmpeg_command, f"Extract Frames ({vid_name})",
                                 verbose_success=False, capture_stderr=True)
    if not ok:
        logger.error(f" Frame extraction command failed for {vid_name}.")
        return False, [], []
    # Harvest pts_time values from the showinfo lines in stderr.
    parsed_pts_times = []
    if stderr_text:
        pts_time_re = re.compile(r'n:\s*\d+\s+pts:\s*\d+\s+pts_time:(\d+\.?\d*)')
        for line in stderr_text.splitlines():
            if '[Parsed_showinfo' not in line or 'pts_time:' not in line:
                continue
            m = pts_time_re.search(line)
            if m:
                try:
                    parsed_pts_times.append(float(m.group(1)))
                except (ValueError, IndexError):
                    logger.warning(f"Could not parse pts_time from line: {line}")
    # Pair extracted files with timestamps; both lists must be non-empty.
    frame_files = sorted(glob.glob(os.path.join(output_folder, "frame_*.png")))
    n_frames = len(frame_files)
    n_times = len(parsed_pts_times)
    if n_frames == 0 or n_times == 0:
        logger.error(f" Frame extraction yielded zero frames or timestamps for {vid_name}.")
        return False, [], []
    if n_frames != n_times:
        # Counts can drift if a showinfo line failed to parse -- truncate both
        # lists to the shorter length so positions still correspond.
        logger.warning(f" Frame/Timestamp count mismatch ({n_frames} frames vs {n_times} timestamps) for {vid_name}. Using minimum.")
        final_count = min(n_frames, n_times)
        parsed_pts_times = parsed_pts_times[:final_count]
        frame_files = frame_files[:final_count]
    else:
        final_count = n_frames
    logger.info(f" -> Successfully extracted {final_count} frames/timestamps for {vid_name}.")
    return True, [os.path.basename(f) for f in frame_files], parsed_pts_times
def process_image_pair_for_match(ref_img_name, foreign_image_list, ref_extract_folder, foreign_extract_folder, match_threshold):
    """Find the foreign frame that best matches one reference frame.

    Each candidate is compared against the reference via normalized
    cross-correlation template matching on grayscale, uniformly resized images.

    Returns:
        (foreign_filename, score) for the best candidate whose score reaches
        *match_threshold*, or None when no candidate qualifies or the
        reference image cannot be processed.
    """
    ref_img_path = os.path.join(ref_extract_folder, ref_img_name)
    try:
        # Load the reference as grayscale and normalize its size so every
        # comparison happens at the same resolution.
        gray_ref = cv2.imread(ref_img_path, cv2.IMREAD_GRAYSCALE)
        if gray_ref is None:
            logger.warning(f"Could not read reference image: {ref_img_name}")
            return None
        template = cv2.resize(gray_ref, (RESIZE_WIDTH, RESIZE_HEIGHT), interpolation=cv2.INTER_AREA)
        if template is None or template.size == 0:
            logger.warning(f"Failed to resize reference image: {ref_img_name}")
            return None
    except Exception as e:
        logger.error(f"Error processing reference image {ref_img_name}: {e}", exc_info=False)
        return None
    best_name = None
    best_score = -1.0  # below any possible TM_CCOEFF_NORMED value
    for cand_name in foreign_image_list:
        cand_path = os.path.join(foreign_extract_folder, cand_name)
        try:
            gray_cand = cv2.imread(cand_path, cv2.IMREAD_GRAYSCALE)
            if gray_cand is None:
                continue  # unreadable candidate -> skip silently
            cand = cv2.resize(gray_cand, (RESIZE_WIDTH, RESIZE_HEIGHT), interpolation=cv2.INTER_AREA)
            if cand is None or cand.size == 0:
                continue
            # TM_CCOEFF_NORMED scores in [-1, 1]; 1.0 is a perfect match.
            scores = cv2.matchTemplate(cand, template, cv2.TM_CCOEFF_NORMED)
            _, peak, _, _ = cv2.minMaxLoc(scores)
            if peak > best_score:
                best_score = peak
                best_name = cand_name
        except Exception as e:
            # One bad candidate should not abort the whole search.
            logger.debug(f"Error comparing {ref_img_name} with {cand_name}: {e}", exc_info=False)
            continue
    if best_name is not None and best_score >= match_threshold:
        return (best_name, best_score)
    return None  # nothing above the threshold
def filter_similar_ref_images(initial_matches_with_times, ref_extract_folder, similarity_threshold):
    """Filters out reference frames that are too visually similar using perceptual hashing.

    Args:
        initial_matches_with_times: dict of ref frame filename -> match data
            (values pass through untouched).
        ref_extract_folder: folder containing the extracted reference frames.
        similarity_threshold: maximum pHash Hamming distance considered
            "similar"; a negative value disables filtering.

    Returns:
        The input dict minus entries whose reference frame duplicated an
        earlier visually-similar frame (the larger file of a similar pair is
        kept, on the assumption it holds more detail).

    BUGFIX: the frame-number regex used an unescaped '.' before 'png'
    ('frame_(\\d+).png'), which matches any character; it is now escaped to
    match a literal dot.
    """
    global similarity_libs_available
    if not similarity_libs_available:
        logger.info(" Skipping similarity filtering: imagehash/Pillow libraries not available.")
        return initial_matches_with_times
    if similarity_threshold < 0:
        logger.info(f" Skipping similarity filtering: threshold ({similarity_threshold}) is negative.")
        return initial_matches_with_times
    if not initial_matches_with_times:
        return {} # Return empty if no initial matches
    logger.info(f"--- Filtering Similar Reference Frames (pHash Threshold: {similarity_threshold}) ---")
    start_time = time.time()
    # Sort reference frame names numerically based on frame index (e.g., frame_000001.png)
    frame_num_re = re.compile(r'frame_(\d+)\.png')  # FIX: escaped literal dot
    try:
        ref_names_sorted = sorted(
            initial_matches_with_times.keys(),
            key=lambda name: int(frame_num_re.search(name).group(1))
        )
    except Exception as e:
        logger.warning(f"Could not sort reference frames numerically, using default sort. Error: {e}")
        ref_names_sorted = sorted(initial_matches_with_times.keys())
    hashes = {} # Store phash -> (ref_name, file_size)
    to_remove_ref_names = set() # Keep track of reference frames to discard
    for ref_name in tqdm(ref_names_sorted, desc=" Filtering Similar Refs", unit="frame", ncols=100, leave=False):
        if ref_name in to_remove_ref_names:
            continue # Skip if already marked for removal
        ref_path = os.path.join(ref_extract_folder, ref_name)
        if not os.path.exists(ref_path):
            logger.warning(f"Reference frame {ref_name} not found, skipping similarity check.")
            to_remove_ref_names.add(ref_name)
            continue
        try:
            # Get file size and compute perceptual hash
            current_size = os.path.getsize(ref_path)
            with Image.open(ref_path) as img_file:
                img_hash = imagehash.phash(img_file)
        except Exception as e:
            logger.warning(f"Could not process {ref_name} for similarity hashing: {e}")
            continue # Skip this frame if hashing fails
        found_similar = False
        hashes_to_update = {} # Store updates for the current hash if it replaces an existing one
        hashes_to_delete = [] # Hashes to remove if replaced by a larger frame
        # Compare current hash with existing stored hashes
        # Create a copy of items to allow modification during iteration
        for existing_hash, (existing_ref_name, existing_size) in list(hashes.items()):
            # Skip comparison if the existing frame was already marked for removal
            if existing_ref_name in to_remove_ref_names:
                hashes_to_delete.append(existing_hash) # Mark the old hash for deletion
                continue
            # Calculate Hamming distance between hashes (lower means more similar)
            hash_diff = img_hash - existing_hash
            if hash_diff < similarity_threshold:
                # Found a similar frame
                found_similar = True
                # Decide which frame to keep: prefer the one with larger file size (potentially higher quality/detail)
                if current_size >= existing_size:
                    # Current frame is better or equal, mark existing for removal and update hash mapping
                    logger.debug(f" '{ref_name}' ({current_size}b) replacing similar '{existing_ref_name}' ({existing_size}b), diff={hash_diff}")
                    to_remove_ref_names.add(existing_ref_name)
                    hashes_to_update[img_hash] = (ref_name, current_size) # Map new hash to this frame
                    hashes_to_delete.append(existing_hash) # Mark old hash for deletion
                else:
                    # Existing frame is better, mark current frame for removal
                    logger.debug(f" '{ref_name}' ({current_size}b) removed due to similarity with '{existing_ref_name}' ({existing_size}b), diff={hash_diff}")
                    to_remove_ref_names.add(ref_name)
                break # Stop comparing once a similar frame is found
        # Clean up hashes map after comparisons
        for h_del in hashes_to_delete:
            if h_del in hashes:
                del hashes[h_del]
        hashes.update(hashes_to_update) # Apply updates
        # If no similar frame was found and this frame wasn't marked for removal, add its hash
        if not found_similar and ref_name not in to_remove_ref_names:
            hashes[img_hash] = (ref_name, current_size)
    # Create the final dictionary excluding the removed frames
    filtered_matches = {
        ref_name: data
        for ref_name, data in initial_matches_with_times.items()
        if ref_name not in to_remove_ref_names
    }
    removed_count = len(initial_matches_with_times) - len(filtered_matches)
    elapsed_time = time.time() - start_time
    logger.info(f" -> Similarity filtering complete. Removed {removed_count} potentially redundant pairs ({elapsed_time:.2f}s).")
    return filtered_matches
def filter_temporal_inconsistency(matches_after_similarity):
    """Filters matches where the foreign frame order doesn't match the reference frame order.

    Matches are walked in reference-frame order; each is kept only when its
    foreign frame number is >= the last accepted one, so the accepted foreign
    sequence is monotonically non-decreasing.

    Args:
        matches_after_similarity: dict of ref filename ->
            (foreign filename, ref_time, foreign_time).

    Returns:
        List of (ref_filename, foreign_filename, ref_time, foreign_time)
        tuples in reference order.

    BUGFIX: the frame-number regex used an unescaped '.' before 'png'
    ('frame_(\\d+).png'), which matches any character; it is now escaped to
    match a literal dot.
    """
    if not matches_after_similarity:
        return [] # Return empty list if no matches
    logger.info("--- Filtering Temporal Inconsistencies ---")
    start_time = time.time()
    # Extract items and sort them based on the reference frame number
    match_items = list(matches_after_similarity.items())
    frame_num_re = re.compile(r'frame_(\d+)\.png')  # FIX: escaped literal dot
    try:
        # Sort by reference frame number extracted from filename
        match_items.sort(key=lambda item: int(frame_num_re.search(item[0]).group(1)))
    except Exception as e:
        logger.warning(f"Could not sort matches numerically by reference frame, using timestamp sort. Error: {e}")
        # Fallback sort by reference timestamp if filename parsing fails
        match_items.sort(key=lambda item: item[1][1]) # Sort by ref_time (index 1 of tuple value)
    filtered_list = []
    last_accepted_foreign_num = -1 # Track the frame number of the last accepted foreign match
    removed_count = 0
    for ref_name, (foreign_name, ref_time, foreign_time) in match_items:
        # Extract frame number from the foreign filename
        foreign_match = frame_num_re.search(foreign_name)
        if not foreign_match:
            # If filename format is unexpected, keep the match but log a warning
            logger.warning(f"Could not parse frame number from foreign image '{foreign_name}'. Keeping match.")
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            continue
        try:
            current_foreign_num = int(foreign_match.group(1))
        except ValueError:
            logger.warning(f"Could not convert foreign frame number to int for '{foreign_name}'. Keeping match.")
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            continue
        # Core logic: Check if the current foreign frame number is >= the last accepted one
        # This ensures that the sequence of matched foreign frames is monotonically increasing
        if current_foreign_num >= last_accepted_foreign_num:
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            last_accepted_foreign_num = current_foreign_num # Update the last accepted number
        else:
            # Temporal inconsistency detected (e.g., Ref frame 5 matches Foreign 10, Ref 6 matches Foreign 8)
            logger.debug(f" Temporal inconsistency: Ref '{ref_name}' -> Foreign '{foreign_name}' ({current_foreign_num}) is earlier than last accepted ({last_accepted_foreign_num}). Removing.")
            removed_count += 1
    elapsed_time = time.time() - start_time
    logger.info(f" -> Temporal filtering complete. Removed {removed_count} inconsistent pairs ({elapsed_time:.2f}s).")
    # Return a list of tuples: [(ref_filename, foreign_filename, ref_time, foreign_time)]
    return filtered_list
def run_image_pairing_stage(ref_video_path, foreign_video_path, temp_dir, scene_threshold, match_threshold, similarity_threshold):
    """Orchestrates the entire image pairing stage.

    Pipeline: extract scene-change frames from both videos, match each
    reference frame against foreign frames inside a time window (parallel
    template matching), then drop near-duplicate reference frames and
    temporally inconsistent pairs.

    Args:
        ref_video_path (str): Path to the reference video.
        foreign_video_path (str): Path to the foreign video to be synced.
        temp_dir (str): Directory for extracted frames / intermediates.
        scene_threshold (float): Scene-change sensitivity for frame extraction.
        match_threshold (float): Template-match score needed to accept a pair.
        similarity_threshold (float): Threshold for collapsing visually
            near-identical reference frames.

    Returns:
        list[tuple] | None: Visual anchors as
        [(ref_filename, foreign_filename, ref_time, foreign_time), ...],
        or None if any step fails or no anchors survive filtering.
    """
    logger.info("\n---=== Stage 1: Image Pairing ===---")
    stage_start_time = time.time()
    # Define paths for extracted frames
    ref_extract_path = os.path.join(temp_dir, "Extracted_Reference")
    foreign_extract_path = os.path.join(temp_dir, "Extracted_Foreign")
    # --- Step 1: Extract Frames ---
    ref_extract_ok, ref_filenames, ref_timestamps_list = extract_frames_ffmpeg(ref_video_path, ref_extract_path, scene_threshold)
    if not ref_extract_ok:
        logger.error("Failed to extract reference frames.")
        return None
    foreign_extract_ok, foreign_filenames, foreign_timestamps_list = extract_frames_ffmpeg(foreign_video_path, foreign_extract_path, scene_threshold)
    if not foreign_extract_ok:
        logger.error("Failed to extract foreign frames.")
        return None
    # --- Step 1.5: Calculate Search Window ---
    logger.info("--- Calculating Frame Match Search Window ---")
    ref_duration = get_file_duration(ref_video_path, media_type='video')
    if ref_duration is None or ref_duration <= 0:
        logger.warning("Could not determine reference video duration or duration is zero. Frame matching will compare against ALL foreign frames.")
        match_search_window_seconds = float('inf')  # Effectively disable windowing
    else:
        match_search_window_seconds = ref_duration * MATCH_WINDOW_PERCENT
        logger.info(f" Reference duration: {ref_duration:.2f}s")
        logger.info(f" Calculated frame match search window: +/- {match_search_window_seconds:.2f}s ({MATCH_WINDOW_PERCENT*100}%)")
    # --- Step 2: Map filenames to timestamps ---
    logger.info("--- Mapping Timestamps to Extracted Frames ---")
    ref_timestamps_dict = {name: ts for name, ts in zip(ref_filenames, ref_timestamps_list)}
    foreign_timestamps_dict = {name: ts for name, ts in zip(foreign_filenames, foreign_timestamps_list)}
    if not ref_timestamps_dict or not foreign_timestamps_dict:
        logger.error(" ERROR: Failed to create timestamp dictionaries.")
        return None
    logger.info(f" -> Mapped {len(ref_timestamps_dict)} reference and {len(foreign_timestamps_dict)} foreign timestamps.")
    # --- Step 3: Initial Frame Matching (Parallel with Windowing) ---
    logger.info(f"--- Initial Frame Matching (Template Threshold: {match_threshold}, Window: +/- {match_search_window_seconds:.2f}s) ---")
    match_start_time = time.time()
    initial_matches_dict = {}  # Stores {ref_name: (foreign_name, ref_time, foreign_time)}
    future_to_ref_name = {}  # Maps each submitted Future back to its ref_name
    # Use ThreadPoolExecutor for parallel image comparison
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Submit tasks: compare each reference frame against a WINDOW of foreign frames
        for ref_name in ref_filenames:
            ref_time = ref_timestamps_dict.get(ref_name)
            if ref_time is None:
                logger.warning(f"Skipping match for ref frame {ref_name}: missing timestamp.")
                continue
            # Determine the time window for foreign frame search
            min_foreign_time = ref_time - match_search_window_seconds
            max_foreign_time = ref_time + match_search_window_seconds
            # Filter foreign filenames based on timestamp window.
            # BUGFIX: previously used .get(f_name, -1), so a foreign frame with a
            # MISSING timestamp could slip into the window whenever
            # min_foreign_time <= -1 (always true when the window is infinite).
            # Only frames with a known timestamp are considered now.
            candidate_foreign_frames = [
                f_name for f_name in foreign_filenames
                if f_name in foreign_timestamps_dict
                and min_foreign_time <= foreign_timestamps_dict[f_name] <= max_foreign_time
            ]
            # Only submit task if there are candidates within the window
            if candidate_foreign_frames:
                future = executor.submit(process_image_pair_for_match,
                                         ref_name,
                                         candidate_foreign_frames,  # Pass the filtered list
                                         ref_extract_path,
                                         foreign_extract_path,
                                         match_threshold)
                future_to_ref_name[future] = ref_name
            else:
                logger.debug(f"No foreign frame candidates found within window for ref frame {ref_name} (time {ref_time:.3f}s)")
        # Process results as they complete
        num_processed = 0
        skipped_count = 0
        # as_completed accepts the dict directly (iterates its Future keys)
        futures_iterable = concurrent.futures.as_completed(future_to_ref_name)
        progress_bar = tqdm(total=len(future_to_ref_name), desc=" Matching Frames", unit="frame", ncols=100, leave=False)
        for future in futures_iterable:  # Iterate through completed futures
            ref_name = future_to_ref_name[future]  # Recover which ref frame this result belongs to
            try:
                result = future.result()  # Get result from the completed thread
                if result:
                    # If a match was found (foreign_name, score)
                    foreign_name = result[0]
                    # Retrieve corresponding timestamps (ref_time already mapped)
                    foreign_time = foreign_timestamps_dict.get(foreign_name)
                    # Store the match details only if both timestamps are valid
                    if ref_timestamps_dict.get(ref_name) is not None and foreign_time is not None:
                        initial_matches_dict[ref_name] = (foreign_name, ref_timestamps_dict[ref_name], foreign_time)
                    else:
                        logger.warning(f"Timestamp missing for match: Ref '{ref_name}', Foreign '{foreign_name}'")
                        skipped_count += 1
                # else: No match found above threshold in the window for this ref_frame
            except Exception as e:
                # Catch errors from individual threads; keep the stack trace terse
                logger.error(f"\nError processing match task for ref frame {ref_name}: {e}", exc_info=False)
                skipped_count += 1
            finally:
                num_processed += 1
                progress_bar.update(1)
        progress_bar.close()
    match_elapsed_time = time.time() - match_start_time
    logger.info(f" -> Initial matching complete. Found {len(initial_matches_dict)} potential pairs ({skipped_count} skipped due to errors/missing data). ({match_elapsed_time:.2f}s).")
    if not initial_matches_dict:
        logger.error("No initial matches found between reference and foreign frames. Cannot proceed.")
        return None
    # --- Step 4: Filter Similar Reference Images ---
    matches_after_sim_filter = filter_similar_ref_images(initial_matches_dict, ref_extract_path, similarity_threshold)
    if not matches_after_sim_filter:
        logger.error("No matches remaining after similarity filtering.")
        return None
    # --- Step 5: Filter Temporal Inconsistencies ---
    # Result is a list: [(ref_filename, foreign_filename, ref_time, foreign_time), ...]
    visual_anchors_details = filter_temporal_inconsistency(matches_after_sim_filter)
    if not visual_anchors_details:
        logger.error("No matches remaining after temporal filtering.")
        return None
    final_anchor_count = len(visual_anchors_details)
    stage_elapsed_time = time.time() - stage_start_time
    logger.info(f"---=== Image Pairing Stage Finished ({stage_elapsed_time:.2f}s). Generated {final_anchor_count} visual anchors ===---")
    return visual_anchors_details  # Return list of detailed anchor tuples
# --- Audio Syncing Stage Functions ---
def find_audio_start_end(wav_path, db_threshold):
    """Locate the first and last audible moments in a WAV file.

    Samples are normalized to [-1.0, 1.0] and compared against the linear
    amplitude 10^(db_threshold/20); the span covered by samples at or above
    that level is reported.

    Args:
        wav_path: Path to the WAV file to analyze.
        db_threshold: Cutoff in dB (relative to full scale).

    Returns:
        (start_sec, end_sec) of detected content; (0.0, 0.0) when the file
        is empty or nothing exceeds the threshold; (None, None) on error.
    """
    logger.debug(f"Analyzing audio boundaries for: {os.path.basename(wav_path)} (Threshold: {db_threshold} dB)")
    try:
        sample_rate, audio_data = wavfile.read(wav_path)
        if audio_data.size == 0:
            logger.warning(f"Audio data is empty for {os.path.basename(wav_path)}")
            return 0.0, 0.0  # Zero-duration result for empty data
        # Normalize to float64 in [-1.0, 1.0] so the dB threshold applies
        # uniformly regardless of the source sample format.
        if np.issubdtype(audio_data.dtype, np.integer):
            info = np.iinfo(audio_data.dtype)
            scale = max(abs(float(info.max)), abs(float(info.min)))
            if scale == 0:
                return 0.0, 0.0  # Degenerate dtype; treat as silence
            normalized = audio_data.astype(np.float64) / scale
        elif np.issubdtype(audio_data.dtype, np.floating):
            normalized = audio_data.astype(np.float64)
            peak = np.max(np.abs(normalized)) if normalized.size > 0 else 0.0
            if peak == 0:
                return 0.0, 0.0  # Entirely silent float audio
            if peak > 1.0:
                normalized /= peak  # Rescale clipped float audio into range
        else:
            logger.error(f"Unsupported audio data type: {audio_data.dtype} in {os.path.basename(wav_path)}")
            return None, None  # Indicate error
        # Collapse to a mono envelope: per-sample peak across channels.
        if normalized.ndim > 1 and normalized.shape[1] > 1:
            envelope = np.max(np.abs(normalized), axis=1)
        else:
            envelope = np.abs(normalized.flatten())
        if envelope.size == 0:
            return 0.0, 0.0
        # Convert the dB threshold to linear amplitude: 10^(dB/20).
        linear_threshold = 10.0**(db_threshold / 20.0)
        loud_indices = np.where(envelope >= linear_threshold)[0]
        if len(loud_indices) == 0:
            # Nothing exceeded the threshold anywhere in the file.
            logger.warning(f" No audio found above {db_threshold:.1f} dB threshold in {os.path.basename(wav_path)}. Returning full duration or zero.")
            # Zero duration is the safe answer when the threshold is meaningful.
            return 0.0, 0.0
        start_time_sec = loud_indices[0] / sample_rate
        # Include the final sample's own duration in the end time.
        end_time_sec = (loud_indices[-1] + 1) / sample_rate
        # Guard against degenerate spans (end not after start).
        if end_time_sec <= start_time_sec:
            if abs(end_time_sec - start_time_sec) < (0.5 / sample_rate):
                end_time_sec = start_time_sec  # Sub-half-sample gap: single point
            else:
                end_time_sec = start_time_sec + (1.0 / sample_rate)  # Force minimal span
        logger.debug(f" -> Detected boundaries: {start_time_sec:.3f}s - {end_time_sec:.3f}s")
        return start_time_sec, end_time_sec
    except FileNotFoundError:
        logger.error(f"WAV file not found: {wav_path}")
        return None, None
    except Exception as e:
        logger.error(f"ERROR processing WAV {os.path.basename(wav_path)}: {e}", exc_info=True)
        return None, None
def process_segment_iteratively(foreign_wav_full, foreign_start, foreign_end, ref_duration, segment_num, temp_dir, max_iterations=3, target_precision_ms=5):
"""
Processes an audio segment, iteratively adjusting 'atempo' to match a target duration precisely.
Args:
foreign_wav_full (str): Path to the full foreign audio WAV file.
foreign_start (float): Start time (seconds) of the segment in the foreign audio.
foreign_end (float): End time (seconds) of the segment in the foreign audio.
ref_duration (float): The target duration (seconds) for the processed segment.
segment_num (int): The segment number (for logging).
temp_dir (str): Path to the temporary directory for intermediate files.
max_iterations (int): Maximum number of refinement iterations.
target_precision_ms (int): Desired duration precision in milliseconds.
Returns:
str: Path to the final processed segment file meeting the target duration, or None on failure.
"""
target_precision_s = target_precision_ms / 1000.0
foreign_duration = foreign_end - foreign_start
# Basic validation
if ref_duration <= 0 or foreign_duration <= 0:
logger.warning(f" » Segment {segment_num}: Skipped (zero or negative duration: Ref={ref_duration:.3f}s, Foreign={foreign_duration:.3f}s)")
return None
# --- Initial setup ---
# Initial speed factor estimate: target_duration / source_duration
# IMPORTANT: `atempo` filter works inversely: tempo < 1 slows down, tempo > 1 speeds up.
# So, we need foreign_duration / ref_duration
initial_speed_factor = foreign_duration / ref_duration
clamped_speed = max(MIN_ATEMPO, min(MAX_ATEMPO, initial_speed_factor))
segment_output_path = os.path.join(temp_dir, f"segment_{segment_num:04d}_final.wav")
best_segment_path = None # Keep track of the path of the iteration closest to target
best_duration_diff = float('inf')
last_processed_duration = None # For oscillation detection
logger.info(f" » Segment {segment_num}: Target={ref_duration:.3f}s, Input={foreign_duration:.3f}s. Initial speed={clamped_speed:.5f}x")
# --- Iterative Refinement Loop ---
for iteration in range(max_iterations):
iteration_path = os.path.join(temp_dir, f"segment_{segment_num:04d}_iter{iteration}.wav")
# FFmpeg command to extract, trim, and apply atempo
filter_complex = f"atrim=start={foreign_start:.8f}:end={foreign_end:.8f},asetpts=PTS-STARTPTS,atempo={clamped_speed:.8f}"
process_cmd = [
"ffmpeg", "-hide_banner", "-loglevel", "warning", "-nostats",
"-i", foreign_wav_full,
"-af", filter_complex,
"-c:a", "pcm_s16le", "-ar", str(DEFAULT_SAMPLE_RATE), "-ac", str(DEFAULT_CHANNELS),
"-y", iteration_path
]
if not run_ffmpeg(process_cmd, f"Process Segment {segment_num} (Iter {iteration+1}, Speed {clamped_speed:.5f}x)")[0]:
logger.error(f" » Segment {segment_num}: Processing failed on iteration {iteration+1}")
continue # Try next iteration if possible, or fail if last iteration
# Measure the actual duration of the processed segment
processed_duration = get_file_duration(iteration_path, media_type='audio')
if processed_duration is None:
logger.warning(f" » Segment {segment_num}: Could not get duration for iteration {iteration+1}")
continue # Try next iteration or fail
duration_diff = processed_duration - ref_duration # Positive if too long, negative if too short
abs_duration_diff = abs(duration_diff)
logger.info(f" Iter {iteration+1}: Speed={clamped_speed:.5f}x -> Duration={processed_duration:.3f}s (Diff={duration_diff*1000:+.1f}ms)")
# Keep track of the best result so far (closest to target duration)
if abs_duration_diff < best_duration_diff:
best_duration_diff = abs_duration_diff
best_segment_path = iteration_path # Store the path of this iteration's output
# Check if we've reached the target precision
if abs_duration_diff <= target_precision_s:
logger.info(f" ✓ Segment {segment_num}: Achieved target precision ({abs_duration_diff*1000:.1f}ms <= {target_precision_ms}ms) on iteration {iteration+1}")
break # Exit loop early
# --- Adjust speed factor for the next iteration ---
if iteration < max_iterations - 1:
# Calculate the ideal correction factor: target_duration / current_duration
# Avoid division by zero if processed_duration is somehow zero
if processed_duration <= 0:
logger.warning(f" Skipping speed adjustment for iter {iteration+1}: Processed duration is zero or negative.")
continue
ideal_correction = ref_duration / processed_duration
# Apply dampening to prevent overshooting, especially for large corrections