-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathAVSync_v12.py
More file actions
2719 lines (2285 loc) · 131 KB
/
AVSync_v12.py
File metadata and controls
2719 lines (2285 loc) · 131 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import cv2
import os
import shutil
import subprocess
from tqdm import tqdm
import glob
import concurrent.futures
import numpy as np
import argparse
import re
import tempfile
import sys
from scipy.io import wavfile
import time
import json
import logging
import csv
import platform
import ctypes
import pickle
import hashlib
# --- DUAL LOGGING SYSTEM ---
# File logger: detailed output to log file
# Console logger: minimal output with progress indicators
class FileFormatter(logging.Formatter):
    """Timestamped, level-tagged formatter used for the detailed on-disk log."""

    def __init__(self):
        fmt = '%(asctime)s [%(levelname)s] %(message)s'
        super().__init__(fmt, datefmt='%Y-%m-%d %H:%M:%S')
class ConsoleFormatter(logging.Formatter):
    """Minimal formatter for console - ASCII safe"""
    # ANSI escape sequences used to colorize console output by severity.
    ANSI_COLORS = {
        'RESET': '\033[0m', 'RED': '\033[31m', 'GREEN': '\033[32m',
        'YELLOW': '\033[33m', 'CYAN': '\033[36m', 'BOLD': '\033[1m', 'DIM': '\033[2m',
    }
    # Same keys mapped to empty strings, used when colors are disabled.
    NO_COLORS = {k: '' for k in ANSI_COLORS.keys()}

    def __init__(self):
        super().__init__('%(message)s')
        # Decide once at construction whether this terminal accepts ANSI codes.
        self.use_colors = self._init_colors()

    def _init_colors(self):
        """Initialize color support"""
        # Honor the NO_COLOR convention: any non-empty value disables color.
        if os.getenv('NO_COLOR'):
            self.colors = self.NO_COLORS
            return False
        if platform.system() == 'Windows':
            # Try to enable ANSI handling on the Windows console.
            # -11 is STD_OUTPUT_HANDLE; 0x0004 is
            # ENABLE_VIRTUAL_TERMINAL_PROCESSING. On any failure we fall back
            # to colorless output rather than printing raw escape codes.
            try:
                kernel32 = ctypes.windll.kernel32
                handle = kernel32.GetStdHandle(-11)
                if handle and handle != -1:
                    mode = ctypes.c_uint32()
                    if kernel32.GetConsoleMode(handle, ctypes.byref(mode)):
                        kernel32.SetConsoleMode(handle, mode.value | 0x0004)
                        self.colors = self.ANSI_COLORS
                        return True
            except: pass
            self.colors = self.NO_COLORS
            return False
        # Non-Windows terminals are assumed ANSI-capable.
        self.colors = self.ANSI_COLORS
        return True

    def format(self, record):
        """Colorize by severity; INFO records get pattern-based highlighting."""
        msg = super().format(record)
        c = self.colors
        # Only show important messages on console
        if record.levelno >= logging.ERROR:
            return f"{c['RED']}{c['BOLD']}[ERROR] {msg}{c['RESET']}"
        elif record.levelno >= logging.WARNING:
            return f"{c['YELLOW']}[WARN] {msg}{c['RESET']}"
        elif record.levelno >= logging.INFO:
            # Stage headers
            if msg.startswith("=====") or msg.startswith("---"):
                return f"{c['CYAN']}{c['BOLD']}{msg}{c['RESET']}"
            # Success messages
            if "[OK]" in msg or "SUCCESS" in msg.upper():
                return f"{c['GREEN']}{msg}{c['RESET']}"
            # Cache messages
            if "[CACHE]" in msg:
                return f"{c['DIM']}{msg}{c['RESET']}"
            return msg
        return msg
# Custom filter to suppress verbose messages on console
class ConsoleFilter(logging.Filter):
    """Suppress noisy per-iteration/per-segment messages from console output."""

    # Toggled by --show-warnings: when False, WARNING records are hidden.
    show_warnings = False

    # INFO messages containing any of these substrings are hidden on console
    # (they remain in the file log, which has no filter attached).
    VERBOSE_PATTERNS = [
        "Iter ",               # Individual iteration details
        "-> Running:",         # FFmpeg command starts
        "-> Completed:",       # FFmpeg command completions
        "Speed=",              # Speed adjustments
        "Duration=",           # Duration checks
        "MinRefDur Filter",
        "MaxDiff Filter",
        "Skipping visual anchor",
        "Process Segment",
        "Trim Segment",
        "Create Silence",
        "Add Silence Pad",
        "segment duration",
        # Additional patterns for clean progress bar (no scrolling)
        "-> Segment ",         # Segment processing details
        "Segment ",            # All segment-related messages
        "Target=",             # Target duration info
        "Achieved target",     # Success messages per segment
        "First segment:",      # First segment adjustments
        "Last segment:",       # Last segment adjustments
        "Adjusting speed",     # Speed adjustment debug
        "silence pad",         # Silence padding messages
    ]

    def filter(self, record):
        """Return True to let *record* through to the console."""
        text = record.getMessage()
        if record.levelno >= logging.ERROR:
            return True  # errors are always shown
        if record.levelno >= logging.WARNING:
            return ConsoleFilter.show_warnings
        return not any(pattern in text for pattern in self.VERBOSE_PATTERNS)
def setup_logging(log_file=None, verbose=False):
    """Configure the dual 'avsync' logger: detailed file log + minimal console.

    Args:
        log_file: Path to the detailed log file; no file handler when None.
        verbose: When True the console shows all DEBUG+ records unfiltered.

    Returns:
        tuple: (logger, log_file) as passed in.
    """
    log = logging.getLogger('avsync')
    log.setLevel(logging.DEBUG)
    log.handlers.clear()  # idempotent: drop handlers from any earlier setup

    # File handler - always detailed
    if log_file:
        fh = logging.FileHandler(log_file, mode='w', encoding='utf-8')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(FileFormatter())
        log.addHandler(fh)

    # Console handler - minimal unless verbose
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG if verbose else logging.INFO)
    ch.setFormatter(ConsoleFormatter())
    if not verbose:
        ch.addFilter(ConsoleFilter())
    log.addHandler(ch)
    return log, log_file
# Global logger placeholder
# Module-wide logger handle; a temporary console-only logger is installed
# further below for early errors, and the final dual logger is built by
# setup_logging().
logger = None
def get_log_path(output_video):
    """Return '<output_dir>/<output_stem>_avsync.log' for the given output video."""
    abs_out = os.path.abspath(output_video)
    stem, _ext = os.path.splitext(os.path.basename(abs_out))
    return os.path.join(os.path.dirname(abs_out), f"{stem}_avsync.log")
# --- PROGRESS BAR HELPERS ---
class SegmentProgressBar:
    """tqdm-backed progress bar for the segment-processing stage."""

    def __init__(self, total_segments, desc="Processing segments"):
        # Counters kept alongside the bar for the end-of-run summary.
        self.current = 0
        self.warnings = 0
        self.corrections = 0
        self.pbar = tqdm(
            total=total_segments,
            desc=desc,
            unit="seg",
            ncols=80,
            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]'
        )

    def update(self, correction_applied=False):
        """Advance the bar by one segment, tallying final corrections."""
        self.current += 1
        if correction_applied:
            self.corrections += 1
        self.pbar.update(1)

    def set_warning(self):
        """Count a warning without moving the bar."""
        self.warnings += 1

    def close(self):
        """Finish the bar and print a one-line correction summary if needed."""
        self.pbar.close()
        if self.corrections > 0:
            print(f" -> {self.corrections} segments needed final correction")
# --- END OF LOGGING SYSTEM ---
# --- CACHING FUNCTIONS ---
def get_file_hash(filepath):
    """Return the SHA-256 hex digest of *filepath*'s contents, streamed in chunks."""
    digest = hashlib.sha256()
    with open(filepath, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_cache_key(args):
    """Derive a SHA-256 cache key from input-file hashes and detection settings.

    Only parameters that influence detection are included, so adjustment-only
    options do not invalidate the cache. Missing input files hash to None.
    """
    def _hash_if_present(path):
        # Content hash ties the cache to the exact input files.
        return get_file_hash(path) if os.path.exists(path) else None

    fingerprint = {
        'ref_video': _hash_if_present(args.ref_video),
        'foreign_video': _hash_if_present(args.foreign_video),
        'scene_threshold': args.scene_threshold,
        'match_threshold': args.match_threshold,
        'similarity_threshold': args.similarity_threshold,
        'db_threshold': args.db_threshold,
        'min_segment_duration': args.min_segment_duration,
        'ref_lang': args.ref_lang,
        'foreign_lang': args.foreign_lang,
    }
    # sort_keys makes the serialization (and thus the key) deterministic.
    serialized = json.dumps(fingerprint, sort_keys=True)
    return hashlib.sha256(serialized.encode()).hexdigest()
def get_cache_path(args):
    """Build the checkpoint path under '.avsync_cache' next to the output video.

    Side effect: creates the cache directory if it does not exist.
    """
    out_abs = os.path.abspath(args.output_video)
    stem = os.path.splitext(os.path.basename(out_abs))[0]
    cache_dir = os.path.join(os.path.dirname(out_abs), ".avsync_cache")
    os.makedirs(cache_dir, exist_ok=True)
    key = get_cache_key(args)
    # Short key prefix keeps filenames readable while still cache-busting.
    return os.path.join(cache_dir, f"{stem}_{key[:16]}.pkl")
def save_checkpoint(cache_path, visual_anchors_details):
    """Pickle the detected visual anchors (plus version/timestamp) to *cache_path*.

    Returns True on success, False when writing fails (logged as a warning).
    """
    payload = {
        'version': 12,  # must match load_checkpoint's expected version
        'visual_anchors_details': visual_anchors_details,
        'timestamp': time.time(),
    }
    try:
        with open(cache_path, 'wb') as fh:
            pickle.dump(payload, fh)
        logger.info(f"[CACHE] Checkpoint saved to {cache_path}")
        return True
    except Exception as exc:
        logger.warning(f"Failed to save checkpoint: {exc}")
        return False
def load_checkpoint(cache_path):
    """Unpickle checkpoint data from *cache_path*.

    Returns the checkpoint dict, or None when the file is absent, unreadable,
    or carries a different checkpoint version.
    """
    if not os.path.exists(cache_path):
        return None
    try:
        with open(cache_path, 'rb') as fh:
            data = pickle.load(fh)
        if data.get('version') != 12:
            logger.warning("[CACHE] Version mismatch, ignoring cache")
            return None
        logger.info(f"[CACHE] Loaded checkpoint from {cache_path}")
        age_seconds = time.time() - data.get('timestamp', 0)
        logger.info(f"[CACHE] Age: {age_seconds/3600:.1f} hours")
        return data
    except Exception as exc:
        logger.warning(f"Failed to load checkpoint: {exc}")
        return None
# --- END CACHING FUNCTIONS ---
# --- FORCED SYNC POINT FUNCTIONS ---
def parse_timestamp_to_seconds(timestamp_str):
    """Convert an 'HH:MM:SS:MS' string to float seconds.

    Example: "00:01:30:500" -> 90.5. Returns None (with a logged warning)
    when the string is malformed or any field is out of range.
    """
    ts = timestamp_str.strip()
    fields = ts.split(':')
    if len(fields) != 4:
        logger.warning(f"Invalid timestamp format '{ts}' (expected HH:MM:SS:MS)")
        return None
    try:
        hh, mm, ss, ms = (int(field) for field in fields)
    except ValueError as exc:
        logger.warning(f"Could not parse timestamp '{ts}': {exc}")
        return None
    if mm >= 60 or ss >= 60 or ms >= 1000:
        logger.warning(f"Invalid timestamp values in '{ts}' (MM<60, SS<60, MS<1000)")
        return None
    if hh < 0 or mm < 0 or ss < 0 or ms < 0:
        logger.warning(f"Timestamp values must be non-negative in '{ts}'")
        return None
    return hh * 3600 + mm * 60 + ss + ms / 1000.0
def parse_force_sync_points(force_sync_str):
    """Parse 'REF>FOREIGN[,REF>FOREIGN...]' pairs into [(ref_s, foreign_s), ...].

    Each timestamp uses the HH:MM:SS:MS format; '>' separates the reference
    time from the foreign time and pairs are comma-separated. Malformed
    entries are logged and skipped. None/empty input yields an empty list.
    """
    if not force_sync_str:
        return []
    points = []
    for idx, raw_pair in enumerate(force_sync_str.strip().split(',')):
        entry = raw_pair.strip()
        if not entry:
            continue
        if '>' not in entry:
            logger.warning(f"Invalid sync point format '{entry}' (expected 'HH:MM:SS:MS>HH:MM:SS:MS'). Skipping.")
            continue
        try:
            halves = entry.split('>')
            if len(halves) != 2:
                logger.warning(f"Invalid sync point format '{entry}' (expected exactly 2 timestamps separated by '>'). Skipping.")
                continue
            ref_seconds = parse_timestamp_to_seconds(halves[0])
            foreign_seconds = parse_timestamp_to_seconds(halves[1])
            if ref_seconds is None or foreign_seconds is None:
                logger.warning(f"Skipping sync point '{entry}' due to timestamp parsing errors.")
                continue
            points.append((ref_seconds, foreign_seconds))
            logger.debug(f" Parsed forced sync point {idx+1}: Ref={ref_seconds:.3f}s, Foreign={foreign_seconds:.3f}s")
        except Exception as exc:
            logger.warning(f"Could not parse sync point '{entry}': {exc}. Skipping.")
            continue
    return points
def inject_forced_sync_points(visual_anchors_details, forced_sync_points):
    """Merge user-forced sync points into the detected anchor list.

    Forced points are given synthetic 'FORCED_SYNC_*' names so later stages
    can recognize them and bypass filtering. The combined list is sorted by
    reference time. When there are no forced points the input is returned
    untouched (same object).

    Args:
        visual_anchors_details: list of (ref_name, foreign_name, ref_time, foreign_time).
        forced_sync_points: list of (ref_time, foreign_time).
    """
    if not forced_sync_points:
        return visual_anchors_details
    logger.info(f"--- Injecting {len(forced_sync_points)} Forced Sync Points ---")
    merged = list(visual_anchors_details or [])
    for idx, (ref_time, foreign_time) in enumerate(forced_sync_points, start=1):
        merged.append((f"FORCED_SYNC_{idx:03d}_ref", f"FORCED_SYNC_{idx:03d}_foreign", ref_time, foreign_time))
        logger.info(f" > Injected forced sync point {idx}: Ref={format_time(ref_time)} ({ref_time:.3f}s) -> Foreign={format_time(foreign_time)} ({foreign_time:.3f}s)")
    # Tuple index 2 is the reference time.
    merged.sort(key=lambda anchor: anchor[2])
    logger.info(f" -> Total anchors after injection: {len(merged)} ({len(forced_sync_points)} forced + {len(visual_anchors_details) if visual_anchors_details else 0} detected)")
    return merged
# --- END FORCED SYNC POINT FUNCTIONS ---
# --- Try Importing Image Similarity Libs ---
# Optional dependencies: perceptual-hash similarity filtering needs both
# 'imagehash' and 'Pillow'. When either is missing we degrade gracefully —
# similarity_libs_available gates the filtering step instead of crashing.
try:
    import imagehash
    from PIL import Image
    similarity_libs_available = True
except ImportError as e:
    print(f"Warning: 'imagehash' or 'Pillow' not found. Similarity filtering will be skipped. Error: {e}")
    similarity_libs_available = False
# --- Constants ---
RESIZE_WIDTH = 1280   # frames are resized to RESIZE_WIDTH x RESIZE_HEIGHT before template matching
RESIZE_HEIGHT = 720
DEFAULT_DB_THRESHOLD = -40.0   # default dB threshold (presumably silence detection — confirm at use site)
DEFAULT_SAMPLE_RATE = 48000    # default audio sample rate in Hz
DEFAULT_CHANNELS = 2           # default audio channel count
MIN_ATEMPO = 0.5               # lower clamp for audio speed adjustment
MAX_ATEMPO = 100.0             # upper clamp for audio speed adjustment
# MIN_ALLOWED_REF_DURATION_S = 5.0 # Replaced by argument --min_segment_duration
DEFAULT_MIN_SEGMENT_DURATION = 5.0 # New default value
MAX_ALLOWED_DURATION_PERCENT_DIFF = 6.0 # Max % difference allowed between ref/foreign segment durations
MIN_DELAY_S = 0.001 # Minimum delay to apply padding
DEFAULT_REF_LANG = "eng"
DEFAULT_FOREIGN_LANG = "foreign" # Changed from "hin"
DEFAULT_MUX_ACODEC = "aac"      # default codec/bitrate for muxed audio output
DEFAULT_MUX_ABITRATE = "192k"
QC_IMAGE_HEIGHT = 720           # height of quality-control images
FFMPEG_EXEC = None              # resolved executable paths; None until populated
FFPROBE_EXEC = None             # (presumably via find_executable() in main — confirm)
MATCH_WINDOW_PERCENT = 0.06 # Percentage of ref video duration for match search window
# Logger will be initialized in main() with proper file output
# Use a temporary logger for early errors
logger = logging.getLogger('avsync')
logger.setLevel(logging.INFO)
if not logger.handlers:
    _temp_handler = logging.StreamHandler(sys.stdout)
    _temp_handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(_temp_handler)
# --- Utility & FFmpeg/FFprobe Functions ---
def find_executable(name):
    """Locate executable *name*: bundled PyInstaller 'bin/' dir first, then PATH.

    Returns the executable's path, or None (logged as an error) when not found.
    """
    if getattr(sys, 'frozen', False):
        # Running from a PyInstaller bundle: sys._MEIPASS is the runtime
        # extraction directory; binaries live under its 'bin' subfolder.
        suffix = '.exe' if platform.system() == 'Windows' else ''
        candidate = os.path.join(sys._MEIPASS, 'bin', f'{name}{suffix}')
        if os.path.exists(candidate):
            logger.debug(f"Found bundled executable '{name}' at: {candidate}")
            return candidate
    # Fall back to system PATH
    located = shutil.which(name)
    if located:
        logger.debug(f"Found executable '{name}' at: {located}")
        return located
    logger.error(f"'{name}' command not found in bundled resources or system PATH.")
    return None
def run_ffmpeg(cmd_list, desc="FFmpeg Task", verbose_success=False, capture_stderr=False):
    """Runs an FFmpeg command with logging and error handling.

    Args:
        cmd_list: argv list; element 0 is OVERWRITTEN with FFMPEG_EXEC
            (NOTE: mutates the caller's list in place).
        desc: human-readable label used in log messages.
        verbose_success: when True, surface FFmpeg 'warning' lines on success.
        capture_stderr: when True, the second return element carries stderr text.

    Returns:
        Always a 2-tuple. The conditional expression binds only to the second
        element, so this is (success, stderr_text) when capture_stderr is True
        and (success, success) otherwise — callers must unpack two values.
    """
    global FFMPEG_EXEC
    if not FFMPEG_EXEC:
        logger.error("FFmpeg path not set.")
        return False, "" if capture_stderr else False
    cmd_list[0] = FFMPEG_EXEC
    logger.info(f" -> Running: {desc}...")
    start_time = time.time()
    stderr_output = ""
    try:
        # Use Popen for better handling of large stderr, prevent potential deadlocks
        process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   text=True, encoding='utf-8', errors='ignore', startupinfo=None)
        stdout, stderr = process.communicate() # Wait for process to finish
        returncode = process.returncode
        elapsed_time = time.time() - start_time
        if capture_stderr:
            stderr_output = stderr
        if returncode != 0:
            logger.error(f"\nERROR: {desc} failed! (Exit code: {returncode}, Time: {elapsed_time:.2f}s)")
            logger.error(f" FFmpeg Command: {' '.join(cmd_list)}")
            # Log only last few lines of stderr if it's huge
            stderr_lines = stderr.strip().splitlines()
            max_lines = 20
            log_stderr = "\n".join(stderr_lines[-max_lines:])
            if len(stderr_lines) > max_lines:
                log_stderr = f"(Showing last {max_lines} lines)\n" + log_stderr
            logger.error(f" FFmpeg Stderr:\n{log_stderr}")
            # Evaluates to (False, stderr_output) or (False, False).
            return False, stderr_output if capture_stderr else False
        else:
            logger.info(f" -> Completed: {desc} (Time: {elapsed_time:.2f}s)")
            if verbose_success and stderr:
                # Surface FFmpeg's own warning lines even though the run succeeded.
                warnings = [line for line in stderr.splitlines() if 'warning' in line.lower()]
                if warnings:
                    logger.warning(f"FFmpeg warnings during {desc}:\n" + "\n".join(warnings))
            # Evaluates to (True, stderr_output) or (True, True).
            return True, stderr_output if capture_stderr else True
    except FileNotFoundError:
        logger.error(f"'{cmd_list[0]}' not found. Check installation/PATH.")
        return False, "" if capture_stderr else False
    except Exception as e:
        logger.error(f"ERROR: Unexpected error running {desc}: {e}", exc_info=True)
        return False, "" if capture_stderr else False
def get_stream_info(video_path):
    """Return ffprobe's stream list for *video_path*, or None on any failure."""
    global FFPROBE_EXEC
    if not FFPROBE_EXEC:
        logger.error("FFprobe path not set.")
        return None
    if not os.path.exists(video_path):
        logger.error(f"Video file not found: {video_path}")
        return None
    probe_cmd = [FFPROBE_EXEC, '-v', 'error', '-print_format', 'json', '-show_streams', video_path]
    try:
        # The timeout guards against ffprobe hanging on corrupted files.
        result = subprocess.run(probe_cmd, check=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, text=True, encoding='utf-8',
                                timeout=60)
        return json.loads(result.stdout).get('streams', [])
    except subprocess.TimeoutExpired:
        logger.error(f"ffprobe timed out processing {os.path.basename(video_path)}")
        return None
    except Exception as exc:
        # Deliberately terse: covers CalledProcessError and JSON parse errors.
        logger.error(f"Failed to get stream info for {os.path.basename(video_path)}: {exc}", exc_info=False)
        return None
def find_audio_stream_index_by_lang(streams, lang_code):
    """Return the absolute index of the first audio stream tagged *lang_code*.

    Falls back to the first audio stream of any language (with warnings),
    or returns None when there is no audio at all.
    """
    if not streams:
        return None
    logger.debug(f"Searching for language '{lang_code}' in streams...")
    wanted = lang_code.lower()
    fallback_idx = None
    for stream in streams:
        if stream.get('codec_type') != 'audio':
            continue
        idx = stream.get('index')  # ABSOLUTE (container-wide) stream index
        if idx is None:
            continue
        lang = stream.get('tags', {}).get('language', 'und')  # 'und' = undetermined
        if lang.lower() == wanted:
            logger.debug(f" ^ Found matching language tag at index {idx}")
            return idx  # first exact match wins
        if fallback_idx is None:
            fallback_idx = idx  # remember the first audio stream seen
    logger.warning(f"Language tag '{lang_code}' not found. Looking for first available audio stream.")
    if fallback_idx is not None:
        logger.warning(f"Falling back to first audio stream found (index: {fallback_idx}).")
        return fallback_idx
    logger.error("No audio streams found at all.")
    return None
def get_audio_stream_details(video_path):
    """Summarize every audio stream in *video_path*.

    Returns:
        List of dicts with keys: 'index' (absolute stream index), 'codec',
        'channels', 'sample_rate', 'bit_rate', 'language', 'title'.
        Empty list when probing fails or the file has no streams.
    """
    streams = get_stream_info(video_path)
    if not streams:
        return []
    audio_streams = []
    # Fix: the original used enumerate() but never used the index.
    for stream in streams:
        if stream.get('codec_type') != 'audio':
            continue
        tags = stream.get('tags', {})
        audio_streams.append({
            'index': stream.get('index'),  # absolute index within the container
            'codec': stream.get('codec_name', 'unknown'),
            'channels': stream.get('channels', 0),
            'sample_rate': stream.get('sample_rate', 'unknown'),
            'bit_rate': stream.get('bit_rate', 'unknown'),
            'language': tags.get('language', 'und'),
            'title': tags.get('title', ''),
        })
    return audio_streams
def prompt_user_for_audio_stream(video_path, stream_type="foreign"):
    """Prompts the user to select an audio stream from a video file.

    Interactive: prints a table of the file's audio streams and reads a
    selection number from stdin.

    Args:
        video_path: file to inspect via get_audio_stream_details().
        stream_type: label used in prompts (e.g. "foreign", "reference").

    Returns:
        The chosen stream's ABSOLUTE container index, or None when the file
        has no audio streams or the user presses Enter (auto-detection).
    """
    audio_streams = get_audio_stream_details(video_path)
    if not audio_streams:
        logger.error(f"No audio streams found in {os.path.basename(video_path)}")
        return None
    print(f"\n--- Available {stream_type.capitalize()} Audio Streams in {os.path.basename(video_path)} ---")
    print("{:<5} {:<10} {:<8} {:<12} {:<10} {:<12}".format(
        "Sel#", "Stream#", "Language", "Codec", "Channels", "Sample Rate"))
    print("-" * 70)
    for i, stream in enumerate(audio_streams):
        print("{:<5} {:<10} {:<8} {:<12} {:<10} {:<12}".format(
            i, # Selection number (0-based)
            stream['index'], # Actual stream index (absolute)
            stream['language'],
            stream['codec'],
            stream['channels'],
            stream['sample_rate']))
    # Prompt user for selection; loop until a valid number or Enter.
    while True:
        try:
            selection = input(f"\nSelect {stream_type} audio stream by Sel# (or press Enter for auto-detection): ")
            if not selection.strip():
                return None # Auto-detection
            selected_idx = int(selection)
            # Check if the selection is valid based on our displayed numbers
            if 0 <= selected_idx < len(audio_streams):
                # Return the actual stream index, not our selection number
                return audio_streams[selected_idx]['index'] # Return the absolute index
            else:
                print(f"Error: Selection number {selected_idx} is out of range. Please choose 0-{len(audio_streams)-1}.")
                continue
        except ValueError:
            print("Error: Please enter a valid number.")
def format_time(seconds):
    """Render seconds as 'HH:MM:SS:MS'; None or negative collapse to all-zero."""
    if seconds is None or seconds < 0:
        return "00:00:00:000"
    whole = int(seconds)
    ms = int((seconds - whole) * 1000)
    hh, remainder = divmod(whole, 3600)
    mm, ss = divmod(remainder, 60)
    return f"{hh:02d}:{mm:02d}:{ss:02d}:{ms:03d}"
def get_file_duration(file_path, media_type='audio'):
    """Return the container duration of *file_path* in seconds via ffprobe.

    *media_type* is currently informational only — the probe command is the
    same for audio and video. Returns None on any failure (missing tool or
    file, timeout, ffprobe error, or empty/unparsable output).
    """
    global FFPROBE_EXEC
    if not FFPROBE_EXEC:
        logger.error(f"FFprobe path not set for duration check of {os.path.basename(file_path)}.")
        return None
    if not os.path.exists(file_path):
        logger.error(f"File not found for duration check: {file_path}")
        return None
    probe_cmd = [FFPROBE_EXEC, "-v", "error", "-show_entries", "format=duration",
                 "-of", "default=noprint_wrappers=1:nokey=1", file_path]
    try:
        # Timeout guards against hangs on corrupted files.
        result = subprocess.run(probe_cmd, capture_output=True, text=True, check=True, timeout=30)
        text_out = result.stdout.strip()
        if text_out:
            return float(text_out)
        logger.warning(f"ffprobe returned empty duration for {os.path.basename(file_path)}")
        return None
    except subprocess.TimeoutExpired:
        logger.error(f"ffprobe timed out getting duration for {os.path.basename(file_path)}")
        return None
    except subprocess.CalledProcessError as e:
        logger.error(f"ffprobe failed to get duration for {os.path.basename(file_path)}: {e.stderr}")
        return None
    except Exception as e:
        logger.error(f"Error getting duration for {os.path.basename(file_path)}: {e}", exc_info=False)
        return None
# --- Image Pairing Stage Functions ---
def extract_frames_ffmpeg(video_path, output_folder, scene_threshold):
    """Extracts scene change frames using FFmpeg's scene detection.

    Writes frame_NNNNNN.png files into *output_folder* and recovers each
    frame's presentation timestamp by parsing 'showinfo' lines from FFmpeg's
    stderr.

    Returns:
        (success, frame_basenames, pts_times) — the two lists are
        index-aligned; on count mismatch both are trimmed to the shorter one.
    """
    os.makedirs(output_folder, exist_ok=True)
    parsed_pts_times = []
    vid_name = os.path.basename(video_path)
    # Command using scene detection filter
    ffmpeg_command = [
        "ffmpeg", "-hide_banner", "-nostats", "-loglevel", "info",
        "-i", video_path,
        "-vf", f"select='gt(scene,{scene_threshold})',showinfo", # Select frames where scene score > threshold, showinfo logs PTS
        "-vsync", "vfr", # Variable frame rate to capture exact frames
        "-q:v", "2", # High quality PNG output
        os.path.join(output_folder, "frame_%06d.png")
    ]
    logger.info(f"--- Extracting Scene Frames (threshold={scene_threshold}) from {vid_name} ---")
    # capture_stderr=True: run_ffmpeg returns (success, stderr_text).
    success, stderr_output = run_ffmpeg(ffmpeg_command, f"Extract Frames ({vid_name})", verbose_success=False, capture_stderr=True)
    if success and stderr_output:
        # Regex to find pts_time in showinfo output
        pts_time_re = re.compile(r'n:\s*\d+\s+pts:\s*\d+\s+pts_time:(\d+\.?\d*)')
        lines = stderr_output.splitlines()
        for line in lines:
            # Filter for the specific showinfo log lines
            if '[Parsed_showinfo' in line and 'pts_time:' in line:
                match = pts_time_re.search(line)
                if match:
                    try:
                        parsed_pts_times.append(float(match.group(1)))
                    except (ValueError, IndexError):
                        logger.warning(f"Could not parse pts_time from line: {line}")
    elif not success:
        logger.error(f" Frame extraction command failed for {vid_name}.")
        return False, [], []
    # Verify extracted frames match timestamps
    frame_files = sorted(glob.glob(os.path.join(output_folder, "frame_*.png")))
    frame_count = len(frame_files)
    timestamp_count = len(parsed_pts_times)
    if frame_count == 0 or timestamp_count == 0:
        logger.error(f" Frame extraction yielded zero frames or timestamps for {vid_name}.")
        return False, [], []
    final_count = 0
    if frame_count != timestamp_count:
        logger.warning(f" Frame/Timestamp count mismatch ({frame_count} frames vs {timestamp_count} timestamps) for {vid_name}. Using minimum.")
        final_count = min(frame_count, timestamp_count)
        # Trim lists to the minimum count to maintain correspondence
        parsed_pts_times = parsed_pts_times[:final_count]
        frame_files = frame_files[:final_count]
    else:
        final_count = frame_count
    logger.info(f"> Extracted {final_count} scene frames from {vid_name}")
    return True, [os.path.basename(f) for f in frame_files], parsed_pts_times
def process_image_pair_for_match(ref_img_name, foreign_image_list, ref_extract_folder, foreign_extract_folder, match_threshold):
    """Find the foreign frame that best matches one reference frame.

    Compares grayscale frames resized to RESIZE_WIDTH x RESIZE_HEIGHT using
    OpenCV template matching (TM_CCOEFF_NORMED, scores in [-1, 1]).

    Returns:
        (best_foreign_name, best_score) when the best candidate scores at
        least *match_threshold*, else None.
    """
    ref_path = os.path.join(ref_extract_folder, ref_img_name)
    try:
        ref_gray = cv2.imread(ref_path, cv2.IMREAD_GRAYSCALE)
        if ref_gray is None:
            logger.warning(f"Could not read reference image: {ref_img_name}")
            return None
        ref_small = cv2.resize(ref_gray, (RESIZE_WIDTH, RESIZE_HEIGHT), interpolation=cv2.INTER_AREA)
        if ref_small is None or ref_small.size == 0:
            logger.warning(f"Failed to resize reference image: {ref_img_name}")
            return None
    except Exception as exc:
        logger.error(f"Error processing reference image {ref_img_name}: {exc}", exc_info=False)
        return None

    best_name = None
    best_score = -1.0  # start below any possible correlation score
    for candidate_name in foreign_image_list:
        candidate_path = os.path.join(foreign_extract_folder, candidate_name)
        try:
            cand_gray = cv2.imread(candidate_path, cv2.IMREAD_GRAYSCALE)
            if cand_gray is None:
                continue  # unreadable candidate: skip
            cand_small = cv2.resize(cand_gray, (RESIZE_WIDTH, RESIZE_HEIGHT), interpolation=cv2.INTER_AREA)
            if cand_small is None or cand_small.size == 0:
                continue  # resize failure: skip
            # TM_CCOEFF_NORMED: 1.0 means a perfect match.
            scores = cv2.matchTemplate(cand_small, ref_small, cv2.TM_CCOEFF_NORMED)
            _min_v, peak, _min_loc, _max_loc = cv2.minMaxLoc(scores)
            if peak > best_score:
                best_score = peak
                best_name = candidate_name
        except Exception as exc:
            # Log and keep scanning the remaining candidates.
            logger.debug(f"Error comparing {ref_img_name} with {candidate_name}: {exc}", exc_info=False)
            continue
    if best_name is not None and best_score >= match_threshold:
        return (best_name, best_score)
    return None
def filter_similar_ref_images(initial_matches_with_times, ref_extract_folder, similarity_threshold):
    """Filter out reference frames that are too visually similar, using perceptual hashing.

    Near-duplicate reference frames (e.g. from slow fades or static scenes) add
    redundant match pairs; this pass keeps only one representative per group of
    visually similar frames.

    Args:
        initial_matches_with_times: Mapping of reference frame filename -> match data.
            Values are passed through untouched for surviving frames; only the keys
            (filenames of the form ``frame_NNNNNN.png``) are inspected here.
        ref_extract_folder: Directory containing the extracted reference frame images.
        similarity_threshold: Maximum pHash Hamming distance for two frames to count
            as duplicates. A negative value disables filtering entirely.

    Returns:
        dict: Same structure as ``initial_matches_with_times``, minus the entries
        whose reference frame was judged redundant. Of two similar frames, the one
        with the larger file size (assumed more detailed) is kept.
    """
    global similarity_libs_available
    # Graceful no-op when the optional imagehash/Pillow dependencies are missing.
    if not similarity_libs_available:
        logger.info(" Skipping similarity filtering: imagehash/Pillow libraries not available.")
        return initial_matches_with_times
    # Negative threshold is the documented "disabled" sentinel.
    if similarity_threshold < 0:
        logger.info(f" Skipping similarity filtering: threshold ({similarity_threshold}) is negative.")
        return initial_matches_with_times
    if not initial_matches_with_times:
        return {}  # Return empty if no initial matches
    logger.info(f"--- Filtering Similar Reference Frames (pHash Threshold: {similarity_threshold}) ---")
    start_time = time.time()
    # Sort reference frame names numerically based on frame index (e.g. frame_000001.png),
    # so earlier frames are hashed first and act as the retained "anchor" by default.
    frame_num_re = re.compile(r'frame_(\d+).png')
    try:
        ref_names_sorted = sorted(
            initial_matches_with_times.keys(),
            key=lambda name: int(frame_num_re.search(name).group(1))
        )
    except Exception as e:
        # Fall back to lexicographic order if any filename doesn't match the pattern.
        logger.warning(f"Could not sort reference frames numerically, using default sort. Error: {e}")
        ref_names_sorted = sorted(initial_matches_with_times.keys())
    hashes = {}  # Store phash -> (ref_name, file_size) for every frame currently kept
    to_remove_ref_names = set()  # Keep track of reference frames to discard
    for ref_name in tqdm(ref_names_sorted, desc=" Filtering Similar Refs", unit="frame", ncols=100, leave=False):
        if ref_name in to_remove_ref_names:
            continue  # Skip if already marked for removal by an earlier comparison
        ref_path = os.path.join(ref_extract_folder, ref_name)
        if not os.path.exists(ref_path):
            # Missing file can't be hashed or used later; drop its match entry too.
            logger.warning(f"Reference frame {ref_name} not found, skipping similarity check.")
            to_remove_ref_names.add(ref_name)
            continue
        try:
            # File size is the tiebreaker between similar frames; hash is pHash (DCT-based).
            current_size = os.path.getsize(ref_path)
            with Image.open(ref_path) as img_file:
                img_hash = imagehash.phash(img_file)
        except Exception as e:
            # Unhashable frame: keep its match entry but exclude it from dedup decisions.
            logger.warning(f"Could not process {ref_name} for similarity hashing: {e}")
            continue  # Skip this frame if hashing fails
        found_similar = False
        # Mutations to `hashes` are deferred until after the comparison loop, since we
        # iterate over (a snapshot of) it below.
        hashes_to_update = {}  # New hash entry if the current frame replaces an existing one
        hashes_to_delete = []  # Hashes to drop (replaced, or belonging to already-removed frames)
        # Compare current hash with existing stored hashes.
        # Create a copy of items to allow modification during iteration.
        for existing_hash, (existing_ref_name, existing_size) in list(hashes.items()):
            # Lazily purge stale entries whose frame was removed on a previous iteration.
            if existing_ref_name in to_remove_ref_names:
                hashes_to_delete.append(existing_hash)  # Mark the old hash for deletion
                continue
            # Hamming distance between pHashes: lower means more visually similar.
            hash_diff = img_hash - existing_hash
            if hash_diff < similarity_threshold:
                found_similar = True
                # Keep whichever frame has the larger file size (proxy for detail/quality).
                if current_size >= existing_size:
                    # Current frame wins: retire the existing frame and remap the hash.
                    # NOTE: no break here — the current frame may supersede several
                    # stored near-duplicates in one pass.
                    logger.debug(f" '{ref_name}' ({current_size}b) replacing similar '{existing_ref_name}' ({existing_size}b), diff={hash_diff}")
                    to_remove_ref_names.add(existing_ref_name)
                    hashes_to_update[img_hash] = (ref_name, current_size)  # Map new hash to this frame
                    hashes_to_delete.append(existing_hash)  # Mark old hash for deletion
                else:
                    # Existing frame wins: discard the current frame and stop comparing.
                    logger.debug(f" '{ref_name}' ({current_size}b) removed due to similarity with '{existing_ref_name}' ({existing_size}b), diff={hash_diff}")
                    to_remove_ref_names.add(ref_name)
                    break  # Stop comparing once a similar frame is found
        # Apply the deferred hash-map mutations (deletions first, then replacements).
        for h_del in hashes_to_delete:
            if h_del in hashes:
                del hashes[h_del]
        hashes.update(hashes_to_update)  # Apply updates
        # Brand-new frame (no similar match, not removed): register its hash.
        if not found_similar and ref_name not in to_remove_ref_names:
            hashes[img_hash] = (ref_name, current_size)
    # Create the final dictionary excluding the removed frames.
    filtered_matches = {
        ref_name: data
        for ref_name, data in initial_matches_with_times.items()
        if ref_name not in to_remove_ref_names
    }
    removed_count = len(initial_matches_with_times) - len(filtered_matches)
    elapsed_time = time.time() - start_time
    logger.info(f" -> Similarity filtering complete. Removed {removed_count} potentially redundant pairs ({elapsed_time:.2f}s).")
    return filtered_matches
def filter_temporal_inconsistency(matches_after_similarity):
    """Filter out matches whose foreign-frame order contradicts the reference order.

    Frame matching is done independently per reference frame, so a later reference
    frame can be (incorrectly) matched to an *earlier* foreign frame. Since both
    videos play forward, the sequence of matched foreign frame numbers must be
    monotonically non-decreasing once matches are sorted by reference frame. Any
    match that steps backwards is dropped in a single greedy pass.

    Args:
        matches_after_similarity: dict mapping reference frame filename ->
            (foreign_filename, ref_time, foreign_time).

    Returns:
        list: [(ref_filename, foreign_filename, ref_time, foreign_time)] tuples,
        ordered by reference frame, with temporally inconsistent pairs removed.
    """
    if not matches_after_similarity:
        return []  # Nothing to filter
    logger.info("--- Filtering Temporal Inconsistencies ---")
    start_time = time.time()
    match_items = list(matches_after_similarity.items())
    # FIX: dot escaped so only literal ".png" suffixes match (previous pattern's
    # unescaped '.' would also accept e.g. "frame_000001xpng").
    frame_num_re = re.compile(r'frame_(\d+)\.png')
    try:
        # Sort by reference frame number extracted from the filename.
        match_items.sort(key=lambda item: int(frame_num_re.search(item[0]).group(1)))
    except Exception as e:
        logger.warning(f"Could not sort matches numerically by reference frame, using timestamp sort. Error: {e}")
        # Fallback: sort by reference timestamp (index 1 of the value tuple).
        match_items.sort(key=lambda item: item[1][1])
    filtered_list = []
    last_accepted_foreign_num = -1  # Frame number of the most recently accepted foreign match
    removed_count = 0
    for ref_name, (foreign_name, ref_time, foreign_time) in match_items:
        foreign_match = frame_num_re.search(foreign_name)
        if not foreign_match:
            # Unexpected filename format: we cannot judge temporal consistency,
            # so keep the match but warn.
            logger.warning(f"Could not parse frame number from foreign image '{foreign_name}'. Keeping match.")
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            continue
        try:
            current_foreign_num = int(foreign_match.group(1))
        except ValueError:
            # Defensive: \d+ should always convert, but keep the match if it somehow fails.
            logger.warning(f"Could not convert foreign frame number to int for '{foreign_name}'. Keeping match.")
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            continue
        # Accept only if the foreign sequence keeps moving forward; '>=' allows the
        # same foreign frame to be matched by consecutive reference frames.
        if current_foreign_num >= last_accepted_foreign_num:
            filtered_list.append((ref_name, foreign_name, ref_time, foreign_time))
            last_accepted_foreign_num = current_foreign_num  # Update the last accepted number
        else:
            # e.g. Ref 5 -> Foreign 10 then Ref 6 -> Foreign 8: drop the backwards step.
            logger.debug(f" Temporal inconsistency: Ref '{ref_name}' -> Foreign '{foreign_name}' ({current_foreign_num}) is earlier than last accepted ({last_accepted_foreign_num}). Removing.")
            removed_count += 1
    elapsed_time = time.time() - start_time
    logger.info(f" -> Temporal filtering complete. Removed {removed_count} inconsistent pairs ({elapsed_time:.2f}s).")
    # Return a list of tuples: [(ref_filename, foreign_filename, ref_time, foreign_time)]
    return filtered_list
def run_image_pairing_stage(ref_video_path, foreign_video_path, temp_dir, scene_threshold, match_threshold, similarity_threshold):
"""Orchestrates the entire image pairing stage."""
logger.info("\n===== \1 =====")
stage_start_time = time.time()
# Define paths for extracted frames
ref_extract_path = os.path.join(temp_dir, "Extracted_Reference")
foreign_extract_path = os.path.join(temp_dir, "Extracted_Foreign")
# --- Step 1: Extract Frames ---
ref_extract_ok, ref_filenames, ref_timestamps_list = extract_frames_ffmpeg(ref_video_path, ref_extract_path, scene_threshold)
if not ref_extract_ok:
logger.error("Failed to extract reference frames.")
return None
foreign_extract_ok, foreign_filenames, foreign_timestamps_list = extract_frames_ffmpeg(foreign_video_path, foreign_extract_path, scene_threshold)
if not foreign_extract_ok:
logger.error("Failed to extract foreign frames.")
return None
# --- Step 1.5: Calculate Search Window ---
logger.info("--- Calculating Frame Match Search Window ---")
ref_duration = get_file_duration(ref_video_path, media_type='video')
if ref_duration is None or ref_duration <= 0:
logger.warning("Could not determine reference video duration or duration is zero. Frame matching will compare against ALL foreign frames.")
match_search_window_seconds = float('inf') # Effectively disable windowing
else:
match_search_window_seconds = ref_duration * MATCH_WINDOW_PERCENT
logger.info(f" Reference duration: {ref_duration:.2f}s")
logger.info(f" Calculated frame match search window: +/- {match_search_window_seconds:.2f}s ({MATCH_WINDOW_PERCENT*100}%)")
# --- Step 2: Map filenames to timestamps ---
logger.info("--- Mapping Timestamps to Extracted Frames ---")
ref_timestamps_dict = {name: ts for name, ts in zip(ref_filenames, ref_timestamps_list)}
foreign_timestamps_dict = {name: ts for name, ts in zip(foreign_filenames, foreign_timestamps_list)}
if not ref_timestamps_dict or not foreign_timestamps_dict:
logger.error(" ERROR: Failed to create timestamp dictionaries.")
return None
logger.info(f" -> Mapped {len(ref_timestamps_dict)} reference and {len(foreign_timestamps_dict)} foreign timestamps.")