mirror of https://github.com/MichaByte/highlight-video-maker.git (synced 2026-01-29 05:32:12 -05:00)
working on transitions, not done
This commit is contained in:
parent 88d1aa0446
commit 486d2c2d64
1 changed file with 125 additions and 31 deletions
@@ -11,8 +11,10 @@ import click
 
 from .logger import get_logger
 
 
 logger: Logger
 
+XFADE_TRANSITIONS = ["fade", "slideleft", "slidedown", "smoothup", "smoothleft", "circleopen", "diagtl", "horzopen", "fadegrays", "dissolve", "pixelize", "hrwind"]
+
 @click.group()
 @click.option(
@@ -113,9 +115,88 @@ def get_amplitude_of_segment(clip: Path):
         check=True,
         capture_output=True,
     ).stderr
+    logger.debug(res)
     return float(res.decode().split("mean_volume: ")[1].split(" dB")[0])
 
 
+def build_input_flags(video_files: List[str]) -> str:
+    return " ".join(f'-i "{video}"' for video in video_files)
+
+
+def build_preprocess_filters(video_files: List[str]) -> tuple[list[str], List[str], List[str]]:
+    filters: List[str] = []
+    video_labels: List[str] = []
+    audio_labels: List[str] = []
+    for i in range(len(video_files)):
+        filters.append(f'[{i}:v]format=yuv420p,scale=1280:720,setpts=PTS-STARTPTS,fps=30[v{i}];')
+        filters.append(f'[{i}:a]aresample=async=1[a{i}];')
+        video_labels.append(f'v{i}')
+        audio_labels.append(f'a{i}')
+    return filters, video_labels, audio_labels
+
+
+def build_transition_filters_dynamic(
+    video_labels: List[str],
+    audio_labels: List[str],
+    xfade_transitions: List[str],
+    durations: List[float],
+    fade_duration: float = 1.0
+) -> tuple[List[str], List[str], str, str]:
+    vf_filters: List[str] = []
+    af_filters: List[str] = []
+
+    offset = 0.0
+    for i in range(len(video_labels) - 1):
+        transition = random.choice(xfade_transitions)
+        offset += durations[i] - fade_duration
+
+        out_v = f'vxf{i+1}'
+        out_a = f'acf{i+1}'
+
+        vf_filters.append(
+            f'[{video_labels[i]}][{video_labels[i+1]}]xfade='
+            f'transition={transition}:duration={fade_duration}:offset={offset:.2f}[{out_v}];'
+        )
+        video_labels[i+1] = out_v
+
+        af_filters.append(
+            f'[{audio_labels[i]}][{audio_labels[i+1]}]acrossfade='
+            f'd={fade_duration}:c1=tri:c2=tri[{out_a}];'
+        )
+        audio_labels[i+1] = out_a
+
+    return vf_filters, af_filters, video_labels[-1], audio_labels[-1]
+
+
+def add_watermark_filter(video_label: str, watermark_index: int) -> str:
+    return (
+        f'[{watermark_index}]format=rgba,colorchannelmixer=aa=0.5[logo];'
+        f'[{video_label}][logo]overlay=W-w-30:H-h-30:format=auto,format=yuv420p[vidout];'
+    )
+
+
+def assemble_filter_complex(
+    pre_filters: List[str],
+    xfade_filters: List[str],
+    audio_fades: List[str],
+    watermark_filter: str
+) -> str:
+    return "\n".join(pre_filters + xfade_filters + audio_fades + [watermark_filter])
+
+
+def run_ffmpeg_command(
+    input_flags: str,
+    filter_complex: str,
+    output_file: str,
+    watermark_image: str,
+    final_audio_label: str
+) -> None:
+    cmd: str = f'''
+    ffmpeg -y {input_flags} -i "{watermark_image}" \
+    -filter_complex "{filter_complex}" \
+    -map "[vidout]" -map "[{final_audio_label}]" \
+    -c:v libvpx-vp9 -b:v 4M \
+    -c:a aac -b:a 128k "{output_file}"
+    '''
+    print(cmd)
+    subprocess.run(cmd, shell=True, check=True)
+
+
 @cli.command()
 @click.option(
     "--input-dir",
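The offset arithmetic in build_transition_filters_dynamic follows the usual xfade chaining rule: each crossfade starts fade_duration seconds before the end of everything joined so far, so the running offset grows by duration minus fade on every step. A minimal standalone sketch of that arithmetic (the clip durations below are made-up example values):

# Sketch of the offset accumulation used by build_transition_filters_dynamic.
durations = [10.0, 8.0, 12.0]  # hypothetical clip lengths in seconds
fade_duration = 0.5

offset = 0.0
for i in range(len(durations) - 1):
    offset += durations[i] - fade_duration
    print(f"xfade {i} -> {i + 1} starts at offset={offset:.2f}s")

# xfade 0 -> 1 starts at offset=9.50s
# xfade 1 -> 2 starts at offset=17.00s

Subtracting fade_duration on every iteration (not just the first) is what keeps later transitions aligned, because each crossfade shortens the combined stream by one fade length.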
@@ -205,10 +286,9 @@ def run(
         representative_video_audio_levels[seg] = representative_video_audio_futures[
             seg
         ].result()
 
     highest = dict(Counter(representative_video_audio_levels).most_common(10))
     loudest_seg_indexes: List[int] = [int(str(Path(k).stem)) for k in highest.keys()]
+    print(loudest_seg_indexes, representative_video_segments)
     for video in raw_videos[2]:
         out_folder = Path(CACHE_DIR, "loudest", Path(video).stem)
         out_folder.mkdir(parents=True, exist_ok=True)
@@ -219,44 +299,58 @@ def run(
                 seg,
                 out_folder.parent,
             )
+    video_files: List[str] = []
     with open(str(Path(CACHE_DIR, "list.txt")), "w") as f:
         for seg in loudest_seg_indexes:
             random_seg = Path(random.choice(raw_videos[2]))
+            vid_path = Path(CACHE_DIR, "loudest", random_seg.stem, str(seg) + random_seg.suffix)
             f.write(
-                f"file '{Path(CACHE_DIR, "loudest", random_seg.stem, str(seg) + random_seg.suffix)}'\n"
+                f"file '{vid_path}'\n"
             )
+            video_files.append(str(vid_path.resolve()))
 
     logger.info("Creating horizontal video...")
-    # Horizontal Pipeline: Concatenate clips and overlay a semi‑transparent watermark.
-    subprocess.run(
-        f'''ffmpeg -y -f concat -safe 0 -i "{Path(CACHE_DIR, "list.txt")}" -i "{watermark_image}" \
-        -filter_complex "
-        [1]format=rgba,colorchannelmixer=aa=0.5[logo];
-        [0][logo]overlay=W-w-30:H-h-30:format=auto,format=yuv420p
-        " -c:a aac -b:a 128k "{horiz_output_file}"''',
-        shell=True,
-        check=True,
-        capture_output=True,
+    input_flags: str = build_input_flags(video_files)
+    pre_filters, vlabels, alabels = build_preprocess_filters(video_files)
+    durations = [get_video_duration(Path(vf)) for vf in video_files]
+    vfades, afades, final_v, final_a = build_transition_filters_dynamic(
+        vlabels, alabels, XFADE_TRANSITIONS, durations, 0.5
     )
 
-    logger.info("Creating vertical video...")
-    # Vertical Pipeline: Concatenate, crop (zoom), split & blur for a vertical aspect ratio,
-    # then overlay a centered, opaque watermark at the bottom.
-    subprocess.run(
-        f'''ffmpeg -y -f concat -safe 0 -i "{Path(CACHE_DIR, "list.txt")}" -i "{watermark_image}" \
-        -filter_complex "
-        [0]crop=3/4*in_w:in_h[zoomed];
-        [zoomed]split[original][copy];
-        [copy]scale=-1:ih*(4/3)*(4/3),crop=w=ih*9/16,gblur=sigma=17:steps=5[blurred];
-        [blurred][original]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[vert];
-        [vert][1]overlay=(W-w)/2:H-h-30,format=yuv420p
-        " -c:a aac -b:a 128k "{vert_output_file}"''',
-        shell=True,
-        check=True,
-        capture_output=True,
-    )
+    watermark_filter: str = add_watermark_filter(final_v, len(video_files))
+    full_filter: str = assemble_filter_complex(pre_filters, vfades, afades, watermark_filter)
+    run_ffmpeg_command(watermark_image=str(watermark_image), output_file=str(vert_output_file), input_flags=input_flags, filter_complex=full_filter, final_audio_label=final_a)
+
+    # # Horizontal Pipeline: Concatenate clips and overlay a semi‑transparent watermark.
+    # subprocess.run(
+    # f'''ffmpeg -y -f concat -safe 0 -i "{Path(CACHE_DIR, "list.txt")}" -i "{watermark_image}" \
+    # -filter_complex "
+    # [1]format=rgba,colorchannelmixer=aa=0.5[logo];
+    # [0][logo]overlay=W-w-30:H-h-30:format=auto,format=yuv420p
+    # " -c:a aac -b:a 128k "{horiz_output_file}"''',
+    # shell=True,
+    # check=True,
+    # capture_output=True,
+    # )
+
+    # logger.info("Creating vertical video...")
+    # # Vertical Pipeline: Concatenate, crop (zoom), split & blur for a vertical aspect ratio,
+    # # then overlay a centered, opaque watermark at the bottom.
+    # subprocess.run(
+    # f'''ffmpeg -y -f concat -safe 0 -i "{Path(CACHE_DIR, "list.txt")}" -i "{watermark_image}" \
+    # -filter_complex "
+    # [0]crop=3/4*in_w:in_h[zoomed];
+    # [zoomed]split[original][copy];
+    # [copy]scale=-1:ih*(4/3)*(4/3),crop=w=ih*9/16,gblur=sigma=17:steps=5[blurred];
+    # [blurred][original]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[vert];
+    # [vert][1]overlay=(W-w)/2:H-h-30,format=yuv420p
+    # " -c:a aac -b:a 128k "{vert_output_file}"''',
+    # shell=True,
+    # check=True,
+    # capture_output=True,
+    # )
 
     logger.info("Video processing pipeline completed.")
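For orientation only, here is a hand-assembled example of the filter graph the new helpers would emit for two clips plus the watermark as the third ffmpeg input (index 2, i.e. len(video_files)); the labels and numbers are illustrative and simply mirror the f-string patterns in build_preprocess_filters, build_transition_filters_dynamic and add_watermark_filter:

# Illustrative two-clip filter_complex, joined the same way assemble_filter_complex joins its parts.
pre = [
    '[0:v]format=yuv420p,scale=1280:720,setpts=PTS-STARTPTS,fps=30[v0];',
    '[0:a]aresample=async=1[a0];',
    '[1:v]format=yuv420p,scale=1280:720,setpts=PTS-STARTPTS,fps=30[v1];',
    '[1:a]aresample=async=1[a1];',
]
xfades = ['[v0][v1]xfade=transition=fade:duration=0.5:offset=9.50[vxf1];']
afades = ['[a0][a1]acrossfade=d=0.5:c1=tri:c2=tri[acf1];']
watermark = (
    '[2]format=rgba,colorchannelmixer=aa=0.5[logo];'
    '[vxf1][logo]overlay=W-w-30:H-h-30:format=auto,format=yuv420p[vidout];'
)
print("\n".join(pre + xfades + afades + [watermark]))

The [vidout] and acf1 labels at the end are what run_ffmpeg_command maps into the output file via -map "[vidout]" and -map with the final audio label.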