Introduction


This is a simple audio/video editing application built jointly with MATLAB APP Designer and Python; it makes a good exercise project for practicing MATLAB APP Designer and object-oriented programming. It was developed as a self-study project with classmates over a holiday break, and because time was limited many features are still incomplete.

The functionality is divided into two parts, audio processing and video processing. The core functions are written in Python on top of open-source libraries such as ffmpeg and OpenCV, while the user interface is built with MATLAB APP Designer; since APP Designer is likewise used in an object-oriented style carried over from other OO languages, it pairs with Python in a simple, practical way. The audio side covers recording, cutting, splicing, trimming, equalization, vocal separation, speed change without pitch change, and pitch change without speed change, plus speech recognition (an API key must be applied for) and speech synthesis based on the Baidu AI Open Platform. The video side covers cutting, splicing, cropping, conversion to GIF, split screen, filters, subtitles, stickers, watermarks, speed change, and more.
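As an aside, "speed change without pitch change" maps directly onto ffmpeg's atempo filter (used by the accelerate function further below), while "pitch change without speed change" can be approximated by rescaling the sample rate and then compensating the tempo. The helper below is only an illustrative sketch of that second idea and is not part of the project's code; the function name, the 44100 Hz sample rate, and the output file name are my own assumptions.

import subprocess

def change_pitch(audio_in, factor, audio_out="pitch_shift.wav"):
    # sketch only: shift pitch by `factor` while keeping the duration unchanged.
    # asetrate re-interprets the sample rate (changing pitch AND speed),
    # aresample brings it back to 44100 Hz, and atempo=1/factor restores the original speed.
    # atempo accepts factors between 0.5 and 2, so factor should stay within [0.5, 2].
    sr = 44100  # assumed input sample rate
    filt = f"asetrate={int(sr * factor)},aresample={sr},atempo={1 / factor}"
    cmd = f'ffmpeg -i {audio_in} -filter:a "{filt}" {audio_out} -y'
    subprocess.call(cmd, shell=True)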

Environment requirements: ffmpeg must be installed, and the .py scripts are best placed in the MATLAB startup path; otherwise the .py files cannot be packaged into the .exe.
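Before wiring this into MATLAB, it is worth confirming that ffmpeg is actually reachable from the PATH the packaged app will see. The check below is an assumption about how you might verify the setup, not part of the original project:

import shutil
import subprocess

def check_ffmpeg():
    # returns True if an ffmpeg executable is found on PATH
    if shutil.which("ffmpeg") is None:
        print("ffmpeg was not found on PATH; install it or add it to PATH first")
        return False
    subprocess.call("ffmpeg -version", shell=True)  # print the version string as a sanity check
    return True

if __name__ == "__main__":
    check_ffmpeg()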


Showcase


(Screenshots of the application GUI were shown here.)


.mlapp File Attachment


Because some features, such as the editing module and the speech recognition module, depend on FFmpeg and the Baidu speech recognition API, contact the author if needed, or try to configure and run them yourself.
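For reference, the Baidu speech features are normally reached through the official baidu-aip Python SDK (pip install baidu-aip). The snippet below is only a hedged sketch of what the recognition call could look like; the placeholder credentials, the assumed 16 kHz mono WAV input, and the function name recognize_speech are mine and are not taken from the project.

from aip import AipSpeech

APP_ID = "your_app_id"          # apply for these on the Baidu AI Open Platform
API_KEY = "your_api_key"
SECRET_KEY = "your_secret_key"

def recognize_speech(wav_path):
    client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
    with open(wav_path, "rb") as f:
        data = f.read()
    # the short-speech API expects 16000 Hz, 16-bit mono audio; dev_pid 1537 selects Mandarin
    result = client.asr(data, "wav", 16000, {"dev_pid": 1537})
    if result.get("err_no") == 0:
        return result["result"][0]
    return ""

Speech synthesis is exposed in the same SDK via client.synthesis(text, 'zh', 1, {'vol': 5}), which returns raw MP3 bytes on success.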

The Python part that calls ffmpeg:

# coding=utf-8
import ffmpeg
import getpass
import subprocess
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os


def comband_av(AUDIO, VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {AUDIO} -i {VIDEO} comband_va.mp4 -y"
    subprocess.call(cmd, shell=True)


def comband_aa(AUDIO1, AUDIO2):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {AUDIO1} -i {AUDIO2} -filter_complex amix=inputs=2:duration=first:dropout_transition=3 out.wav"
    subprocess.call(cmd, shell=True)
    # inputs=2: two inputs
    # duration=first: the output length follows the first input
    # dropout_transition=3: 3-second fade-out when an input ends


def comband_ad(AUDIO,num):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {AUDIO} -filter_complex adelay={num} out_delay.wav"
    subprocess.call(cmd, shell=True)


def pick_v(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vcodec copy -an Silent_film.mp4 -y"
    subprocess.call(cmd, shell=True)


def accelerate(VIDEO,num):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -filter_complex [0:v]setpts={1/num}*PTS[v];[0:a]atempo={num}[a] -map [v] -map [a] out_ac.mp4 -y"
    # -filter_complex builds a complex filter graph. [0:v] selects the video stream of the first input (index 0).
    # setpts=0.5*PTS would multiply every frame's presentation timestamp by 0.5, i.e. halve the duration; [v] names the filtered output. The audio branch (atempo) works the same way.
    # -map routes streams into the output: here the filter graph's video [v] and audio [a] are both mapped into out_ac.mp4.
    subprocess.call(cmd, shell=True)


def cut_v(video, ts, te):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {video} -ss {ts} -codec copy -t {te} output1.mp4 -y"  # -ss {ts}: start time; -t {te}: clip duration (not an end timestamp)
    subprocess.call(cmd, shell=True)


def logo_add(video,logo,position):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    if position=="left_up":
        cmd = f"ffmpeg -i {video} -i {logo} -filter_complex overlay output.mp4 -y" #注意文件名,对比视频转图片,类似
    elif position=="left_down":
        cmd = f"ffmpeg -i {video} -i {logo} -filter_complex overlay=0:H-h output.mp4 -y"
    elif position=="right_down":
        cmd = f"ffmpeg -i {video} -i {logo} -filter_complex overlay=W-w:H-houtput.mp4 -y"
    else:
        cmd = f"ffmpeg -i {video} -i {logo} -filter_complex overlay=W-w output.mp4 -y"
    subprocess.call(cmd, shell=True)


def screen_rec():
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    #cmd1=f"ffmpeg -list_devices true -f dshow -i dummy"
    cmd = f"ffmpeg -f gdigrab -framerate 30 -offset_x 0 -offset_y 0 -video_size 1024x768 -show_region 1 -i desktop Screen_Record.mp4 -y"
    #cmd = f"ffmpeg -f gdigrab -framerate 30 -offset_x 0 -offset_y 0 -video_size 1920*1080 -show_region 1 -i desktop output.mkv -y"
    subprocess.call(cmd, shell=True)


def ch_vv(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vcodec copy -an Vtrailer.mp4 -y"
    subprocess.call(cmd, shell=True)


def comband_vv(file):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -f concat -i {file} -c copy  comband_vv.mp4 -y"
    subprocess.call(cmd, shell=True)



def cyber(image):
    # invert the hue
    image_hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
    image_hls = np.asarray(image_hls, np.float32)
    hue = image_hls[:, :, 0]
    hue[hue < 90] = 180 - hue[hue < 90]
    image_hls[:, :, 0] = hue

    image_hls = np.asarray(image_hls, np.uint8)
    image = cv2.cvtColor(image_hls, cv2.COLOR_HLS2BGR)

    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    image_lab = np.asarray(image_lab, np.float32)

    # raise pixel brightness: make the bright areas brighter
    light_gamma_high = np.power(image_lab[:, :, 0], 0.8)
    light_gamma_high = np.asarray(light_gamma_high / np.max(light_gamma_high) * 255, np.uint8)

    # lower pixel brightness: make the dark areas darker
    light_gamma_low = np.power(image_lab[:, :, 0], 1.2)
    light_gamma_low = np.asarray(light_gamma_low / np.max(light_gamma_low) * 255, np.uint8)

    # shift the colors toward purple
    dark_b = image_lab[:, :, 2] * (light_gamma_low / 255) * 0.1
    dark_a = image_lab[:, :, 2] * (1 - light_gamma_high / 255) * 0.3

    image_lab[:, :, 2] = np.clip(image_lab[:, :, 2] - dark_b, 0, 255)
    image_lab[:, :, 2] = np.clip(image_lab[:, :, 2] - dark_a, 0, 255)

    image_lab = np.asarray(image_lab, np.uint8)
    return cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR)


def process_cyber(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    # source video
    video_path = VIDEO
    print(video_path)
    video_probe = ffmpeg.probe(video_path)
    video_info = next((stream for stream in video_probe['streams'] if stream['codec_type'] == 'video'), None)
    video_frames = int(video_info['nb_frames'])
    width = int(video_info['width'])
    height = int(video_info['height'])
    video_input = ffmpeg.input(video_path)
    in_process = (
        video_input.video.output('pipe:', format='rawvideo', pix_fmt='rgb24', r=30).run_async(pipe_stdout=True)
    )

    # filtered video stream
    tmp_path = 'night_tmp.mp4'
    tmp_process = (
        ffmpeg
            .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), framerate=30)
            .output(tmp_path, pix_fmt='yuv420p', r=30)
            .overwrite_output()
            .run_async(pipe_stdin=True)
    )

    frame_index = 1

    # process the video frame by frame
    while True:
        in_bytes = in_process.stdout.read(width * height * 3)
        if not in_bytes:
            break
        in_frame = (
            np
                .frombuffer(in_bytes, np.uint8)
                .reshape([height, width, 3])
        )

        # gradually widen the filtered region; with a 5-second transition at 30 fps, the cap is 150 frames
        in_frame_bgr = cv2.cvtColor(in_frame, cv2.COLOR_RGB2BGR)
        current_width = int(width * (frame_index / 150))
        in_frame_bgr[:, 0:current_width, :] = cyber(in_frame_bgr[:, 0:current_width, :])
        in_frame = cv2.cvtColor(in_frame_bgr, cv2.COLOR_BGR2RGB)

        tmp_process.stdin.write(
            in_frame
                .astype(np.uint8)
                .tobytes()
        )

        if frame_index < 150:
            frame_index += 1

    # wait for the asynchronous ffmpeg processes to finish
    tmp_process.stdin.close()
    in_process.wait()
    tmp_process.wait()

    # merge the original video's audio into the new video
    result_path = 'night_new.mp4'
    (
        ffmpeg.input(tmp_path)
              .output(video_input.audio, result_path, r=30)
              .run(overwrite_output=True)
    )

    # delete the temporary file
    os.remove(tmp_path)


def narrow_v(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf scale=480:320 -acodec aac -vcodec h264 narrow.mp4 -y"
    subprocess.call(cmd, shell=True)


def procv_pics(VIDEO,FPS):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'  # the extracted frames go into a folder named pics on the desktop
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -r {FPS} -f image2 {str_txt + 'pics/'}image%3d.jpg -y" #可指定保存为jpg,jpeg,png等
    subprocess.call(cmd, shell=True)


def procpics_v(fps):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/images'
    str_txt2 = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -f image2 -i image%03d.jpg  -vcodec libx264 -r {fps} {str_txt2} filter.mp4 -y" #注意文件名,对比视频转图片,类似
    subprocess.call(cmd, shell=True)


def oldfilm_process(rootdir):
    os.getcwd()
    user_name = getpass.getuser()
    for parent, dirnames, filenames in os.walk(rootdir):  # walk every image under rootdir
        for filename in filenames:
            currentPath = os.path.join(parent, filename)
            image = plt.imread(currentPath)                      # matplotlib loads RGB
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)       # cyber() expects OpenCV's BGR order
            res = cv2.cvtColor(cyber(image), cv2.COLOR_BGR2RGB)  # convert back before saving with matplotlib
            plt.imsave(f"C:\\Users/{user_name}/Desktop/images" + '//' + filename, res)


def slice(VIDEO,width,height,x,y):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -strict -2 -vf crop={width}:{height}:{x}:{y} slice.mp4 -y"
                                            #crop=width:height:x:y
    subprocess.call(cmd, shell=True)


def SE_colorful(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf hue='H=2*PI*t:s=sin(2*PI*t)+1' colorful.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_shinning(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf vignette='PI/4+random(1)*PI/50':eval=frame shinning.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_fadein(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf fade=in:0:90 fadein.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_black(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf lutyuv='u=128:v=128' black.mp4 -y"
    subprocess.call(cmd, shell=True)

def SE_sharp(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5 sharp.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_Helo(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf vignette=PI/4 Helo.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_blur(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf vignette=PI/4 blur.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_shake(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf crop='in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2+((in_h-out_h)/2)*sin(n/7)' shake.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_relief(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2' relief.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_noise(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf noise=alls=20:allf=t+u noise.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_fliplr(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf geq=p(W-X\\,Y) fliplr.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_flipud(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf vflip flipud.mp4 -y"
    subprocess.call(cmd, shell=True)


def SE_mirror(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf crop=iw/2:ih:0:0,split[left][tmp];[tmp]hflip[right];[left]pad=iw*2[a];[a][right]overlay=w mirror.mp4 -y"
    subprocess.call(cmd, shell=True)


def part_silent_v(audio1, ts, te):  # mute a time range of a video's audio track
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {audio1} -af volume=enable='between(t,{ts},{te})':volume=0 part_silent.mp4 -y"
    subprocess.call(cmd, shell=True)


def part_silent_au(audio1, ts, te):  # mute a time range of an audio file
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {audio1} -af volume=enable='between(t,{ts},{te})':volume=0 part_silent.mp3 -y"
    subprocess.call(cmd, shell=True)



def text(VIDEO,ts,te,color,size,x,y,text):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf drawtext=enable='between(t,{ts},{te})':fontcolor={color}:fontsize={size}:fontfile=HanYiYanKaiW-2.ttf:text='{text}':x={x}:y={y} text.mp4 -y"
    subprocess.call(cmd, shell=True)


def text_file(VIDEO,ts,te,color,size,x,y):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf drawtext=enable='between(t,{ts},{te})':fontcolor={color}:fontsize={size}:fontfile=HanYiYanKaiW-2.ttf:textfile='fftext.txt':x={x}:y={y} text_file.mp4 -y"
    subprocess.call(cmd, shell=True)


def text_new(VIDEO,ts,te,color,size,ttf,x,y,text):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf drawtext=enable='between(t,{ts},{te})':fontcolor={color}:fontsize={size}:fontfile={ttf}:text='{text}':x={x}:y={y} text.mp4 -y"
    subprocess.call(cmd, shell=True)


def text_newfile(VIDEO,ts,te,color,size,ttf,x,y):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -vf drawtext=enable='between(t,{ts},{te})':fontcolor={color}:fontsize={size}:fontfile={ttf}:textfile='fftext.txt':x={x}:y={y} text_file.mp4 -y"
    subprocess.call(cmd, shell=True)


def superposition(VIDEO1,VIDEO2,x,y):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"Ffmpeg -i {VIDEO1} -i {VIDEO2} -filter_complex  overlay=x={x}:y={y} super_out.mp4 -y"
    subprocess.call(cmd, shell=True)


def GIF_v(VIDEO1,GIF):  # overlay an animated GIF onto the video
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -y -i {VIDEO1} -ignore_loop 0 -i {GIF}  -filter_complex [0:0]scale=iw:ih[a];[1:0]scale=iw/4:-1[wm];[a][wm]overlay=x=10:10:shortest=1:enable='between(t,0,5)' gif_v.mp4 -y"
    subprocess.call(cmd, shell=True)


def avi_mp4(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -c:v libx264 -crf 19 -preset slow -c:a aac -b:a 192k -ac 2 a.mp4 -y"
    subprocess.call(cmd, shell=True)


def v_compress(VIDEO):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {VIDEO} -b:v 700k v_compress.mp4 -y"
    subprocess.call(cmd, shell=True)


def au_fade_out(AUDIO,st,d):
    os.getcwd()
    user_name = getpass.getuser()
    str_txt = 'C:/Users/' + user_name + '/Desktop/'
    os.chdir(str_txt)
    cmd = f"ffmpeg -i {AUDIO} -filter_complex afade=t=out:st={st}:d={d} au_fade_out.mp3"
    subprocess.call(cmd, shell=True)
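
As a usage note, every helper above writes its output to the current user's desktop, so the module can be exercised directly from Python; from MATLAB APP Designer the same functions are reached through MATLAB's py. interface (e.g. py.av_tools.cut_v(...)) once the script is on the MATLAB path. The module name av_tools.py and the sample input files below (which would have to sit on the desktop) are assumptions for illustration only:

# assuming the functions above are saved as av_tools.py
import av_tools

av_tools.cut_v("input.mp4", 10, 30)            # 30-second clip starting at t = 10 s -> output1.mp4
av_tools.SE_noise("output1.mp4")               # add animated noise/grain -> noise.mp4
av_tools.comband_av("music.mp3", "noise.mp4")  # mux the audio and the video -> comband_va.mp4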