Table of Contents
  • Preface
  • Preparation
    • How it works
    • Modules
    • Tools
  • Code walkthrough
    • Complete code

Preface

When we watch videos, odd mosaic patches sometimes appear and spoil the viewing experience. So how are those mosaics applied so precisely?


In this post we'll use Python to mask faces in a video automatically!

Preparation

For the environment, Python 3.8 and PyCharm 2021 are all we need.

How it works

Split the video into an audio track and the picture;

Detect the faces that appear in the picture, compare them with the target face, and mask the matching faces;

Add the audio back to the processed video (the two ffmpeg commands sketched below correspond to steps 1 and 3).
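Concretely, steps 1 and 3 boil down to two ffmpeg command lines. The file names here are only illustrative; these are essentially the command strings that the Python functions later in this post assemble and run:

ffmpeg -i cut.mp4 -f mp3 cut.mp3
ffmpeg -i output.mp4 -i cut.mp3 -strict -2 -f mp4 output-f.mp4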

Modules

Install the cv2 module manually with pip install opencv-python.
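The face_recognition library used in the code also needs to be installed. A typical sequence is sketched below; note that this is only a sketch, and dlib in particular may additionally require CMake and a C++ build toolchain depending on your platform:

pip install opencv-python
pip install cmake
pip install dlib
pip install face_recognition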

Tools

We also need to install ffmpeg, an audio/video transcoding tool.
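ffmpeg has to be reachable from the command line (i.e. on the PATH), because the script calls it through subprocess. A quick way to check the install is:

ffmpeg -version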


Code walkthrough

Import the required modules

import cv2
import face_recognition  # face recognition library (~99.7% accuracy); depends on cmake and dlib
import subprocess
      

Convert the video to audio

def video2mp3(file_name):
    """
    :param file_name: path to the video file
    :return:
    """
    outfile_name = file_name.split('.')[0] + '.mp3'
    # build the ffmpeg command that extracts the audio track as mp3
    cmd = 'ffmpeg -i ' + file_name + ' -f mp3 ' + outfile_name
    print(cmd)
    # shell=True so the command string is run through the shell on any platform
    subprocess.call(cmd, shell=True)
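A quick usage sketch (the file name is illustrative; note that split('.')[0] keeps only the part before the first dot, so paths containing extra dots would be truncated):

video2mp3('cut.mp4')  # produces cut.mp3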
      

Mask the faces

def mask_video(input_video, output_video, mask_path='mask.jpg'):
    """
    :param input_video: video to be masked
    :param output_video: masked output video
    :param mask_path: image pasted over matching faces
    :return:
    """
    # read the mask image
    mask = cv2.imread(mask_path)
    # open the video
    cap = cv2.VideoCapture(input_video)
    # video fps, width and height
    v_fps = cap.get(cv2.CAP_PROP_FPS)
    v_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    v_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    # writer settings: mp4 container, same frame size as the source
    size = (int(v_width), int(v_height))
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    # output video
    out = cv2.VideoWriter(output_video, fourcc, v_fps, size)

    # encoding of the known (target) face
    known_image = face_recognition.load_image_file('tmr.jpg')
    biden_encoding = face_recognition.face_encodings(known_image)[0]

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # face_recognition expects RGB, while OpenCV reads frames as BGR
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # detect face locations; each location is (top, right, bottom, left)
            face_locations = face_recognition.face_locations(rgb_frame)

            for (top, right, bottom, left) in face_locations:
                print((top, right, bottom, left))
                # crop a slightly enlarged face region for encoding
                unknown_image = rgb_frame[max(top - 50, 0):bottom + 50, max(left - 50, 0):right + 50]
                encodings = face_recognition.face_encodings(unknown_image)
                if encodings:
                    unknown_encoding = encodings[0]

                    # compare against the known face
                    results = face_recognition.compare_faces([biden_encoding], unknown_encoding)
                    # paste the mask over a matching face
                    if results == [True]:
                        face_mask = cv2.resize(mask, (right - left, bottom - top))
                        frame[top:bottom, left:right] = face_mask
            out.write(frame)
        else:
            break

    # release resources so the output file is finalized
    cap.release()
    out.release()
      

Add the audio back to the video

def video_add_mp3(file_name, mp3_file):
    """
    :param file_name: video file (picture only)
    :param mp3_file: audio file
    :return:
    """
    outfile_name = file_name.split('.')[0] + '-f.mp4'
    # mux the audio track back into the masked video
    subprocess.call('ffmpeg -i ' + file_name + ' -i ' + mp3_file + ' -strict -2 -f mp4 ' + outfile_name, shell=True)
      

Complete code

import cv2
import face_recognition  # face recognition library (~99.7% accuracy); depends on cmake and dlib
import subprocess


def video2mp3(file_name):

    outfile_name = file_name.split('.')[0] + '.mp3'
    cmd = 'ffmpeg -i ' + file_name + ' -f mp3 ' + outfile_name
    print(cmd)
    subprocess.call(cmd, shell=True)


def mask_video(input_video, output_video, mask_path='mask.jpg'):

    # read the mask image
    mask = cv2.imread(mask_path)
    # open the video
    cap = cv2.VideoCapture(input_video)
    # video fps, width and height
    v_fps = cap.get(cv2.CAP_PROP_FPS)
    v_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    v_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    # writer settings: mp4 container, same frame size as the source
    size = (int(v_width), int(v_height))
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    # output video
    out = cv2.VideoWriter(output_video, fourcc, v_fps, size)

    # encoding of the known (target) face
    known_image = face_recognition.load_image_file('tmr.jpg')
    biden_encoding = face_recognition.face_encodings(known_image)[0]

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # face_recognition expects RGB, while OpenCV reads frames as BGR
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # detect face locations; each location is (top, right, bottom, left)
            face_locations = face_recognition.face_locations(rgb_frame)

            for (top, right, bottom, left) in face_locations:
                print((top, right, bottom, left))
                # crop a slightly enlarged face region for encoding
                unknown_image = rgb_frame[max(top - 50, 0):bottom + 50, max(left - 50, 0):right + 50]
                encodings = face_recognition.face_encodings(unknown_image)
                if encodings:
                    unknown_encoding = encodings[0]

                    # compare against the known face
                    results = face_recognition.compare_faces([biden_encoding], unknown_encoding)
                    # paste the mask over a matching face
                    if results == [True]:
                        face_mask = cv2.resize(mask, (right - left, bottom - top))
                        frame[top:bottom, left:right] = face_mask
            out.write(frame)
        else:
            break

    # release resources so the output file is finalized
    cap.release()
    out.release()


def video_add_mp3(file_name, mp3_file):

    outfile_name = file_name.split('.')[0] + '-f.mp4'
    subprocess.call('ffmpeg -i ' + file_name + ' -i ' + mp3_file + ' -strict -2 -f mp4 ' + outfile_name, shell=True)


if __name__ == '__main__':
    # 1. extract the audio track from the source video
    video2mp3('cut.mp4')
    # 2. mask the matching faces frame by frame
    mask_video(input_video='cut.mp4', output_video='output.mp4')
    # 3. merge the audio back into the masked video
    video_add_mp3(file_name='output.mp4', mp3_file='cut.mp3')
      

Go ahead and give it a try!
