不會畫線稿?PaddlePaddle讓你秒變靈魂畫手!
阿新 • • 發佈:2020-12-13
技術標籤:PaddlePaddle深度學習
不會畫線稿?PaddlePaddle讓你秒變靈魂畫手!
目錄
線稿可以讓空無一物的畫紙產生正形負形,更能以長短虛實、疏密深淡、張弛得當之勢自然勾勒物象之形、神、光、色、體積、質感等,不同造詣的畫者能駕馭出不同的畫面,難度之大深不可測,變化多端甚是神奇。
線描技法源遠流長,可以追溯到我們中國畫的白描,中國古代有許多白描大師,如顧愷之、李公麟等都為我國留下了文藝瑰寶。
但線稿的另一種意義是從某個圖片轉變而來,只有黑色線條,便於臨摹,十分方便。
PaddleHub轉換
由開發者Mr.鄭先生_提供
【PaddleHub模型貢獻】一行程式碼實現從彩色圖提取素描線稿,並轉換成PaddleHub模型,讓大家更方便地使用
一、效果展示
圖片效果
視訊效果
原始視訊連結:
https://player.bilibili.com/player.html?aid=373068021&bvid=BV14Z4y1g7uG&cid=264240737&page=1
原視訊
將視訊內容轉換為線稿:
https://player.bilibili.com/player.html?aid=800614383&bvid=BV1zy4y1v7kQ&cid=262900480&page=1
paddle帶你將視訊線稿化
二、實現步驟
1.匯入必要的庫和模型
import cv2
from scipy import ndimage
from model import Model
import numpy as np
import os
# Pin inference to GPU 0.
os.environ['CUDA_VISIBLE_DEVICES']='0'
# Load the sketch-extraction model from the local `inference_model` directory.
# combined=False presumably means weights are stored as separate param files — TODO confirm against Model's docs.
model = Model('inference_model',use_gpu=True,use_mkldnn=False,combined=False)
2.處理視訊
將視訊按幀進行處理,並儲存到images資料夾中。
def transform_video_to_image(video_file_path, img_path):
    '''
    Save every frame of a video as a numbered JPEG (0.jpg, 1.jpg, ...).

    Args:
        video_file_path: path of the input video file.
        img_path: directory the frames are written to (created if missing).

    Returns:
        The video's frame rate (FPS) as reported by OpenCV.
    '''
    # Create the target directory up front instead of silently failing in imwrite.
    os.makedirs(img_path, exist_ok=True)
    video_capture = cv2.VideoCapture(video_file_path)
    fps = video_capture.get(cv2.CAP_PROP_FPS)
    count = 0
    while True:
        ret, frame = video_capture.read()
        if not ret:
            break
        # os.path.join works whether or not img_path ends with a slash;
        # the original string concat required a trailing '/'.
        cv2.imwrite(os.path.join(img_path, '%d.jpg' % count), frame)
        count += 1
    video_capture.release()
    print('視訊圖片儲存成功, 共有 %d 張' % count)
    return fps


fps = transform_video_to_image('shipin.mp4', 'images/')
3.圖片線稿化
from function import *

# Make sure the output directory exists before writing results.
os.makedirs('output', exist_ok=True)

for home, dirs, files in os.walk('images'):
    for filename in files:
        fullname = os.path.join(home, filename)
        from_mat = cv2.imread(fullname)
        width = float(from_mat.shape[1])
        height = float(from_mat.shape[0])
        # Resize so the longer side becomes 512 px, keeping the aspect ratio.
        # (cv2.resize takes (width, height).)
        if width > height:
            new_width = 512
            new_height = int(512 / width * height)
        else:
            new_width = int(512 / height * width)
            new_height = 512
        from_mat = cv2.resize(from_mat, (new_width, new_height), interpolation=cv2.INTER_AREA)
        # HWC -> CHW so each color channel can be processed independently.
        from_mat = from_mat.transpose((2, 0, 1))
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement (float64, unchanged).
        light_map = np.zeros(from_mat.shape, dtype=float)
        for channel in range(3):
            light_map[channel] = get_light_map_single(from_mat[channel])
        light_map = normalize_pic(light_map)
        light_map = resize_img_512_3d(light_map)
        light_map = light_map.astype('float32')
        line_mat = model.predict(np.expand_dims(light_map, axis=0).astype('float32'))
        # Drop the batch dimension -> (512, 512, 3).
        line_mat = line_mat.transpose((3, 1, 2, 0))[0]
        # Crop back to the resized image's actual size (e.g. (512, 384, 3)).
        line_mat = line_mat[0:int(new_height), 0:int(new_width), :]
        # Collapse channels by per-pixel maximum to get a single-channel sketch.
        line_mat = np.amax(line_mat, 2)
        # Denoise and save.
        show_active_img_and_save_denoise(line_mat, './output/' + filename)
        print('圖片' + filename + '已經完成')
print('全部圖片轉換成功。')
4.合併視訊
def combine_image_to_video(comb_path, output_file_path, fps=30, is_print=False):
    '''
    Combine numbered frame images (0.jpg, 1.jpg, ...) into a video.

    Args:
        comb_path: directory holding the frame images.
        output_file_path: path of the video file to write.
        fps: frame rate of the output video.
        is_print: if True, print per-frame progress.
    '''
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    # BUGFIX: count only .jpg frames — a stray file (e.g. .DS_Store) used to
    # inflate file_len and make the loop read frames that don't exist.
    frame_names = [f for f in os.listdir(comb_path) if f.endswith('.jpg')]
    file_len = len(frame_names)
    if file_len > 0:
        # Probe the first frame for the output video's dimensions.
        temp_img = cv2.imread(os.path.join(comb_path, frame_names[0]))
        img_height, img_width = temp_img.shape[0], temp_img.shape[1]
        out = cv2.VideoWriter(output_file_path, fourcc, fps, (img_width, img_height))
        try:
            for i in range(file_len):
                pic_name = os.path.join(comb_path, str(i) + ".jpg")
                if is_print:
                    print(i + 1, '/', file_len, ' ', pic_name)
                img = cv2.imread(pic_name)
                out.write(img)
        finally:
            # Always release the writer so the container is finalized on disk.
            out.release()


combine_image_to_video('output', 'work/mp4_analysis.mp4', fps)
5.合併音訊
#音訊獲取
def getMusic(video_name):
    """
    Return the audio track of the given video file.
    """
    clip = VideoFileClip(video_name)
    return clip.audio
#音訊新增
def addMusic(video_name, audio, output_video):
    """Mux: attach `audio` to the video `video_name` and write the result to `output_video`."""
    VideoFileClip(video_name).set_audio(audio).write_videofile(output_video)
# Explicit import instead of `import *`, so it's clear where VideoFileClip
# comes from (the functions above resolve the name at call time, so importing
# here, before the call, still works).
from moviepy.editor import VideoFileClip

addMusic('work/mp4_analysis.mp4', getMusic('shipin.mp4'), 'work/mp4_analysisnew.mp4')