# inference_realesrgan_video.py
import argparse
import glob
import mimetypes
import os
import queue
import shutil

import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.logger import AvgTimer
from tqdm import tqdm

from realesrgan import IOConsumer, PrefetchReader, RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
  13. def main():
  14. """Inference demo for Real-ESRGAN.
  15. It mainly for restoring anime videos.
  16. """
  17. parser = argparse.ArgumentParser()
  18. parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
  19. parser.add_argument(
  20. '-n',
  21. '--model_name',
  22. type=str,
  23. default='RealESRGAN_x4plus',
  24. help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
  25. 'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
  26. 'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
  27. parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
  28. parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
  29. parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
  30. parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
  31. parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
  32. parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
  33. parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
  34. parser.add_argument('--half', action='store_true', help='Use half precision during inference')
  35. parser.add_argument('-v', '--video', action='store_true', help='Output a video using ffmpeg')
  36. parser.add_argument('-a', '--audio', action='store_true', help='Keep audio')
  37. parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
  38. parser.add_argument('--consumer', type=int, default=4, help='Number of IO consumers')
  39. parser.add_argument(
  40. '--alpha_upsampler',
  41. type=str,
  42. default='realesrgan',
  43. help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
  44. parser.add_argument(
  45. '--ext',
  46. type=str,
  47. default='auto',
  48. help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
  49. args = parser.parse_args()
  50. # ---------------------- determine models according to model names ---------------------- #
  51. args.model_name = args.model_name.split('.')[0]
  52. if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
  53. model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
  54. netscale = 4
  55. elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
  56. model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
  57. netscale = 4
  58. elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
  59. model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
  60. netscale = 2
  61. elif args.model_name in [
  62. 'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
  63. ]: # x2 VGG-style model (XS size)
  64. model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
  65. netscale = 2
  66. elif args.model_name in [
  67. 'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
  68. ]: # x4 VGG-style model (XS size)
  69. model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
  70. netscale = 4
  71. # ---------------------- determine model paths ---------------------- #
  72. model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
  73. if not os.path.isfile(model_path):
  74. model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
  75. if not os.path.isfile(model_path):
  76. raise ValueError(f'Model {args.model_name} does not exist.')
  77. # restorer
  78. upsampler = RealESRGANer(
  79. scale=netscale,
  80. model_path=model_path,
  81. model=model,
  82. tile=args.tile,
  83. tile_pad=args.tile_pad,
  84. pre_pad=args.pre_pad,
  85. half=args.half)
  86. if args.face_enhance: # Use GFPGAN for face enhancement
  87. from gfpgan import GFPGANer
  88. face_enhancer = GFPGANer(
  89. model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
  90. upscale=args.outscale,
  91. arch='clean',
  92. channel_multiplier=2,
  93. bg_upsampler=upsampler)
  94. os.makedirs(args.output, exist_ok=True)
  95. # for saving restored frames
  96. save_frame_folder = os.path.join(args.output, 'frames_tmpout')
  97. os.makedirs(save_frame_folder, exist_ok=True)
  98. if mimetypes.guess_type(args.input)[0].startswith('video'): # is a video file
  99. video_name = os.path.splitext(os.path.basename(args.input))[0]
  100. frame_folder = os.path.join('tmp_frames', video_name)
  101. os.makedirs(frame_folder, exist_ok=True)
  102. # use ffmpeg to extract frames
  103. os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_folder}/frame%08d.png')
  104. # get image path list
  105. paths = sorted(glob.glob(os.path.join(frame_folder, '*')))
  106. if args.video:
  107. if args.fps is None:
  108. # get input video fps
  109. import ffmpeg
  110. probe = ffmpeg.probe(args.input)
  111. video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
  112. args.fps = eval(video_streams[0]['avg_frame_rate'])
  113. elif mimetypes.guess_type(args.input)[0].startswith('image'): # is an image file
  114. paths = [args.input]
  115. video_name = 'video'
  116. else:
  117. paths = sorted(glob.glob(os.path.join(args.input, '*')))
  118. video_name = 'video'
  119. timer = AvgTimer()
  120. timer.start()
  121. pbar = tqdm(total=len(paths), unit='frame', desc='inference')
  122. # set up prefetch reader
  123. reader = PrefetchReader(paths, num_prefetch_queue=4)
  124. reader.start()
  125. que = queue.Queue()
  126. consumers = [IOConsumer(args, que, f'IO_{i}') for i in range(args.consumer)]
  127. for consumer in consumers:
  128. consumer.start()
  129. for idx, (path, img) in enumerate(zip(paths, reader)):
  130. imgname, extension = os.path.splitext(os.path.basename(path))
  131. if len(img.shape) == 3 and img.shape[2] == 4:
  132. img_mode = 'RGBA'
  133. else:
  134. img_mode = None
  135. try:
  136. if args.face_enhance:
  137. _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
  138. else:
  139. output, _ = upsampler.enhance(img, outscale=args.outscale)
  140. except RuntimeError as error:
  141. print('Error', error)
  142. print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
  143. else:
  144. if args.ext == 'auto':
  145. extension = extension[1:]
  146. else:
  147. extension = args.ext
  148. if img_mode == 'RGBA': # RGBA images should be saved in png format
  149. extension = 'png'
  150. save_path = os.path.join(save_frame_folder, f'{imgname}_out.{extension}')
  151. que.put({'output': output, 'save_path': save_path})
  152. pbar.update(1)
  153. torch.cuda.synchronize()
  154. timer.record()
  155. avg_fps = 1. / (timer.get_avg_time() + 1e-7)
  156. pbar.set_description(f'idx {idx}, fps {avg_fps:.2f}')
  157. for _ in range(args.consumer):
  158. que.put('quit')
  159. for consumer in consumers:
  160. consumer.join()
  161. pbar.close()
  162. # merge frames to video
  163. if args.video:
  164. video_save_path = os.path.join(args.output, f'{video_name}_{args.suffix}.mp4')
  165. if args.audio:
  166. os.system(
  167. f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} -i {args.input}'
  168. f' -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
  169. else:
  170. os.system(f'ffmpeg -i {save_frame_folder}/frame%08d_out.{extension} '
  171. f'-c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
  172. # delete tmp file
  173. shutil.rmtree(save_frame_folder)
  174. if os.path.isdir(frame_folder):
  175. shutil.rmtree(frame_folder)
  176. if __name__ == '__main__':
  177. main()