add new upscaler test
@@ -314,7 +314,7 @@ class Handler:
             self.command_list.append( ( self.files, fsrpath, quality_setting, i, self.maxlength, self.os_type ) )

         self.pool = multiprocessing.Pool( self.threads )
-        if ( mode == 'NN' ):
+        if ( mode == 'B' ):
             self.pool.starmap( bilinearEngine, self.command_list );
         elif ( mode == 'fsr' ):
             self.pool.starmap( upscalerEngine, self.command_list );
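For context on the dispatch above: the handler collects one argument tuple per job in self.command_list and hands the whole list to multiprocessing.Pool.starmap, which unpacks each tuple into the selected engine function. A minimal, self-contained sketch of that pattern (the worker function and tuple fields below are simplified placeholders, not the project's actual signatures):

import multiprocessing

def fake_engine( files, quality_setting, number ):
    # stand-in for bilinearEngine / upscalerEngine: receives one unpacked tuple per job
    return 'job %d: %d file(s) at quality %s' % ( number, len( files ), quality_setting )

if __name__ == '__main__':
    command_list = [ ( [ 'a.png', 'b.png' ], 'balanced', i ) for i in range( 4 ) ]
    with multiprocessing.Pool( 2 ) as pool:
        # starmap unpacks each tuple into the worker's positional arguments
        print( pool.starmap( fake_engine, command_list ) )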
@@ -447,7 +447,9 @@ def upscalerEngine ( files, fsrpath, quality_setting, number, maxlength, os_type
     print( '\n\nCompleted executing Job\n\n\n PROCESS: ', number, '\n\n\n' );


-def bilinearEngine ( files, fsrpath, quality_setting, number, maxlength, os_type ):
+def bilinearEngine ( files, fsrpath, quality_setting, number, maxlength, os_type, version ):
+    if ( version == 'HQC' ):
+        scaler = 'HighQualityCubic'
     files = files;
     # Refactoring of commands that are longer than 32K characters
     fileout = [];
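The new version parameter is only checked for 'HQC' in the lines shown, which selects the 'HighQualityCubic' scaler string. Presumably the other engine codes pick other scaler names; a hypothetical sketch of that selection (everything except the 'HQC' -> 'HighQualityCubic' pair is an assumption, not taken from this commit):

def select_scaler( version ):
    # 'HQC' -> 'HighQualityCubic' appears in the diff; the remaining entries are
    # illustrative assumptions only
    scalers = {
        'HQC': 'HighQualityCubic',
        'C': 'Cubic',        # assumed
        'B': 'Bilinear',     # assumed
    }
    return scalers.get( version, 'Bilinear' )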
BIN bin/lib/__pycache__/edi.cpython-311.pyc (new file; binary file not shown)
bin/lib/edi.py (new file, 154 lines)
@@ -0,0 +1,154 @@
+import cv2
+import numpy as np
+from matplotlib import pyplot as plt
+import math
+import sys
+
+"""
+Author:
+
+hu.leying@columbia.edu
+
+Usage:
+
+EDI_predict(img, m, s)
+
+# img is the input image
+# m is the sampling window size, not scaling factor! The larger the m, more blurry the image. Ideal m >= 4.
+# s is the scaling factor, support any s > 0 (e.g. use s=2 to upscale by 2, use s=0.5 to downscale by 2)
+
+If you want to directly call EDI_upscale to upscale image by the scale of 2:
+
+EDI_upscale(img, m)
+
+# m should be the power of 2. Will increment by 1 if input m is odd
+
+If you want to directly call EDI_downscale to downscale image by the scale of 2:
+
+EDI_downscale(img)
+
+"""
+
+def EDI_downscale(img):
+
+    # initializing downgraded image
+    w, h = img.shape
+    imgo2 = np.zeros((w//2, h//2))
+
+    # downgrading image
+    for i in range(w//2):
+        for j in range(h//2):
+            imgo2[i][j] = int(img[2*i][2*j])
+
+    return imgo2.astype(img.dtype)
+
+def EDI_upscale(img, m):
+
+    # m should be equal to a power of 2
+    if m%2 != 0:
+        m += 1
+
+    # initializing image to be predicted
+    w, h = img.shape
+    imgo = np.zeros((w*2,h*2))
+
+    # Place low-resolution pixels
+    for i in range(w):
+        for j in range(h):
+            imgo[2*i][2*j] = img[i][j]
+
+    y = np.zeros((m**2,1)) # pixels in the window
+    C = np.zeros((m**2,4)) # interpolation neighbours of each pixel in the window
+
+    # Reconstruct the points with the form of (2*i+1,2*j+1)
+    for i in range(math.floor(m/2), w-math.floor(m/2)):
+        for j in range(math.floor(m/2), h-math.floor(m/2)):
+            tmp = 0
+            for ii in range(i-math.floor(m/2), i+math.floor(m/2)):
+                for jj in range(j-math.floor(m/2), j+math.floor(m/2)):
+                    y[tmp][0] = imgo[2*ii][2*jj]
+                    C[tmp][0] = imgo[2*ii-2][2*jj-2]
+                    C[tmp][1] = imgo[2*ii+2][2*jj-2]
+                    C[tmp][2] = imgo[2*ii+2][2*jj+2]
+                    C[tmp][3] = imgo[2*ii-2][2*jj+2]
+                    tmp += 1
+
+            # calculating weights
+            # a = (C^T * C)^(-1) * (C^T * y) = (C^T * C) \ (C^T * y)
+            a = np.matmul(np.matmul(np.linalg.pinv(np.matmul(np.transpose(C),C)), np.transpose(C)), y)
+            imgo[2*i+1][2*j+1] = np.matmul([imgo[2*i][2*j], imgo[2*i+2][2*j], imgo[2*i+2][2*j+2], imgo[2*i][2*j+2]], a)
+
+    # Reconstructed the points with the forms of (2*i+1,2*j) and (2*i,2*j+1)
+    for i in range(math.floor(m/2), w-math.floor(m/2)):
+        for j in range(math.floor(m/2), h-math.floor(m/2)):
+            tmp = 0
+            for ii in range(i-math.floor(m/2), i+math.floor(m/2)):
+                for jj in range(j-math.floor(m/2), j+math.floor(m/2)):
+                    y[tmp][0] = imgo[2*ii+1][2*jj-1]
+                    C[tmp][0] = imgo[2*ii-1][2*jj-1]
+                    C[tmp][1] = imgo[2*ii+1][2*jj-3]
+                    C[tmp][2] = imgo[2*ii+3][2*jj-1]
+                    C[tmp][3] = imgo[2*ii+1][2*jj+1]
+                    tmp += 1
+
+            # calculating weights
+            # a = (C^T * C)^(-1) * (C^T * y) = (C^T * C) \ (C^T * y)
+            a = np.matmul(np.matmul(np.linalg.pinv(np.matmul(np.transpose(C),C)), np.transpose(C)), y)
+            imgo[2*i+1][2*j] = np.matmul([imgo[2*i][2*j], imgo[2*i+1][2*j-1], imgo[2*i+2][2*j], imgo[2*i+1][2*j+1]], a)
+            imgo[2*i][2*j+1] = np.matmul([imgo[2*i-1][2*j+1], imgo[2*i][2*j], imgo[2*i+1][2*j+1], imgo[2*i][2*j+2]], a)
+
+    # Fill the rest with bilinear interpolation
+    np.clip(imgo, 0, 255.0, out=imgo)
+    imgo_bilinear = cv2.resize(img, dsize=(h*2,w*2), interpolation=cv2.INTER_LINEAR)
+    imgo[imgo==0] = imgo_bilinear[imgo==0]
+
+    return imgo.astype(img.dtype)
+
+def EDI_predict(img, m, s):
+
+    try:
+        w, h = img.shape
+    except:
+        sys.exit("Error input: Please input a valid grayscale image!")
+
+    output_type = img.dtype
+
+    if s <= 0:
+        sys.exit("Error input: Please input s > 0!")
+
+    elif s == 1:
+        print("No need to rescale since s = 1")
+        return img
+
+    elif s < 1:
+        # Calculate how many times to do the EDI downscaling
+        n = math.floor(math.log(1/s, 2))
+
+        # Downscale to the expected size with linear interpolation
+        linear_factor = 1/s / math.pow(2, n)
+        if linear_factor != 1:
+            img = cv2.resize(img, dsize=(int(h/linear_factor),int(w/linear_factor)), interpolation=cv2.INTER_LINEAR).astype(output_type)
+
+        for i in range(n):
+            img = EDI_downscale(img)
+        return img
+
+    elif s < 2:
+        # Linear Interpolation is enough for upscaling not over 2
+        return cv2.resize(img, dsize=(int(h*s),int(w*s)), interpolation=cv2.INTER_LINEAR).astype(output_type)
+
+    else:
+        # Calculate how many times to do the EDI upscaling
+        n = math.floor(math.log(s, 2))
+        for i in range(n):
+            img = EDI_upscale(img, m)
+
+        # Upscale to the expected size with linear interpolation
+        linear_factor = s / math.pow(2, n)
+        if linear_factor == 1:
+            return img.astype(output_type)
+
+        # Update new shape
+        w, h = img.shape
+        return cv2.resize(img, dsize=(int(h*linear_factor),int(w*linear_factor)), interpolation=cv2.INTER_LINEAR).astype(output_type)
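bin/lib/edi.py implements edge-directed interpolation: for each new pixel it solves a small least-squares problem over an m x m sampling window (a = (C^T C)^+ C^T y) to estimate interpolation weights, then falls back to bilinear interpolation for any pixels left unfilled. A short usage sketch of the API described in the module docstring (the file names and import path are arbitrary examples; test.py simply does 'import edi' from the same directory):

import cv2
import edi  # assumes edi.py is importable, as in bin/lib/test.py

# EDI_predict expects a single-channel (grayscale) image and any scale factor s > 0
gray = cv2.imread( 'input.png', cv2.IMREAD_GRAYSCALE )
up2x = edi.EDI_predict( gray, 4, 2 )     # edge-directed 2x upscale, sampling window m = 4
half = edi.EDI_predict( gray, 4, 0.5 )   # 2x downscale
cv2.imwrite( 'output.png', up2x )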
bin/lib/test.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+import edi
+import numpy as np
+import argparse
+
+ap = argparse.ArgumentParser( description='Testing for edi. NOTE: No error catching!' )
+ap.add_argument( 'inputfile', help='Input file for upscaling' )
+ap.add_argument( 'outputfile', help='Output file' )
+ap.add_argument( '-S', '--scalefactor', help='Scale factor' )
+ap.add_argument( '-a', '--sampling', help='Sampling window size. The bigger, the blurrier. Best >= 4')
+ap.set_defaults( sampling=4 )
+ap.set_defaults( scalefactor=2 )
+
+args = ap.parse_args()
+
+print( edi.EDI_predict( np.load( args.inputfile, allow_pickle=True ), args.sampling, args.scalefactor ) )
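Note that test.py reads its input with np.load, so it expects a NumPy .npy array rather than a png/jpg, and it prints the predicted array instead of writing to outputfile; values supplied via -S/-a arrive as strings (no type= is given), so the integer defaults (scalefactor=2, sampling=4) are the safe path. A possible way to prepare an input for it (file names are arbitrary examples):

import cv2
import numpy as np

# save a grayscale frame as .npy so test.py can np.load it
gray = cv2.imread( 'frame.png', cv2.IMREAD_GRAYSCALE )
np.save( 'frame.npy', gray )
# then, from bin/lib: python test.py frame.npy out.npy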
@@ -15,17 +15,17 @@ import multiprocessing
 allowedFiletypes = [ 'png', 'jpg' ];

 if __name__ == '__main__':
-    ap = argparse.ArgumentParser( description='FSRImageVideoUpscaler - CLI, a CLI application to upscale videos and images using FSR.' )
+    ap = argparse.ArgumentParser( description='ImageVideoUpscaler - CLI, a CLI application to upscale videos and images using FSR.' )
     ap.add_argument( 'inputfile', help='File path for the video / image to be upscaled' )
     ap.add_argument( 'outputfile', help='File path for the video / image that was upscaled' )
     ap.add_argument( '-s', '--scalefactor', help='Scale factor for the video / image. Can be a integer from 1 - 4' )
-    ap.add_argument( '-F', '--filetype', help='Change the file type of the temporary image files. Supports png, jpg. Video quality: png > jpg. Png is default, if not specified.' )
+    ap.add_argument( '-F', '--filetype', help='Change the file type of the temporary image files. Supports png, jpg. Video quality: png > jpg. PNG is default, if not specified.' )
     ap.add_argument( '-S', '--sharpening', help='Sharpening factor (between 0 and 1 whereas 0 means no sharpening, 1 the most sharpening. Recommendation: Do not exceed 0.25, as it often looks bad)' )
-    ap.add_argument( '-N', '--noscaling', help='Do not upscale video, instead only sharpen. Sharpening argument required!', action='store_true' )
     ap.add_argument( '-t', '--threading', help='Use special threading mode with SS scaler (spawns 16 threads upscaling at one time)', action='store_true' )
     ap.add_argument( '-T', '--threads', help='Thread count to use. Cannot exceed CPU thread count. Scaling non-linear (using 2 threads is not exactly 2x the speed of 1 thread). Scales well with FSR, barely with Real-ESRGAN, as it uses mostly the GPU to upscale' )
-    ap.add_argument( '-E', '--engine', help='Upscaling engine. Can be fsr, NN (for NearestNeighbor) or SS (for Real-ESRGAN). FSR tends to be lower quality, but faster, NeirestNeighbour is super fast but very ugly, Real-ESRGAN is meant for anime and is super slow. Defaults to fsr' )
+    ap.add_argument( '-E', '--engine', help='Upscaling engine. Can be fsr, C (for Cubic), HQC (for HighQuality Cubic) or SS (for Real-ESRGAN). FSR tends to be higher, Cubic is quite fast but quite low quality, HighQualityCubic is of higher quality, but slower. Real-ESRGAN is meant for anime and is super slow. Defaults to fsr' )
     ap.add_argument( '-M', '--model', help='Only available if using Real-ESRGAN. Change the ML-Model used to upsample video, can be: realesr-animevideov3 | realesrgan-x4plus-anime , defaults to realesr-animevideov3' )
+    ap.add_argument( '-N', '--noscaling', help='Do not upscale video, instead only sharpen. Sharpening argument required!', action='store_true' )
     args = ap.parse_args()

     handler = bin.handler.Handler()
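With the options above, a typical call after this change would look something like: python3 main.py input.mp4 output.mp4 -s 2 -E HQC -T 8 (the entry-point name main.py is a placeholder; only the flags and the HQC engine code come from this diff).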