Commit · 7d1fb4b
1 Parent(s): d0fe8b3
Removed super resolution model load
app.py CHANGED
@@ -8,13 +8,6 @@ from models.network_swinir import SwinIR as net
 # model load
 param_key_g = 'params_ema'
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-super_res_model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
-                      img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
-                      num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
-                      mlp_ratio=2, upsampler='nearest+conv', resi_connection='3conv')
-super_res_pretrained_model = torch.load("model_zoo/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_PSNR.pth")
-super_res_model.load_state_dict(super_res_pretrained_model[param_key_g] if param_key_g in super_res_pretrained_model.keys() else super_res_pretrained_model, strict=True)
-super_res_model.eval()
 
 fisheye_correction_model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                                img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
@@ -24,7 +17,7 @@ fisheye_correction_pretrained_model = torch.load("model_zoo/003_realSR_BSRGAN_DF
 fisheye_correction_model.load_state_dict(fisheye_correction_pretrained_model[param_key_g] if param_key_g in fisheye_correction_pretrained_model.keys() else fisheye_correction_pretrained_model, strict=True)
 fisheye_correction_model.eval()
 
-def predict(radio_btn, input_img):
+def predict(input_img):
     out = None
 
     # preprocess input
@@ -36,10 +29,7 @@ def predict(radio_btn, input_img):
 
     # inference
     window_size = 8
-
-        model = super_res_model.to(device)
-    else:
-        model = fisheye_correction_model.to(device)
+    model = fisheye_correction_model.to(device)
 
     with torch.no_grad():
         # pad input image to be a multiple of window_size
@@ -88,7 +78,7 @@ def test(model, img_lq):
 gr.Interface(
     fn=predict,
     inputs=[
-        gr.
+        gr.inputs.Image()
     ],
     outputs=[
         gr.inputs.Image()
@@ -96,7 +86,6 @@ gr.Interface(
     title="SwinIR moon distortion",
     description="Description of the app",
     examples=[
-
-        ["Distortion correction", "render0001_DC.png"], ["Distortion correction", "render1546_DC.png"], ["Distortion correction", "render1682_DC.png"]
+        "render0001_DC.png", "render1546_DC.png", "render1682_DC.png"
     ]
 ).launch()
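
The hunks above only show the changed parts of app.py. For context, below is a minimal sketch of what the file plausibly looks like after this commit. It is not the repository's exact code: the preprocessing/postprocessing, the reflection padding to a multiple of window_size, and the inlining of the repo's test(model, img_lq) helper are assumptions following the standard SwinIR demo script, and the output component uses gr.outputs.Image() where the diff itself passes gr.inputs.Image().

# Sketch of app.py after commit 7d1fb4b (assumptions noted above; Gradio 2.x-era API).
import numpy as np
import torch
import gradio as gr
from models.network_swinir import SwinIR as net

# model load
param_key_g = 'params_ema'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

fisheye_correction_model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                               img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
                               num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
                               mlp_ratio=2, upsampler='nearest+conv', resi_connection='3conv')
fisheye_correction_pretrained_model = torch.load("model_zoo/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_PSNR.pth")
fisheye_correction_model.load_state_dict(
    fisheye_correction_pretrained_model[param_key_g]
    if param_key_g in fisheye_correction_pretrained_model.keys()
    else fisheye_correction_pretrained_model,
    strict=True)
fisheye_correction_model.eval()


def predict(input_img):
    # preprocess input (assumed): HxWxC uint8 array from Gradio -> 1xCxHxW float tensor in [0, 1]
    img_lq = input_img.astype(np.float32) / 255.
    img_lq = torch.from_numpy(np.transpose(img_lq, (2, 0, 1))).float().unsqueeze(0).to(device)

    # inference
    window_size = 8
    model = fisheye_correction_model.to(device)

    with torch.no_grad():
        # pad input image to be a multiple of window_size by reflecting the borders
        _, _, h_old, w_old = img_lq.size()
        h_pad = (h_old // window_size + 1) * window_size - h_old
        w_pad = (w_old // window_size + 1) * window_size - w_old
        img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
        img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :w_old + w_pad, :]
        output = model(img_lq)
        output = output[..., :h_old * 4, :w_old * 4]  # crop the padding back (upscale factor 4)

    # postprocess (assumed): 1xCxHxW float in [0, 1] -> HxWxC uint8
    output = output.squeeze().clamp_(0, 1).cpu().numpy()
    out = (np.transpose(output, (1, 2, 0)) * 255.0).round().astype(np.uint8)
    return out


gr.Interface(
    fn=predict,
    inputs=[gr.inputs.Image()],
    outputs=[gr.outputs.Image()],  # the diff passes gr.inputs.Image() here
    title="SwinIR moon distortion",
    description="Description of the app",
    examples=[
        "render0001_DC.png", "render1546_DC.png", "render1682_DC.png"
    ]
).launch()

Net effect of the commit: the Space now serves only the distortion-correction SwinIR checkpoint, so the second model load, the radio-button input, and the branch that picked between the two models are gone, and the example list shrinks to plain image paths.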