updated
- app.py +11 -9
- models/controlnet.py +2 -2
- utils/tools.py +5 -0
app.py
CHANGED
@@ -18,16 +18,16 @@ DEFAULT_NEGATIVE_PROMPT = "worst quality, abstract, clumsy pose, deformed hand,
 
 def ui():
     device = "cuda" if torch.cuda.is_available() else "cpu"
-    # model_file = hf_hub_download(
-    #     repo_id='Lykon/AAM_XL_AnimeMix',
-    #     filename='AAM_XL_Anime_Mix.safetensors',
-    #     cache_dir=CACHE_DIR,
-    # )
     model_file = hf_hub_download(
-        repo_id='Eugeoter/artiwaifu-diffusion-1.0',
-        filename='artiwaifu-diffusion-v1.safetensors',
+        repo_id='Lykon/AAM_XL_AnimeMix',
+        filename='AAM_XL_Anime_Mix.safetensors',
         cache_dir=CACHE_DIR,
     )
+    # model_file = hf_hub_download(
+    #     repo_id='Eugeoter/artiwaifu-diffusion-1.0',
+    #     filename='artiwaifu-diffusion-v1.safetensors',
+    #     cache_dir=CACHE_DIR,
+    # )
     unet_file = hf_hub_download(
         repo_id=REPO_ID,
         filename=UNET_FILENAME,
@@ -63,7 +63,9 @@ def ui():
 
     with gr.Blocks(css=css) as demo:
         gr.Markdown(f"""
-        #
+        # ControlNeXt-SDXL Demo
+        The Gradio has bug currently and is just for demo.
+        More better results please refer to the [official project page](https://github.com/dvlab-research/ControlNeXt).
         """)
         with gr.Row():
             with gr.Column(scale=9):
@@ -180,4 +182,4 @@ def ui():
 
 if __name__ == '__main__':
     demo = ui()
-    demo.queue().launch()
+    demo.queue().launch(server_port=6006)
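The app.py change swaps which checkpoint is fetched at startup (Lykon/AAM_XL_AnimeMix becomes active, the artiwaifu download is commented out), adds a title block to the Gradio demo, and pins the server port. As a minimal standalone sketch of just the download step (CACHE_DIR is an illustrative placeholder here; the real constant is defined elsewhere in app.py):

```python
# Minimal sketch of the checkpoint download this commit switches to.
# CACHE_DIR is an assumed placeholder, not the value used in the app.
from huggingface_hub import hf_hub_download

CACHE_DIR = "./cache"  # assumption: any writable directory

model_file = hf_hub_download(
    repo_id="Lykon/AAM_XL_AnimeMix",
    filename="AAM_XL_Anime_Mix.safetensors",
    cache_dir=CACHE_DIR,
)
print(model_file)  # local path to the downloaded .safetensors file
```

hf_hub_download caches the file under cache_dir, so repeated launches reuse the local copy instead of re-downloading it.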
models/controlnet.py
CHANGED
@@ -353,10 +353,10 @@ class ControlNetModel(ModelMixin, ConfigMixin):
             nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
             nn.GroupNorm(2, 64),
             nn.ReLU(),
-            nn.Conv2d(64, 64, kernel_size=3
+            nn.Conv2d(64, 64, kernel_size=3),
             nn.GroupNorm(2, 64),
             nn.ReLU(),
-            nn.Conv2d(64, 128, kernel_size=3
+            nn.Conv2d(64, 128, kernel_size=3),
             nn.GroupNorm(2, 128),
             nn.ReLU(),
         )
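After this change the two inner convolutions use a bare kernel_size=3, so with no padding each trims 2 pixels per spatial dimension, while only the first convolution (stride 2) downsamples. A standalone reading of the patched block, with an arbitrary assumed input size just to show the resulting shape:

```python
# Sketch of the embedding block as it reads after this change.
# The 224x224 input is an assumption used only to illustrate shapes.
import torch
from torch import nn

embedding = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),  # stride 2: halves H and W
    nn.GroupNorm(2, 64),
    nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=3),    # no padding: H and W shrink by 2
    nn.GroupNorm(2, 64),
    nn.ReLU(),
    nn.Conv2d(64, 128, kernel_size=3),   # no padding: H and W shrink by 2 again
    nn.GroupNorm(2, 128),
    nn.ReLU(),
)

x = torch.randn(1, 3, 224, 224)
print(embedding(x).shape)  # torch.Size([1, 128, 108, 108])
```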
utils/tools.py
CHANGED
@@ -1,5 +1,6 @@
 import os
 import torch
+import gc
 from torch import nn
 from diffusers import UniPCMultistepScheduler, AutoencoderKL
 from safetensors.torch import load_file
@@ -119,6 +120,10 @@ def get_pipeline(
     if enable_xformers_memory_efficient_attention:
         pipeline.enable_xformers_memory_efficient_attention()
 
+    gc.collect()
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+
     return pipeline
 
 
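The utils/tools.py change imports gc and, just before get_pipeline returns, runs a garbage-collection pass and empties the CUDA allocator cache when a GPU is present. A standalone sketch of that cleanup pattern, wrapped in a hypothetical helper (free_memory is not a name from the repository):

```python
# Standalone sketch of the cleanup added at the end of get_pipeline().
# free_memory() is a hypothetical helper name, not part of the repository.
import gc

import torch


def free_memory() -> None:
    """Drop unreachable Python objects, then release cached CUDA blocks."""
    gc.collect()
    if torch.cuda.is_available():
        # Returns cached (unused) blocks to the driver; it does not free
        # tensors that are still referenced.
        torch.cuda.empty_cache()


if __name__ == "__main__":
    free_memory()
```

torch.cuda.empty_cache() only releases memory the caching allocator is holding for reuse, so the main benefit is lower reported VRAM usage between pipeline builds, which can matter when get_pipeline runs more than once in the same process.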