Commit e4b4f364df: INIT
10 changed files with 1379 additions and 0 deletions
.gitignore (vendored, new file)
@@ -0,0 +1,5 @@
input/image_out/*.png
input/image_out/*.jpg
input/assets/*/*

*.DS_Store
Dockerfile (new file)
@@ -0,0 +1,38 @@
################################################################
#
# Project: TTI Function
#
# podman build -t tti-function .
# podman run -v /<path_to_folder>/transcript_in/:/app/input/ -v /<path_to_folder>/transcript_out/:/app/images/ --name tti-function_container --rm -t tti-function
#
################################################################

FROM python:3.11-slim

# Set the working directory
WORKDIR /app

# Create the image, input and task folders
RUN mkdir /app/images
RUN mkdir /app/input
RUN mkdir /app/tasks

# Pre-create the Hugging Face cache layout for Juggernaut-XL-v8 so that
# init.sh can copy the locally downloaded weights into place at startup
RUN mkdir -p /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/text_encoder/
RUN mkdir -p /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/text_encoder_2/
RUN mkdir -p /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/vae/
RUN mkdir -p /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/unet/

# Update the package list
RUN apt-get update

# Install Python dependencies
COPY ./requirements.txt /app
RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org --no-cache-dir --upgrade -r requirements.txt

# Copy the application files into the image
COPY ./runner.py /app
COPY ./init.sh /app
RUN chmod +x /app/init.sh
COPY ./styles.json /app

CMD ["bash", "init.sh"]
README.md (new file)
@@ -0,0 +1,18 @@
# TTI-Function
With the Text-to-Image (TTI) Function you can create images from a text prompt.
Internally, the Juggernaut-XL-v8 model (https://huggingface.co/RunDiffusion/Juggernaut-XL-v8) is used to generate the image.


## Structure
* The container has two folders attached: the input folder and the output folder.
The input folder holds the .json files that describe the images to be created.
The output folder is the location where the generated images will be stored.
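
For illustration, an input file follows the shape of the included `input/file_queues/sample.json`. Besides the `prompt`, `neg_prompt` and `style` keys, `runner.py` also reads the optional keys `iterations` (number of inference steps, default 40) and `format` (`portrait`, `widescreen`, `photo` or `square`, default `square`):

```
{
    "prompt": "Portrait of a person in business clothes, Office Background",
    "neg_prompt": "",
    "style": "Fooocus Photograph",
    "iterations": 40,
    "format": "square"
}
```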

## Setup
Make sure [Podman](https://podman.io/docs/installation) or [Docker](https://docs.docker.com/get-docker/) is installed.

```
./build_image.sh
podman run -v /path/to/your/input/folder/:/app/input/ -v /path/to/your/output/folder/:/app/images/ --name tti-function_container --rm -t tti-function
```

`./build_image.sh` downloads the required model weights into `./input/assets/` and then builds the `tti-function` image.
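
If you use Docker instead of Podman, the same layout should work with the equivalent commands (a sketch, assuming the image is built locally as `tti-function`; `build_image.sh` itself calls `podman build`):

```
docker build -t tti-function .
docker run -v /path/to/your/input/folder/:/app/input/ -v /path/to/your/output/folder/:/app/images/ --name tti-function_container --rm -t tti-function
```

The generated images appear in the mounted output folder as UUID-named .png files.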
build_image.sh (new executable file)
@@ -0,0 +1,19 @@
#!/bin/bash

# VAE (319 MB)
mkdir -p ./input/assets/vae/
wget -nc "https://huggingface.co/RunDiffusion/Juggernaut-X-v10/resolve/main/vae/diffusion_pytorch_model.bin" -O ./input/assets/vae/diffusion_pytorch_model.bin

# UNet (large, 9.6 GB)
mkdir -p ./input/assets/unet/
wget -nc "https://huggingface.co/RunDiffusion/Juggernaut-X-v10/resolve/main/unet/diffusion_pytorch_model.bin" -O ./input/assets/unet/diffusion_pytorch_model.bin

# Text encoder 2 (2.78 GB)
mkdir -p ./input/assets/text_encoder_2/
wget -nc "https://huggingface.co/RunDiffusion/Juggernaut-X-v10/resolve/main/text_encoder_2/pytorch_model.bin" -O ./input/assets/text_encoder_2/pytorch_model.bin

# Text encoder (492 MB)
mkdir -p ./input/assets/text_encoder/
wget -nc "https://huggingface.co/RunDiffusion/Juggernaut-X-v10/resolve/main/text_encoder/pytorch_model.bin" -O ./input/assets/text_encoder/pytorch_model.bin

podman build -t tti-function .
images/.gitkeep (new empty file)
init.sh (new file)
@@ -0,0 +1,16 @@
#!/bin/bash
env >> /etc/environment

# Copy the models/artefacts to the locations expected by the Hugging Face cache
cp /app/input/assets/unet/diffusion_pytorch_model.bin /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/unet/diffusion_pytorch_model.bin
cp /app/input/assets/vae/diffusion_pytorch_model.bin /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/vae/diffusion_pytorch_model.bin

cp /app/input/assets/text_encoder_2/pytorch_model.bin /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/text_encoder_2/pytorch_model.bin
cp /app/input/assets/text_encoder/pytorch_model.bin /root/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v8/snapshots/9022a900377ce2d3303d3e6d86b09f6874e1e2a7/text_encoder/pytorch_model.bin

# Run the application: process each queued task one at a time
for file in /app/input/file_queues/*.json; do
    cp "$file" /app/tasks/current_task.json
    /usr/local/bin/python /app/runner.py
    rm /app/tasks/current_task.json
done
input/file_queues/sample.json (new file)
@@ -0,0 +1,6 @@
{
    "prompt": "Portrait of a person in business clothes, Looking at the camera, Smiling, Centered, Symmetric, Office Background, looking engaged, dynamic, casual chic",
    "neg_prompt": "",
    "style": "Fooocus Photograph",
    "aspect_ratio": "square"
}
requirements.txt (new file)
@@ -0,0 +1,4 @@
diffusers
transformers
accelerate
torch
runner.py (new file)
@@ -0,0 +1,131 @@
import sys
import os
import uuid
import json
import logging
import traceback
from datetime import datetime as dt

import torch
from diffusers import DiffusionPipeline

# Set up logging to a dated file and to stdout
logging.basicConfig(
    level=logging.DEBUG,
    # level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler("/var/log/" + str(dt.today().strftime('%Y-%m-%d')) + "_-_cron.log"),
        logging.StreamHandler(sys.stdout)
    ]
)

try:
    for root, dirs, files in os.walk('/app/tasks'):
        for file in files:
            try:
                with open(os.path.join(root, file)) as f:
                    d = json.load(f)
                print(json.dumps(d, indent=4, sort_keys=True))

                prompt = d["prompt"]
                neg_prompt = d["neg_prompt"]
                style = d["style"]

                if "iterations" in d:
                    iterations = int(d["iterations"])
                else:
                    iterations = 40

                if "format" in d:
                    format = d["format"]
                else:
                    format = "square"

                match format.lower():
                    case "portrait":
                        format_width = 915
                        format_height = 1144
                    case "widescreen":
                        format_width = 1344
                        format_height = 768
                    case "photo":  # Photo (4x3)
                        format_width = 1182
                        format_height = 886
                    case "square":
                        format_width = 1024
                        format_height = 1024
                    case _:  # fall back to square for unrecognised values
                        format_width = 1024
                        format_height = 1024

                # For debugging, decrease the image size
                # scale_factor = 0.70  # Error: Image shredded
                # scale_factor = 0.68  # Error: Image shredded
                scale_factor = 0.65  # Working on MacBook Pro (M3 - Nov. 2023)

                # Scale the dimensions and round them to multiples of 8, as required by the VAE
                format_width = int(round(round(format_width * scale_factor) / 8) * 8)
                format_height = int(round(round(format_height * scale_factor) / 8) * 8)

                # Extend the prompts with the selected style from styles.json
                with open('/app/styles.json') as f:
                    for entry in json.load(f):
                        if entry['style'] == style:
                            prompt += ", " + entry['prompt_extension']
                            neg_prompt += "extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs"
                            neg_prompt += entry['negative_prompt']

                base = DiffusionPipeline.from_pretrained(
                    "RunDiffusion/Juggernaut-XL-v8"
                ).to("cpu")
                refiner = DiffusionPipeline.from_pretrained(
                    "RunDiffusion/Juggernaut-XL-v8",
                    text_encoder_2=base.text_encoder_2,
                    vae=base.vae,
                ).to("cpu")

                n_steps = iterations
                high_noise_frac = 0.8

                # The base pass produces latents; the refiner pass finishes the remaining steps
                image = base(
                    prompt=prompt,
                    generator=torch.Generator(device="cpu").manual_seed(torch.Generator(device="cpu").seed()),
                    # generator=torch.Generator(device="cpu").manual_seed(420),
                    negative_prompt=neg_prompt,
                    num_inference_steps=n_steps,
                    denoising_end=high_noise_frac,
                    width=format_width,
                    height=format_height,
                    original_size=(format_width, format_height),
                    target_size=(format_width, format_height),
                    output_type="latent",
                ).images
                image = refiner(
                    prompt=prompt,
                    generator=torch.Generator(device="cpu").manual_seed(torch.Generator(device="cpu").seed()),
                    # generator=torch.Generator(device="cpu").manual_seed(420),
                    negative_prompt=neg_prompt,
                    num_inference_steps=n_steps,
                    denoising_start=high_noise_frac,
                    width=format_width,
                    height=format_height,
                    original_size=(format_width, format_height),
                    target_size=(format_width, format_height),
                    image=image,
                ).images[0]

                img_uuid = str(uuid.uuid4())
                file_name = '/app/images/' + img_uuid + '.png'
                image.save(file_name)

                logging.debug("Image generated and written to file: " + file_name)
                sys.exit(0)

            except Exception as e:
                logging.debug("There was an error: " + str(e))
                logging.debug("Stacktrace: " + str(traceback.format_exc()))
                logging.debug("Skipping iteration!!!")
                continue

except Exception as e:
    logging.debug("There was an error: " + str(e))
    logging.debug("Stacktrace: " + str(traceback.format_exc()))

finally:
    sys.exit(0)
styles.json (new file, 1142 lines)
File diff suppressed because it is too large.