"""Cron job: render one queued text-to-image task with SDXL (Juggernaut-XL).

Scans /app/tasks for JSON task files, augments the prompt from
/app/styles.json, runs a base+refiner diffusers pipeline on CPU and writes
the result to /app/images/<uuid>.png. Processes at most ONE task per
invocation (exits after the first successful image).
"""

import json
import logging
import os
import sys
import traceback
import uuid
from datetime import datetime as dt

import torch
from diffusers import DiffusionPipeline

# Supported output formats -> base (width, height) in pixels.
# Unknown formats fall back to "square" instead of crashing with a
# NameError (the original match statement had no default case).
_FORMAT_DIMENSIONS = {
    "portrait": (915, 1144),
    "widescreen": (1344, 768),
    "photo": (1182, 886),  # Photo (4x3)
    "square": (1024, 1024),
}

# Quality boilerplate appended to every task's negative prompt.
_DEFAULT_NEGATIVE = (
    "extra fingers, mutated hands, poorly drawn hands, poorly drawn face, "
    "deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, "
    "glitchy, double torso, extra arms, extra hands, mangled fingers, "
    "missing lips, ugly face, distorted face, extra legs"
)


def _fresh_generator() -> torch.Generator:
    """Return a CPU generator freshly seeded from system entropy."""
    g = torch.Generator(device="cpu")
    g.seed()
    # For reproducible debugging use a fixed seed instead:
    # g.manual_seed(420)
    return g


# Setup logging: one file per day plus stdout (picked up by cron/journal).
logging.basicConfig(
    level=logging.DEBUG,
    # level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler(
            "/var/log/" + dt.today().strftime("%Y-%m-%d") + "_-_cron.log"
        ),
        logging.StreamHandler(sys.stdout),
    ],
)

try:
    for root, _dirs, files in os.walk("/app/tasks"):
        for file in files:
            try:
                with open(os.path.join(root, file)) as f:
                    d = json.load(f)
                print(json.dumps(d, indent=4, sort_keys=True))

                # Required task fields (KeyError here skips the task below).
                prompt = d["prompt"]
                neg_prompt = d["neg_prompt"]
                style = d["style"]
                # Optional fields with defaults.
                iterations = int(d.get("iterations", 40))
                img_format = d.get("format", "square")  # don't shadow builtin `format`

                fmt_key = img_format.lower()
                if fmt_key not in _FORMAT_DIMENSIONS:
                    logging.warning("Unknown format %r, falling back to 'square'", img_format)
                    fmt_key = "square"
                format_width, format_height = _FORMAT_DIMENSIONS[fmt_key]

                # For Debugging decrease image size
                # scale_factor = 0.70 # Error: Image shredded
                # scale_factor = 0.68 # Error: Image shredded
                scale_factor = 0.65  # Working on MacBook Pro (M3 - Nov. 2023)
                # Scaled dimensions must stay multiples of 8 for the SDXL VAE.
                format_width = int(round(round(format_width * scale_factor) / 8) * 8)
                format_height = int(round(round(format_height * scale_factor) / 8) * 8)

                with open("/app/styles.json") as f:
                    for entry in json.load(f):
                        if entry["style"] == style:
                            prompt += ", " + entry["prompt_extension"]
                            # Join fragments with explicit ", " separators —
                            # the original concatenated them directly, fusing
                            # the last/first words of adjacent fragments.
                            neg_prompt += ", " + _DEFAULT_NEGATIVE
                            neg_prompt += ", " + entry["negative_prompt"]

                # SDXL ensemble-of-experts: the base model denoises the first
                # `high_noise_frac` of the schedule and hands the latents to
                # the refiner. The refiner reuses the base's second text
                # encoder and VAE to save memory.
                base = DiffusionPipeline.from_pretrained(
                    "RunDiffusion/Juggernaut-XL-v8"
                ).to("cpu")
                refiner = DiffusionPipeline.from_pretrained(
                    "RunDiffusion/Juggernaut-XL-v8",
                    text_encoder_2=base.text_encoder_2,
                    vae=base.vae,
                ).to("cpu")

                n_steps = iterations
                high_noise_frac = 0.8

                image = base(
                    prompt=prompt,
                    generator=_fresh_generator(),
                    negative_prompt=neg_prompt,
                    num_inference_steps=n_steps,
                    denoising_end=high_noise_frac,
                    width=format_width,
                    height=format_height,
                    original_size=(format_width, format_height),
                    target_size=(format_width, format_height),
                    output_type="latent",  # stay in latent space for the refiner
                ).images
                image = refiner(
                    prompt=prompt,
                    generator=_fresh_generator(),
                    negative_prompt=neg_prompt,
                    num_inference_steps=n_steps,
                    denoising_start=high_noise_frac,
                    width=format_width,
                    height=format_height,
                    original_size=(format_width, format_height),
                    target_size=(format_width, format_height),
                    image=image,
                ).images[0]

                file_name = "/app/images/" + str(uuid.uuid4()) + ".png"
                image.save(file_name)
                logging.info("Image generated and written to file: %s", file_name)
                # One task per cron run: SystemExit is not caught by the
                # `except Exception` handlers, so this unwinds to `finally`.
                sys.exit(0)
            except Exception as e:
                # A bad task file must not kill the whole run — log and move on.
                logging.error("There was an error: %s", e)
                logging.error("Stacktrace: %s", traceback.format_exc())
                logging.error("Skipping iteration!!!")
                continue
except Exception as e:
    logging.error("There was an error: %s", e)
    logging.error("Stacktrace: %s", traceback.format_exc())
finally:
    # NOTE(review): always exits 0, even after errors — this hides failures
    # from cron's exit-status monitoring; confirm that is intentional.
    sys.exit(0)