import json
import os
import shutil
import time
from time import strftime

from PIL import Image
import torch
from torch import autocast
import numpy as np
import piexif
import piexif.helper

from diffusers import (
    # DDIMScheduler,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline
)

# Warn when the GPU has limited VRAM.
t = torch.cuda.get_device_properties(0).total_memory
if t <= 6400000000:
    print("Running with less than 6 GB of GPU memory. Correct operation is not guaranteed.")

# Read the prompt from prompt.txt.
with open("prompt.txt", "r") as file1:
    text = file1.read()
print(text)

# Generating with Stable Diffusion
device = "cuda"
model_path = "CompVis/stable-diffusion-v1-4"

scheduler = PNDMScheduler.from_config(model_path, subfolder="scheduler")
# scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    model_path,
    scheduler=scheduler,
    revision="fp16",
    torch_dtype=torch.float16
).to(device)
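# Attention slicing computes attention in smaller slices, trading some speed for lower peak VRAM.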
pipe.enable_attention_slicing()


def dummy_checker(images, **kwargs):
    return images, False


pipe.safety_checker = dummy_checker

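# dummy_checker bypasses the pipeline's built-in NSFW safety checker, so no generated image is
# filtered out or replaced with a black image.
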

def preprocess(image):
    # Convert a PIL image to a normalized NCHW float tensor in [-1, 1] for the img2img pipeline.
    w, h = image.size
    # resize to integer multiple of 32
    w, h = map(lambda x: x - x % 32, (w, h))
    image = image.resize((w, h), Image.Resampling.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2. * image - 1.

path_img = []

startStrength = 0.86
deltaStrength = 0.02
endStrength = 0.941

startScale = 7.5
deltaScale = 2.5
endScale = 15.0

startSeed = 1022
endSeed = 1024

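# The sweep above yields roughly 5 strengths (0.86-0.94), 4 guidance scales (7.5-15.0) and
# 3 seeds (1022-1024), i.e. about 60 variants per input image (exact counts depend on float accumulation).
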
# Collect every .png under the input directory.
directory_in = "./data/input"
if not os.path.exists(directory_in):
    os.makedirs(directory_in)
for root, subdirectories, files in os.walk(directory_in):
    for filename in files:
        if filename.endswith(".png"):
            path_img.append(os.path.join(root, filename))

print("Found " + str(len(path_img)) + " pictures")
start_time = time.time()

counter = 0
allwork = 0

# Timestamped output directory; store the prompt and a copy of the inputs alongside the results.
directory = "./data/out/" + strftime("%Y-%m-%d_%H-%M-%S") + "/"
if not os.path.exists(directory):
    os.makedirs(directory)
with open(directory + "prompt.txt", 'w') as f:
    f.write(text)
shutil.copytree(directory_in, directory + "input")

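# Each input image gets its own numbered subdirectory (0, 1, 2, ...) holding all of its variants.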
for i in path_img:
    print(i)
    if not os.path.exists(directory + str(counter)):
        os.makedirs(directory + str(counter))

    # Note: the fixed 768x512 resize does not preserve the original aspect ratio.
    init_img = Image.open(i)
    init_img = init_img.resize((768, 512))
    init_image = preprocess(init_img)
    prompt = text

    for seed in range(endSeed - startSeed + 1):
        # One generator per seed; it is reused across the whole guidance/strength sweep below.
        generator = torch.Generator(device=device).manual_seed(startSeed + seed)
        guidance_scale = startScale
        while guidance_scale <= endScale:
            strength = startStrength
            while strength <= endStrength:
                with autocast("cuda"):
                    image = pipe(prompt=prompt, init_image=init_image, strength=strength,
                                 guidance_scale=guidance_scale, generator=generator).images[0]
                image.save(directory + str(counter) + "/" + str(allwork) + ".jpg")

                # Record the generation parameters as JSON in the JPEG's EXIF UserComment.
                exif_dict = piexif.load(
                    directory + str(counter) + "/" + str(allwork) + ".jpg")
                userCommentAsDict = {}
                userCommentAsDict['Seed'] = str(seed + startSeed)
                userCommentAsDict['Strength'] = str(strength)
                userCommentAsDict['Guidance_scale'] = str(guidance_scale)
                user_comment = piexif.helper.UserComment.dump(
                    json.dumps(userCommentAsDict))
                exif_dict["Exif"][piexif.ExifIFD.UserComment] = user_comment
                exif_bytes = piexif.dump(exif_dict)
                piexif.insert(exif_bytes, directory +
                              str(counter) + "/" + str(allwork) + ".jpg")

                allwork += 1
                strength += deltaStrength
            guidance_scale += deltaScale
    counter += 1

print("Made " + str(allwork) + " pictures in " +
      str(time.time() - start_time) + " seconds")
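

# Minimal sketch for reading the stored parameters back later (the helper name is illustrative;
# it only uses the piexif and json calls already imported above).
def read_generation_params(jpg_path):
    # Return the Seed / Strength / Guidance_scale dict stored in the EXIF UserComment.
    exif_data = piexif.load(jpg_path)
    raw = exif_data["Exif"][piexif.ExifIFD.UserComment]
    return json.loads(piexif.helper.UserComment.load(raw))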