Commit 2f7e1bb7 authored by Hyunseok_Sang

Attempted to apply object-oriented design.

- This is a temporary commit to back up the code.
parent f792b862
Showing 784 additions and 2 deletions
sources/
.env
back-end/clothes-detect-test-0cf2de14676d.json
back-end/*.onnx
\ No newline at end of file
*.onnx
google_cloud_vision_key.json
\ No newline at end of file
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "type": "object",
  "properties": {
    "image_url": {
      "type": "string",
      "description": "Source URL of the image to be inserted into the ad banner",
      "examples": [
        "https://image-url"
      ]
    },
    "body_parts": {
      "type": "array",
      "items": {
        "type": "string"
      },
      "description": "(Only face is implemented; the rest are not yet.) Body parts to include in the crop. May contain any of the 33 body landmarks supported by MediaPipe, plus face for the whole face. Default: blank",
      "examples": [
        ["face", "Right_ankle"],
        ["face"],
        ["Left_knee", "Right_knee"]
      ]
    },
    "margins": {
      "type": "object",
      "description": "Gap between a bounding box and the actual crop region",
      "properties": {
        "item_margin": {
          "type": "string",
          "pattern": "^[0-9]+(px|%)$",
          "description": "Gap between the detected item's bounding box and the actual crop region. May be given in px or % form. Default: 0%",
          "examples": [
            "50px",
            "10%"
          ]
        },
        "face_margin": {
          "type": "string",
          "pattern": "^[0-9]+(px|%)$",
          "description": "Gap between the detected face's bounding box and the actual crop region. May be given in px or % form. Default: 10%",
          "examples": [
            "50px",
            "10%"
          ]
        },
        "whole_margin": {
          "type": "string",
          "pattern": "^[0-9]+(px|%)$",
          "description": "Gap between the rectangle enclosing both the clothing item and the selected body parts and the actual crop region. May be given in px or % form. Default: 15%",
          "examples": [
            "50px",
            "10%"
          ]
        }
      }
    },
    "aspect_ratio": {
      "type": ["object", "string"],
      "description": "Desired width/height ratio, or a keyword such as vertical or horizontal.",
      "properties": {
        "mode": {
          "oneOf": [
            {
              "type": "object",
              "properties": {
                "width": {
                  "type": "number"
                },
                "height": {
                  "type": "number"
                }
              }
            },
            {
              "type": "string",
              "enum": ["vertical", "horizontal"]
            }
          ]
        }
      },
      "examples": [
        {
          "width": 16,
          "height": 9
        },
        "vertical"
      ]
    },
    "subject_location": {
      "type": "string",
      "enum": ["left", "right", "top", "bottom", "center"],
      "description": "Where the minimal region containing the clothing item and the selected body parts will be placed."
    },
    "return_type": {
      "type": "string",
      "enum": ["pixel", "px", "norm", "normalized"],
      "description": "Coordinate format of the response. For pixel, coordinates are returned as ((left, top), (right, bottom)) in pixels; for norm, they are normalized to [0, 1]."
    },
    "return_image": {
      "type": "boolean",
      "description": "If true, the image itself is returned; if false, only the coordinates. Default: false"
    }
  },
  "required": ["image_url"],
  "additionalProperties": true,
  "description": "Request body describing the desired cropped image."
}
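For reference, a request body conforming to this schema could look like the following sketch (all values are illustrative, not taken from the commit):

{
  "image_url": "https://example.com/banner-source.jpg",
  "body_parts": ["face"],
  "margins": {
    "item_margin": "0%",
    "face_margin": "10%",
    "whole_margin": "15%"
  },
  "aspect_ratio": { "width": 16, "height": 9 },
  "subject_location": "center",
  "return_type": "pixel",
  "return_image": false
}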
back-end/oop/1679752285000-PtrdXn.jpg

53.4 KiB

File added
{
  "seller": "ssfshop",
  "brand": "beanpole",
  "product_list": [
    {
      "name": "남녀공용 베이직 피케 티셔츠 - 블랙",
      "url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/15/GM0023021581678_0_ORGINL_20230327140102442.jpg",
      "type_of_clothes": "top"
    }
  ]
}
\ No newline at end of file
File moved
import sys
import json

import temp

with open(sys.argv[1], 'r') as file:
    shoppingmall_data = json.load(file)

seller_brand_path = shoppingmall_data["seller"] + "/" + shoppingmall_data["brand"]

imageDownloader = temp.ImageDownloader()
imageFactory = temp.ImageFactory()
imageProcessor = temp.ImageProcessor()
faceDetector = temp.FaceDetector()
clothesDetector = temp.ClothesDetector()
databaseManager = temp.DatabaseManager()

for product in shoppingmall_data["product_list"]:
    save_path = seller_brand_path + "/" + product["name"]
    image_path = imageDownloader.download_image(product["url"], save_path, "original.jpg")
    image = imageFactory.create_image(image_path, None)
    result_image = imageProcessor.process(image)
    faceData = faceDetector.detectFace(image)
    face = faceData.getFace(1)
    clothesDataList = clothesDetector.localize_objects(image)
    clothes = clothesDataList.getClothes(product["type_of_clothes"])
    clothes.denormalizeByImageSize(image)
    databaseManager.insertToClothesDataCollection(image, result_image, face, clothes)
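A minimal usage sketch for the pipeline script above (the file names pipeline.py and input.json are assumptions for illustration, not part of the commit):

# Hypothetical driver input; assumes the script above is saved as
# pipeline.py next to temp.py, then run as: python pipeline.py input.json
import json

sample = {
    "seller": "ssfshop",
    "brand": "beanpole",
    "product_list": [
        {
            "name": "sample product",            # hypothetical entry
            "url": "https://example.com/a.jpg",  # hypothetical URL
            "type_of_clothes": "top"
        }
    ]
}
with open("input.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False, indent=2)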
import requests
import cv2
import numpy as np
import onnxruntime
import os
import subprocess

import temp


class ImageDownloader:
    def download_image(self, url, folder_path, file_name):
        response = requests.get(url)
        if response.status_code == 200:
            os.makedirs(folder_path, exist_ok=True)
            image_path = folder_path + "/" + file_name
            with open(image_path, 'wb') as file:
                file.write(response.content)
            return image_path


class Image:
    def __init__(self, filepath, cv_image):
        self.filepath = filepath
        if cv_image is not None:
            self.cv_image = cv_image
        else:
            self.cv_image = cv2.imread(filepath)
        self.height = self.cv_image.shape[0]
        self.width = self.cv_image.shape[1]


class ImageFactory:
    def create_image(self, filepath, cv_image):
        image = Image(filepath, cv_image)
        return image
class Mask(Image):
    def __init__(self, filepath, cv_image):
        super().__init__(filepath, cv_image)
        smallest_box = self.calculate_smallest_box(self.cv_image)
        self.smallest_box_x = smallest_box[0]
        self.smallest_box_y = smallest_box[1]
        self.smallest_box_width = smallest_box[2]
        self.smallest_box_height = smallest_box[3]

    def calculate_smallest_box(self, mask):
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Merge the contours of all objects into a single array
        all_contours = np.concatenate(contours)
        # Compute the smallest upright rectangle enclosing the contours
        object_x, object_y, object_w, object_h = cv2.boundingRect(all_contours)
        return (object_x, object_y, object_w, object_h)


class MaskGenerator:
    def create_mask(self, image):
        model = onnxruntime.InferenceSession('unet.onnx')
        mask = cv2.resize(image.cv_image, (320, 320))
        mask = mask.transpose((2, 0, 1))        # reorder channels to CHW
        mask = mask.astype(np.float32) / 255.0  # normalize to [0, 1]
        mask = np.expand_dims(mask, axis=0)     # add batch dimension
        # Model inference
        input_name = model.get_inputs()[0].name
        output_name = model.get_outputs()[0].name
        mask = model.run([output_name], {input_name: mask})[0]
        # Post-processing
        mask = mask[0, 0, :, :]  # drop batch and channel dimensions
        mask = cv2.resize(mask, (image.width, image.height))  # restore original size; this mask is later used to select the expansion region
        mask = (mask > 0.5).astype(np.uint8) * 255  # binarize
        cv2.imwrite('mask.png', mask)
        mask_obj = Mask(None, mask)
        return mask_obj
class PaddedImage(Image):
    def __init__(self, filepath, cv_image, offset, ratio):
        super().__init__(filepath, cv_image)
        self.x_offset = offset[0]
        self.y_offset = offset[1]
        self.ratio = ratio


class PaddedImageFactory:
    def create_padded_image(self, image, mask, ratio):
        # Find the longer side of the object
        if mask.smallest_box_width > mask.smallest_box_height:
            longer = mask.smallest_box_width
        else:
            longer = mask.smallest_box_height
        transparent_bordered_image = cv2.cvtColor(image.cv_image, cv2.COLOR_RGB2RGBA)
        # Scale so the person object's longer side occupies about ratio% of the
        # 1024x1024 canvas; e.g. with ratio=55 the scale factor is 550/longer,
        # so the longer side becomes roughly 550 px.
        new_width, new_height = int(image.width * ratio * 10 / longer), int(image.height * ratio * 10 / longer)
        transparent_bordered_image = cv2.resize(transparent_bordered_image, (new_width, new_height))
        transparent_image = np.zeros((1024, 1024, 4), dtype=np.uint8)
        x_offset = int((transparent_image.shape[1] - transparent_bordered_image.shape[1]) / 2)
        y_offset = int((transparent_image.shape[0] - transparent_bordered_image.shape[0]) / 2)
        transparent_image[y_offset:y_offset + transparent_bordered_image.shape[0], x_offset:x_offset + transparent_bordered_image.shape[1]] = transparent_bordered_image
        transparent_bordered_image = transparent_image
        padded_image = PaddedImage(None, transparent_bordered_image, (x_offset, y_offset), ratio)
        return padded_image
class DallEImage(PaddedImage):
    def __init__(self, filepath, cv_image, offset, ratio):
        super().__init__(filepath, cv_image, offset, ratio)


class DallEImageGenerator:
    def createDallEImage(self, imageDownloader, padded_image, image_path):
        if not isinstance(padded_image, PaddedImage):
            raise ValueError("Invalid input. Only objects of class PaddedImage are allowed.")
        from dotenv import load_dotenv
        import openai
        load_dotenv()
        openai.organization = os.getenv("ORG_ID")
        openai.api_key = os.getenv("API_KEY")
        cv2.imwrite("temp.png", padded_image.cv_image)
        outpainted = openai.Image.create_edit(
            image=open("temp.png", "rb"),
            prompt="photo of person",
            n=1,
            size="1024x1024"
        )
        image_url = outpainted.data[0]['url']
        folder_path = os.path.dirname(image_path)
        dallE_image_path = imageDownloader.download_image(image_url, folder_path, "DallE.png")
        dallE_CV_image = cv2.imread(dallE_image_path)
        dallE_obj = DallEImage(dallE_image_path, dallE_CV_image, (padded_image.x_offset, padded_image.y_offset), padded_image.ratio)
        return dallE_obj
class FeatheredImage(Image):
    def __init__(self, filepath, cv_image):
        super().__init__(filepath, cv_image)


class FeatheredImageFactory:
    def applyFeather(self, image):
        feather = cv2.cvtColor(image.cv_image, cv2.COLOR_BGR2BGRA)
        if image.height >= image.width:
            border_size = int(0.05 * image.height)
        else:
            border_size = int(0.05 * image.width)
        # Fade the alpha channel linearly along each edge
        for i in range(border_size):
            feather[i, :, 3] = int(255 * i / border_size)
            feather[-i, :, 3] = int(255 * i / border_size)
            feather[:, i, 3] = int(255 * i / border_size)
            feather[:, -i, 3] = int(255 * i / border_size)
        # Clear the corner squares, then redraw them as quarter-circle gradients
        feather[:border_size, :border_size, 3] = 0
        feather[:border_size, -border_size:, 3] = 0
        feather[-border_size:, -border_size:, 3] = 0
        feather[-border_size:, :border_size, 3] = 0
        for radius in range(0, border_size):
            for angle in range(0, 90 + 1):  # bottom-right corner
                radian = np.deg2rad(angle)
                x = int(image.width - border_size + radius * np.cos(radian))
                y = int(image.height - border_size + radius * np.sin(radian))
                feather[y, x][3] = int(255 - 255 * radius / border_size)
            for angle in range(90, 180 + 1):  # bottom-left corner
                radian = np.deg2rad(angle)
                x = int(border_size + radius * np.cos(radian))
                y = int(image.height - border_size + radius * np.sin(radian))
                feather[y, x][3] = int(255 - 255 * radius / border_size)
            for angle in range(180, 270 + 1):  # top-left corner
                radian = np.deg2rad(angle)
                x = int(border_size + radius * np.cos(radian))
                y = int(border_size + radius * np.sin(radian))
                feather[y, x][3] = int(255 - 255 * radius / border_size)
            for angle in range(270, 360 + 1):  # top-right corner
                radian = np.deg2rad(angle)
                x = int(image.width - border_size + radius * np.cos(radian))
                y = int(border_size + radius * np.sin(radian))
                feather[y, x][3] = int(255 - 255 * radius / border_size)
        feathered_image_obj = FeatheredImage(None, feather)
        return feathered_image_obj
class AlphaCompositer:
    def alphaCompositingWithResizing(self, image, dallE_image):
        inner_width = dallE_image.width - 2 * dallE_image.x_offset
        inner_height = dallE_image.height - 2 * dallE_image.y_offset
        cv2.imwrite("temp_feathered.png", image.cv_image)
        # Upscale the DALL-E result so its inner (non-padded) region matches the original image size
        new_length = int(image.width / inner_width * dallE_image.width)
        result = cv2.resize(dallE_image.cv_image, (new_length, new_length))
        cv2.imwrite("temp_alpha.png", result)
        x_offset = int((new_length - image.width) / 2)
        y_offset = int((new_length - image.height) / 2)
        print(x_offset, y_offset)
        import platform
        system = platform.system()
        if system == 'Linux':
            magick_command = "./magick.appimage"
        elif system == 'Windows':
            magick_command = "magick"
        subprocess.run([magick_command, "composite", "-geometry", "+" + str(x_offset) + "+" + str(y_offset), "temp_feathered.png", "temp_alpha.png", "temp_alpha.png"])
        cv_image_composition = cv2.imread("temp_alpha.png")
        AlphaComposited_obj = PaddedImage("temp_alpha.png", cv_image_composition, (x_offset, y_offset), dallE_image.ratio)
        return AlphaComposited_obj


class SystemChecker:
    def __init__(self):
        import platform
        self.system = platform.system()

    def returnMagickCommand(self):
        if self.system == 'Linux':
            magick_command = "./magick.appimage"
        elif self.system == 'Windows':
            magick_command = "magick"
        return magick_command
class ImageChopper:
    def chopInvadingBorderUsingMask(self, paddedImage, mask, image_path):
        print(paddedImage.x_offset, paddedImage.y_offset, mask.smallest_box_x, mask.smallest_box_y, mask.smallest_box_width, mask.smallest_box_height)
        object_move_x = paddedImage.x_offset
        object_move_y = paddedImage.y_offset
        magick_command = SystemChecker().returnMagickCommand()
        # If the object touches a border of the original image, chop the padding off that side
        if mask.smallest_box_x == 0:
            subprocess.run([magick_command, "convert", "temp_alpha.png", "-gravity", "west", "-chop", (str(paddedImage.x_offset) + "x" + "0"), "temp_alpha.png"])
            object_move_x = 0
        if mask.smallest_box_y == 0:
            subprocess.run([magick_command, "convert", "temp_alpha.png", "-gravity", "north", "-chop", ("0" + "x" + str(paddedImage.y_offset)), "temp_alpha.png"])
            object_move_y = 0
        if mask.smallest_box_x + mask.smallest_box_width + 10 >= mask.width:
            subprocess.run([magick_command, "convert", "temp_alpha.png", "-gravity", "east", "-chop", (str(paddedImage.x_offset) + "x" + "0"), "temp_alpha.png"])
        if mask.smallest_box_y + mask.smallest_box_height + 10 >= mask.height:
            subprocess.run([magick_command, "convert", "temp_alpha.png", "-gravity", "south", "-chop", ("0" + "x" + str(paddedImage.y_offset)), "temp_alpha.png"])
        folder_path = os.path.dirname(image_path)
        file_path = folder_path + "/result.png"
        os.rename("temp_alpha.png", file_path)
        result_cv_image = cv2.imread(file_path)
        result = PaddedImage(file_path, result_cv_image, (object_move_x, object_move_y), None)
        return result
class Face:
    def __init__(self, left, top, right, bottom) -> None:
        self.left = left
        self.top = top
        self.right = right
        self.bottom = bottom


class FaceDataList:
    def __init__(self, face_dict_data) -> None:
        self.data = face_dict_data

    def getFace(self, face_number):
        if type(self.data) is tuple:
            # RetinaFace returns a tuple instead of a dict when no face is detected
            return (None, None)
        face_location = self.data["face_" + str(face_number)]["facial_area"]
        return Face(face_location[0], face_location[1], face_location[2], face_location[3])


class FaceDetector:
    def detectFace(self, image):
        from retinaface import RetinaFace
        faceDataList = FaceDataList(RetinaFace.detect_faces(image.filepath))
        return faceDataList


class BoxDrawer:
    def drawBox(self, image, left_top, right_bottom):
        if left_top is None:
            print("there is no face")
            return
        face_bounding_box = image.cv_image.copy()
        cv2.rectangle(face_bounding_box, left_top, right_bottom, (0, 255, 0), 2)
        cv2.imwrite(os.path.dirname(image.filepath) + "/" + 'face_bounding_box.jpg', face_bounding_box)
class ImageProcessor:
    def __init__(self):
        self.maskGenerator = temp.MaskGenerator()
        self.paddedImageFactory = temp.PaddedImageFactory()
        self.dallEImageGenerator = temp.DallEImageGenerator()
        self.featheredImageFactory = temp.FeatheredImageFactory()
        self.alphaCompositer = temp.AlphaCompositer()
        self.imageChopper = temp.ImageChopper()
        self.imageDownloader = temp.ImageDownloader()

    def process(self, image):
        mask = self.maskGenerator.create_mask(image)
        padded_image = self.paddedImageFactory.create_padded_image(image, mask, 55)
        dallE_image = self.dallEImageGenerator.createDallEImage(self.imageDownloader, padded_image, image.filepath)
        feathered_image = self.featheredImageFactory.applyFeather(image)
        resized_outpainted_image = self.alphaCompositer.alphaCompositingWithResizing(feathered_image, dallE_image)
        result_image = self.imageChopper.chopInvadingBorderUsingMask(resized_outpainted_image, mask, image.filepath)
        return result_image
class ClothesDetector:
    def localize_objects(self, image):
        from google.cloud import vision
        client = vision.ImageAnnotatorClient()
        with open(image.filepath, 'rb') as image_file:
            content = image_file.read()
        image = vision.Image(content=content)
        objects = client.object_localization(image=image).localized_object_annotations
        return ClothesObjectsList(objects)


class Clothes:
    def __init__(self, type_of_clothes, left, top, right, bottom) -> None:
        self.type_of_clothes = type_of_clothes
        self.normalized_left = left
        self.normalized_top = top
        self.normalized_right = right
        self.normalized_bottom = bottom
        self.left = None
        self.top = None
        self.right = None
        self.bottom = None

    def denormalizeByImageSize(self, image):
        self.left = int(self.normalized_left * image.width)
        self.top = int(self.normalized_top * image.height)
        self.right = int(self.normalized_right * image.width)
        self.bottom = int(self.normalized_bottom * image.height)


class ClothesObjectsList:
    def __init__(self, objects) -> None:
        self.objects = objects

    def getClothes(self, type_of_clothes):
        for object_ in self.objects:
            if object_.name == type_of_clothes:
                break
        # Vertices 0 and 2 of the bounding polygon are its top-left and bottom-right corners
        clothes_data_normalized_vertices = object_.bounding_poly.normalized_vertices
        normalized_clothes_left = clothes_data_normalized_vertices[0].x
        normalized_clothes_top = clothes_data_normalized_vertices[0].y
        normalized_clothes_right = clothes_data_normalized_vertices[2].x
        normalized_clothes_bottom = clothes_data_normalized_vertices[2].y
        return Clothes(type_of_clothes, normalized_clothes_left, normalized_clothes_top, normalized_clothes_right, normalized_clothes_bottom)
class DatabaseManager:
    def __init__(self):
        from pymongo import MongoClient
        self.client = MongoClient('mongodb://localhost:27017/')

    def insertToClothesDataCollection(self, originalImage, finalImage, face, clothes):
        db = self.client['fashionImageTest']
        collection = db['clothesData']
        inserted_data = {
            "original_image_path": originalImage.filepath,
            "dalle_image_path": finalImage.filepath,
            "dalle_image_width": finalImage.width,
            "dalle_image_height": finalImage.height,
            "x_offset": finalImage.x_offset,
            "y_offset": finalImage.y_offset,
            "face": {
                "left_line_pixel": face.left + finalImage.x_offset,
                "right_line_pixel": face.right + finalImage.x_offset,
                "top_line_pixel": face.top + finalImage.y_offset,
                "bottom_line_pixel": face.bottom + finalImage.y_offset,
            },
            "clothes": {
                "type_of_clothes": clothes.type_of_clothes,
                "left_line_pixel": clothes.left + finalImage.x_offset,
                "right_line_pixel": clothes.right + finalImage.x_offset,
                "top_line_pixel": clothes.top + finalImage.y_offset,
                "bottom_line_pixel": clothes.bottom + finalImage.y_offset,
            }
        }
        insert_result = collection.insert_one(inserted_data)
        print(f"Inserted document ID: {insert_result.inserted_id}")
\ No newline at end of file
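For reference, a sketch of the document shape that insertToClothesDataCollection writes to MongoDB (field names follow the code above; every value here is a hypothetical placeholder):

# Hypothetical stored document; all values are placeholders, not real results.
example_document = {
    "original_image_path": "ssfshop/beanpole/<product>/original.jpg",
    "dalle_image_path": "ssfshop/beanpole/<product>/result.png",
    "dalle_image_width": 1024,
    "dalle_image_height": 1024,
    "x_offset": 137,
    "y_offset": 0,
    "face": {"left_line_pixel": 450, "right_line_pixel": 560,
             "top_line_pixel": 120, "bottom_line_pixel": 260},
    "clothes": {"type_of_clothes": "top",
                "left_line_pixel": 300, "right_line_pixel": 720,
                "top_line_pixel": 280, "bottom_line_pixel": 700}
}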
import temp

imageFactory = temp.ImageFactory()
#faceDetector = temp.FaceDetector()
#boxDrawer = temp.BoxDrawer()
clothesDetector = temp.ClothesDetector()

imagepath = "/home/sang/automated_image_processing/automated_image_processing/automated_image_processing/back-end/oop/ssfshop/beanpole/남녀공용 베이직 피케 티셔츠 - 블랙/DallE.png"
image = imageFactory.create_image(imagepath, None)
clothesObjectsList = clothesDetector.localize_objects(image)
clothes = clothesObjectsList.getClothes("top")
clothes.denormalizeByImageSize(image)  # pixel coordinates stay None until denormalized
print(clothes.left, clothes.top, clothes.right, clothes.bottom)
File added
File added
File added
{
  "seller": "ssfshop",
  "brand": "beanpole",
  "image_list": [
    {
      "product_name": "남녀공용 베이직 피케 티셔츠 - 블랙",
      "image_url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/15/GM0023021581678_0_ORGINL_20230327140102442.jpg",
      "type_of_clothes": "top"
    },
    {
      "product_name": "매직 써커 패턴 슬림핏 팬츠 - 네이비",
      "image_url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/28/GM0023022838172_0_ORGINL_20230302150258661.jpg",
      "type_of_clothes": "pants"
    }
  ]
}
\ No newline at end of file
import requests
import original_image
import os


def download_image(url, folder_path, file_name):
    response = requests.get(url)
    if response.status_code == 200:
        os.makedirs(folder_path, exist_ok=True)
        image_path = folder_path + "/" + file_name
        with open(image_path, 'wb') as file:
            file.write(response.content)
        return image_path
\ No newline at end of file
import image_downloader
import mask_processor
import outpainting_processor
import os
import sys
import json
import cv2

with open(sys.argv[1], 'r') as file:
    shoppingmall_data = json.load(file)

ratio = 0  # NOTE: passed through to make_transparent_border; the OOP pipeline above uses 55
new_folder = shoppingmall_data["seller"] + "/" + shoppingmall_data["brand"]
os.makedirs(new_folder, exist_ok=True)

for data in shoppingmall_data["image_list"]:
    image_url = data["image_url"]
    folder_name = new_folder + "/" + data["product_name"]
    original_image_path = image_downloader.download_image(image_url, folder_name, "original.jpg")
    mask = mask_processor.generate_mask_of(original_image_path)
    transparent_bordered_image = outpainting_processor.make_transparent_border(original_image_path, mask, ratio)
    dallE_image_path = outpainting_processor.outpaint_image_with_DallE(transparent_bordered_image, folder_name)
    outpainting_processor.recover_original_image_size(original_image_path, dallE_image_path, transparent_bordered_image, ratio)
\ No newline at end of file
import onnxruntime
import cv2
import numpy as np


def generate_mask_of(original_image_cv):
    if type(original_image_cv) is str:  # handle the case where a file path was passed in
        original_image_cv = cv2.imread(original_image_cv)
    model = onnxruntime.InferenceSession('unet.onnx')
    original_height, original_width = original_image_cv.shape[0], original_image_cv.shape[1]
    mask = cv2.resize(original_image_cv, (320, 320))
    mask = mask.transpose((2, 0, 1))        # reorder channels to CHW
    mask = mask.astype(np.float32) / 255.0  # normalize to [0, 1]
    mask = np.expand_dims(mask, axis=0)     # add batch dimension
    # Model inference
    input_name = model.get_inputs()[0].name
    output_name = model.get_outputs()[0].name
    mask = model.run([output_name], {input_name: mask})[0]
    # Post-processing
    mask = mask[0, 0, :, :]  # drop batch and channel dimensions
    mask = cv2.resize(mask, (original_width, original_height))  # restore original size; this mask is later used to select the expansion region
    mask = (mask > 0.5).astype(np.uint8) * 255  # binarize
    #cv2.imwrite(folder_path+"/"+'mask.png', mask)
    box_dictionary = get_mask_box(mask)
    mask_dictionary = {
        "mask_image": mask,
        "box_x": box_dictionary["box_x"],
        "box_y": box_dictionary["box_y"],
        "box_w": box_dictionary["box_w"],
        "box_h": box_dictionary["box_h"]
    }
    return mask_dictionary


def get_mask_box(mask):
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Merge the contours of all objects into a single array
    all_contours = np.concatenate(contours)
    # Compute the smallest upright rectangle enclosing the contours
    box_x, box_y, box_w, box_h = cv2.boundingRect(all_contours)
    # Result is the top-left (x, y) plus w (width) and h (height)
    box_dictionary = {
        "box_x": box_x,
        "box_y": box_y,
        "box_w": box_w,
        "box_h": box_h
    }
    return box_dictionary
\ No newline at end of file
original_image_height = 0
original_image_width = 0
outpaint_border_x_offset = 0
outpaint_border_y_offset = 0
\ No newline at end of file
import mask_processor
import cv2
import numpy as np
from dotenv import load_dotenv
import os
import subprocess
import openai
import image_downloader


def outpaint_image_with_DallE(bordered_image, folder_name):
    load_dotenv()
    openai.organization = os.getenv("ORG_ID")
    openai.api_key = os.getenv("API_KEY")
    cv2.imwrite("temp.png", bordered_image["transparent_bordered_image"])
    outpainted = openai.Image.create_edit(
        image=open("temp.png", "rb"),
        prompt="photo of person",
        n=1,
        size="1024x1024"
    )
    image_url = outpainted.data[0]['url']
    DallE_image_path = image_downloader.download_image(image_url, folder_name, "DallE.png")
    return DallE_image_path
def apply_feather(original_image_path):
    if type(original_image_path) is str:
        original = cv2.imread(original_image_path)
    feather = cv2.cvtColor(original, cv2.COLOR_BGR2BGRA)
    original_height = feather.shape[0]
    original_width = feather.shape[1]
    if original_height >= original_width:
        border_size = int(0.05 * original_height)
    else:
        border_size = int(0.05 * original_width)
    # Fade the alpha channel linearly along each edge
    for i in range(border_size):
        feather[i, :, 3] = int(255 * i / border_size)
        feather[-i, :, 3] = int(255 * i / border_size)
        feather[:, i, 3] = int(255 * i / border_size)
        feather[:, -i, 3] = int(255 * i / border_size)
    # Clear the corner squares, then redraw them as quarter-circle gradients
    feather[:border_size, :border_size, 3] = 0
    feather[:border_size, -border_size:, 3] = 0
    feather[-border_size:, -border_size:, 3] = 0
    feather[-border_size:, :border_size, 3] = 0
    for radius in range(0, border_size):
        for angle in range(0, 90 + 1):
            radian = np.deg2rad(angle)
            x = int(original_width - border_size + radius * np.cos(radian))
            y = int(original_height - border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255 * radius / border_size)
        for angle in range(90, 180 + 1):
            radian = np.deg2rad(angle)
            x = int(border_size + radius * np.cos(radian))
            y = int(original_height - border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255 * radius / border_size)
        for angle in range(180, 270 + 1):
            radian = np.deg2rad(angle)
            x = int(border_size + radius * np.cos(radian))
            y = int(border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255 * radius / border_size)
        for angle in range(270, 360 + 1):
            radian = np.deg2rad(angle)
            x = int(original_width - border_size + radius * np.cos(radian))
            y = int(border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255 * radius / border_size)
    return feather
def recover_original_image_size(original_image_path, dallE_image_path, transparent_bordered_image, ratio):
    # NOTE: reconstructed so that it uses only defined names; the scaling mirrors
    # AlphaCompositer.alphaCompositingWithResizing in temp.py. The ratio parameter
    # is kept for call-site compatibility but is no longer used here.
    feather = apply_feather(original_image_path)
    original_height, original_width = feather.shape[0], feather.shape[1]
    folder_path = os.path.dirname(dallE_image_path)
    cv2.imwrite(folder_path + "/" + "feather.png", feather)
    # Overlay the original image on top of the outpainting,
    # using the semi-transparent (feathered) version.
    result = cv2.imread(dallE_image_path)
    # Upscale the 1024x1024 DALL-E result so its inner (non-padded) region matches the original size
    inner_width = 1024 - 2 * transparent_bordered_image["x_offset"]
    new_length = int(original_width / inner_width * 1024)
    result = cv2.resize(result, (new_length, new_length))
    cv2.imwrite(folder_path + "/" + "alpha_composition.png", result)
    x_offset = int((new_length - original_width) / 2)
    y_offset = int((new_length - original_height) / 2)
    subprocess.run(["./magick.appimage", "composite", "-geometry", "+" + str(x_offset) + "+" + str(y_offset), folder_path + "/" + "feather.png", folder_path + "/" + "alpha_composition.png", folder_path + "/" + "alpha_composition.png"])
def make_transparent_border(original_image_cv, mask, ratio):
    if type(original_image_cv) is str:
        original_image_cv = cv2.imread(original_image_cv)
    transparent_bordered_image = cv2.cvtColor(original_image_cv, cv2.COLOR_RGB2RGBA)
    # Find the longer side of the object
    box_w, box_h = mask["box_w"], mask["box_h"]
    if box_w > box_h:
        longer = box_w
    else:
        longer = box_h
    # Scale so the person object's longer side occupies ratio% of the 1024x1024
    # canvas (e.g. 55%, i.e. 550 px, when ratio is 55)
    new_width, new_height = int(transparent_bordered_image.shape[1] * ratio * 10 / longer), int(transparent_bordered_image.shape[0] * ratio * 10 / longer)
    transparent_bordered_image = cv2.resize(transparent_bordered_image, (new_width, new_height))
    transparent_image = np.zeros((1024, 1024, 4), dtype=np.uint8)
    x_offset = int((transparent_image.shape[1] - transparent_bordered_image.shape[1]) / 2)
    y_offset = int((transparent_image.shape[0] - transparent_bordered_image.shape[0]) / 2)
    transparent_image[y_offset:y_offset + transparent_bordered_image.shape[0], x_offset:x_offset + transparent_bordered_image.shape[1]] = transparent_bordered_image
    transparent_bordered_image = transparent_image
    transparent_bordered_image_dictionary = {
        "transparent_bordered_image": transparent_bordered_image,
        "x_offset": x_offset,
        "y_offset": y_offset
    }
    return transparent_bordered_image_dictionary