Skip to content
Snippets Groups Projects
Commit 494b0e9d authored by Hyunseok_Sang's avatar Hyunseok_Sang
Browse files

서로 다른 주제들의 클래스들을 다른 모듈로 분리했습니다

parent 2f7e1bb7
No related branches found
No related tags found
No related merge requests found
Showing
with 179 additions and 409 deletions
back-end/oop/1679752285000-PtrdXn.jpg

53.4 KiB

{
"seller": "ssfshop",
"brand": "beanpole",
"product_list": [
{
"name": "남녀공용 베이직 피케 티셔츠 - 블랙",
"url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/15/GM0023021581678_0_ORGINL_20230327140102442.jpg",
"type_of_clothes": "top"
}
]
}
\ No newline at end of file
import sys
import json
import temp

# Batch pipeline: for every product listed in the JSON file given on the
# command line, download the product photo, outpaint it, detect the face and
# the target garment, and persist everything to MongoDB.
with open(sys.argv[1], 'r') as file:
    shoppingmall_data = json.load(file)

seller_brand_path = shoppingmall_data["seller"] + "/" + shoppingmall_data["brand"]

downloader = temp.ImageDownloader()
image_factory = temp.ImageFactory()
processor = temp.ImageProcessor()
face_detector = temp.FaceDetector()
clothes_detector = temp.ClothesDetector()
db_manager = temp.DatabaseManager()

for product in shoppingmall_data["product_list"]:
    destination = seller_brand_path + "/" + product["name"]
    downloaded_path = downloader.download_image(product["url"], destination, "original.jpg")
    source_image = image_factory.create_image(downloaded_path, None)
    outpainted_image = processor.process(source_image)
    detected_faces = face_detector.detectFace(source_image)
    first_face = detected_faces.getFace(1)
    clothes_candidates = clothes_detector.localize_objects(source_image)
    target_clothes = clothes_candidates.getClothes(product["type_of_clothes"])
    target_clothes.denormalizeByImageSize(source_image)
    db_manager.insertToClothesDataCollection(source_image, outpainted_image, first_face, target_clothes)
import temp

# Ad-hoc smoke test: run clothes detection on one locally saved DALL-E result
# and print the detected "top" garment's pixel bounding box.
factory = temp.ImageFactory()
detector = temp.ClothesDetector()

imagepath = "/home/sang/automated_image_processing/automated_image_processing/automated_image_processing/back-end/oop/ssfshop/beanpole/남녀공용 베이직 피케 티셔츠 - 블랙/DallE.png"
test_image = factory.create_image(imagepath, None)
detected_objects = detector.localize_objects(test_image)
top_clothes = detected_objects.getClothes("top")
print(top_clothes.left, top_clothes.top, top_clothes.right, top_clothes.bottom)
File deleted
File deleted
File deleted
{ {
"seller": "ssfshop", "seller": "ssfshop",
"brand": "beanpole", "brand": "beanpole",
"image_list": [ "product_list": [
{ {
"product_name": "남녀공용 베이직 피케 티셔츠 - 블랙", "name": "남녀공용 베이직 피케 티셔츠 - 블랙",
"image_url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/15/GM0023021581678_0_ORGINL_20230327140102442.jpg", "url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/15/GM0023021581678_0_ORGINL_20230327140102442.jpg",
"type_of_clothes": "top" "type_of_clothes": "top"
}, },
{ {
"product_name": "매직 써커 패턴 슬림핏 팬츠 - 네이비", "name": "바서티 볼드 스트라이프 럭비 티셔츠 - 블루",
"image_url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/02/28/GM0023022838172_0_ORGINL_20230302150258661.jpg", "url": "https://img.ssfshop.com/cmd/LB_750x1000/src/https://img.ssfshop.com/goods/BPBR/23/01/19/GM0023011962271_0_ORGINL_20230214162711402.jpg",
"type_of_clothes": "pants" "type_of_clothes": "top"
} }
] ]
} }
\ No newline at end of file
import requests
import original_image
import os
def download_image(url, folder_path, file_name):
    """Download *url* into ``folder_path/file_name``.

    Creates *folder_path* if it does not exist. Returns the saved file's
    path on HTTP 200; returns None on any other status code (the original
    fell off the end and returned None implicitly, which hid failures).
    """
    # Bounded timeout so a dead server cannot hang the whole pipeline.
    response = requests.get(url, timeout=60)
    if response.status_code != 200:
        return None  # explicit failure path for callers to check
    os.makedirs(folder_path, exist_ok=True)
    image_path = folder_path + "/" + file_name
    with open(image_path, 'wb') as file:
        file.write(response.content)
    return image_path
\ No newline at end of file
File moved
import image_downloader
import mask_processor
import outpainting_processor
import os
import sys
import json import json
import cv2 from modules import image_processor
from modules import database
with open(sys.argv[1], 'r') as file: with open("download.json", 'r') as file:
shoppingmall_data = json.load(file) shoppingmall_data = json.load(file)
ratio = 0 seller_brand_path = shoppingmall_data["seller"]+"/"+shoppingmall_data["brand"]
new_folder = shoppingmall_data["seller"]+"/"+shoppingmall_data["brand"]
os.makedirs(new_folder, exist_ok=True) imageDownloder = image_processor.ImageDownloader()
imageFactory = image_processor.ImageFactory()
imageProcessor = image_processor.ImageProcessor()
faceDetector = image_processor.FaceDetector()
clothesDetector = image_processor.ClothesDetector()
databaseManager = database.DatabaseManager()
for product in shoppingmall_data["product_list"]:
save_path = "image_data/"+seller_brand_path+"/"+product["name"]
image_url = product["url"]
image_path = imageDownloder.download_image(image_url, save_path, "original.jpg")
image = imageFactory.create_image(image_path, None)
result_image = imageProcessor.process(image)
faceData = faceDetector.detectFace(image)
face = faceData.getFace(1)
clothesDataList = clothesDetector.localize_objects(image)
clothes = clothesDataList.getClothes(product["type_of_clothes"])
clothes.denormalizeByImageSize(image)
for data in shoppingmall_data["image_list"]: databaseManager.insertToClothesDataCollection(image, image_url, result_image, face, clothes)
image_url = data["image_url"]
folder_name = new_folder+"/"+data["product_name"]
original_image_path = image_downloader.download_image(image_url, folder_name, "original.jpg")
mask = mask_processor.generate_mask_of(original_image_path)
transparent_bordered_image = outpainting_processor.make_transparent_border(original_image_path, mask, ratio)
dallE_image_path = outpainting_processor.outpaint_image_with_DallE(transparent_bordered_image, folder_name)
outpainting_processor.recover_orignal_image_size(original_image_path, dallE_image_path, transparent_bordered_image, ratio)
\ No newline at end of file
import onnxruntime
import cv2
import numpy as np
def generate_mask_of(original_image_cv):
    """Run the U-Net ONNX model over the image and return a dict holding the
    binary person mask plus its enclosing bounding box.

    Accepts either an already-loaded BGR ndarray or a file path (loaded with
    cv2.imread).
    """
    if type(original_image_cv) is str:  # allow a file path instead of an array
        original_image_cv = cv2.imread(original_image_cv)

    session = onnxruntime.InferenceSession('unet.onnx')
    original_height, original_width = original_image_cv.shape[0], original_image_cv.shape[1]

    # Preprocess: 320x320, channel-first, float in [0, 1], leading batch dim.
    net_input = cv2.resize(original_image_cv, (320, 320))
    net_input = net_input.transpose((2, 0, 1)).astype(np.float32) / 255.0
    net_input = np.expand_dims(net_input, axis=0)

    # Inference.
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
    prediction = session.run([output_name], {input_name: net_input})[0]

    # Postprocess: drop batch/channel dims, restore the original resolution
    # (the mask is later used to select the outpainted region), binarize.
    prediction = prediction[0, 0, :, :]
    prediction = cv2.resize(prediction, (original_width, original_height))
    binary_mask = (prediction > 0.5).astype(np.uint8) * 255

    box = get_mask_box(binary_mask)
    return {
        "mask_image": binary_mask,
        "box_x": box["box_x"],
        "box_y": box["box_y"],
        "box_w": box["box_w"],
        "box_h": box["box_h"],
    }
def get_mask_box(mask):
    """Return the minimal rectangle enclosing every contour of the binary
    mask, as a dict of top-left (x, y) plus width/height in pixels."""
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Merge every object's contour so a single box covers all of them.
    merged_contours = np.concatenate(contours)
    x, y, w, h = cv2.boundingRect(merged_contours)
    return {"box_x": x, "box_y": y, "box_w": w, "box_h": h}
\ No newline at end of file
File added
from pymongo import MongoClient


class DatabaseManager:
    """Persists one record per processed product (original image, outpainted
    image, face box, clothes box) into the local MongoDB instance."""

    def __init__(self):
        # pymongo connects lazily; failures surface on the first operation.
        self.client = MongoClient('mongodb://localhost:27017/')

    def is_mongodb_running(self):
        """Return True when the local MongoDB server answers a server_info ping."""
        try:
            self.client.server_info()
            return True
        # Was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            return False

    def insertToClothesDataCollection(self, orignalImage, imaga_url, finalImage, face, clothes):
        """Insert one document into fashionImageTest.clothesData.

        Face and clothes pixel boxes are shifted by the final image's
        x/y offsets so they are expressed in the outpainted image's
        coordinate system. Parameter names ``orignalImage``/``imaga_url``
        are misspelled but kept for compatibility with keyword callers.
        """
        db = self.client['fashionImageTest']
        collection = db['clothesData']
        inserted_data = {
            "original_image": {
                "file_path": orignalImage.filepath,
                "url": imaga_url
            },
            "dalle_image_path": finalImage.filepath,
            "dalle_image_width": finalImage.width,
            "dalle_image_height": finalImage.height,
            "x_offset": finalImage.x_offset,
            "y_offset": finalImage.y_offset,
            "face": {
                "left_line_pixel": face.left + finalImage.x_offset,
                "right_line_pixel": face.right + finalImage.x_offset,
                "top_line_pixel": face.top + finalImage.y_offset,
                "bottom_line_pixel": face.bottom + finalImage.y_offset,
            },
            "clothes": {
                "type_of_clothes": clothes.type_of_clothes,
                "left_line_pixel": clothes.left + finalImage.x_offset,
                "right_line_pixel": clothes.right + finalImage.x_offset,
                "top_line_pixel": clothes.top + finalImage.y_offset,
                "bottom_line_pixel": clothes.bottom + finalImage.y_offset,
            }
        }
        # Removed two leftover debug prints of the clothes left edge.
        insert_result = collection.insert_one(inserted_data)
        print(f"Inserted document ID: {insert_result.inserted_id}")
\ No newline at end of file
import requests
import os  # BUG FIX: os.makedirs was used below but os was never imported


class ImageDownloader:
    """Downloads an image over HTTP into a target folder."""

    def download_image(self, url, folder_path, file_name):
        """Download *url* into ``folder_path/file_name``.

        Creates *folder_path* if needed. Returns the saved path on HTTP 200
        and None otherwise (made explicit; the original returned None
        implicitly by falling off the end).
        """
        response = requests.get(url)
        if response.status_code != 200:
            return None
        os.makedirs(folder_path, exist_ok=True)
        image_path = folder_path + "/" + file_name
        with open(image_path, 'wb') as file:
            file.write(response.content)
        return image_path
import requests
import cv2 import cv2
import numpy as np import numpy as np
import onnxruntime import onnxruntime
import os import os
import subprocess import subprocess
import temp from modules import fileManager
class ImageDownloader:
def download_image(self, url, folder_path, file_name):
response = requests.get(url)
if response.status_code == 200:
os.makedirs(folder_path, exist_ok=True)
image_path = folder_path+"/"+file_name
with open(image_path, 'wb') as file:
file.write(response.content)
return image_path
class Image: class Image:
def __init__(self, filepath, cv_image): def __init__(self, filepath, cv_image):
...@@ -27,6 +15,9 @@ class Image: ...@@ -27,6 +15,9 @@ class Image:
self.cv_image = cv2.imread(filepath) self.cv_image = cv2.imread(filepath)
self.height = self.cv_image.shape[0] self.height = self.cv_image.shape[0]
self.width = self.cv_image.shape[1] self.width = self.cv_image.shape[1]
self.url = None
def setURL(self, url):
self.url = url
class ImageFactory: class ImageFactory:
def create_image(self,filepath, cv_image): def create_image(self,filepath, cv_image):
image = Image(filepath, cv_image) image = Image(filepath, cv_image)
...@@ -227,7 +218,7 @@ class ImageChopper: ...@@ -227,7 +218,7 @@ class ImageChopper:
object_move_x = paddedImage.x_offset object_move_x = paddedImage.x_offset
object_move_y = paddedImage.y_offset object_move_y = paddedImage.y_offset
magick_command = SystemChecker.returnMagickCommand magick_command = SystemChecker().returnMagickCommand()
if mask.smallest_box_x == 0: if mask.smallest_box_x == 0:
subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "west", "-chop", (str(paddedImage.x_offset)+"x"+"0"), "temp_alpha.png"]) subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "west", "-chop", (str(paddedImage.x_offset)+"x"+"0"), "temp_alpha.png"])
...@@ -238,6 +229,7 @@ class ImageChopper: ...@@ -238,6 +229,7 @@ class ImageChopper:
if mask.smallest_box_x + mask.smallest_box_width + 10 >= mask.width: if mask.smallest_box_x + mask.smallest_box_width + 10 >= mask.width:
subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "east", "-chop", (str(paddedImage.x_offset)+"x"+"0"), "temp_alpha.png"]) subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "east", "-chop", (str(paddedImage.x_offset)+"x"+"0"), "temp_alpha.png"])
if mask.smallest_box_y + mask.smallest_box_height + 10 >= mask.height: if mask.smallest_box_y + mask.smallest_box_height + 10 >= mask.height:
print(magick_command,"convert", "temp_alpha.png", "-gravity", "south", "-chop", ("0"+"x"+str(paddedImage.y_offset)), "temp_alpha.png")
subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "south", "-chop", ("0"+"x"+str(paddedImage.y_offset)), "temp_alpha.png"]) subprocess.run([magick_command,"convert", "temp_alpha.png", "-gravity", "south", "-chop", ("0"+"x"+str(paddedImage.y_offset)), "temp_alpha.png"])
folder_path = os.path.dirname(image_path) folder_path = os.path.dirname(image_path)
...@@ -248,44 +240,16 @@ class ImageChopper: ...@@ -248,44 +240,16 @@ class ImageChopper:
return result return result
class Face:
    """Axis-aligned face bounding box; stores the four pixel edge positions
    exactly as given."""

    def __init__(self, left, top, right, bottom) -> None:
        self.left, self.top = left, top
        self.right, self.bottom = right, bottom
class FaceDataList:
    """Wraps the raw result of RetinaFace.detect_faces (a dict keyed by
    "face_N" on success, a tuple when nothing was detected)."""

    def __init__(self, face_dict_data) -> None:
        self.data = face_dict_data

    def getFace(self, face_number):
        """Return the numbered face as a Face, or (None, None) when the
        detector found no faces (it returns a tuple in that case)."""
        if type(self.data) is tuple:
            return (None, None)
        area = self.data["face_" + str(face_number)]["facial_area"]
        return Face(area[0], area[1], area[2], area[3])
class FaceDetector:
    """Thin adapter over RetinaFace face detection."""

    def detectFace(self, image):
        """Run RetinaFace on the image file and wrap the raw result."""
        # Imported lazily: RetinaFace is heavy and only needed here.
        from retinaface import RetinaFace
        raw_result = RetinaFace.detect_faces(image.filepath)
        return FaceDataList(raw_result)
class BoxDrower:
    """Draws a green rectangle on a copy of the image and writes the result
    next to the source file as face_bounding_box.jpg."""

    def drawBox(self, image, left_top, right_bottom):
        # Nothing to draw when no face was detected.
        if left_top == None:
            print("there is no face")
            return
        annotated = image.cv_image.copy()
        cv2.rectangle(annotated, left_top, right_bottom, (0, 255, 0), 2)
        output_path = os.path.dirname(image.filepath) + "/" + 'face_bounding_box.jpg'
        cv2.imwrite(output_path, annotated)
class ImageProcessor: class ImageProcessor:
def __init__(self): def __init__(self):
self.maskGenerator = temp.MaskGenerator() self.maskGenerator = MaskGenerator()
self.paddedImageFactory = temp.PaddedImageFactory() self.paddedImageFactory = PaddedImageFactory()
self.dallEImageGenerator= temp.DallEImageGenerator() self.dallEImageGenerator= DallEImageGenerator()
self.featheredImageFactory = temp.FeatheredImageFactory() self.featheredImageFactory = FeatheredImageFactory()
self.alphaCompositer = temp.AlphaCompositer() self.alphaCompositer = AlphaCompositer()
self.imageChopper = temp.ImageChopper() self.imageChopper = ImageChopper()
self.imageDownloder = temp.ImageDownloader() self.imageDownloder = fileManager.ImageDownloader()
def process(self, image): def process(self, image):
mask = self.maskGenerator.create_mask(image) mask = self.maskGenerator.create_mask(image)
padded_image = self.paddedImageFactory.create_padded_image(image, mask, 55) padded_image = self.paddedImageFactory.create_padded_image(image, mask, 55)
...@@ -294,79 +258,3 @@ class ImageProcessor: ...@@ -294,79 +258,3 @@ class ImageProcessor:
resized_outpainted_image = self.alphaCompositer.alphaCompositingWithResizing(feathered_image, dallE_image) resized_outpainted_image = self.alphaCompositer.alphaCompositingWithResizing(feathered_image, dallE_image)
result_image = self.imageChopper.chopInvadingBorderUsingMask(resized_outpainted_image, mask, image.filepath) result_image = self.imageChopper.chopInvadingBorderUsingMask(resized_outpainted_image, mask, image.filepath)
return result_image return result_image
class ClothesDetector:
    """Adapter over Google Cloud Vision object localization."""

    def localize_objects(self, image):
        """Send the image file to the Vision API and wrap the detected
        object annotations in a ClothesObjectsList."""
        from google.cloud import vision
        client = vision.ImageAnnotatorClient()
        with open(image.filepath, 'rb') as image_file:
            content = image_file.read()
        vision_image = vision.Image(content=content)
        annotations = client.object_localization(image=vision_image).localized_object_annotations
        return ClothesObjectsList(annotations)
class Clothes:
    """One detected garment: the Vision API's normalized (0..1) box plus,
    after denormalizeByImageSize(), the box in pixel coordinates."""

    def __init__(self, type_of_clothes, left, top, right, bottom) -> None:
        self.type_of_clothes = type_of_clothes
        # Normalized edges straight from object localization.
        self.normalized_left = left
        self.normalized_top = top
        self.normalized_right = right
        self.normalized_bottom = bottom
        # Pixel edges; None until denormalizeByImageSize() is called.
        self.left = None
        self.top = None
        self.right = None
        self.bottom = None

    def denormalizeByImageSize(self, image):
        """Convert the normalized box into pixel coordinates of *image*."""
        width, height = image.width, image.height
        self.left = int(self.normalized_left * width)
        self.right = int(self.normalized_right * width)
        self.top = int(self.normalized_top * height)
        self.bottom = int(self.normalized_bottom * height)
class ClothesObjectsList:
    """Wraps localized_object_annotations returned by the Vision API."""

    def __init__(self, objects) -> None:
        self.objects = objects

    def getClothes(self, type_of_clothes):
        """Return the first detected object named *type_of_clothes* as a Clothes.

        Raises ValueError when no object of that type was detected.
        BUG FIX: the original loop fell through without a match and silently
        built a Clothes from the LAST detected object (and raised NameError
        on an empty list); now the failure is explicit.
        """
        for candidate in self.objects:
            if candidate.name == type_of_clothes:
                vertices = candidate.bounding_poly.normalized_vertices
                # Vertex 0 is the top-left corner, vertex 2 the bottom-right.
                return Clothes(type_of_clothes,
                               vertices[0].x, vertices[0].y,
                               vertices[2].x, vertices[2].y)
        raise ValueError(f"no '{type_of_clothes}' object detected")
class DatabaseManager:
    """Persists one processed-product record (image paths, face box, clothes
    box) to the local MongoDB instance."""

    def __init__(self):
        # Local import kept as in the original so the module loads without pymongo.
        from pymongo import MongoClient
        self.client = MongoClient('mongodb://localhost:27017/')

    def insertToClothesDataCollection(self, orignalImage, finalImage, face, clothes):
        """Insert one document into fashionImageTest.clothesData.

        Face/clothes boxes are shifted by the final image's x/y offsets so
        they are expressed in the outpainted image's coordinate system.
        """
        db = self.client['fashionImageTest']
        collection = db['clothesData']
        inserted_data = {
            "original_image_path": orignalImage.filepath,
            "dalle_image_path": finalImage.filepath,
            "dalle_image_width": finalImage.width,
            "dalle_image_height": finalImage.height,
            "x_offset": finalImage.x_offset,
            "y_offset": finalImage.y_offset,
            "face": {
                "left_line_pixel": face.left + finalImage.x_offset,
                "right_line_pixel": face.right + finalImage.x_offset,
                "top_line_pixel": face.top + finalImage.y_offset,
                # BUG FIX: was `face.bottop` (AttributeError) — Face defines .bottom
                "bottom_line_pixel": face.bottom + finalImage.y_offset,
            },
            "clothes": {
                "type_of_clothes": clothes.type_of_clothes,
                "left_line_pixel": clothes.left + finalImage.x_offset,
                "right_line_pixel": clothes.right + finalImage.x_offset,
                "top_line_pixel": clothes.top + finalImage.y_offset,
                # BUG FIX: was `clothes.bottop` — Clothes defines .bottom
                "bottom_line_pixel": clothes.bottom + finalImage.y_offset,
            }
        }
        insert_result = collection.insert_one(inserted_data)
        print(f"Inserted document ID: {insert_result.inserted_id}")
\ No newline at end of file
import cv2
import os
class Face:
    """Axis-aligned face bounding box; edge positions are coerced to int
    pixel coordinates on construction."""

    def __init__(self, left, top, right, bottom) -> None:
        self.left = int(left)
        self.right = int(right)
        self.top = int(top)
        self.bottom = int(bottom)
class FaceDataList:
    """Holds the raw result of RetinaFace.detect_faces (dict keyed by
    "face_N" on success, a tuple when no face was found)."""

    def __init__(self, face_dict_data) -> None:
        self.data = face_dict_data

    def getFace(self, face_number):
        """Return the numbered face as a Face, or (None, None) when the
        detector found nothing (RetinaFace returns a tuple in that case)."""
        if type(self.data) is tuple:
            return (None, None)
        area = self.data["face_" + str(face_number)]["facial_area"]
        return Face(area[0], area[1], area[2], area[3])
class FaceDetector:
    """Thin adapter over RetinaFace face detection."""

    def detectFace(self, image):
        """Run RetinaFace on the image file and wrap its raw output."""
        # Lazy import: RetinaFace pulls in heavy dependencies.
        from retinaface import RetinaFace
        detection = RetinaFace.detect_faces(image.filepath)
        return FaceDataList(detection)
class BoxDrower:
    """Draws a green bounding box on a copy of the image and saves it next
    to the source file as face_bounding_box.jpg."""

    def drawBox(self, image, left_top, right_bottom):
        # Skip drawing entirely when no face box is available.
        if left_top == None:
            print("there is no face")
            return
        annotated = image.cv_image.copy()
        cv2.rectangle(annotated, left_top, right_bottom, (0, 255, 0), 2)
        destination = os.path.dirname(image.filepath) + "/" + 'face_bounding_box.jpg'
        cv2.imwrite(destination, annotated)
class ClothesDetector:
    """Adapter over Google Cloud Vision object localization."""

    def localize_objects(self, image):
        """Send the image file to the Vision API and return the detected
        objects wrapped in a ClothesObjectsList."""
        from google.cloud import vision
        client = vision.ImageAnnotatorClient()
        with open(image.filepath, 'rb') as image_file:
            content = image_file.read()
        vision_image = vision.Image(content=content)
        annotations = client.object_localization(image=vision_image).localized_object_annotations
        return ClothesObjectsList(annotations)
class Clothes:
    """One detected garment: normalized (0..1) box from the Vision API plus,
    once denormalizeByImageSize() has run, the box in pixel coordinates."""

    def __init__(self, type_of_clothes, left, top, right, bottom) -> None:
        self.type_of_clothes = type_of_clothes
        # Normalized edges as produced by object localization.
        self.normalized_left = left
        self.normalized_top = top
        self.normalized_right = right
        self.normalized_bottom = bottom
        # Pixel edges; stay None until denormalizeByImageSize() is called.
        self.left = None
        self.top = None
        self.right = None
        self.bottom = None

    def denormalizeByImageSize(self, image):
        """Scale the normalized box by the image's pixel dimensions."""
        width, height = image.width, image.height
        self.left = int(self.normalized_left * width)
        self.right = int(self.normalized_right * width)
        self.top = int(self.normalized_top * height)
        self.bottom = int(self.normalized_bottom * height)
class ClothesObjectsList:
    """Wraps localized_object_annotations returned by the Vision API."""

    def __init__(self, objects) -> None:
        self.objects = objects

    def getClothes(self, type_of_clothes):
        """Return the first detected object named *type_of_clothes* as a Clothes.

        Raises ValueError when no object of that type was detected.
        BUG FIX: the original loop fell through without a match and silently
        built a Clothes from the LAST detected object (and raised NameError
        on an empty list); now the failure is explicit.
        """
        for candidate in self.objects:
            if candidate.name == type_of_clothes:
                vertices = candidate.bounding_poly.normalized_vertices
                # Vertex 0 is the top-left corner, vertex 2 the bottom-right.
                return Clothes(type_of_clothes,
                               vertices[0].x, vertices[0].y,
                               vertices[2].x, vertices[2].y)
        raise ValueError(f"no '{type_of_clothes}' object detected")
\ No newline at end of file
# Placeholder globals describing the outpainting geometry. All start at 0;
# NOTE(review): no code in this fragment ever assigns them — presumably they
# were meant to be set by the processing pipeline. Verify they are still used.
original_image_height = 0
original_image_width = 0
outpaint_border_x_offset = 0
outpaint_border_y_offset = 0
\ No newline at end of file
import mask_processor
import cv2
import numpy as np
from dotenv import load_dotenv
import os
import openai
import image_downloader
def outpaint_image_with_DallE(bordered_image, folder_name):
    """Send the transparent-bordered image to the DALL-E image-edit endpoint
    and download the outpainted result into *folder_name* as "DallE.png".

    *bordered_image* is the dict produced by make_transparent_border().
    Returns the local path of the downloaded file. Credentials (ORG_ID,
    API_KEY) are read from the environment / .env file.
    """
    load_dotenv()
    openai.organization = os.getenv("ORG_ID")
    openai.api_key = os.getenv("API_KEY")
    # DALL-E edits need a PNG file, so stage the array on disk first.
    cv2.imwrite("temp.png", bordered_image["transparent_bordered_image"])
    # BUG FIX: the original opened temp.png without ever closing it; keep the
    # handle open only for the duration of the API call.
    with open("temp.png", "rb") as image_file:
        outpainted = openai.Image.create_edit(
            image=image_file,
            prompt="photo of person",
            n=1,
            size="1024x1024"
        )
    image_url = outpainted.data[0]['url']
    DallE_image_path = image_downloader.download_image(image_url, folder_name, "DallE.png")
    return DallE_image_path
def apply_feather(original_image_path):
    """Add an alpha channel that fades linearly from transparent (outer edge)
    to opaque over a border of 5% of the longer image side, with radially
    faded (rounded) corners.

    Accepts a file path or an already-loaded BGR ndarray.
    Returns the feathered image as a BGRA ndarray.
    """
    if type(original_image_path) is str:
        original = cv2.imread(original_image_path)
    else:
        # BUG FIX: the original left `original` undefined (NameError) when an
        # ndarray was passed; accept it directly like the sibling helpers do.
        original = original_image_path
    feather = cv2.cvtColor(original, cv2.COLOR_BGR2BGRA)
    original_height = feather.shape[0]
    original_width = feather.shape[1]
    # Border thickness: 5% of the longer side.
    if feather.shape[0] >= original_width:
        border_size = int(0.05 * original_height)
    else:
        border_size = int(0.05 * original_width)
    # Linear alpha ramp along all four edges.
    for i in range(border_size):
        feather[ i,:, 3] = int(255* i/border_size)
        feather[-i,:, 3] = int(255* i/border_size)
        feather[:, i, 3] = int(255* i/border_size)
        feather[:,-i, 3] = int(255* i/border_size)
    # Clear the square corners, then re-fill them with radial fades so the
    # feather looks rounded instead of cross-hatched.
    feather[:border_size , :border_size, 3] = 0
    feather[:border_size , -border_size:, 3] = 0
    feather[-border_size:, -border_size:, 3] = 0
    feather[-border_size:, :border_size, 3] = 0
    for radius in range(0, border_size):
        # Bottom-right quadrant (0..90 degrees).
        for angle in range(0, 90 + 1):
            radian = np.deg2rad(angle)
            x = int(original_width- border_size + radius * np.cos(radian))
            y = int(original_height- border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255* radius/border_size)
        # Bottom-left quadrant (90..180 degrees).
        for angle in range(90, 180 + 1):
            radian = np.deg2rad(angle)
            x = int(border_size + radius * np.cos(radian))
            y = int(original_height- border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255* radius/border_size)
        # Top-left quadrant (180..270 degrees).
        for angle in range(180, 270 + 1):
            radian = np.deg2rad(angle)
            x = int(border_size + radius * np.cos(radian))
            y = int(border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255-255* radius/border_size)
        # Top-right quadrant (270..360 degrees).
        for angle in range(270, 360 + 1):
            radian = np.deg2rad(angle)
            x = int(original_width - border_size + radius * np.cos(radian))
            y = int(border_size + radius * np.sin(radian))
            feather[y, x][3] = int(255 - 255* radius/border_size)
    return feather
def recover_orignal_image_size(original_image_path, dallE_image_path, transparent_bordered_image, ratio):
    # NOTE(review): this function cannot run as written — `longer`,
    # `folder_path`, `original_width` and `original_height` are never defined
    # in this scope, and `subprocess` is not imported in this module, so it
    # raises NameError on first call. `feather` and the
    # `transparent_bordered_image` parameter are also never used. Flagged
    # rather than fixed: the intended values are not recoverable from this
    # file alone.
    feather = apply_feather(original_image_path)
    # Lay the original image back on top of the outpainted result,
    ## using the semi-transparent (feathered) version of it.
    result = cv2.imread(dallE_image_path)
    new_length = int(1024*longer/ratio)
    result = cv2.resize(result, (new_length, new_length))
    cv2.imwrite(folder_path+"/"+"alpha_compistion.png", result)
    x_offset = int((new_length - original_width) / 2)
    y_offset = int((new_length - original_height) / 2)
    subprocess.run(["./magick.appimage","composite", "-geometry", "+" + str(x_offset) + "+" +str(y_offset), folder_path+"/"+"feather.png", folder_path+"/"+"alpha_compistion.png", folder_path+"/"+"alpha_compistion.png"])
def make_transparent_border(original_image_cv, mask, ratio):
    """Center the image on a transparent 1024x1024 canvas, scaled so the
    masked object's longer side occupies ratio*10 pixels (e.g. ratio=55
    means the person fills 550 of 1024 pixels).

    Accepts a file path or an already-loaded image array. Returns a dict
    with the RGBA canvas and the x/y offsets where the image was placed.
    """
    if type(original_image_cv) is str:
        original_image_cv = cv2.imread(original_image_cv)
    rgba_image = cv2.cvtColor(original_image_cv, cv2.COLOR_RGB2RGBA)

    # Longer side of the object's bounding box drives the scale factor.
    longer = max(mask["box_w"], mask["box_h"])

    new_width = int(rgba_image.shape[1] * ratio * 10 / longer)
    new_height = int(rgba_image.shape[0] * ratio * 10 / longer)
    rgba_image = cv2.resize(rgba_image, (new_width, new_height))

    # Fully transparent square canvas, image pasted centered.
    canvas = np.zeros((1024, 1024, 4), dtype=np.uint8)
    x_offset = int((canvas.shape[1] - rgba_image.shape[1]) / 2)
    y_offset = int((canvas.shape[0] - rgba_image.shape[0]) / 2)
    canvas[y_offset:y_offset + rgba_image.shape[0], x_offset:x_offset + rgba_image.shape[1]] = rgba_image

    return {
        "transparent_bordered_image": canvas,
        "x_offset": x_offset,
        "y_offset": y_offset
    }
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment