Skip to content
Snippets Groups Projects
Commit 1dd0c3e3 authored by Hyunseok_Sang's avatar Hyunseok_Sang
Browse files

README.MD 수정

parent 01cdb6fd
No related branches found
No related tags found
No related merge requests found
2023년 1학기 자기주도프로젝트
AI 기술을 활용한 광고 이미지 자동 변환 시스템 파일럿 개발의 git repo입니다.
배포를 위한 준비가 부족하여, 코드를 실행시키기 까다로운 상황입니다. 곧 개선할 계획입니다.
실행하기 위해서
back-end 폴더에 u-net 가중치 파일을 배치해야합니다.
.env 파일 생성하고 openAI api키를 삽입해야합니다.
requirment.txt에 기입된 패키지들을 설치해야합니다.
%% Cell type:code id: tags:
``` python
import cv2
import onnxruntime
import numpy as np
import requests
import subprocess
import os
from pymongo import MongoClient
from google.cloud import vision
import urllib.parse
import openai
import os
from dotenv import load_dotenv
# Pipeline configuration.
# `ratio` is the pixel length the detected object's longer side should occupy
# on the 1024x1024 outpainting canvas (55 * 10 = 550 px).
ratio = 55 * 10
product_name = "shoe"
img_url = "https://cdn.shopify.com/s/files/1/0468/9441/files/home_boots.jpg?v=1678983760&width=2000"
image_root_folder = "sources"
clothes_type = "Shoe"
# Root folder that holds one working sub-folder per processed image.
os.makedirs(image_root_folder, exist_ok=True)
```
%% Cell type:code id: tags:
``` python
def download_image(url, file_path):
    """Download `url` and save the body to `file_path`.

    Prints a Korean success/failure message (kept verbatim — later cells'
    recorded outputs depend on it).
    """
    # Fix: the original call had no timeout, so a stalled CDN connection
    # would hang the whole notebook indefinitely.
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        with open(file_path, 'wb') as file:
            file.write(response.content)
        print("이미지 다운로드 완료")
    else:
        print("이미지 다운로드 실패")
def localize_objects(path):
    """Run Google Cloud Vision object localization on a local image.

    Source: https://cloud.google.com/vision/docs/libraries?hl=ko

    Args:
        path: Path to the local image file.

    Returns:
        The list of localized object annotations reported by the API.
    """
    client = vision.ImageAnnotatorClient()

    with open(path, 'rb') as image_file:
        content = image_file.read()

    request_image = vision.Image(content=content)
    objects = client.object_localization(image=request_image).localized_object_annotations

    # Echo every detection for debugging in the notebook output.
    print(f'Number of objects found: {len(objects)}')
    for object_ in objects:
        print(f'\n{object_.name} (confidence: {object_.score})')
        print('Normalized bounding polygon vertices: ')
        for vertex in object_.bounding_poly.normalized_vertices:
            print(f' - ({vertex.x}, {vertex.y})')
    return objects
def find_smallest_rectangle(boxes):
    """Return the smallest axis-aligned rectangle enclosing every box.

    Args:
        boxes: Iterable of boxes, each [(left, top), (right, bottom)].

    Returns:
        [(min_x, min_y), (max_x, max_y)] covering all boxes. For an empty
        input this degenerates to [(inf, inf), (-inf, -inf)], matching the
        accumulator initial values.
    """
    lefts = [box[0][0] for box in boxes]
    tops = [box[0][1] for box in boxes]
    rights = [box[1][0] for box in boxes]
    bottoms = [box[1][1] for box in boxes]
    return [
        (min(lefts, default=float('inf')), min(tops, default=float('inf'))),
        (max(rights, default=float('-inf')), max(bottoms, default=float('-inf'))),
    ]
def resize_box(box, image_width, image_height, mul):
    """Scale `box` about its center by factor `mul`, clamped to the image.

    Args:
        box: [(x1, y1), (x2, y2)] with (x1, y1) top-left.
        image_width: Clamp limit for the right edge.
        image_height: Clamp limit for the bottom edge.
        mul: Scale factor (e.g. 1.1 adds 5% padding on each side).

    Returns:
        [(x1, y1), (x2, y2)] as ints, never outside the image bounds.
    """
    (x1, y1), (x2, y2) = box
    pad_x = (mul - 1) / 2 * (x2 - x1)
    pad_y = (mul - 1) / 2 * (y2 - y1)
    left = int(max(x1 - pad_x, 0))
    top = int(max(y1 - pad_y, 0))
    right = int(min(x2 + pad_x, image_width))
    bottom = int(min(y2 + pad_y, image_height))
    return [(left, top), (right, bottom)]
```
%% Cell type:code id: tags:
``` python
# Derive a per-image working folder from the URL's file name and fetch the
# original image into it.
parsed_url = urllib.parse.urlparse(img_url)
file_name = os.path.basename(parsed_url.path).replace('/', '_')
folder_path = image_root_folder + "/" + file_name
os.makedirs(folder_path, exist_ok=True)
original_img = folder_path + "/" + "original.jpg"
download_image(img_url, original_img)
```
%% Output
이미지 다운로드 완료
%% Cell type:code id: tags:
``` python
# Load the source image.
# The `original` array must never be modified; later steps work on copies.
original = cv2.imread(original_img)
original_height, original_width = original.shape[:2]
```
%% Cell type:code id: tags:
``` python
# Build a segmentation mask with a U-Net exported to ONNX.
#-------------------------------------------------
# Load the model (expects unet.onnx next to the notebook).
model = onnxruntime.InferenceSession('unet.onnx')
# Preprocess: the network takes a 1x3x320x320 float32 tensor in [0, 1].
mask = cv2.resize(original, (320, 320))
mask = mask.transpose((2, 0, 1)) # HWC -> CHW channel order
mask = mask.astype(np.float32) / 255.0 # normalize to [0, 1]
mask = np.expand_dims(mask, axis=0) # add batch dimension
# Inference
input_name = model.get_inputs()[0].name
output_name = model.get_outputs()[0].name
mask = model.run([output_name], {input_name: mask})[0]
# Postprocess
mask = mask[0, 0, :, :] # drop batch and channel dimensions
mask = cv2.resize(mask, (original_width, original_height)) # restore original size; this mask is used to pick the extension region
mask = (mask > 0.5).astype(np.uint8) * 255 # binarize
cv2.imwrite(folder_path+"/"+'mask.png', mask)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Measure the subject's extent in the image from the segmentation mask:
# merge all contour points and take their bounding rectangle.
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
object_x, object_y, object_w, object_h = cv2.boundingRect(np.concatenate(contours))
# (x, y) is the top-left corner; w/h are width and height.
print(object_x, object_y, object_w, object_h)
```
%% Output
95 0 1117 661
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
# Place the image on a transparent 1024x1024 canvas for outpainting, scaled so
# the subject's longer side occupies `ratio` pixels of the canvas.
# Fix: `cv2.imread` yields BGR, so the accurate constant is COLOR_BGR2BGRA
# (numerically the same OpenCV code as the previous COLOR_RGB2RGBA, so the
# output is unchanged) — now consistent with the feathering cell below.
outpainting = cv2.cvtColor(original, cv2.COLOR_BGR2BGRA)
print(original.shape, outpainting.shape)
# Longer side of the detected object.
longer = max(object_w, object_h)
print(longer)
# Scale so the object's longer side takes `ratio` (550) px of the 1024 canvas.
new_width, new_height = int(outpainting.shape[1]*ratio/longer), int(outpainting.shape[0]*ratio/longer)
outpainting = cv2.resize(outpainting, (new_width, new_height))
print(original.shape, outpainting.shape)
# Center the scaled image on a fully transparent canvas; the transparent
# margin is what Dall-E will fill in.
base_image = np.zeros((1024, 1024, 4), dtype=np.uint8)
x_offset = int((base_image.shape[1] - outpainting.shape[1]) / 2)
y_offset = int((base_image.shape[0] - outpainting.shape[0]) / 2)
base_image[y_offset:y_offset + outpainting.shape[0], x_offset:x_offset + outpainting.shape[1]] = outpainting
outpainting = base_image
cv2.imwrite(folder_path+"/"+'outpainting.png', outpainting)
```
%% Output
(730, 1261, 3) (730, 1261, 4)
1117
(730, 1261, 3) (359, 620, 4)
True
%% Cell type:code id: tags:
``` python
########################
# Outpaint with Dall-E image edit (transparent pixels are the fill region).
# openai / os / load_dotenv are already imported at the top of the notebook,
# so the duplicate in-cell imports were removed.
load_dotenv()
openai.organization = os.getenv("ORG_ID")
openai.api_key = os.getenv("API_KEY")
# Fix: the file handle passed to create_edit was never closed; use `with`.
with open(folder_path + "/" + "outpainting.png", "rb") as png_file:
    outpainted = openai.Image.create_edit(
        image=png_file,
        prompt="photo of fashion model",
        n=1,
        size="1024x1024",
    )
```
%% Cell type:code id: tags:
``` python
# Save the Dall-E result.
# Fix: the original fetched the URL twice (once via requests.get, once via
# download_image); download once and read the bytes back from disk.
image_url = outpainted.data[0]['url']
download_image(image_url, folder_path + "/" + "Dall_E2.png")
with open(folder_path + "/" + "Dall_E2.png", "rb") as f:
    image_bytes = f.read()
```
%% Output
이미지 다운로드 완료
%% Cell type:code id: tags:
``` python
# Feather the border of the original image: build an alpha ramp so the
# original blends smoothly into the outpainting when composited.
feather = cv2.cvtColor(original, cv2.COLOR_BGR2BGRA)
# Border thickness: 5% of the longer image side.
if original_height >= original_width:
    border_size = int(0.05 * original_height)
else:
    border_size = int(0.05 * original_width)
# Linear alpha ramp along all four edges (transparent at the rim, opaque inward).
# NOTE(review): at i == 0, `-i` is 0, so `feather[-i]` / `feather[:, -i]`
# alias row/column 0 — the outermost bottom row and right column are handled
# one step off. Confirm whether this off-by-one is intended.
for i in range(border_size):
    feather[ i,:, 3] = int(255* i/border_size)
    feather[-i,:, 3] = int(255* i/border_size)
    feather[:, i, 3] = int(255* i/border_size)
    feather[:,-i, 3] = int(255* i/border_size)
# Zero the four corner squares; they are refilled below with radial ramps.
feather[:border_size , :border_size, 3] = 0
feather[:border_size , -border_size:, 3] = 0
feather[-border_size:, -border_size:, 3] = 0
feather[-border_size:, :border_size, 3] = 0
# Radial alpha ramp per corner, one quarter-circle arc each:
# 0-90 deg -> bottom-right, 90-180 -> bottom-left,
# 180-270 -> top-left, 270-360 -> top-right.
for radius in range(0, border_size):
    for angle in range(0, 90 + 1):
        radian = np.deg2rad(angle)
        x = int(original.shape[1]- border_size + radius * np.cos(radian))
        y = int(original.shape[0]- border_size + radius * np.sin(radian))
        feather[y, x][3] = int(255 - 255* radius/border_size)
    for angle in range(90, 180 + 1):
        radian = np.deg2rad(angle)
        x = int(border_size + radius * np.cos(radian))
        y = int(original.shape[0]- border_size + radius * np.sin(radian))
        feather[y, x][3] = int(255 - 255* radius/border_size)
    for angle in range(180, 270 + 1):
        radian = np.deg2rad(angle)
        x = int(border_size + radius * np.cos(radian))
        y = int(border_size + radius * np.sin(radian))
        feather[y, x][3] = int(255-255* radius/border_size)
    for angle in range(270, 360 + 1):
        radian = np.deg2rad(angle)
        x = int(original.shape[1] - border_size + radius * np.cos(radian))
        y = int(border_size + radius * np.sin(radian))
        feather[y, x][3] = int(255 - 255* radius/border_size)
cv2.imwrite(folder_path+"/"+'feather.png', feather)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Overlay the feathered original on top of the upscaled outpainting.
# The Dall-E result is scaled back up so the subject regains its source size.
result = cv2.imread(folder_path + "/" + 'Dall_E2.png')
new_length = int(1024 * longer / ratio)
result = cv2.resize(result, (new_length, new_length))
cv2.imwrite(folder_path + "/" + "alpha_compistion.png", result)
# Offsets that center the original inside the upscaled square.
x_offset = int((new_length - original_width) / 2)
y_offset = int((new_length - original_height) / 2)
subprocess.run([
    "./magick.appimage", "composite",
    "-geometry", f"+{x_offset}+{y_offset}",
    folder_path + "/" + "feather.png",
    folder_path + "/" + "alpha_compistion.png",
    folder_path + "/" + "alpha_compistion.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'composite', '-geometry', '+409+674', 'sources/home_boots.jpg/feather.png', 'sources/home_boots.jpg/alpha_compistion.png', 'sources/home_boots.jpg/alpha_compistion.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Remove the outpainted margin on any side where the subject touched the
# original image border (the body was already cut off there, so extending
# it makes no sense).
os.rename(folder_path + "/" + "alpha_compistion.png", folder_path + "/" + "result.png")
result_png = folder_path + "/" + "result.png"
object_move_x = x_offset
object_move_y = y_offset
if object_x == 0:
    subprocess.run(["./magick.appimage", "convert", result_png, "-gravity", "west", "-chop", f"{x_offset}x0", result_png])
    object_move_x = 0
if object_y == 0:
    subprocess.run(["./magick.appimage", "convert", result_png, "-gravity", "north", "-chop", f"0x{y_offset}", result_png])
    object_move_y = 0
if object_x + object_w == original_width:
    subprocess.run(["./magick.appimage", "convert", result_png, "-gravity", "east", "-chop", f"{x_offset}x0", result_png])
if object_y + object_h == original_height:
    subprocess.run(["./magick.appimage", "convert", result_png, "-gravity", "south", "-chop", f"0x{y_offset}", result_png])
```
%% Cell type:code id: tags:
``` python
# Re-run object detection on the original and pick the clothing object,
# converting its normalized vertices to pixel coordinates.
objects = localize_objects(folder_path + "/" + "original.jpg")
print(objects)
for object_ in objects:
    if object_.name == clothes_type:
        break
else:
    # Fix: the original loop silently fell through and used the *last*
    # detected object when nothing matched `clothes_type`; fail loudly.
    raise ValueError(f"no '{clothes_type}' object detected in the image")
clothes_xy = object_.bounding_poly.normalized_vertices
# Vertex 0 is the top-left and vertex 2 the bottom-right of the polygon.
clothes_x = int(clothes_xy[0].x * original_width)
clothes_y = int(clothes_xy[0].y * original_height)
clothes_w = int(clothes_xy[2].x * original_width - clothes_xy[0].x * original_width)
clothes_h = int(clothes_xy[2].y * original_height - clothes_xy[0].y * original_height)
clothes_left_top = (clothes_x, clothes_y)
clothes_right_bottom = (clothes_x + clothes_w, clothes_y + clothes_h)
```
%% Output
Number of objects found: 3
Shoe (confidence: 0.9475909471511841)
Normalized bounding polygon vertices:
- (0.06843020766973495, 0.29012230038642883)
- (0.44998350739479065, 0.29012230038642883)
- (0.44998350739479065, 0.8144310712814331)
- (0.06843020766973495, 0.8144310712814331)
Shoe (confidence: 0.9235299825668335)
Normalized bounding polygon vertices:
- (0.3926856517791748, 0.19473238289356232)
- (0.9631771445274353, 0.19473238289356232)
- (0.9631771445274353, 0.9090790748596191)
- (0.3926856517791748, 0.9090790748596191)
Pants (confidence: 0.6304260492324829)
Normalized bounding polygon vertices:
- (0.09170857071876526, 0.0)
- (0.5339842438697815, 0.0)
- (0.5339842438697815, 0.3711642026901245)
- (0.09170857071876526, 0.3711642026901245)
[mid: "/m/06rrc"
name: "Shoe"
score: 0.947590947
bounding_poly {
normalized_vertices {
x: 0.0684302077
y: 0.2901223
}
normalized_vertices {
x: 0.449983507
y: 0.2901223
}
normalized_vertices {
x: 0.449983507
y: 0.814431071
}
normalized_vertices {
x: 0.0684302077
y: 0.814431071
}
}
, mid: "/m/06rrc"
name: "Shoe"
score: 0.92353
bounding_poly {
normalized_vertices {
x: 0.392685652
y: 0.194732383
}
normalized_vertices {
x: 0.963177145
y: 0.194732383
}
normalized_vertices {
x: 0.963177145
y: 0.909079075
}
normalized_vertices {
x: 0.392685652
y: 0.909079075
}
}
, mid: "/m/07mhn"
name: "Pants"
score: 0.630426049
bounding_poly {
normalized_vertices {
x: 0.0917085707
}
normalized_vertices {
x: 0.533984244
}
normalized_vertices {
x: 0.533984244
y: 0.371164203
}
normalized_vertices {
x: 0.0917085707
y: 0.371164203
}
}
]
%% Cell type:code id: tags:
``` python
# Inspection cell: in a notebook only the last expression is displayed,
# so this shows `clothes_type` ('Shoe'); the `object_` line has no effect.
object_
clothes_type
```
%% Output
'Shoe'
%% Cell type:code id: tags:
``` python
# Detect a face with RetinaFace.
from retinaface import RetinaFace
resp = RetinaFace.detect_faces(folder_path+"/"+'original.jpg') # downloads the model on first run
if "face_1" in resp:
    # A face was detected: take its bounding box.
    # NOTE(review): facial_area appears to be [x1, y1, x2, y2] — confirm
    # against the retinaface package documentation.
    face_location = resp["face_1"]["facial_area"]
    face_left_top = (face_location[0], face_location[1])
    face_right_bottom = (face_location[2], face_location[3])
    # handling for the case where a face was detected
else:
    # No face detected: fall back to a degenerate box anchored at the
    # clothes' top-left corner with (0, 0) as the opposite corner.
    face_left_top = (clothes_x, clothes_y)
    face_right_bottom = (0, 0)
```
%% Cell type:code id: tags:
``` python
# Debug visualization: draw the detected face box on a copy of the original.
face_preview = original.copy()
cv2.rectangle(face_preview, face_left_top, face_right_bottom, (0, 255, 0), 2)
cv2.imwrite(folder_path + "/" + 'face_bounding_box.jpg', face_preview)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Debug visualization: draw the clothes box on a copy of the original.
clothes_preview = original.copy()
cv2.rectangle(clothes_preview, clothes_left_top, clothes_right_bottom, (0, 255, 0), 2)
cv2.imwrite(folder_path + "/" + 'clothes_bounding_box.jpg', clothes_preview)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Enlarge the face box by 1.8x, then compute the minimal rectangle that
# covers both the clothes box and the enlarged face box.
new_face_box_xy = resize_box([face_left_top, face_right_bottom], original_width, original_height, 1.8)
smallest_rectangle = find_smallest_rectangle(
    [[clothes_left_top, clothes_right_bottom], new_face_box_xy]
)
```
%% Cell type:code id: tags:
``` python
# Debug visualization: the enlarged face box.
new_face_box = original.copy()
cv2.rectangle(new_face_box, new_face_box_xy[0], new_face_box_xy[1], (0, 255, 0), 2)
cv2.imwrite(folder_path + "/" + 'new_face_box.jpg', new_face_box)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Debug visualization: the combined face+clothes rectangle.
smallest_bounding_box = original.copy()
cv2.rectangle(smallest_bounding_box, smallest_rectangle[0], smallest_rectangle[1], (0, 255, 0), 2)
cv2.imwrite(folder_path + "/" + 'smallest_bounding_box.jpg', smallest_bounding_box)
```
%% Output
True
%% Cell type:code id: tags:
``` python
# Final crop regions, each padded by 10% (5% per side):
# one without the face area, one including it.
onlyClothes_xy = resize_box([clothes_left_top, clothes_right_bottom], original_width, original_height, 1.1)
withClothes_xy = resize_box(smallest_rectangle, original_width, original_height, 1.1)
```
%% Cell type:code id: tags:
``` python
# Show both crop rectangles for comparison (one per line, as before).
print(onlyClothes_xy, withClothes_xy, sep="\n")
```
%% Output
[(61, 191), (591, 612)]
[(61, 191), (591, 612)]
%% Cell type:code id: tags:
``` python
# Crop the clothes-only region from the final composited image.
only_w = onlyClothes_xy[1][0] - onlyClothes_xy[0][0]
only_h = onlyClothes_xy[1][1] - onlyClothes_xy[0][1]
only_x = onlyClothes_xy[0][0] + object_move_x
only_y = onlyClothes_xy[0][1] + object_move_y
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"{only_w}x{only_h}+{only_x}+{only_y}",
    folder_path + "/" + "result_onlyClothes.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', '530x421+470+191', 'sources/home_boots.jpg/result_onlyClothes.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Crop the clothes+face region from the final composited image.
with_w = withClothes_xy[1][0] - withClothes_xy[0][0]
with_h = withClothes_xy[1][1] - withClothes_xy[0][1]
with_x = withClothes_xy[0][0] + object_move_x
with_y = withClothes_xy[0][1] + object_move_y
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"{with_w}x{with_h}+{with_x}+{with_y}",
    folder_path + "/" + "result_withClothes.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', '530x421+470+191', 'sources/home_boots.jpg/result_withClothes.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Full-width horizontal strip covering only the clothes rows (no face).
strip_h = onlyClothes_xy[1][1] - onlyClothes_xy[0][1]
strip_y = onlyClothes_xy[0][1] + object_move_y
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"x{strip_h}+0+{strip_y}",
    folder_path + "/" + "result_panorama_noFace.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', 'x421+0+191', 'sources/home_boots.jpg/result_panorama_noFace.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Full-width horizontal strip covering the clothes and face rows.
band_h = withClothes_xy[1][1] - withClothes_xy[0][1]
band_y = withClothes_xy[0][1] + object_move_y
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"x{band_h}+0+{band_y}",
    folder_path + "/" + "result_panorama_withFace.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', 'x421+0+191', 'sources/home_boots.jpg/result_panorama_withFace.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Full-height vertical strip covering the clothes and face columns.
col_w = withClothes_xy[1][0] - withClothes_xy[0][0]
col_x = withClothes_xy[0][0] + object_move_x
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"{col_w}x+{col_x}+0",
    folder_path + "/" + "result_vertical_withFace.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', '530x+470+0', 'sources/home_boots.jpg/result_vertical_withFace.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Full-height vertical strip covering only the clothes columns.
slice_w = onlyClothes_xy[1][0] - onlyClothes_xy[0][0]
slice_x = onlyClothes_xy[0][0] + object_move_x
subprocess.run([
    "./magick.appimage", "convert",
    folder_path + "/" + "result.png",
    "-crop", f"{slice_w}x+{slice_x}+0",
    folder_path + "/" + "result_vertical_noFace.png",
])
```
%% Output
CompletedProcess(args=['./magick.appimage', 'convert', 'sources/home_boots.jpg/result.png', '-crop', '530x+470+0', 'sources/home_boots.jpg/result_vertical_noFace.png'], returncode=0)
%% Cell type:code id: tags:
``` python
# Persist the object (face/clothes) coordinates to MongoDB.
client = MongoClient('mongodb://localhost:27017')
db = client['fashionImageTest']
collection = db['clothesData']
data = {
    "name": product_name,
    "image URL": img_url,
    "file directory": os.path.abspath(folder_path + "/" + "result.png"),
    # Fix: this field was hard-coded to "top" even though this run processes
    # shoes (clothes_type == "Shoe"); store the actual detected type.
    "clothes type": clothes_type,
    "clothes_xy": [clothes_left_top, clothes_right_bottom],
}
collection.insert_one(data)
```
%% Output
---------------------------------------------------------------------------
ServerSelectionTimeoutError Traceback (most recent call last)
Cell In[178], line 14
6 collection = db['clothesData']
7 data = {
8 "name": product_name,
9 "image URL": img_url,
(...)
12 "clothes_xy": [clothes_left_top, clothes_right_bottom]
13 }
---> 14 collection.insert_one(data)
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/collection.py:628, in Collection.insert_one(self, document, bypass_document_validation, session, comment)
624 document["_id"] = ObjectId() # type: ignore[index]
626 write_concern = self._write_concern_for(session)
627 return InsertOneResult(
--> 628 self._insert_one(
629 document,
630 ordered=True,
631 write_concern=write_concern,
632 op_id=None,
633 bypass_doc_val=bypass_document_validation,
634 session=session,
635 comment=comment,
636 ),
637 write_concern.acknowledged,
638 )
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/collection.py:569, in Collection._insert_one(self, doc, ordered, write_concern, op_id, bypass_doc_val, session, comment)
557 result = sock_info.command(
558 self.__database.name,
559 command,
(...)
564 retryable_write=retryable_write,
565 )
567 _check_write_command_response(result)
--> 569 self.__database.client._retryable_write(acknowledged, _insert_command, session)
571 if not isinstance(doc, RawBSONDocument):
572 return doc.get("_id")
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/mongo_client.py:1475, in MongoClient._retryable_write(self, retryable, func, session)
1473 def _retryable_write(self, retryable, func, session):
1474 """Internal retryable write helper."""
-> 1475 with self._tmp_session(session) as s:
1476 return self._retry_with_session(retryable, func, s, None)
File ~/.conda/envs/autoImage/lib/python3.10/contextlib.py:135, in _GeneratorContextManager.__enter__(self)
133 del self.args, self.kwds, self.func
134 try:
--> 135 return next(self.gen)
136 except StopIteration:
137 raise RuntimeError("generator didn't yield") from None
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/mongo_client.py:1757, in MongoClient._tmp_session(self, session, close)
1754 yield session
1755 return
-> 1757 s = self._ensure_session(session)
1758 if s:
1759 try:
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/mongo_client.py:1740, in MongoClient._ensure_session(self, session)
1735 return session
1737 try:
1738 # Don't make implicit sessions causally consistent. Applications
1739 # should always opt-in.
-> 1740 return self.__start_session(True, causal_consistency=False)
1741 except (ConfigurationError, InvalidOperation):
1742 # Sessions not supported.
1743 return None
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/mongo_client.py:1685, in MongoClient.__start_session(self, implicit, **kwargs)
1682 def __start_session(self, implicit, **kwargs):
1683 # Raises ConfigurationError if sessions are not supported.
1684 if implicit:
-> 1685 self._topology._check_implicit_session_support()
1686 server_session = _EmptyServerSession()
1687 else:
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/topology.py:538, in Topology._check_implicit_session_support(self)
536 def _check_implicit_session_support(self):
537 with self._lock:
--> 538 self._check_session_support()
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/topology.py:554, in Topology._check_session_support(self)
550 self._select_servers_loop(
551 any_server_selector, self.get_server_selection_timeout(), None
552 )
553 elif not self._description.readable_servers:
--> 554 self._select_servers_loop(
555 readable_server_selector, self.get_server_selection_timeout(), None
556 )
558 session_timeout = self._description.logical_session_timeout_minutes
559 if session_timeout is None:
File ~/.conda/envs/autoImage/lib/python3.10/site-packages/pymongo/topology.py:238, in Topology._select_servers_loop(self, selector, timeout, address)
235 while not server_descriptions:
236 # No suitable servers.
237 if timeout == 0 or now > end_time:
--> 238 raise ServerSelectionTimeoutError(
239 "%s, Timeout: %ss, Topology Description: %r"
240 % (self._error_message(selector), timeout, self.description)
241 )
243 self._ensure_opened()
244 self._request_check_all()
ServerSelectionTimeoutError: localhost:27017: [Errno 111] Connection refused, Timeout: 30s, Topology Description: <TopologyDescription id: 6477869b12de238d683960a7, topology_type: Unknown, servers: [<ServerDescription ('localhost', 27017) server_type: Unknown, rtt: None, error=AutoReconnect('localhost:27017: [Errno 111] Connection refused')>]>
%% Cell type:code id: tags:
``` python
# TODO notes (translated from Korean):
# - Feed the image into Photoshop to separate the background
# - Person
# - Margin, include head (T/F), background color, object only (T/F), ratio
# - Defaults (base options) need to be defined -> important
# - Write the JSON request spec in an Excel file
# - Consider close-up options: https://tips.clip-studio.com/ko-kr/articles/4303#ee7cf6ea
# - Think through the scenarios first -> then write the options
```
%% Cell type:code id: tags:
``` python
# Open question (translated from Korean): if the clothing type is a top and
# two or more tops appear in the image, how should that be handled?
#
```
%% Cell type:markdown id: tags:
이미지 경로 / 얼굴 위치 (별도 모델) / 사물 위치 / 상품 정보 1 / 상품 정보 2 / 상품 정보 3 /
......
onnxruntime
opencv-python
google-cloud-vision
python-dotenv
openai
pymongo
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment