diff --git a/.gitignore b/.gitignore
index 2321d1aba46e7476a9611289e85fb43c208a7dd8..81483ab73e66e6b94b4ee95947a285a9873796bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,4 @@ config.json
 *.jpg
 test.py
 ssfshop/
-image_processing_app/etc/secrets/google_key.json
\ No newline at end of file
+image_processing_app/etc/secrets/
diff --git a/image_processing_app/.dockerignore b/image_processing_app/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..a045302b242888d2de0f0caafffb18fd1af020aa
--- /dev/null
+++ b/image_processing_app/.dockerignore
@@ -0,0 +1,5 @@
+.env
+*.png
+*.jpg
+test.py
+etc/secrets/
diff --git a/image_processing_app/Dockerfile b/image_processing_app/Dockerfile
index 945b6a1a00863911218dd18347e1dc27bdaa6440..f91b83affa1e68074c7adba02bafd366e1e171dc 100644
--- a/image_processing_app/Dockerfile
+++ b/image_processing_app/Dockerfile
@@ -1,5 +1,5 @@
 # 기본 이미지 설정
-FROM python:3.9-slim-buster
+FROM python:3.11.4-slim-bullseye
 
 # 작업 디렉토리 설정
 WORKDIR /app
@@ -7,10 +7,14 @@ WORKDIR /app
 # 의존성 설치
 COPY requirements.txt requirements.txt
 RUN pip install -r requirements.txt
-
+RUN apt-get update && apt-get install -y \
+    libgl1-mesa-glx \
+    libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
 # 소스코드 복사
 COPY . .
 
 WORKDIR /app/src
 # Flask 명령으로 앱 실행
-CMD [ "python", "-m" , "app", "run", "--host=0.0.0.0"]
\ No newline at end of file
+CMD [ "python", "app.py"]
diff --git a/image_processing_app/README.md b/image_processing_app/README.md
index 5f09de1b1f89710867f7a2ee8aabbd575907ab3c..4ccfe9de02611db6c36cf303ebe1b1bed8366233 100644
--- a/image_processing_app/README.md
+++ b/image_processing_app/README.md
@@ -1 +1,51 @@
-# 이미지 자동 처리
\ No newline at end of file
+# 이미지 프로세싱 앱
+# 소개
+이미지 자동 Crop 앱에 사용될 이미지를 처리하고 객체 인식 데이터를 제공하는 로컬 서버입니다.
+![Flask App 모식도](./docs/images/flask.png)
+
+할 줄 아는 것이 Python 뿐이었던 저는 Python으로 이미지 처리 개발을 시작했습니다. 그런데 서버를 Spring으로 구축해야 하는 상황이 오고 만 것입니다. Java로 모조리 새로 구현하기는 싫었던 저는, 기존 코드를 기반으로 로컬 서버를 구축하고, Spring 서버가 API를 통해 이것과 통신을 하도록 했습니다. 이런 것을 마이크로 서비스 아키텍처(Micro Service Architecture)라고 하더군요.  
+
+아무튼, 해당 서비스는 이미지 자동 Crop 앱을 위한 로컬 마이크로 서비스입니다.
+
+## Requirements
+작업 디렉토리를 `image_processing_app`로 변경하고 아래 명령어를 실행합니다. 
+```bash
+pip install -r requirements.txt
+```
+## 실행 방법
+작업 디렉토리를 `image_processing_app/src`로 변경하고 아래 명령어를 실행합니다. 하지만 실행에 앞서 .env를 통해 환경 변수를 등록해야 합니다. 아래를 참고하세요.
+```Bash
+python app.py
+```
+## Docker를 이용한 실행
+Docker를 이용하면, 별도의 패키지 설치나 작업 디렉토리 변경없이 서버를 실행할 수 있습니다. 하지만 실행에 앞서 .env를 통해 환경 변수를 등록해야 합니다. 아래를 참고하세요.
+```bash
+docker pull henry914/image-processing-flask-app:0.1
+docker run -p 5000:5000 --env-file .env henry914/image-processing-flask-app:0.1
+```
+
+## 환경 변수 설정법
+`.env` 파일을 생성하고 OpenAI API key와 Google 인증 파일의 경로를 입력해야 합니다.
+```
+DallE_API_KEY=<OpenAI에서 발급받은 Dall-E API key>
+GOOGLE_APPLICATION_CREDENTIALS=../etc/secrets/google_key.json
+```
+`.env` 파일을 생성했다면 다음의 과정에 따라 배치합니다.
+### `python app.py`을 통해 실행하는 경우
+작업 디렉토리 `image_processing_app/src`에 `.env` 파일을 배치합니다.
+### Docker 이미지를 pull 해서 실행하는 경우
+`docker run`을 실행하는 작업 디렉토리에 `.env` 파일을 배치합니다.
+
+## Google API키 등록 방법
+위의 과정을 전부 따라도 FaceDetection API를 사용하는 것은 불가능합니다. Google API service key를 발급받고, 이를 배치해야 하기 때문입니다. `https://console.cloud.google.com/`에서 서비스를 등록하고, service key를 발급받을 수 있습니다.
+### `python app.py`을 통해 실행하는 경우
+다음 명령어를 실행해줍니다.
+```
+export GOOGLE_APPLICATION_CREDENTIALS=../etc/secrets/google_key.json
+```
+그리고 `image_processing_app/etc/secrets` 경로를 만들고, service key를 배치합니다.
+### Docker 이미지를 pull 해서 실행하는 경우
+`docker run`을 실행할 때, `-v` 옵션을 통해 호스트의 파일을 Docker 컨테이너에 전달하면 됩니다.
+```
+docker run -p 5000:5000 -v /host/(호스트의 service key 위치):/container/app/etc/secrets/google_key.json --env-file .env henry914/image-processing-flask-app:0.1
+```
\ No newline at end of file
diff --git a/image_processing_app/requirements.txt b/image_processing_app/requirements.txt
index 5ae32b3b938e8136466f128e118ff9810290a0b8..cc7ce92f1767fb6c9e300f967a76bc0b91f9e745 100644
--- a/image_processing_app/requirements.txt
+++ b/image_processing_app/requirements.txt
@@ -1,6 +1,7 @@
 onnxruntime
 opencv-python
 google-cloud-vision
-python-dotenv
 openai
-retina-face
\ No newline at end of file
+retina-face
+flask
+python-dotenv
\ No newline at end of file
diff --git a/image_processing_app/src/app.py b/image_processing_app/src/app.py
index f84104e31ccfc479ff2d853823c5b830b894c1bf..3d49e67d0b24ed27002d401062d2a5ca71604ab9 100644
--- a/image_processing_app/src/app.py
+++ b/image_processing_app/src/app.py
@@ -5,4 +5,4 @@ app = Flask(__name__)
 app = create_router(app)
 
 if __name__ == '__main__':
-    app.run()
+    app.run(host='0.0.0.0')
diff --git a/image_processing_app/src/config.py b/image_processing_app/src/config.py
index 05894cfa4581e823660eec2fdd324118247af0d2..fdae0c2a447345de9b17b13c1fbd35a2dab0d1c8 100644
--- a/image_processing_app/src/config.py
+++ b/image_processing_app/src/config.py
@@ -1,12 +1,6 @@
-import json
+from dotenv import load_dotenv
+import os
 
+load_dotenv()
 
-def load_config(file_path):
-    with open(file_path, 'r') as file:
-        config_data = json.load(file)
-    return config_data
-
-config = load_config('../conf/config.json')
-
-unet_model_location = config["unet_model_location"]
-dallE_api_key = config["dallE_api_key"]
\ No newline at end of file
+dallE_api_key = os.getenv('DallE_API_KEY')
\ No newline at end of file
diff --git a/image_processing_app/src/controllers/image_controller.py b/image_processing_app/src/controllers/image_controller.py
index 6d318e8782309aa48bbe618b98eda6dbea8239d0..b22a845e5bc7071ffd4f4144310d9e962d75980d 100644
--- a/image_processing_app/src/controllers/image_controller.py
+++ b/image_processing_app/src/controllers/image_controller.py
@@ -2,6 +2,7 @@ from flask import Blueprint, request, jsonify
 
 from services.aggregated_services.image_processing_service import ImageProcessingService
 from utils.image.image_converter import ImageConverter
+from utils.image.image_encoder import ImageEncoder
 
 image_controller  = Blueprint('image', __name__)
 
@@ -12,6 +13,10 @@ def process_image():
     chop_image = request.args.get('chop_image', 'True').lower() == 'true'
     outpainting_size = int(request.args.get('outpainting_size', 1024))
     removed_border_pixel = int(request.args.get('removed_border_pixel', 2))
+    
     image_bytes = request.files['image_data'].read()
     np_image = ImageConverter().convert_to_np_image(image_bytes)
-    processed_np_image, x_offset, y_offset = ImageProcessingService().process(np_image, ratio, recover_size, chop_image, outpainting_size, removed_border_pixel)
\ No newline at end of file
+    result = ImageProcessingService().process(np_image, ratio, recover_size, chop_image, outpainting_size, removed_border_pixel)
+
+    result["result_image"]["image_data"] = ImageEncoder().encode_np_image_base64(result["result_image"]["image_data"])
+    return jsonify(result)
\ No newline at end of file
diff --git a/image_processing_app/src/models/mask.py b/image_processing_app/src/models/mask.py
index cf773fcae68c67afc6e2e2a3c2e1b469f2d2c750..3bf849cf3c0a401900aa5125d6ebfe6cf0167ba5 100644
--- a/image_processing_app/src/models/mask.py
+++ b/image_processing_app/src/models/mask.py
@@ -53,4 +53,11 @@ class Mask(Image):
             self.touch_border_bottom = True
         else:
             self.touch_border_bottom = False
-            
\ No newline at end of file
+            
+    def relocate_smallest_box(self, reduction_ratio, x_offset, y_offset):
+        self.smallest_box_x = int(reduction_ratio * self.smallest_box_x)
+        self.smallest_box_y = int(reduction_ratio * self.smallest_box_y)
+        self.smallest_box_width = int(reduction_ratio * self.smallest_box_width)
+        self.smallest_box_height = int(reduction_ratio * self.smallest_box_height)
+        self.smallest_box_x += x_offset
+        self.smallest_box_y += y_offset
\ No newline at end of file
diff --git a/image_processing_app/src/services/aggregated_services/image_processing_service.py b/image_processing_app/src/services/aggregated_services/image_processing_service.py
index 62f4ea545122299760bdb4083b3e8fe698c7d124..5dfa4571a9993b4f39d9f37a252c7e1a26a24f2f 100644
--- a/image_processing_app/src/services/aggregated_services/image_processing_service.py
+++ b/image_processing_app/src/services/aggregated_services/image_processing_service.py
@@ -22,16 +22,37 @@ class ImageProcessingService:
             outer_width = compsited_image.outer_image.shape[1]
             outer_height = compsited_image.outer_image.shape[0]
             outpainted_image = ImageResizer().resize(outpainted_image, outer_width, outer_height)
+            reduction_ratio = 1
             x_offset = compsited_image.x_offset
             y_offset = compsited_image.y_offset
             outpainted_image = ImageCompister().composite(outpainted_image, np_image, x_offset, y_offset)
         else:
-            reduction_ratio = outpainting_size * compsited_image.outer_image.shape[0]
+            reduction_ratio = outpainting_size / compsited_image.outer_image.shape[0]
             x_offset = int(compsited_image.x_offset * reduction_ratio)
             y_offset = int(compsited_image.y_offset * reduction_ratio)
         #############################################################
         if chop_image is True:
-            processed_image, x_offset, y_offset = ImageChoppingService().chop_according_to_mask(outpainted_image, mask, x_offset, y_offset)
+            chopped_image, x_offset, y_offset = ImageChoppingService().chop_according_to_mask(outpainted_image, mask, x_offset, y_offset)
+        else:
+            # chop_image=False must still define the image used to build the result below
+            chopped_image = outpainted_image
+
+        mask.relocate_smallest_box(reduction_ratio, x_offset, y_offset)
+
+        result = {
+            "result_image": {
+                "image_data": chopped_image,
+                "width": chopped_image.shape[1],
+                "height": chopped_image.shape[0]
+            },
+            "original_x_offset": x_offset / chopped_image.shape[1],
+            "original_y_offset": y_offset / chopped_image.shape[0],
+            "mask": {
+                "smallest_box_x": mask.smallest_box_x / chopped_image.shape[1],
+                "smallest_box_y": mask.smallest_box_y / chopped_image.shape[0],
+                "smallest_box_width": mask.smallest_box_width / chopped_image.shape[1],
+                "smallest_box_height": mask.smallest_box_height / chopped_image.shape[0]
+            }
+        }
 
-        return processed_image
+        return result
     
\ No newline at end of file
diff --git a/image_processing_app/src/utils/image/image_encoder.py b/image_processing_app/src/utils/image/image_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..763bbecaf9f78a38348b18ee6d804d385153f92d
--- /dev/null
+++ b/image_processing_app/src/utils/image/image_encoder.py
@@ -0,0 +1,9 @@
+import cv2
+import base64
+
+class ImageEncoder:
+    @staticmethod
+    def encode_np_image_base64(image):
+        _, img_encoded = cv2.imencode('.jpg', image)
+        img_string = base64.b64encode(img_encoded).decode('utf-8')
+        return img_string
\ No newline at end of file
diff --git a/image_processing_app/src/utils/masks/mask_detector.py b/image_processing_app/src/utils/masks/mask_detector.py
index b8b59700ebbdbe9d45896470f399b4bee449d637..b2b9fa508760bf373e4987d578af90a7ee9ff0d9 100644
--- a/image_processing_app/src/utils/masks/mask_detector.py
+++ b/image_processing_app/src/utils/masks/mask_detector.py
@@ -2,11 +2,9 @@ import onnxruntime
 import cv2
 import numpy as np
 
-from config import unet_model_location
-
 class MaskDetector:
     def detect_mask(self, np_image):
-        model = onnxruntime.InferenceSession(unet_model_location)
+        model = onnxruntime.InferenceSession("../static/model_weight/unet.onnx")
         mask = cv2.resize(np_image, (320, 320))
         mask = mask.transpose((2, 0, 1))  # 채널 순서 변경
         mask = mask.astype(np.float32) / 255.0  # 정규화
diff --git "a/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/DallE.png" "b/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/DallE.png"
deleted file mode 100644
index 5407ff9cabf5b7bbbb414dbc9328b25ae29db0fb..0000000000000000000000000000000000000000
Binary files "a/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/DallE.png" and /dev/null differ
diff --git "a/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/original.jpg" "b/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/original.jpg"
deleted file mode 100644
index e02410b14d9f31887f9ca3fcc70376e8e08ad230..0000000000000000000000000000000000000000
Binary files "a/ssfshop/beanpole/\353\202\250\353\205\200\352\263\265\354\232\251 \353\262\240\354\235\264\354\247\201 \355\224\274\354\274\200 \355\213\260\354\205\224\354\270\240 - \353\270\224\353\236\231/original.jpg" and /dev/null differ