Skip to content
Snippets Groups Projects
Commit 1817f9a6 authored by parkminwoo's avatar parkminwoo
Browse files

Upload New File

parent 4e686017
No related branches found
No related tags found
No related merge requests found
%% Cell type:code id: tags:
``` python
import cv2
import numpy as np
```
%% Cell type:code id: tags:
``` python
#############################################################
# Weight file : the trained model (yolov3.weights)
# Cfg file    : network configuration with all algorithm settings
# Names file  : names of the object classes the network can detect
#############################################################
# Load the YOLO network.
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")

# Class names, one per line in coco.names.
with open("./coco.names", "r") as f:
    classes = [line.strip() for line in f]

layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns Nx1 arrays on OpenCV < 4.5.4 and a
# flat 1-D array on newer versions; ravel() handles both shapes.
# The returned layer indices are 1-based, hence the -1.
output_layers = [
    layer_names[i - 1] for i in np.asarray(net.getUnconnectedOutLayers()).ravel()
]
# One random BGR color per class, used when drawing boxes later.
colors = np.random.uniform(0, 255, size=(len(classes), 3))
```
%% Cell type:code id: tags:
``` python
# Load the test image and shrink it to 40% in both dimensions.
img = cv2.imread("sample2.jpg")
if img is None:
    # imread returns None (instead of raising) when the file is missing
    # or unreadable; fail early with a clear message rather than letting
    # cv2.resize crash on a None input.
    raise FileNotFoundError("Could not read image: sample2.jpg")
img = cv2.resize(img, None, fx=0.4, fy=0.4)
height, width, channels = img.shape
```
%% Cell type:code id: tags:
``` python
##################################################
# The three input sizes YOLO accepts:
# 320 x 320 : small and less accurate, but fast
# 609 x 609 : more accurate, but slow
# 416 x 416 : the middle ground
##################################################
# Run the forward pass over the output layers to detect objects.
input_size = (416, 416)
pixel_scale = 0.00392  # ~1/255: normalizes pixel values into [0, 1]
blob = cv2.dnn.blobFromImage(img, pixel_scale, input_size, (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
```
%% Cell type:code id: tags:
``` python
###########################################################
# A detection counts as a real object only when its confidence
# exceeds 0.5. The threshold runs from 0 to 1: closer to 1 gives
# higher precision but fewer detections; closer to 0 gives more
# detections at lower precision.
###########################################################
# Gather the box, confidence and class id of every strong detection.
class_ids = []
confidences = []
boxes = []
for layer_out in outs:
    for det in layer_out:
        # Elements 5+ are the per-class scores; take the best one.
        class_scores = det[5:]
        best_class = np.argmax(class_scores)
        best_score = class_scores[best_class]
        if best_score <= 0.5:
            continue
        # det[0:4] are center x/y and box w/h, normalized to [0, 1];
        # scale them back to image pixels.
        cx = int(det[0] * width)
        cy = int(det[1] * height)
        w = int(det[2] * width)
        h = int(det[3] * height)
        # Convert the center point to the top-left corner.
        x = int(cx - w / 2)
        y = int(cy - h / 2)
        boxes.append([x, y, w, h])
        confidences.append(float(best_score))
        class_ids.append(best_class)
```
%% Cell type:code id: tags:
``` python
#####################################################
# Non-maximum suppression: removes the many overlapping
# boxes drawn around the same object, keeping only the
# strongest one — effectively noise removal.
# Args: score threshold 0.5, IoU (overlap) threshold 0.4.
####################################################
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
```
%% Cell type:code id: tags:
``` python
#######################################################
# Box        : rectangle coordinates around the detected object
# Label      : name of the detected object
# Confidence : detection confidence between 0 and 1
########################################################
# Draw the boxes that survived non-maximum suppression, then show the image.
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
    if i in indexes:
        x, y, w, h = boxes[i]
        label = str(classes[class_ids[i]])
        # Color by CLASS id, not by box index i: `colors` has only
        # len(classes) entries, so indexing by i raises IndexError once
        # there are more boxes than classes, and it would also give the
        # same class a different color in every box.
        color = colors[class_ids[i]]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
%% Cell type:code id: tags:
``` python
```
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment