parkminwoo / ML / Commits / 1817f9a6

Commit 1817f9a6 authored 4 years ago by parkminwoo

Upload New File

parent 4e686017

Showing 1 changed file with 164 additions and 0 deletions

Untitled-checkpoint.ipynb 0 → 100644 +164 −0
View file @ 1817f9a6
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"#############################################################\n",
"#Weight file : 훈련된 model\n",
"#Cfg file : 구성파일. 알고리즘에 관한 모든 설정이 있다.\n",
"#Name files : 알고리즘이 감지할 수 있는 객체의 이름을 포함한다.\n",
"#############################################################\n",
"# Yolo 로드\n",
"net = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3.cfg\")\n",
"classes = []\n",
"with open(\"./coco.names\", \"r\") as f:\n",
" classes = [line.strip() for line in f.readlines()]\n",
"layer_names = net.getLayerNames()\n",
"output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n",
"colors = np.random.uniform(0, 255, size=(len(classes), 3))\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"img = cv2.imread(\"sample2.jpg\")\n",
"img = cv2.resize(img, None, fx=0.4, fy=0.4)\n",
"height, width, channels = img.shape"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"##################################################\n",
"#YOLO 가 허용하는 이미지의 세가지 크기\n",
"#320 × 320 : 작고 정확도는 떨어지지 만 속도 빠름\n",
"#609 × 609 : 정확도는 더 높지만 속도 느림\n",
"#416 × 416 : 중간\n",
"##################################################\n",
"# Detecting objects\n",
"blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n",
"net.setInput(blob)\n",
"outs = net.forward(output_layers)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"###########################################################\n",
"#신뢰도가 0.5 이상이라면 물체가 정확히 감지되었다고 간주한다. 아니라면 넘어감..\n",
"#임계값은 0에서 1사이의 값을 가지는데\n",
"#1에 가까울수록 탐지 정확도가 높고 , 0에 가까울수록 정확도는 낮아지지만 탐지되는 물체의 수는 많아진다.\n",
"###########################################################################\n",
"# 정보를 화면에 표시\n",
"class_ids = []\n",
"confidences = []\n",
"boxes = []\n",
"for out in outs:\n",
" for detection in out:\n",
" scores = detection[5:]\n",
" class_id = np.argmax(scores)\n",
" confidence = scores[class_id]\n",
" if confidence > 0.5:\n",
" # Object detected\n",
" center_x = int(detection[0] * width)\n",
" center_y = int(detection[1] * height)\n",
" w = int(detection[2] * width)\n",
" h = int(detection[3] * height)\n",
" # 좌표\n",
" x = int(center_x - w / 2)\n",
" y = int(center_y - h / 2)\n",
" boxes.append([x, y, w, h])\n",
" confidences.append(float(confidence))\n",
" class_ids.append(class_id)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"#####################################################\n",
"#같은 물체에 대한 박스가 많은것을 제거\n",
"#Non maximum suppresion이라고 한답니다.\n",
"#노이즈 제거\n",
"####################################################\n",
"indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"#######################################################\n",
"#Box : 감지된 개체를 둘러싼 사각형의 좌표\n",
"#Label : 감지된 물체의 이름\n",
"#Confidence : 0에서 1까지의 탐지에 대한 신뢰도\n",
"########################################################\n",
"font = cv2.FONT_HERSHEY_PLAIN\n",
"for i in range(len(boxes)):\n",
" if i in indexes:\n",
" x, y, w, h = boxes[i]\n",
" label = str(classes[class_ids[i]])\n",
" color = colors[i]\n",
" cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n",
" cv2.putText(img, label, (x, y + 30), font, 3, color, 3)\n",
"cv2.imshow(\"Image\", img)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
%% Cell type:code id: tags:

```python
import cv2
import numpy as np
```
%% Cell type:code id: tags:

```python
#############################################################
#Weight file : the trained model
#Cfg file : the configuration file; it holds every setting for the algorithm.
#Name file : contains the names of the objects the algorithm can detect.
#############################################################
# Load YOLO
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("./coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
```
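One caveat worth flagging: `layer_names[i[0] - 1]` assumes `getUnconnectedOutLayers()` returns an N×1 array, which holds for the OpenCV builds this notebook targets; newer releases return a flat 1-D array, and `i[0]` then fails. A minimal version-tolerant sketch, assuming the same `net` and `layer_names` as above:

```python
# Collect the YOLO output-layer names in a way that works whether
# getUnconnectedOutLayers() returns an Nx1 array (older OpenCV) or a 1-D array (newer OpenCV).
out_idx = np.asarray(net.getUnconnectedOutLayers()).flatten()
output_layers = [layer_names[i - 1] for i in out_idx]
```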
%% Cell type:code id: tags:

```python
img = cv2.imread("sample2.jpg")
img = cv2.resize(img, None, fx=0.4, fy=0.4)
height, width, channels = img.shape
```
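Note that `cv2.imread` returns `None` rather than raising when the file cannot be read, so a bad path only surfaces later at `cv2.resize`. A small guard, assuming the same `sample2.jpg` path:

```python
# Fail early with a clear message if the image could not be read.
img = cv2.imread("sample2.jpg")
if img is None:
    raise FileNotFoundError("sample2.jpg could not be read; check the path")
img = cv2.resize(img, None, fx=0.4, fy=0.4)
height, width, channels = img.shape
```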
%% Cell type:code id: tags:

```python
##################################################
#The three image sizes YOLO accepts
#320 × 320 : small and less accurate, but fast
#609 × 609 : more accurate, but slow
#416 × 416 : in between
##################################################
# Detecting objects
blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
```
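The scale factor 0.00392 is simply 1/255, which rescales 8-bit pixel values into the 0 to 1 range the network was trained on, and the `True` argument swaps OpenCV's BGR channel order to RGB. The same call written with named parameters, as a sketch:

```python
# Equivalent blob, with the 1/255 rescaling and the BGR->RGB swap made explicit.
blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255.0, size=(416, 416),
                             mean=(0, 0, 0), swapRB=True, crop=False)
```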
%% Cell type:code id: tags:

```python
###########################################################
#If the confidence is 0.5 or higher, the object is considered correctly detected; otherwise it is skipped.
#The threshold takes a value between 0 and 1:
#the closer to 1, the higher the detection accuracy; the closer to 0, the lower the accuracy but the more objects are detected.
###########################################################################
# Show the information on screen
class_ids = []
confidences = []
boxes = []
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            # Object detected
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            w = int(detection[2] * width)
            h = int(detection[3] * height)
            # Coordinates
            x = int(center_x - w / 2)
            y = int(center_y - h / 2)
            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)
```
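For reference, with the 80 COCO classes each `detection` row is an 85-element vector, which is why the per-class scores start at index 5. A sketch of the layout, assuming one such row named `detection`:

```python
# YOLOv3 detection vector layout (COCO, 80 classes -> 85 values per row):
#   [0:4]  box centre x, centre y, width, height, all normalized to 0..1
#   [4]    objectness score
#   [5:]   one confidence score per class
cx, cy, bw, bh = detection[0:4]
objectness = detection[4]
class_scores = detection[5:]
```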
%% Cell type:code id: tags:

```python
#####################################################
#Remove the many redundant boxes drawn for the same object
#This is called non-maximum suppression.
#Noise removal
####################################################
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
```
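Depending on the OpenCV build, `NMSBoxes` returns either a flat array of kept indices or an N×1 array (or an empty tuple when nothing survives). The membership test `i in indexes` used below copes with both, but flattening once makes that explicit; a small sketch:

```python
# Normalize the NMS result to a plain list of kept box indices on any OpenCV version.
kept = np.asarray(indexes).flatten().tolist() if len(indexes) > 0 else []
```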
%% Cell type:code id: tags:

```python
#######################################################
#Box : coordinates of the rectangle surrounding the detected object
#Label : name of the detected object
#Confidence : detection confidence, from 0 to 1
########################################################
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
    if i in indexes:
        x, y, w, h = boxes[i]
        label = str(classes[class_ids[i]])
        color = colors[i]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
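Two optional tweaks: indexing `colors` by `class_ids[i]` instead of `i` gives every instance of a class the same color, and `cv2.imwrite` avoids the GUI window, which tends to be more convenient inside a notebook. A sketch under those assumptions (the output name `sample2_detected.jpg` is made up for the example):

```python
# Draw each kept box in its class color and save the result to disk instead of
# opening an interactive window.
for i in np.asarray(indexes).flatten():
    x, y, w, h = boxes[i]
    label = str(classes[class_ids[i]])
    color = [int(c) for c in colors[class_ids[i]]]
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    cv2.putText(img, label, (x, y + 30), cv2.FONT_HERSHEY_PLAIN, 3, color, 3)
cv2.imwrite("sample2_detected.jpg", img)
```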
%% Cell type:code id: tags:

```python
```