import time

import cv2
import numpy as np
import pyautogui

import actgui
from unified_detector import Fingertips
from hand_detector.detector import SOLO, YOLO
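
# Pipeline overview: a hand detector (SOLO or YOLO) localizes the hand in each
# webcam frame, the Fingertips network classifies the gesture and regresses
# fingertip positions inside the hand crop, and actgui (presumably wrapping
# pyautogui) translates the detected gesture and positions into GUI actions.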


def main():
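    """Run the detection loop on a mirrored webcam feed and drive the GUI."""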
    hand_detection_method = 'yolo'

    if hand_detection_method == 'solo':
        hand = SOLO(weights='weights/solo.h5', threshold=0.8)
    elif hand_detection_method == 'yolo':
        hand = YOLO(weights='weights/yolo.h5', threshold=0.8)
    else:
        raise ValueError("'" + hand_detection_method + "' hand detection does not exist; "
                         "use either 'solo' or 'yolo' as the hand detection method")

    fingertips = Fingertips(weights='weights/fingertip.h5')

    cam = cv2.VideoCapture(0)  # default webcam
    print('Unified Gesture & Fingertips Detection')

    while True:
        time.sleep(0.2)
        ret, image = cam.read()
        if not ret:
            break

        # mirror the frame so on-screen movement matches the user's hand movement
        image = cv2.flip(image, 1)

        # hand detection
        tl, br = hand.detect(image=image)

        if tl is not None and br is not None:
            cropped_image = image[tl[1]:br[1], tl[0]:br[0]]
            height, width, _ = cropped_image.shape

            # gesture classification and fingertips regression
            prob, pos = fingertips.classify(image=cropped_image)
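            # collapse the output's leading axis into a flat vector (x at even indices, y at odd)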
            pos = np.mean(pos, 0)
            
            # post-processing: binarize the per-finger probabilities at 0.2,
            # then map the normalized crop coordinates back to the full image
            prob = np.asarray([(p >= 0.2) * 1.0 for p in prob])
            for i in range(0, len(pos), 2):
                pos[i] = pos[i] * width + tl[0]
                pos[i + 1] = pos[i + 1] * height + tl[1]

            #########################################################
            # start gui
            #########################################################

            # hand check: classify which gesture/finger combination is raised
            case = actgui.hand_check(prob)
            print(case)
            actgui.store_pos(pos)

            # initialize coordinates
            actgui.initialize_coordinate()

            # initialize variables
            actgui.initialize_variable()

            # check for the termination gesture
            if actgui.terminate_check():
                break

            # perform the GUI action
            actgui.act_gui()
            
            #########################################################
            # end gui
            #########################################################
            # drawing
            index = 0
            color = [(15, 15, 240), (15, 240, 155), (240, 155, 15), (240, 15, 155), (240, 15, 240)]
            # hand bounding box
            image = cv2.rectangle(image, (tl[0], tl[1]), (br[0], br[1]), (235, 26, 158), 2)
            # draw a filled circle on each detected fingertip
            for c, p in enumerate(prob):
                if p > 0.5:
                    image = cv2.circle(image, (int(pos[index]), int(pos[index + 1])), radius=12,
                                       color=color[c], thickness=-1)
                index += 2

        # display a downscaled copy of the annotated image
        image = cv2.resize(image, dsize=(0, 0), fx=0.3, fy=0.3, interpolation=cv2.INTER_AREA)
        cv2.imshow('Unified Gesture & Fingertips Detection', image)

        # ESC quits
        if cv2.waitKey(1) & 0xff == 27:
            break

    cam.release()
    cv2.destroyAllWindows()


def mainleft():
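    """Variant of main() that skips the horizontal flip (presumably intended
    for left-handed use); it is otherwise identical and not called by default."""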
    hand_detection_method = 'yolo'

    if hand_detection_method == 'solo':
        hand = SOLO(weights='weights/solo.h5', threshold=0.8)
    elif hand_detection_method == 'yolo':
        hand = YOLO(weights='weights/yolo.h5', threshold=0.8)
    else:
        raise ValueError("'" + hand_detection_method + "' hand detection does not exist; "
                         "use either 'solo' or 'yolo' as the hand detection method")

    fingertips = Fingertips(weights='weights/fingertip.h5')

    cam = cv2.VideoCapture(0)  # default webcam
    print('Unified Gesture & Fingertips Detection')

    while True:
        time.sleep(0.2)
        ret, image = cam.read()
        if not ret:
            break

        # hand detection
        tl, br = hand.detect(image=image)

        if tl is not None and br is not None:
            cropped_image = image[tl[1]:br[1], tl[0]:br[0]]
            height, width, _ = cropped_image.shape

            # gesture classification and fingertips regression
            prob, pos = fingertips.classify(image=cropped_image)
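            # collapse the output's leading axis into a flat vector (x at even indices, y at odd)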
            pos = np.mean(pos, 0)
            
            # post-processing: binarize the per-finger probabilities at 0.2,
            # then map the normalized crop coordinates back to the full image
            prob = np.asarray([(p >= 0.2) * 1.0 for p in prob])
            for i in range(0, len(pos), 2):
                pos[i] = pos[i] * width + tl[0]
                pos[i + 1] = pos[i + 1] * height + tl[1]

            #########################################################
            # start gui
            #########################################################

            # hand check: classify which gesture/finger combination is raised
            case = actgui.hand_check(prob)
            print(case)
            actgui.store_pos(pos)

            # initialize coordinates
            actgui.initialize_coordinate()

            # initialize variables
            actgui.initialize_variable()

            # check for the termination gesture
            if actgui.terminate_check():
                break

            # perform the GUI action
            actgui.act_gui()
            
            #########################################################
            # end gui
            #########################################################
            # drawing
            index = 0
            color = [(15, 15, 240), (15, 240, 155), (240, 155, 15), (240, 15, 155), (240, 15, 240)]
            # hand bounding box
            image = cv2.rectangle(image, (tl[0], tl[1]), (br[0], br[1]), (235, 26, 158), 2)
            # draw a filled circle on each detected fingertip
            for c, p in enumerate(prob):
                if p > 0.5:
                    image = cv2.circle(image, (int(pos[index]), int(pos[index + 1])), radius=12,
                                       color=color[c], thickness=-1)
                index += 2

        # display a downscaled copy of the annotated image
        image = cv2.resize(image, dsize=(0, 0), fx=0.3, fy=0.3, interpolation=cv2.INTER_AREA)
        cv2.imshow('Unified Gesture & Fingertips Detection', image)

        # ESC quits
        if cv2.waitKey(1) & 0xff == 27:
            break

    cam.release()
    cv2.destroyAllWindows()
    


if __name__ == "__main__":
    main()