hassan526 commited on
Commit
4d34494
·
verified ·
1 Parent(s): 6760941

Upload 39 files

Browse files
.gitattributes CHANGED
@@ -52,3 +52,14 @@ dependency/openvino/libopenvino_tensorflow_fe.so filter=lfs diff=lfs merge=lfs -
52
  dependency/openvino/libopenvino.so filter=lfs diff=lfs merge=lfs -text
53
  dependency/openvino/pcie-ma2x8x.mvcmd filter=lfs diff=lfs merge=lfs -text
54
  dependency/openvino/usb-ma2x8x.mvcmd filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
52
  dependency/openvino/libopenvino.so filter=lfs diff=lfs merge=lfs -text
53
  dependency/openvino/pcie-ma2x8x.mvcmd filter=lfs diff=lfs merge=lfs -text
54
  dependency/openvino/usb-ma2x8x.mvcmd filter=lfs diff=lfs merge=lfs -text
55
+ fl/engine/libliveness_v7.so filter=lfs diff=lfs merge=lfs -text
56
+ fl/examples/att_1.jpg filter=lfs diff=lfs merge=lfs -text
57
+ fl/examples/att_10.jpg filter=lfs diff=lfs merge=lfs -text
58
+ fl/examples/att_2.jpg filter=lfs diff=lfs merge=lfs -text
59
+ fl/examples/att_3.jpg filter=lfs diff=lfs merge=lfs -text
60
+ fl/examples/att_4.jpg filter=lfs diff=lfs merge=lfs -text
61
+ fl/examples/att_6.jpg filter=lfs diff=lfs merge=lfs -text
62
+ fl/examples/att_7.jpg filter=lfs diff=lfs merge=lfs -text
63
+ fl/examples/att_8.jpg filter=lfs diff=lfs merge=lfs -text
64
+ fl/examples/att_9.jpg filter=lfs diff=lfs merge=lfs -text
65
+ fr/engine/librecognition_v6.so filter=lfs diff=lfs merge=lfs -text
fl/engine/bin/data1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36cf5fcc49345989a86839a53529314ec1fe5d621c377a1952bc7538d55e7f1b
3
+ size 16255630
fl/engine/bin/data2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f92776f5d0d04415f47e9118a7814a89d89fd840dd1ea74c57ef5265b1d8bdd
3
+ size 194381075
fl/engine/bin/data3.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f25fb0cd3d70cb84c258e7109620f411c087e0875828d6ab86cc9c4838d49bec
3
+ size 11875339
fl/engine/bin/detect.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b505c320dd8add047f107549849a307d0c6f518f01c1d3402bce9e13a765146
3
+ size 28463173
fl/engine/header.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import sys
import numpy as np
import ctypes, ctypes.util
from enum import Enum
from ctypes import *
from numpy.ctypeslib import ndpointer

# ANSI-colored console loggers used across the demo apps.
# NOTE(review): \033[98m is not a standard SGR color — renders as default on
# most terminals; presumably intentional "plain" log style.
def print_log(fmt): print("[LOG] \033[98m{}\033[00m" .format(fmt))
def print_info(fmt): print("[INFO] \033[92m{}\033[00m" .format(fmt))
def print_error(fmt): print("[ERR] \033[91m{}\033[00m" .format(fmt))
def print_warning(fmt): print("[WARNING] \033[93m{}\033[00m" .format(fmt))

# Status codes returned by the native SDK entry points (0 or negative).
class ENGINE_CODE(Enum):
    E_NO_FACE = 0
    E_ACTIVATION_ERROR = -1
    E_ENGINE_INIT_ERROR = -2

# Sentinel values the engine writes into the score output buffer to flag
# face-quality failures; these are NOT real liveness scores (see check_liveness).
class LIVENESS_CODE(Enum):
    L_TOO_SMALL_FACE = -100
    L_BORDERLINE_FACE = -200
    L_TOO_TURNED_FACE = -300
    L_COVERED_FACE = -400
    L_MULTIPLE_FACE = -500
    L_DEEP_FAKE = -600

# Load the native liveness engine shipped next to this file.
lib_path = os.path.abspath(os.path.dirname(__file__)) + '/libliveness_v7.so'
lib = cdll.LoadLibrary(lib_path)

# const char* ttv_version(void) — SDK version string.
get_version = lib.ttv_version
get_version.argtypes = []
get_version.restype = ctypes.c_char_p

# const char* ttv_get_hwid(void) — hardware id used for licensing.
get_deviceid = lib.ttv_get_hwid
get_deviceid.argtypes = []
get_deviceid.restype = ctypes.c_char_p

# int ttv_init(const char* dict_path, const char* license_key) — online activation.
init_sdk = lib.ttv_init
init_sdk.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
init_sdk.restype = ctypes.c_int32

# int ttv_init_offline(const char* dict_path, const char* license_path) — offline activation.
init_sdk_offline = lib.ttv_init_offline
init_sdk_offline.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
init_sdk_offline.restype = ctypes.c_int32


# int ttv_detect_face(pixels, width, height, out_rect[4], out_score[1], out_angles[3]).
# All array arguments must be C-contiguous numpy buffers the engine fills in.
# Rect layout is presumably (x1, y1, x2, y2) — the callers compute w = x2-x1+1.
detect_face_rgb = lib.ttv_detect_face
detect_face_rgb.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ctypes.c_int32, ctypes.c_int32, ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]
detect_face_rgb.restype = ctypes.c_int32
DEFAULT_THRESHOLD = 0

# Human-readable results for the sentinel "scores" the engine uses to flag
# face-quality problems (see LIVENESS_CODE above).
_LIVENESS_RESULTS = {
    LIVENESS_CODE.L_TOO_SMALL_FACE.value: "TOO SMALL FACE",
    LIVENESS_CODE.L_BORDERLINE_FACE.value: "FACE CUT OFF",
    LIVENESS_CODE.L_TOO_TURNED_FACE.value: "TOO TURNED FACE",
    LIVENESS_CODE.L_COVERED_FACE.value: "COVERED FACE",
    LIVENESS_CODE.L_MULTIPLE_FACE.value: "MULTIPLE FACES",
    LIVENESS_CODE.L_DEEP_FAKE.value: "DEEP FAKE DETECTED",
}

def check_liveness(image_mat, spoof_threshold=DEFAULT_THRESHOLD):
    """Run face detection and liveness scoring on one decoded image.

    Args:
        image_mat: HxWx3 uint8 numpy array (as produced by cv2.imdecode),
            or None.
        spoof_threshold: scores strictly above this value are "REAL".

    Returns:
        (result, face_rect, score, angles): result is a status string;
        face_rect is an int32[4] box, score a float, angles a double[3]
        array filled by the native engine.  On any failure the last three
        values are None.
    """
    if image_mat is None:
        return "Failed to open image", None, None, None

    # Output buffers the native call fills in.
    face_rect = np.zeros([4], dtype=np.int32)
    liveness_score = np.zeros([1], dtype=np.double)
    angles = np.zeros([3], dtype=np.double)

    height, width = image_mat.shape[0], image_mat.shape[1]

    ret = detect_face_rgb(image_mat, width, height, face_rect, liveness_score, angles)

    if ret <= 0:
        if ret == ENGINE_CODE.E_ACTIVATION_ERROR.value:
            result = "ACTIVATION ERROR"
        elif ret == ENGINE_CODE.E_ENGINE_INIT_ERROR.value:
            result = "ENGINE INIT ERROR"
        elif ret == ENGINE_CODE.E_NO_FACE.value:
            result = "NO FACE"
        else:
            # Bug fix: unknown negative codes previously fell through and
            # returned an empty result string; surface them explicitly.
            result = "ENGINE ERROR ({})".format(ret)
        return result, None, None, None

    score = liveness_score[0]
    # Sentinel values take priority over threshold comparison.
    result = _LIVENESS_RESULTS.get(score)
    if result is None:
        result = "REAL" if score > spoof_threshold else "SPOOF"

    return result, face_rect, score, angles
fl/engine/libliveness_v7.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff4b8566df6107af6e3564c7c5c4167c213d87e671b286c5d4b799013f0aab9b
3
+ size 5447208
fl/examples/att_1.jpg ADDED

Git LFS Details

  • SHA256: 4bf224945b074ebd4692ab6b2a8d11da2cab07ea281b2cb46d489c743a9a636a
  • Pointer size: 131 Bytes
  • Size of remote file: 120 kB
fl/examples/att_10.jpg ADDED

Git LFS Details

  • SHA256: 8e31a8ca32b163184e501e64c8b839ca09853f723acb50203a9849571183b42d
  • Pointer size: 131 Bytes
  • Size of remote file: 189 kB
fl/examples/att_2.jpg ADDED

Git LFS Details

  • SHA256: 35211a2ddc7d963804bf6e7a0c50cb003a445200d0735374bed6cf5c2ada51fa
  • Pointer size: 131 Bytes
  • Size of remote file: 204 kB
fl/examples/att_3.jpg ADDED

Git LFS Details

  • SHA256: 9a76229bc7cd697ea1b30bd033a787cca49e67f8a554030f0d8bb391884f16ad
  • Pointer size: 131 Bytes
  • Size of remote file: 170 kB
fl/examples/att_4.jpg ADDED

Git LFS Details

  • SHA256: 798ed3e9324cb9ba3088d49887c5229564974f926ecbd0c4435ae760a6f94a2f
  • Pointer size: 131 Bytes
  • Size of remote file: 130 kB
fl/examples/att_5.jpg ADDED
fl/examples/att_6.jpg ADDED

Git LFS Details

  • SHA256: 678cd1fa4a8ace32a421bc4b355c588aa8132fa2564bc94bbd0c2539770da278
  • Pointer size: 131 Bytes
  • Size of remote file: 273 kB
fl/examples/att_7.jpg ADDED

Git LFS Details

  • SHA256: 410d8436edd199cb66780a8e498278743f79a9de46bef6a3892f72e3dc511fa5
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
fl/examples/att_8.jpg ADDED

Git LFS Details

  • SHA256: 89d36dff57eee50614675b1eed00a44a5933c8993ae3247593e79fe95a3c2757
  • Pointer size: 131 Bytes
  • Size of remote file: 200 kB
fl/examples/att_9.jpg ADDED

Git LFS Details

  • SHA256: d11139982e05c3bac9941b0b5be9fbcdf6ad0d2a6c4fe06e48b96950a687b74d
  • Pointer size: 131 Bytes
  • Size of remote file: 147 kB
fl/flask/app.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import sys
sys.path.append('../')  # make the sibling `engine` package importable

import os
import base64
import json
import cv2
import numpy as np
from time import gmtime, strftime
from flask import Flask, request, jsonify

# ctypes wrappers around the native liveness SDK (libliveness_v7.so).
from engine.header import get_version
from engine.header import get_deviceid
from engine.header import init_sdk
from engine.header import init_sdk_offline

from engine.header import check_liveness
from engine.header import print_log, print_error, print_info, print_warning

# Resolve the package root so license/dictionary paths work regardless of CWD.
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
root_path = os.path.dirname(dir_path)

# Liveness scores above this value are reported as "REAL".
SPOOF_THRESHOLD = 0.5

app = Flask(__name__)
app.config['SITE'] = "http://0.0.0.0:8000/"
app.config['DEBUG'] = False

# Log SDK version and the hardware id (needed for licensing) at startup.
version = get_version().decode('utf-8')
print_info('\t <Recognito Liveness> \t version {}'.format(version))

device_id = get_deviceid().decode('utf-8')
print_info('\t <Hardware ID> \t\t {}'.format(device_id))
def activate_sdk():
    """Activate the liveness SDK.

    Tries online activation with the FL_LICENSE_KEY environment variable
    first, then falls back to offline activation with <root>/license.txt.

    Returns:
        0 on success, a negative engine error code otherwise.
    """
    online_key = os.environ.get("FL_LICENSE_KEY")
    offline_key_path = os.path.join(root_path, "license.txt")
    dict_path = os.path.join(root_path, "engine/bin")

    ret = -1
    if online_key is None:
        print_warning("Liveness online license key not found!")
    else:
        print_info(f"FL_LICENSE_KEY: {online_key}")
        ret = init_sdk(dict_path.encode('utf-8'), online_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully online init SDK!")
        return ret

    # Online activation failed (or no key) — try the offline license file.
    print_error(f"Failed to online init SDK, Error code {ret}\n Trying offline init SDK...")
    if not os.path.exists(offline_key_path):
        print_warning("Liveness offline license key file not found!")
        print_error(f"Failed to offline init SDK, Error code {ret}")
        return ret

    ret = init_sdk_offline(dict_path.encode('utf-8'), offline_key_path.encode('utf-8'))
    if ret == 0:
        print_log("Successfully offline init SDK!")
    else:
        print_error(f"Failed to offline init SDK, Error code {ret}")
    return ret
64
+
def generate_response(result, face_rect, score, angles):
    """Wrap liveness results in the JSON envelope this API returns.

    Fields that are None are omitted from the payload.
    """
    payload = {"result": result}

    if score is not None:
        payload["liveness_score"] = score

    if face_rect is not None:
        x1, y1, x2, y2 = (int(v) for v in face_rect)
        payload["face_rect"] = {
            "x": x1,
            "y": y1,
            "w": x2 - x1 + 1,   # rect is stored as corners; report width/height
            "h": y2 - y1 + 1
        }

    if angles is not None:
        payload["angles"] = {
            "yaw": angles[0],
            "roll": angles[1],
            "pitch": angles[2]
        }

    response = jsonify({"status": "ok", "data": payload})
    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
96
+
@app.route('/api/check_liveness', methods=['POST'])
def check_liveness_api():
    """POST endpoint: multipart 'image' file field -> liveness JSON response."""
    try:
        file = request.files['image']
        image_mat = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
    except Exception:
        # Missing form field, unreadable upload, or undecodable image bytes.
        # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
        return generate_response("Failed to open file!", None, None, None)

    result, face_rect, score, angles = check_liveness(image_mat, SPOOF_THRESHOLD)
    return generate_response(result, face_rect, score, angles)
109
+
@app.route('/api/check_liveness_base64', methods=['POST'])
def check_liveness_base64_api():
    """POST endpoint: JSON body with base64 'image' field -> liveness JSON response."""
    try:
        content = request.get_json()
        imageBase64 = content['image']
        image_mat = cv2.imdecode(np.frombuffer(base64.b64decode(imageBase64), dtype=np.uint8), cv2.IMREAD_COLOR)
    except Exception:
        # Malformed JSON, missing key, invalid base64, or undecodable image.
        # (Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.)
        return generate_response("Failed to open file!", None, None, None)

    result, face_rect, score, angles = check_liveness(image_mat, SPOOF_THRESHOLD)
    return generate_response(result, face_rect, score, angles)
123
+
if __name__ == '__main__':
    # Refuse to serve if the SDK could not be activated.
    ret = activate_sdk()
    if ret != 0:
        sys.exit(-1)  # exit() is the site builtin; sys.exit is the reliable form
    port = int(os.environ.get("PORT", 8000))
    app.run(host='0.0.0.0', port=port)
fl/gradio/app.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import sys
sys.path.append('../')  # make the sibling `engine` package importable

import os
import gradio as gr
import cv2
import time
import numpy as np
from PIL import Image

# ctypes wrappers around the native liveness SDK (libliveness_v7.so).
from engine.header import get_version
from engine.header import get_deviceid
from engine.header import init_sdk
from engine.header import init_sdk_offline

from engine.header import check_liveness
from engine.header import print_log, print_error, print_info, print_warning

# Resolve the package root so license/engine/example paths work regardless of CWD.
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
root_path = os.path.dirname(dir_path)

version = get_version().decode('utf-8')
print_info('\t <Recognito Liveness> \t version {}'.format(version))

device_id = get_deviceid().decode('utf-8')
print_info('\t <Hardware ID> \t\t {}'.format(device_id))

# Set from __main__ after activate_sdk(); checked by check_liveness_clicked.
g_activation_result = -1
# Default liveness threshold shown in the UI (user-editable textbox).
SPOOF_THRESHOLD = 0.5

# Custom CSS injected into the Gradio app (runtime string, kept verbatim).
css = """
.example-image img{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 300px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.example-image{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 350px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.face-row {
    display: flex;
    justify-content: space-around; /* Distribute space evenly between elements */
    align-items: center; /* Align items vertically */
    width: 100%; /* Set the width of the row to 100% */
}

.face-image{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    width: 160px;
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.face-image img{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.markdown-success-container {
    background-color: #F6FFED;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid green;
    text-align: center;
}

.markdown-fail-container {
    background-color: #FFF1F0;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid red;
    text-align: center;
}

.markdown-attribute-container {
    display: flex;
    justify-content: space-around; /* Distribute space evenly between elements */
    align-items: center; /* Align items vertically */
    padding: 10px;
    margin: 10px;
}

.block-background {
    # background-color: #202020; /* Set your desired background color */
    border-radius: 5px;
}

table, th, td {
    text-align: center;
}
"""
def activate_sdk():
    """Activate the liveness SDK.

    Tries online activation with the FL_LICENSE_KEY environment variable
    first, then falls back to offline activation with <root>/license.txt.

    Returns:
        0 on success, a negative engine error code otherwise.
    """
    online_key = os.environ.get("FL_LICENSE_KEY")
    offline_key_path = os.path.join(root_path, "license.txt")
    dict_path = os.path.join(root_path, "engine/bin")

    ret = -1
    if online_key is None:
        print_warning("Liveness online license key not found!")
    else:
        print_info(f"FL_LICENSE_KEY: {online_key}")
        ret = init_sdk(dict_path.encode('utf-8'), online_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully online init SDK!")
        return ret

    # Online activation failed (or no key) — try the offline license file.
    print_error(f"Failed to online init SDK, Error code {ret}\n Trying offline init SDK...")
    if not os.path.exists(offline_key_path):
        print_warning("Liveness offline license key file not found!")
        print_error(f"Failed to offline init SDK, Error code {ret}")
        return ret

    ret = init_sdk_offline(dict_path.encode('utf-8'), offline_key_path.encode('utf-8'))
    if ret == 0:
        print_log("Successfully offline init SDK!")
    else:
        print_error(f"Failed to offline init SDK, Error code {ret}")
    return ret
136
+
def convert_fun(input_str):
    """Collapse every run of whitespace (spaces, tabs, line breaks) to one space."""
    tokens = input_str.split()
    return ' '.join(tokens)
140
+
def check_liveness_clicked(frame, threshold):
    """Gradio click handler: run liveness on the selected image file.

    Args:
        frame: filepath of the chosen image (gr.Image type='filepath').
        threshold: spoof threshold from the UI textbox (string or number).

    Returns:
        (face_crop, result_markdown, attribute_markdown, process_time_ms);
        all None when the SDK is not activated.
    """
    global g_activation_result
    if g_activation_result != 0:
        gr.Warning("SDK Activation Failed!")
        return None, None, None, None

    try:
        image = open(frame, 'rb')
    except Exception:
        # frame is None (no selection) or the path is unreadable.
        raise gr.Error("Please select image file!")

    image_mat = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, face_rect, score, angles = check_liveness(image_mat, float(threshold))
    end_time = time.time()
    process_time = (end_time - start_time) * 1000

    # Cropping and attribute rendering are best-effort; any failure here
    # still lets the liveness verdict be displayed.
    face_crop, one_line_attribute = None, ""
    try:
        image = Image.open(frame)

        face = Image.new('RGBA', (150, 150), (80, 80, 80, 0))

        if face_rect is not None:
            # Clamp the engine's box to the image bounds.
            x1 = max(int(face_rect[0]), 0)
            y1 = max(int(face_rect[1]), 0)
            x2 = min(int(face_rect[2]), image.width - 1)
            y2 = min(int(face_rect[3]), image.height - 1)

            face_crop = image.crop((x1, y1, x2, y2))
            face_image_ratio = face_crop.width / float(face_crop.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face_crop = face_crop.resize((int(resized_w), int(resized_h)))

        if angles is not None:
            yaw = angles[0]
            roll = angles[1]
            pitch = angles[2]

            attribute = f"""
            <br/>
            <div class="markdown-attribute-container">
            <table>
            <tr>
            <th>Field</th>
            <th colspan="2">Value</th>
            </tr>
            <tr>
            <th rowspan="4">Face Rect</th>
            <td>x</td>
            <td>{x1}</td>
            </tr>
            <tr>
            <td>y</td>
            <td>{y1}</td>
            </tr>
            <tr>
            <td>width</td>
            <td>{x2 - x1 + 1}</td>
            </tr>
            <tr>
            <td>height</td>
            <td>{y2 - y1 + 1}</td>
            </tr>
            <tr>
            <th rowspan="3">Face Angle</th>
            <td>Pitch</td>
            <td>{"{:.4f}".format(pitch)}</td>
            </tr>
            <tr>
            <td>Yaw</td>
            <td>{"{:.4f}".format(yaw)}</td>
            </tr>
            <tr>
            <td>Roll</td>
            <td>{"{:.4f}".format(roll)}</td>
            </tr>
            </table>
            </div>
            """

            one_line_attribute = convert_fun(attribute)
    except Exception:
        pass

    # Bug fix: score is None when detection failed (e.g. "NO FACE"); the
    # original crashed here on "{:.4f}".format(None).
    str_score = "N/A" if score is None else "{:.4f}".format(score)
    if result == "REAL":
        liveness_result = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Liveness Check: REAL<br/>Score: {str_score}</p></div>"""
    else:
        liveness_result = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Liveness Check: {result}<br/>Score: {str_score}</p></div>"""

    return face_crop, liveness_result, one_line_attribute, process_time
244
+
def launch_demo(activate_result):
    # Build and launch the Gradio demo UI: branding header, activation status,
    # example images, and the liveness-check interaction wiring.
    with gr.Blocks(css=css) as demo:
        # Branding / contact links header (static HTML markdown).
        gr.Markdown(
            f"""
            <a href="https://recognito.vision" style="display: flex; align-items: center;">
                <img src="https://recognito.vision/wp-content/uploads/2024/03/Recognito-modified.png" style="width: 3%; margin-right: 15px;"/>
            </a>
            <div style="display: flex; align-items: center;justify-content: center;">
                <p style="font-size: 36px; font-weight: bold;">Face Liveness Detection {version}</p>
            </div>
            <p style="font-size: 20px; font-weight: bold;">🤝 Contact us for our on-premise Face Recognition, Liveness Detection SDKs deployment</p>
            </div>
            <div style="display: flex; align-items: center;">
                &emsp;&emsp;<a target="_blank" href="mailto:[email protected]"><img src="https://img.shields.io/badge/[email protected]?logo=gmail " alt="www.recognito.vision"></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://wa.me/+14158003112"><img src="https://img.shields.io/badge/whatsapp-recognito-blue.svg?logo=whatsapp " alt="www.recognito.vision"></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://t.me/recognito_vision"><img src="https://img.shields.io/badge/[email protected]?logo=telegram " alt="www.recognito.vision"></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://join.slack.com/t/recognito-workspace/shared_invite/zt-2d4kscqgn-"><img src="https://img.shields.io/badge/slack-recognito-blue.svg?logo=slack " alt="www.recognito.vision"></a>
            </div>
            <br/>
            <div style="display: flex; align-items: center;">
                &emsp;&emsp;<a href="https://recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/recognito_64.png" style="width: 24px; margin-right: 5px;"/></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://www.linkedin.com/company/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/linkedin64.png" style="width: 24px; margin-right: 5px;"/></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://huggingface.co/Recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/hf1_64.png" style="width: 24px; margin-right: 5px;"/></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://github.com/Recognito-Vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/github64.png" style="width: 24px; margin-right: 5px;"/></a>
                &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://hub.docker.com/u/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/docker64.png" style="width: 24px; margin-right: 5px;"/></a>
            </div>
            <br/>
            """
        )


        # Activation banner plus the hardware id users need to obtain a license.
        with gr.Group():
            if activate_result == 0:
                gr.Markdown("""<p style="text-align: left; font-size: 20px; color: green;">&emsp;Activation Success!</p>""")
            else:
                gr.Markdown("""<p style="text-align: left; font-size: 20px; color: red;">&emsp;Activation Failed!</p>""")

            gr.Textbox(device_id, label="Hardware ID")


        with gr.Row():
            # Left column: input image and the bundled example attack images.
            with gr.Column(scale=1):
                face_input = gr.Image(label="Image", type='filepath', elem_classes="example-image")
                gr.Examples([os.path.join(root_path,'examples/att_1.jpg'),
                             os.path.join(root_path,'examples/att_2.jpg'),
                             os.path.join(root_path,'examples/att_3.jpg'),
                             os.path.join(root_path,'examples/att_4.jpg'),
                             os.path.join(root_path,'examples/att_5.jpg'),
                             os.path.join(root_path,'examples/att_6.jpg'),
                             os.path.join(root_path,'examples/att_7.jpg'),
                             os.path.join(root_path,'examples/att_8.jpg'),
                             os.path.join(root_path,'examples/att_9.jpg'),
                             os.path.join(root_path,'examples/att_10.jpg')],
                            inputs=face_input)

            # Right column: threshold control, trigger button, and result panes.
            with gr.Blocks():
                with gr.Column(scale=1, elem_classes="block-background"):
                    txt_threshold = gr.Textbox(f"{SPOOF_THRESHOLD}", label="Spoof Threshold", interactive=True)
                    check_liveness_button = gr.Button("Check Liveness", variant="primary", size="lg")
                    with gr.Row(elem_classes="face-row"):
                        face_output = gr.Image(value=os.path.join(dir_path,'icons/face.jpg'), label="Face", scale=0, elem_classes="face-image")

                    liveness_result = gr.Markdown("")
                    txt_speed = gr.Textbox(f"", label="Processing Time (ms)", interactive=False, visible=False)
                    attribute_result = gr.Markdown("")

                    check_liveness_button.click(check_liveness_clicked, inputs=[face_input, txt_threshold], outputs=[face_output, liveness_result, attribute_result, txt_speed])

    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
314
+
if __name__ == '__main__':
    # Store the activation result in the module global that
    # check_liveness_clicked checks before processing requests.
    g_activation_result = activate_sdk()
    launch_demo(g_activation_result)
fl/gradio/icons/face.jpg ADDED
fr/engine/bin/data1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36cf5fcc49345989a86839a53529314ec1fe5d621c377a1952bc7538d55e7f1b
3
+ size 16255630
fr/engine/bin/data2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f25fb0cd3d70cb84c258e7109620f411c087e0875828d6ab86cc9c4838d49bec
3
+ size 11875339
fr/engine/bin/data31.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d791d9e975be933db24fa029ab5d0d752b85fdbda8a540c5d1baf385acf23aa9
3
+ size 130663307
fr/engine/bin/detect.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b505c320dd8add047f107549849a307d0c6f518f01c1d3402bce9e13a765146
3
+ size 28463173
fr/engine/header.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import sys
import numpy as np
import ctypes, ctypes.util
from enum import Enum
from ctypes import *
from numpy.ctypeslib import ndpointer

# ANSI-colored console loggers used across the demo apps.
def print_log(fmt): print("[LOG] \033[98m{}\033[00m" .format(fmt))
def print_info(fmt): print("[INFO] \033[92m{}\033[00m" .format(fmt))
def print_error(fmt): print("[ERR] \033[91m{}\033[00m" .format(fmt))
def print_warning(fmt): print("[WARNING] \033[93m{}\033[00m" .format(fmt))

# Status codes returned by the native SDK entry points (0 or negative).
class ENGINE_CODE(Enum):
    E_NO_FACE = 0
    E_ACTIVATION_ERROR = -1
    E_ENGINE_INIT_ERROR = -2

# Load the native face-recognition engine shipped next to this file.
lib_path = os.path.abspath(os.path.dirname(__file__)) + '/librecognition_v6.so'
lib = cdll.LoadLibrary(lib_path)

# const char* ttv_version(void) — SDK version string.
get_version = lib.ttv_version
get_version.argtypes = []
get_version.restype = ctypes.c_char_p

# const char* ttv_get_hwid(void) — hardware id used for licensing.
get_deviceid = lib.ttv_get_hwid
get_deviceid.argtypes = []
get_deviceid.restype = ctypes.c_char_p

# int ttv_init(const char* dict_path, const char* license_key) — online activation.
init_sdk = lib.ttv_init
init_sdk.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
init_sdk.restype = ctypes.c_int32

# int ttv_init_offline(const char* dict_path, const char* license_path) — offline activation.
init_sdk_offline = lib.ttv_init_offline
init_sdk_offline.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
init_sdk_offline.restype = ctypes.c_int32

# int ttv_extract_feature(pixels, width, height, out_bbox[4], out_template[2048], out_len[1]).
# All array arguments must be C-contiguous numpy buffers the engine fills in.
extract_template = lib.ttv_extract_feature
extract_template.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ctypes.c_int32, ctypes.c_int32, ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS')]
extract_template.restype = ctypes.c_int

# double ttv_compare_feature(template_a, template_b) — similarity score.
calculate_similarity = lib.ttv_compare_feature
calculate_similarity.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS')]
calculate_similarity.restype = ctypes.c_double
DEFAULT_THRESHOLD = 0.67

def _extract_face_template(image_mat, label):
    """Run the native feature extractor on one image.

    Returns (bbox, template, error): error is None on success, otherwise a
    message string ("" for unrecognized engine codes, matching the legacy
    behavior).
    """
    bbox = np.zeros([4], dtype=np.int32)
    template = np.zeros([2048], dtype=np.uint8)
    template_len = np.zeros([1], dtype=np.int32)
    height, width = image_mat.shape[0], image_mat.shape[1]

    ret = extract_template(image_mat, width, height, bbox, template, template_len)
    if ret > 0:
        return bbox, template, None

    if ret == ENGINE_CODE.E_ACTIVATION_ERROR.value:
        message = "ACTIVATION ERROR"
    elif ret == ENGINE_CODE.E_ENGINE_INIT_ERROR.value:
        message = "ENGINE INIT ERROR"
    elif ret == ENGINE_CODE.E_NO_FACE.value:
        message = "NO FACE in " + label
    else:
        message = ""
    return bbox, template, message

def compare_face(image_mat1, image_mat2, match_threshold=DEFAULT_THRESHOLD):
    """Extract a face template from each image and compare them.

    Returns (result, match_score, [bbox1, bbox2], [template1, template2]);
    on any failure returns (message, None, None, None).
    """
    if image_mat1 is None:
        return "Failed to open image1", None, None, None
    if image_mat2 is None:
        return "Failed to open image2", None, None, None

    face_bbox_1, template_1, error = _extract_face_template(image_mat1, "image1")
    if error is not None:
        return error, None, None, None

    face_bbox_2, template_2, error = _extract_face_template(image_mat2, "image2")
    if error is not None:
        return error, None, None, None

    match_score = calculate_similarity(template_1, template_2)
    result = "SAME PERSON" if match_score > match_threshold else "DIFFERENT PERSON"
    return result, match_score, [face_bbox_1, face_bbox_2], [template_1, template_2]
fr/engine/librecognition_v6.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3305d2f3b42dccfd83f6ce80b26b737e40cefc676261ad0b849fe4c9946aae3e
3
+ size 5067600
fr/examples/1.jpg ADDED
fr/examples/2.jpg ADDED
fr/examples/3.jpg ADDED
fr/examples/4.jpg ADDED
fr/examples/5.jpg ADDED
fr/examples/6.jpg ADDED
fr/examples/7.jpg ADDED
fr/examples/8.jpg ADDED
fr/flask/app.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Flask REST service exposing the Recognito face-recognition SDK
# (endpoints: /api/compare_face and /api/compare_face_base64).
import sys
sys.path.append('../')  # make the sibling `engine` package importable

import os
import base64
import json
import cv2
import numpy as np
from flask import Flask, request, jsonify
from time import gmtime, strftime

# Bindings for the native recognition engine; presumably provides
# get_version, get_deviceid, init_sdk, init_sdk_offline, compare_face and
# the print_* logging helpers used below -- confirm against engine/header.
from engine.header import *

file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
root_path = os.path.dirname(dir_path)  # repo root: holds license.txt and engine/bin

# Similarity threshold above which two faces are reported as the same person.
MATCH_THRESHOLD = 0.67

app = Flask(__name__)
app.config['SITE'] = "http://0.0.0.0:8000/"
app.config['DEBUG'] = False

# Log engine version and the hardware id (used for licensing) at startup.
version = get_version().decode('utf-8')
print_info('\t <Recognito Face Recognition> \t version {}'.format(version))

device_id = get_deviceid().decode('utf-8')
print_info('\t <Hardware ID> \t\t {}'.format(device_id))
30
def activate_sdk():
    """Activate the recognition SDK, preferring online activation.

    Reads the online key from the FR_LICENSE_KEY environment variable; when
    that is unset or online activation fails, falls back to the offline
    license file at <root>/license.txt.

    Returns:
        int: 0 on success, otherwise a non-zero engine error code.
    """
    online_key = os.environ.get("FR_LICENSE_KEY")
    offline_key_path = os.path.join(root_path, "license.txt")
    dict_path = os.path.join(root_path, "engine/bin")

    ret = -1
    if online_key is None:
        print_warning("Recognition online license key not found!")
    else:
        print_info(f"FR_LICENSE_KEY: {online_key}")
        ret = init_sdk(dict_path.encode('utf-8'), online_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully online init SDK!")
        return ret

    # Online activation failed (or no key): try the offline license file.
    print_error(f"Failed to online init SDK, Error code {ret}\n Trying offline init SDK...")
    if not os.path.exists(offline_key_path):
        print_warning("Recognition offline license key file not found!")
        # Typo fix: "Falied" -> "Failed" in the error message.
        print_error(f"Failed to offline init SDK, Error code {ret}")
        return ret

    ret = init_sdk_offline(dict_path.encode('utf-8'), offline_key_path.encode('utf-8'))
    if ret == 0:
        print_log("Successfully offline init SDK!")
    else:
        print_error(f"Failed to offline init SDK, Error code {ret}")
    return ret
59
+
60
def generate_response(result, similarity=None, face_bboxes=None, face_features=None):
    """Build the JSON HTTP response for a face-comparison request.

    Args:
        result: human-readable outcome string (e.g. "SAME PERSON").
        similarity: optional match score; serialized as a float when given.
        face_bboxes: optional pair of [x1, y1, x2, y2] boxes, one per image.
        face_features: optional pair of feature vectors (arrays with .tolist()).

    Returns:
        A Flask JSON response (HTTP 200) shaped as
        {"status": "ok", "data": {"result", "similarity"?, "image1", "image2"}}.
    """
    payload = {"status": "ok", "data": {}}
    body = payload["data"]
    body["result"] = result
    if similarity is not None:
        body["similarity"] = float(similarity)

    images = [{}, {}]
    if face_bboxes is not None:
        for idx, bbox in enumerate(face_bboxes):
            # Convert the engine's corner-style box to x/y/width/height.
            images[idx]["detection"] = {
                "x": int(bbox[0]),
                "y": int(bbox[1]),
                "width": int(bbox[2] - bbox[0] + 1),
                "height": int(bbox[3] - bbox[1] + 1),
            }

    if face_features is not None:
        for idx, feat in enumerate(face_features):
            # Single-line JSON rendering of the feature vector.
            images[idx]["feature"] = json.dumps(feat.tolist(), indent=0).replace('\n', '')

    body["image1"], body["image2"] = images

    response = jsonify(payload)
    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
93
+
94
@app.route('/api/compare_face', methods=['POST'])
def compare_face_api():
    """Compare the faces in two uploaded images.

    Expects multipart form fields 'image1' and 'image2'; decode failures are
    reported in-band via generate_response() rather than as HTTP errors.
    """
    try:
        file1 = request.files['image1']
        image_mat1 = cv2.imdecode(np.frombuffer(file1.read(), np.uint8), cv2.IMREAD_COLOR)
    except Exception:  # narrowed from bare `except:` so SystemExit etc. propagate
        image_mat1 = None
    if image_mat1 is None:  # cv2.imdecode returns None (no exception) on undecodable data
        return generate_response("Failed to open image1")

    try:
        file2 = request.files['image2']
        image_mat2 = cv2.imdecode(np.frombuffer(file2.read(), np.uint8), cv2.IMREAD_COLOR)
    except Exception:
        image_mat2 = None
    if image_mat2 is None:
        return generate_response("Failed to open image2")

    result, score, face_bboxes, face_features = compare_face(image_mat1, image_mat2, MATCH_THRESHOLD)
    return generate_response(result, score, face_bboxes, face_features)
113
+
114
+
115
@app.route('/api/compare_face_base64', methods=['POST'])
def coompare_face_base64_api():
    """Compare the faces in two base64-encoded images.

    Expects a JSON body with string fields 'image1' and 'image2' holding
    base64 image data; returns the same JSON shape as /api/compare_face.

    NOTE(review): the typo'd function name is kept on purpose -- Flask derives
    the endpoint id from it, so renaming could break url_for() callers.
    """
    content = request.get_json()

    try:
        image_base64_1 = content['image1']
        image_mat1 = cv2.imdecode(np.frombuffer(base64.b64decode(image_base64_1), dtype=np.uint8), cv2.IMREAD_COLOR)
    except Exception:  # narrowed from bare `except:`; also covers content=None / bad base64
        image_mat1 = None
    if image_mat1 is None:  # cv2.imdecode returns None (no exception) on undecodable data
        return generate_response("Failed to open image1")

    try:
        image_base64_2 = content['image2']
        image_mat2 = cv2.imdecode(np.frombuffer(base64.b64decode(image_base64_2), dtype=np.uint8), cv2.IMREAD_COLOR)
    except Exception:
        image_mat2 = None
    if image_mat2 is None:
        return generate_response("Failed to open image2")

    result, score, face_bboxes, face_features = compare_face(image_mat1, image_mat2, MATCH_THRESHOLD)
    return generate_response(result, score, face_bboxes, face_features)
136
+
137
if __name__ == '__main__':
    # Refuse to serve unless either the online or offline license activates.
    if activate_sdk() != 0:
        sys.exit(-1)
    listen_port = int(os.environ.get("PORT", 8000))
    app.run(host='0.0.0.0', port=listen_port)
fr/gradio/app.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gradio demo UI for the Recognito face-recognition SDK.
import sys
sys.path.append('../')  # make the sibling `engine` package importable

import os
import gradio as gr
import cv2
import time
import numpy as np
from PIL import Image

# Bindings for the native recognition engine; presumably provides
# get_version, get_deviceid, init_sdk, init_sdk_offline, compare_face and
# the print_* logging helpers used below -- confirm against engine/header.
from engine.header import *

file_path = os.path.abspath(__file__)
gradio_path = os.path.dirname(file_path)  # holds the icons/ assets used by the UI
root_path = os.path.dirname(gradio_path)  # repo root: license.txt, engine/bin, examples/

# Log engine version and the hardware id (used for licensing) at startup.
version = get_version().decode('utf-8')
print_info('\t <Recognito Face Recognition> \t version {}'.format(version))

device_id = get_deviceid().decode('utf-8')
print_info('\t <Hardware ID> \t\t {}'.format(device_id))

# Set by the __main__ block after activate_sdk(); handlers check it before running.
g_activation_result = -1
# Default similarity threshold above which two faces count as the same person.
MATCH_THRESHOLD = 0.67

# Custom CSS injected into gr.Blocks: image sizing and the green/red
# success/fail banner containers.
css = """
.example-image img{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 300px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.example-image{
    display: flex; /* Use flexbox to align items */
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 350px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.face-row {
    display: flex;
    justify-content: space-around; /* Distribute space evenly between elements */
    align-items: center; /* Align items vertically */
    width: 100%; /* Set the width of the row to 100% */
}

.face-image{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    width: 160px;
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.face-image img{
    justify-content: center; /* Center the image horizontally */
    align-items: center; /* Center the image vertically */
    height: 160px; /* Set the height of the container */
    object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}

.markdown-success-container {
    background-color: #F6FFED;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid green;
    text-align: center;
}

.markdown-fail-container {
    background-color: #FFF1F0;
    padding: 20px;
    margin: 20px;
    border-radius: 1px;
    border: 2px solid red;
    text-align: center;
}

.block-background {
    # background-color: #202020; /* Set your desired background color */
    border-radius: 5px;
}

"""
89
+
90
def activate_sdk():
    """Activate the recognition SDK, preferring online activation.

    Reads the online key from the FR_LICENSE_KEY environment variable; when
    that is unset or online activation fails, falls back to the offline
    license file at <root>/license.txt.

    Returns:
        int: 0 on success, otherwise a non-zero engine error code.
    """
    online_key = os.environ.get("FR_LICENSE_KEY")
    offline_key_path = os.path.join(root_path, "license.txt")
    dict_path = os.path.join(root_path, "engine/bin")

    ret = -1
    if online_key is None:
        print_warning("Recognition online license key not found!")
    else:
        print_info(f"FR_LICENSE_KEY: {online_key}")
        ret = init_sdk(dict_path.encode('utf-8'), online_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully online init SDK!")
        return ret

    # Online activation failed (or no key): try the offline license file.
    print_error(f"Failed to online init SDK, Error code {ret}\n Trying offline init SDK...")
    if not os.path.exists(offline_key_path):
        print_warning("Recognition offline license key file not found!")
        # Typo fix: "Falied" -> "Failed" in the error message.
        print_error(f"Failed to offline init SDK, Error code {ret}")
        return ret

    ret = init_sdk_offline(dict_path.encode('utf-8'), offline_key_path.encode('utf-8'))
    if ret == 0:
        print_log("Successfully offline init SDK!")
    else:
        print_error(f"Failed to offline init SDK, Error code {ret}")
    return ret
119
+
120
def compare_face_clicked(frame1, frame2, threshold):
    """Gradio handler: compare the faces found in two image files.

    Args:
        frame1, frame2: file paths from gr.Image(type='filepath').
        threshold: matching threshold (string or float from the textbox).

    Returns:
        A 9-tuple feeding the output widgets: cropped face 1, cropped face 2,
        match-result icon, similarity markdown, bbox strings for both faces,
        feature vectors for both faces, and the processing time in ms.
    """
    global g_activation_result
    if g_activation_result != 0:
        gr.Warning("SDK Activation Failed!")
        return None, None, None, None, None, None, None, None, None

    # Read both files up front; `with` closes the handles (the original
    # leaked two open file objects per click).
    try:
        with open(frame1, 'rb') as f1, open(frame2, 'rb') as f2:
            buf1 = f1.read()
            buf2 = f2.read()
    except Exception:
        raise gr.Error("Please select images files!")

    image_mat1 = cv2.imdecode(np.frombuffer(buf1, np.uint8), cv2.IMREAD_COLOR)
    image_mat2 = cv2.imdecode(np.frombuffer(buf2, np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, score, face_bboxes, face_features = compare_face(image_mat1, image_mat2, float(threshold))
    process_time = (time.time() - start_time) * 1000

    # Defaults defined BEFORE the try-block so the return line is always safe.
    # The original defined `faces`/`face_bboxes_result` inside the try (NameError
    # if Image.open failed) and indexed face_features[i] even when it was None.
    faces = [Image.new('RGBA', (150, 150), (80, 80, 80, 0)),
             Image.new('RGBA', (150, 150), (80, 80, 80, 0))]
    face_bboxes_result = ["", ""]
    features_result = ["", ""]
    if face_features is not None:
        features_result = [face_features[0], face_features[1]]

    try:
        images = [Image.open(frame1), Image.open(frame2)]
        if face_bboxes is not None:
            for i, bbox in enumerate(face_bboxes):
                # Clamp the engine's bbox to the image bounds before cropping.
                x1 = max(int(bbox[0]), 0)
                y1 = max(int(bbox[1]), 0)
                x2 = min(int(bbox[2]), images[i].width - 1)
                y2 = min(int(bbox[3]), images[i].height - 1)

                face_bboxes_result[i] = f"x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}"

                faces[i] = images[i].crop((x1, y1, x2, y2))
                # Resize to a 150px-high thumbnail, preserving aspect ratio.
                face_image_ratio = faces[i].width / float(faces[i].height)
                faces[i] = faces[i].resize((int(face_image_ratio * 150), 150))
    except Exception:  # narrowed from bare `except:`; best effort -- keep the placeholders
        pass

    matching_result = Image.open(os.path.join(gradio_path, "icons/blank.png"))
    similarity_score = ""
    if score is not None:
        str_score = "{:.4f}".format(score)
        if result == "SAME PERSON":
            matching_result = Image.open(os.path.join(gradio_path, "icons/same.png"))
            similarity_score = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Similarity score: {str_score}</p></div>"""
        else:
            matching_result = Image.open(os.path.join(gradio_path, "icons/different.png"))
            similarity_score = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Similarity score: {str_score}</p></div>"""

    return faces[0], faces[1], matching_result, similarity_score, face_bboxes_result[0], face_bboxes_result[1], features_result[0], features_result[1], str(process_time)
189
+
190
def launch_demo(activate_result):
    """Build and launch the Gradio UI on 0.0.0.0:7860.

    Args:
        activate_result: return code of activate_sdk(); 0 shows the green
            "Activation Success!" banner, anything else the red failure one.
    """
    with gr.Blocks(css=css) as demo:
        # Header: branding, version and contact/social links (raw HTML markdown).
        gr.Markdown(
            f"""
            <a href="https://recognito.vision" style="display: flex; align-items: center;">
            <img src="https://recognito.vision/wp-content/uploads/2024/03/Recognito-modified.png" style="width: 3%; margin-right: 15px;"/>
            </a>
            <div style="display: flex; align-items: center;justify-content: center;">
            <p style="font-size: 36px; font-weight: bold;">Face Recognition {version}</p>
            </div>
            <p style="font-size: 20px; font-weight: bold;">🤝 Contact us for our on-premise Face Recognition, Liveness Detection SDKs deployment</p>
            </div>
            <div style="display: flex; align-items: center;">
            &emsp;&emsp;<a target="_blank" href="mailto:[email protected]"><img src="https://img.shields.io/badge/[email protected]?logo=gmail " alt="www.recognito.vision"></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://wa.me/+14158003112"><img src="https://img.shields.io/badge/whatsapp-recognito-blue.svg?logo=whatsapp " alt="www.recognito.vision"></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://t.me/recognito_vision"><img src="https://img.shields.io/badge/[email protected]?logo=telegram " alt="www.recognito.vision"></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a target="_blank" href="https://join.slack.com/t/recognito-workspace/shared_invite/zt-2d4kscqgn-"><img src="https://img.shields.io/badge/slack-recognito-blue.svg?logo=slack " alt="www.recognito.vision"></a>
            </div>
            <br/>
            <div style="display: flex; align-items: center;">
            &emsp;&emsp;<a href="https://recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/recognito_64.png" style="width: 24px; margin-right: 5px;"/></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://www.linkedin.com/company/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/linkedin64.png" style="width: 24px; margin-right: 5px;"/></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://huggingface.co/Recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/hf1_64.png" style="width: 24px; margin-right: 5px;"/></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://github.com/Recognito-Vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/github64.png" style="width: 24px; margin-right: 5px;"/></a>
            &nbsp;&nbsp;&nbsp;&nbsp;<a href="https://hub.docker.com/u/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/docker64.png" style="width: 24px; margin-right: 5px;"/></a>
            </div>
            <br/>
            """
        )

        # Activation status banner plus the hardware id (needed for licensing).
        with gr.Group():
            if activate_result == 0:
                gr.Markdown("""<p style="text-align: left; font-size: 20px; color: green;">&emsp;Activation Success!</p>""")
            else:
                gr.Markdown("""<p style="text-align: left; font-size: 20px; color: red;">&emsp;Activation Failed!</p>""")

            gr.Textbox(device_id, label="Hardware ID")

        with gr.Row():
            # Left: the two input images with example galleries.
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(scale=1):
                        compare_face_input1 = gr.Image(label="Image1", type='filepath', elem_classes="example-image")
                        gr.Examples([os.path.join(root_path,'examples/1.jpg'),
                                     os.path.join(root_path,'examples/2.jpg'),
                                     os.path.join(root_path,'examples/3.jpg'),
                                     os.path.join(root_path,'examples/4.jpg')],
                                    inputs=compare_face_input1)
                    with gr.Column(scale=1):
                        compare_face_input2 = gr.Image(label="Image2", type='filepath', elem_classes="example-image")
                        gr.Examples([os.path.join(root_path,'examples/5.jpg'),
                                     os.path.join(root_path,'examples/6.jpg'),
                                     os.path.join(root_path,'examples/7.jpg'),
                                     os.path.join(root_path,'examples/8.jpg')],
                                    inputs=compare_face_input2)

            # Right: threshold, trigger button and the result widgets.
            # These are the 9 outputs filled by compare_face_clicked().
            with gr.Blocks():
                with gr.Column(scale=1, min_width=400, elem_classes="block-background"):
                    txt_threshold = gr.Textbox(f"{MATCH_THRESHOLD}", label="Matching Threshold", interactive=True)
                    compare_face_button = gr.Button("Compare Face", variant="primary", size="lg")
                    with gr.Row(elem_classes="face-row"):
                        face_output1 = gr.Image(value=os.path.join(gradio_path,'icons/face.jpg'), label="Face 1", scale=0, elem_classes="face-image")
                        compare_result = gr.Image(value=os.path.join(gradio_path,'icons/blank.png'), min_width=30, scale=0, show_download_button=False, show_label=False)
                        face_output2 = gr.Image(value=os.path.join(gradio_path,'icons/face.jpg'), label="Face 2", scale=0, elem_classes="face-image")
                    similarity_markdown = gr.Markdown("")
                    txt_speed = gr.Textbox(f"", label="Processing Time (ms)", interactive=False, visible=False)
                    with gr.Group():
                        gr.Markdown("""&nbsp;face1""")
                        txt_bbox1 = gr.Textbox(f"", label="Rect", interactive=False)
                        txt_feature1 = gr.Textbox(f"", label="Feature", interactive=False, max_lines=5)
                    with gr.Group():
                        gr.Markdown("""&nbsp;face2""")
                        txt_bbox2 = gr.Textbox(f"", label="Rect", interactive=False)
                        txt_feature2 = gr.Textbox(f"", label="Feature", interactive=False, max_lines=5)

        # Wire the button to the handler; output order must match the handler's 9-tuple.
        compare_face_button.click(compare_face_clicked, inputs=[compare_face_input1, compare_face_input2, txt_threshold], outputs=[face_output1, face_output2, compare_result, similarity_markdown, txt_bbox1, txt_bbox2, txt_feature1, txt_feature2, txt_speed])

    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
268
+
269
if __name__ == '__main__':
    # Activate once at startup; the click handler reads this module global
    # before doing any work.
    activation_code = activate_sdk()
    g_activation_result = activation_code
    launch_demo(activation_code)
fr/gradio/icons/blank.png ADDED
fr/gradio/icons/different.png ADDED
fr/gradio/icons/face.jpg ADDED
fr/gradio/icons/same.png ADDED