Mirror of https://github.com/yakhyo/uniface.git (synced 2025-12-30 09:02:25 +00:00)
improve logging system with verbose flag

- silent by default (only warnings/errors)
- add --verbose flag to all scripts
- add enable_logging() function for library users
- cleaner output for end users
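For library users, the new enable_logging() helper is the opt-in switch; a minimal usage sketch based on the changes below (uniface stays silent below WARNING until it is called):

import logging

from uniface import enable_logging

enable_logging()               # show INFO-level logs from uniface
enable_logging(logging.DEBUG)  # or pass a lower level for more detail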
@@ -26,7 +26,7 @@ def run_inference(detector, image_path: str, vis_threshold: float = 0.6, save_di
     # 1. Get the list of face dictionaries from the detector
     faces = detector.detect(image)

     if faces:
         # 2. Unpack the data into separate lists
         bboxes = [face['bbox'] for face in faces]
@@ -56,9 +56,14 @@ def main():
     parser.add_argument("--threshold", type=float, default=0.6, help="Visualization confidence threshold")
     parser.add_argument("--iterations", type=int, default=1, help="Number of inference runs for benchmarking")
     parser.add_argument("--save_dir", type=str, default="outputs", help="Directory to save output images")
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")

     args = parser.parse_args()

+    if args.verbose:
+        from uniface import enable_logging
+        enable_logging()
+
     print(f"Initializing detector: {args.method}")
     detector = create_detector(method=args.method)

@@ -1,11 +1,12 @@
-import cv2
 import argparse

+import cv2
 import numpy as np

 # Use the new high-level factory functions
 from uniface.detection import create_detector
-from uniface.recognition import create_recognizer
 from uniface.face_utils import compute_similarity
+from uniface.recognition import create_recognizer

+
 def extract_reference_embedding(detector, recognizer, image_path: str) -> np.ndarray:
@@ -19,8 +20,8 @@ def extract_reference_embedding(detector, recognizer, image_path: str) -> np.nda
         raise RuntimeError("No faces found in reference image.")

     # Get landmarks from the first detected face dictionary
-    landmarks = np.array(faces[0]['landmarks'])
+    landmarks = np.array(faces[0]["landmarks"])

     # Use normalized embedding for more reliable similarity comparison
     embedding = recognizer.get_normalized_embedding(image, landmarks)
     return embedding
@@ -43,17 +44,17 @@ def run_video(detector, recognizer, ref_embedding: np.ndarray, threshold: float
         # Loop through each detected face
         for face in faces:
             # Extract bbox and landmarks from the dictionary
-            bbox = face['bbox']
-            landmarks = np.array(face['landmarks'])
+            bbox = face["bbox"]
+            landmarks = np.array(face["landmarks"])

             x1, y1, x2, y2 = map(int, bbox)

             # Get the normalized embedding for the current face
             embedding = recognizer.get_normalized_embedding(frame, landmarks)

             # Compare with the reference embedding
             sim = compute_similarity(ref_embedding, embedding)

             # Draw results
             label = f"Match ({sim:.2f})" if sim > threshold else f"Unknown ({sim:.2f})"
             color = (0, 255, 0) if sim > threshold else (0, 0, 255)
@@ -61,7 +62,7 @@ def run_video(detector, recognizer, ref_embedding: np.ndarray, threshold: float
             cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

         cv2.imshow("Face Recognition", frame)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
+        if cv2.waitKey(1) & 0xFF == ord("q"):
             break

     cap.release()
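Since get_normalized_embedding presumably returns unit-length vectors, the similarity score in this loop behaves like cosine similarity, which for normalized vectors reduces to a dot product. A small illustrative helper (not uniface's compute_similarity itself):

import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # For unit-length embeddings the denominator is ~1, so this is effectively a dot product.
    a, b = a.ravel(), b.ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))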
@@ -72,30 +73,32 @@ def main():
     parser = argparse.ArgumentParser(description="Face recognition using a reference image.")
     parser.add_argument("--image", type=str, required=True, help="Path to the reference face image.")
     parser.add_argument(
-        "--detector",
-        type=str,
-        default="scrfd",
-        choices=['retinaface', 'scrfd'],
-        help="Face detection method."
+        "--detector", type=str, default="scrfd", choices=["retinaface", "scrfd"], help="Face detection method."
     )
     parser.add_argument(
         "--recognizer",
         type=str,
         default="arcface",
-        choices=['arcface', 'mobileface', 'sphereface'],
-        help="Face recognition method."
+        choices=["arcface", "mobileface", "sphereface"],
+        help="Face recognition method.",
     )
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
     args = parser.parse_args()

+    if args.verbose:
+        from uniface import enable_logging
+
+        enable_logging()
+
     print("Initializing models...")
     detector = create_detector(method=args.detector)
     recognizer = create_recognizer(method=args.recognizer)

     print("Extracting reference embedding...")
     ref_embedding = extract_reference_embedding(detector, recognizer, args.image)

     run_video(detector, recognizer, ref_embedding)


 if __name__ == "__main__":
     main()
@@ -60,9 +60,14 @@ def main():
         choices=['arcface', 'mobileface', 'sphereface'],
         help="Face recognition method to use."
     )
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")

     args = parser.parse_args()

+    if args.verbose:
+        from uniface import enable_logging
+        enable_logging()
+
     print(f"Initializing detector: {args.detector}")
     detector = create_detector(method=args.detector)

@@ -17,7 +17,7 @@ __version__ = "0.1.9"
 from uniface.face_utils import compute_similarity, face_alignment
-from uniface.log import Logger
+from uniface.log import Logger, enable_logging
 from uniface.model_store import verify_model_weights
 from uniface.visualization import draw_detections

@@ -54,4 +54,5 @@ __all__ = [
     "face_alignment",
     "verify_model_weights",
     "Logger",
+    "enable_logging",
 ]
@@ -144,7 +144,7 @@ class RetinaFace(BaseDetector):
         metric (Literal["default", "max"]): Metric for ranking detections when `max_num` is limited.
             - "default": Prioritize detections closer to the image center.
             - "max": Prioritize detections with larger bounding box areas.
         center_weight (float): Weight for penalizing detections farther from the image center
             when using the "default" metric. Defaults to 2.0.

     Returns:
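The metric/center_weight docstring above describes a ranking applied when max_num caps the number of returned faces. A sketch of how such a ranking could be computed, inferred only from the docstring wording (the actual uniface implementation may differ):

import numpy as np

def rank_detections(bboxes: np.ndarray, image_shape, metric="default", center_weight=2.0):
    # bboxes: (N, 4) array of [x1, y1, x2, y2]; image_shape: (height, width).
    areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
    if metric == "max":
        scores = areas  # larger boxes rank first
    else:  # "default": penalize boxes far from the image center
        cx = (bboxes[:, 0] + bboxes[:, 2]) / 2 - image_shape[1] / 2
        cy = (bboxes[:, 1] + bboxes[:, 3]) / 2 - image_shape[0] / 2
        scores = areas - center_weight * (cx**2 + cy**2)
    return np.argsort(scores)[::-1]  # indices, best first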
@@ -104,7 +104,7 @@ class Landmark106(BaseLandmarker):
         width, height = bbox[2] - bbox[0], bbox[3] - bbox[1]
         center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
         scale = self.input_size[0] / (max(width, height) * 1.5)

         aligned_face, transform_matrix = bbox_center_alignment(image, center, self.input_size[0], scale, 0.0)

         face_blob = cv2.dnn.blobFromImage(
@@ -130,7 +130,7 @@ class Landmark106(BaseLandmarker):
         landmarks = predictions.reshape((-1, 2))
         landmarks[:, 0:2] += 1
         landmarks[:, 0:2] *= (self.input_size[0] // 2)

         inverse_matrix = cv2.invertAffineTransform(transform_matrix)
         landmarks = transform_points_2d(landmarks, inverse_matrix)
         return landmarks
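The block above maps predictions from the model's [-1, 1] crop coordinates to crop pixels ((p + 1) * input_size / 2) and then back to the original image via the inverted alignment matrix. A generic stand-in for the point-transform step (transform_points_2d itself is not shown in this diff):

import numpy as np

def apply_affine_to_points(points: np.ndarray, matrix: np.ndarray) -> np.ndarray:
    # points: (N, 2); matrix: 2x3 affine as used by cv2.warpAffine / cv2.invertAffineTransform.
    ones = np.ones((points.shape[0], 1), dtype=points.dtype)
    return np.hstack([points, ones]) @ matrix.T  # (N, 2) transformed points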
@@ -193,7 +193,7 @@ if __name__ == "__main__":
     for face in faces:
         # Extract the bounding box
         bbox = face['bbox']

         # 4. Get landmarks for the current face using its bounding box
         landmarks = landmarker.get_landmarks(frame, bbox)

@@ -1,8 +1,28 @@
 import logging

-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(levelname)s - %(message)s",
-    datefmt="%Y-%m-%d %H:%M:%S"
-)
+# Create logger for uniface
 Logger = logging.getLogger("uniface")
+Logger.setLevel(logging.WARNING)  # Only show warnings/errors by default
+Logger.addHandler(logging.NullHandler())
+
+
+def enable_logging(level=logging.INFO):
+    """
+    Enable verbose logging for uniface.
+
+    Args:
+        level: Logging level (logging.DEBUG, logging.INFO, etc.)
+
+    Example:
+        >>> from uniface import enable_logging
+        >>> enable_logging()  # Show INFO logs
+    """
+    Logger.handlers.clear()
+    handler = logging.StreamHandler()
+    handler.setFormatter(logging.Formatter(
+        "%(asctime)s - %(levelname)s - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S"
+    ))
+    Logger.addHandler(handler)
+    Logger.setLevel(level)
+    Logger.propagate = False
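This follows the standard library-logging pattern: a named logger capped at WARNING with a NullHandler, so nothing is emitted unless the application opts in. Besides enable_logging(), an application can attach its own handler through the plain logging API, for example (standard-library calls only, nothing uniface-specific beyond the logger name):

import logging

uniface_logger = logging.getLogger("uniface")  # same logger object used by the library
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
uniface_logger.addHandler(handler)
uniface_logger.setLevel(logging.DEBUG)  # or leave the default WARNING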
@@ -1,112 +1,112 @@
 # Copyright 2025 Yakhyokhuja Valikhujaev
 # Author: Yakhyokhuja Valikhujaev
 # GitHub: https://github.com/yakhyo

 import os
 import hashlib
 import requests
 from tqdm import tqdm

 from uniface.log import Logger
 import uniface.constants as const


 __all__ = ['verify_model_weights']


 def verify_model_weights(model_name: str, root: str = '~/.uniface/models') -> str:
     """
     Ensure model weights are present, downloading and verifying them using SHA-256 if necessary.

     Given a model identifier from an Enum class (e.g., `RetinaFaceWeights.MNET_V2`), this function checks if
     the corresponding `.onnx` weight file exists locally. If not, it downloads the file from a predefined URL.
     After download, the file’s integrity is verified using a SHA-256 hash. If verification fails, the file is deleted
     and an error is raised.

     Args:
         model_name (Enum): Model weight identifier (e.g., `RetinaFaceWeights.MNET_V2`, `ArcFaceWeights.RESNET`, etc.).
         root (str, optional): Directory to store or locate the model weights. Defaults to '~/.uniface/models'.

     Returns:
         str: Absolute path to the verified model weights file.

     Raises:
         ValueError: If the model is unknown or SHA-256 verification fails.
         ConnectionError: If downloading the file fails.

     Examples:
         >>> from uniface.models import RetinaFaceWeights, verify_model_weights
         >>> verify_model_weights(RetinaFaceWeights.MNET_V2)
         '/home/user/.uniface/models/retinaface_mnet_v2.onnx'

         >>> verify_model_weights(RetinaFaceWeights.RESNET34, root='/custom/dir')
         '/custom/dir/retinaface_r34.onnx'
     """

     root = os.path.expanduser(root)
     os.makedirs(root, exist_ok=True)

     # Keep model_name as enum for dictionary lookup
     url = const.MODEL_URLS.get(model_name)
     if not url:
         Logger.error(f"No URL found for model '{model_name}'")
         raise ValueError(f"No URL found for model '{model_name}'")

     file_ext = os.path.splitext(url)[1]
     model_path = os.path.normpath(os.path.join(root, f'{model_name.value}{file_ext}'))

     if not os.path.exists(model_path):
         Logger.info(f"Downloading model '{model_name}' from {url}")
         try:
             download_file(url, model_path)
             Logger.info(f"Successfully downloaded '{model_name}' to {model_path}")
         except Exception as e:
             Logger.error(f"Failed to download model '{model_name}': {e}")
             raise ConnectionError(f"Download failed for '{model_name}'")

     expected_hash = const.MODEL_SHA256.get(model_name)
     if expected_hash and not verify_file_hash(model_path, expected_hash):
         os.remove(model_path)  # Remove corrupted file
         Logger.warning("Corrupted weight detected. Removing...")
         raise ValueError(f"Hash mismatch for '{model_name}'. The file may be corrupted; please try downloading again.")

     return model_path


 def download_file(url: str, dest_path: str) -> None:
     """Download a file from a URL in chunks and save it to the destination path."""
     try:
         response = requests.get(url, stream=True)
         response.raise_for_status()
         with open(dest_path, "wb") as file, tqdm(
             desc=f"Downloading {dest_path}",
             unit='B',
             unit_scale=True,
             unit_divisor=1024
         ) as progress:
             for chunk in response.iter_content(chunk_size=const.CHUNK_SIZE):
                 if chunk:
                     file.write(chunk)
                     progress.update(len(chunk))
     except requests.RequestException as e:
         raise ConnectionError(f"Failed to download file from {url}. Error: {e}")


 def verify_file_hash(file_path: str, expected_hash: str) -> bool:
     """Compute the SHA-256 hash of the file and compare it with the expected hash."""
     file_hash = hashlib.sha256()
     with open(file_path, "rb") as f:
         for chunk in iter(lambda: f.read(const.CHUNK_SIZE), b""):
             file_hash.update(chunk)
     actual_hash = file_hash.hexdigest()
     if actual_hash != expected_hash:
         Logger.warning(f"Expected hash: {expected_hash}, but got: {actual_hash}")
     return actual_hash == expected_hash


 if __name__ == "__main__":
     model_names = [model.value for model in const.RetinaFaceWeights]

     # Download each model in the list
     for model_name in model_names:
         model_path = verify_model_weights(model_name)
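A hedged caller sketch for verify_model_weights, following its docstring and the __main__ block above (the exact enum member names live in uniface.constants and may differ from this guess):

import uniface.constants as const
from uniface.model_store import verify_model_weights

try:
    # RetinaFaceWeights.MNET_V2 is taken from the docstring example above.
    weights_path = verify_model_weights(const.RetinaFaceWeights.MNET_V2, root='~/.uniface/models')
except ConnectionError as err:
    print(f"Download failed: {err}")
except ValueError as err:
    print(f"Unknown model or hash mismatch: {err}")
else:
    print(f"Verified weights at: {weights_path}")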