Upgrade InspireFace to version 1.22

This commit is contained in:
Jingyu
2025-06-15 01:41:04 +08:00
parent 5cfeed1f4b
commit 622174325d
37 changed files with 1024 additions and 331 deletions

View File

@@ -1,108 +1,102 @@
# InspireFace Python API
We provide a Python API for calling InspireFace, which is implemented by wrapping the dynamic link library using ctypes. You can install the latest release version on your computer via pip from PyPI, or you can configure it using a self-compiled dynamic library with this project.
InspireFace 提供了简单易用的 Python API,通过 ctypes 封装底层动态链接库实现。您可以通过 pip 安装最新发布版本,或使用项目自行编译的动态库进行配置。
## Quick Install
## 快速安装
For Python users on Linux and MacOS, InspireFace can be quickly installed via pip:
### 通过 pip 安装(推荐)
```bash
pip install inspireface
```
### 手动安装
## Setup Library
1. 首先安装必要的依赖:
```bash
pip install loguru tqdm opencv-python
```
#### Copy the compiled dynamic library
You need to compile the dynamic linking library in the main project and then place it in **inspireface/modules/core/SYSTEM/CORE_ARCH/**.
```Bash
# copy or link
2. 将编译好的动态库复制到指定目录:
```bash
# 将编译好的动态库复制到对应系统架构目录
cp YOUR_BUILD_DIR/libInspireFace.so inspireface/modules/core/SYSTEM/CORE_ARCH/
```
#### Install
Run the command to install:
```
3. 安装 Python 包:
```bash
python setup.py install
```
## Require
## 快速开始
You need to install some dependencies beforehand.
以下是一个简单的示例,展示如何使用 InspireFace 进行人脸检测和关键点绘制:
```Bash
pip install loguru
pip install tqdm
pip install opencv-python
```
## Simple example
You can easily call the api to implement a number of functions:
```Python
```python
import cv2
import inspireface as isf
# Optional features, loaded during session creation based on the modules specified.
opt = isf.HF_ENABLE_NONE
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)
# Set detection confidence threshold
# 创建会话,启用所需功能
session = isf.InspireFaceSession(
opt=isf.HF_ENABLE_NONE, # 可选功能
detect_mode=isf.HF_DETECT_MODE_ALWAYS_DETECT # 检测模式
)
# 设置检测置信度阈值
session.set_detection_confidence_threshold(0.5)
# Load the image using OpenCV.
image = cv2.imread(image_path)
assert image is not None, "Please check that the image path is correct."
# 读取图像
image = cv2.imread("path/to/your/image.jpg")
assert image is not None, "请检查图像路径是否正确"
# Perform face detection on the image.
# 执行人脸检测
faces = session.face_detection(image)
print(f"face detection: {len(faces)} found")
print(f"检测到 {len(faces)} 个人脸")
# Copy the image for drawing the bounding boxes.
# 在图像上绘制检测结果
draw = image.copy()
for idx, face in enumerate(faces):
print(f"{'==' * 20}")
print(f"idx: {idx}")
print(f"detection confidence: {face.detection_confidence}")
# Print Euler angles of the face.
print(f"roll: {face.roll}, yaw: {face.yaw}, pitch: {face.pitch}")
# Get face bounding box
# 获取人脸框位置
x1, y1, x2, y2 = face.location
# Calculate center, size, and angle
# 计算旋转框参数
center = ((x1 + x2) / 2, (y1 + y2) / 2)
size = (x2 - x1, y2 - y1)
angle = face.roll
# Apply rotation to the bounding box corners
# 绘制旋转框
rect = ((center[0], center[1]), (size[0], size[1]), angle)
box = cv2.boxPoints(rect)
box = box.astype(int)
# Draw the rotated bounding box
cv2.drawContours(draw, [box], 0, (100, 180, 29), 2)
# Draw landmarks
lmk = session.get_face_dense_landmark(face)
for x, y in lmk.astype(int):
# 绘制关键点
landmarks = session.get_face_dense_landmark(face)
for x, y in landmarks.astype(int):
cv2.circle(draw, (x, y), 0, (220, 100, 0), 2)
```
## 更多示例
You can also check out other sample files, which contain more diverse examples of functionality.
项目提供了多个示例文件,展示了不同的功能:
## Test
- `sample_face_detection.py`: 基础人脸检测
- `sample_face_track_from_video.py`: 视频人脸跟踪
- `sample_face_recognition.py`: 人脸识别
- `sample_face_comparison.py`: 人脸比对
- `sample_feature_hub.py`: 特征提取
- `sample_system_resource_statistics.py`: 系统资源统计
## 运行测试
In the Python API, we have integrated a relatively simple unit test. You can adjust the content of the unit test by modifying the parameters in the configuration file **test/test_settings.py**.
项目包含单元测试,您可以通过修改 `test/test_settings.py` 中的参数来调整测试内容:
```Bash
# Run total test
```bash
python -m unittest discover -s test
```
## 注意事项
1. 确保系统已安装 OpenCV 和其他必要依赖
2. 使用前请确保动态库已正确安装
3. 建议使用 Python 3.7 或更高版本

View File

@@ -1,8 +1,9 @@
from .inspireface import ImageStream, FaceExtended, FaceInformation, SessionCustomParameter, InspireFaceSession, \
launch, FeatureHubConfiguration, feature_hub_enable, feature_hub_disable, feature_comparison, \
launch, terminate, FeatureHubConfiguration, feature_hub_enable, feature_hub_disable, feature_comparison, \
FaceIdentity, feature_hub_set_search_threshold, feature_hub_face_insert, SearchResult, \
feature_hub_face_search, feature_hub_face_search_top_k, feature_hub_face_update, feature_hub_face_remove, \
feature_hub_get_face_identity, feature_hub_get_face_count, view_table_in_terminal, version, query_launch_status, reload, \
feature_hub_get_face_identity, feature_hub_get_face_count, feature_hub_get_face_id_list, view_table_in_terminal, version, query_launch_status, reload, \
set_logging_level, disable_logging, show_system_resource_statistics, get_recommended_cosine_threshold, cosine_similarity_convert_to_percentage, \
get_similarity_converter_config, set_similarity_converter_config, pull_latest_model, switch_landmark_engine, \
HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, HF_SEARCH_MODE_EAGER, HF_SEARCH_MODE_EXHAUSTIVE
HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, HF_SEARCH_MODE_EAGER, HF_SEARCH_MODE_EXHAUSTIVE, \
ignore_check_latest_model, set_cuda_device_id, get_cuda_device_id, print_cuda_device_info, get_num_cuda_devices, check_cuda_device_support, terminate

View File

@@ -7,6 +7,13 @@ from dataclasses import dataclass
from loguru import logger
from .utils import ResourceManager
# Module-level switch: when True, launch() skips verifying that the local
# model file matches the latest released hash.
IGNORE_VERIFICATION_OF_THE_LATEST_MODEL = False


def ignore_check_latest_model(ignore: bool) -> None:
    """Enable or disable verification of the latest model at launch time.

    Args:
        ignore (bool): If True, `launch()` will not verify the local model
            against the latest released model hash.
    """
    global IGNORE_VERIFICATION_OF_THE_LATEST_MODEL
    IGNORE_VERIFICATION_OF_THE_LATEST_MODEL = ignore
class ImageStream(object):
"""
ImageStream class handles the conversion of image data from various sources into a format compatible with the InspireFace library.
@@ -165,6 +172,10 @@ class FaceExtended:
race: int
gender: int
age_bracket: int
emotion: int
def __repr__(self) -> str:
return f"FaceExtended(rgb_liveness_confidence={self.rgb_liveness_confidence}, mask_confidence={self.mask_confidence}, quality_confidence={self.quality_confidence}, left_eye_status_confidence={self.left_eye_status_confidence}, right_eye_status_confidence={self.right_eye_status_confidence}, action_normal={self.action_normal}, action_jaw_open={self.action_jaw_open}, action_shake={self.action_shake}, action_blink={self.action_blink}, action_head_raise={self.action_head_raise}, race={self.race}, gender={self.gender}, age_bracket={self.age_bracket}, emotion={self.emotion})"
class FaceInformation:
@@ -214,6 +225,9 @@ class FaceInformation:
self._token.size = buffer_size
self._token.data = cast(addressof(self.buffer), c_void_p)
def __repr__(self) -> str:
return f"FaceInformation(track_id={self.track_id}, detection_confidence={self.detection_confidence}, location={self.location}, roll={self.roll}, yaw={self.yaw}, pitch={self.pitch})"
@dataclass
class SessionCustomParameter:
@@ -232,6 +246,7 @@ class SessionCustomParameter:
enable_face_attribute: bool = False
enable_face_quality: bool = False
enable_interaction_liveness: bool = False
enable_face_emotion: bool = False
def _c_struct(self):
"""
@@ -247,11 +262,15 @@ class SessionCustomParameter:
enable_mask_detect=int(self.enable_mask_detect),
enable_face_attribute=int(self.enable_face_attribute),
enable_face_quality=int(self.enable_face_quality),
enable_interaction_liveness=int(self.enable_interaction_liveness)
enable_interaction_liveness=int(self.enable_interaction_liveness),
enable_face_emotion=int(self.enable_face_emotion)
)
return custom_param
def __repr__(self) -> str:
return f"SessionCustomParameter(enable_recognition={self.enable_recognition}, enable_liveness={self.enable_liveness}, enable_ir_liveness={self.enable_ir_liveness}, enable_mask_detect={self.enable_mask_detect}, enable_face_attribute={self.enable_face_attribute}, enable_face_quality={self.enable_face_quality}, enable_interaction_liveness={self.enable_interaction_liveness}, enable_face_emotion={self.enable_face_emotion})"
class InspireFaceSession(object):
"""
@@ -431,6 +450,11 @@ class InspireFaceSession(object):
if ret != 0:
logger.error(f"Set track model detect interval error: {ret}")
def set_landmark_augmentation_num(self, num: int = 1) -> None:
    """Set the landmark augmentation number for this session.

    Args:
        num (int): Augmentation number passed to the native engine;
            defaults to 1.
            # NOTE(review): semantics of `num` are defined by the native
            # HFSessionSetLandmarkAugmentationNum call — confirm upstream.
    """
    ret = HFSessionSetLandmarkAugmentationNum(self._sess, num)
    if ret != 0:
        # Non-zero return codes from the native layer indicate failure.
        logger.error(f"Set landmark augmentation num error: {ret}")
def face_pipeline(self, image, faces: List[FaceInformation], exec_param) -> List[FaceExtended]:
"""
Processes detected faces to extract additional attributes based on the provided execution parameters.
@@ -461,12 +485,13 @@ class InspireFaceSession(object):
logger.error(f"Face pipeline error: {ret}")
return []
extends = [FaceExtended(-1.0, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, 0, 0, -1, -1, -1) for _ in range(len(faces))]
extends = [FaceExtended(-1.0, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, 0, 0, -1, -1, -1, -1) for _ in range(len(faces))]
self._update_mask_confidence(exec_param, flag, extends)
self._update_rgb_liveness_confidence(exec_param, flag, extends)
self._update_face_quality_confidence(exec_param, flag, extends)
self._update_face_attribute_confidence(exec_param, flag, extends)
self._update_face_interact_confidence(exec_param, flag, extends)
self._update_face_emotion_confidence(exec_param, flag, extends)
return extends
@@ -550,6 +575,17 @@ class InspireFaceSession(object):
else:
logger.error(f"Get face action result error: {ret}")
def _update_face_emotion_confidence(self, exec_param, flag, extends):
    """Fill the `emotion` field of each FaceExtended entry in place.

    Runs only when the face-emotion module is enabled in `exec_param`.

    Args:
        exec_param: Either an object with boolean feature flags
            (when ``flag == "object"``) or an integer feature bitmask
            (when ``flag == "bitmask"``).
        flag (str): Discriminator telling how to interpret `exec_param`.
        extends: List of FaceExtended results, updated in place by index.
            # assumes extends[i] corresponds to emotion_results.emotion[i]
            # in detection order — TODO confirm against native API.
    """
    if (flag == "object" and exec_param.enable_face_emotion) or (
            flag == "bitmask" and exec_param & HF_ENABLE_FACE_EMOTION):
        emotion_results = HFFaceEmotionResult()
        ret = HFGetFaceEmotionResult(self._sess, PHFFaceEmotionResult(emotion_results))
        if ret == 0:
            for i in range(emotion_results.num):
                extends[i].emotion = emotion_results.emotion[i]
        else:
            logger.error(f"Get face emotion result error: {ret}")
def _update_rgb_liveness_confidence(self, exec_param, flag, extends: List[FaceExtended]):
if (flag == "object" and exec_param.enable_liveness) or (
flag == "bitmask" and exec_param & HF_ENABLE_LIVENESS):
@@ -639,7 +675,7 @@ def launch(model_name: str = "Pikachu", resource_path: str = None) -> bool:
"""
if resource_path is None:
sm = ResourceManager()
resource_path = sm.get_model(model_name)
resource_path = sm.get_model(model_name, ignore_verification=IGNORE_VERIFICATION_OF_THE_LATEST_MODEL)
path_c = String(bytes(resource_path, encoding="utf8"))
ret = HFLaunchInspireFace(path_c)
if ret != 0:
@@ -651,16 +687,31 @@ def launch(model_name: str = "Pikachu", resource_path: str = None) -> bool:
return False
return True
def pull_latest_model(model_name: str = "Pikachu") -> str:
    """Force a re-download of the named model via the resource manager.

    Args:
        model_name (str): The name of the model to pull.

    Returns:
        str: The local filesystem path of the downloaded model file.
    """
    sm = ResourceManager()
    # re_download=True bypasses the local cache and fetches a fresh copy.
    return sm.get_model(model_name, re_download=True)
def reload(model_name: str = "Pikachu", resource_path: str = None) -> bool:
"""
Reloads the InspireFace system with the specified resource directory.
Args:
model_name (str): the name of the model to use.
resource_path (str): if None, use the default model path.
Returns:
"""
if resource_path is None:
sm = ResourceManager()
resource_path = sm.get_model(model_name)
path_c = String(bytes(resource_path, encoding="utf8"))
ret = HFReloadInspireFace(path_c)
if ret != 0:
@@ -672,6 +723,20 @@ def reload(model_name: str = "Pikachu", resource_path: str = None) -> bool:
return False
return True
def terminate() -> bool:
    """Terminate the InspireFace system via the native library.

    Returns:
        bool: True if the system was successfully terminated,
            False otherwise (the native error code is logged).
    """
    ret = HFTerminateInspireFace()
    if ret != 0:
        logger.error(f"Terminate InspireFace failure: {ret}")
        return False
    return True
def query_launch_status() -> bool:
"""
@@ -828,6 +893,9 @@ class FaceIdentity(object):
self.feature = data
self.id = id
def __repr__(self) -> str:
return f"FaceIdentity(id={self.id}, feature={self.feature})"
@staticmethod
def from_ctypes(raw_identity: HFFaceFeatureIdentity):
"""
@@ -906,6 +974,8 @@ class SearchResult:
confidence: float
similar_identity: FaceIdentity
def __repr__(self) -> str:
return f"SearchResult(confidence={self.confidence}, similar_identity={self.similar_identity})"
def feature_hub_face_search(data: np.ndarray) -> SearchResult:
"""
@@ -1048,6 +1118,20 @@ def feature_hub_get_face_count() -> int:
return int(count.value)
def feature_hub_get_face_id_list() -> List[int]:
    """Retrieve the list of face IDs stored in the feature hub.

    Returns:
        List[int]: All existing face IDs, or an empty list if the
            native query fails (the error code is logged).
    """
    ids = HFFeatureHubExistingIds()
    ret = HFFeatureHubGetExistingIds(PHFFeatureHubExistingIds(ids))
    if ret != 0:
        logger.error(f"Failed to get face id list: {ret}")
        # On failure the out-struct is not populated; don't read from it.
        return []
    return [int(ids.ids[i]) for i in range(ids.size)]
def view_table_in_terminal():
"""
Displays the database table of face identities in the terminal.
@@ -1170,3 +1254,47 @@ def query_expansive_hardware_rockchip_dma_heap_path() -> str:
return None
return str(path.value)
def set_cuda_device_id(device_id: int) -> None:
    """Set the active CUDA device ID in the native library.

    Args:
        device_id (int): Index of the CUDA device to use.
    """
    ret = HFSetCudaDeviceId(device_id)
    if ret != 0:
        # Non-zero return codes from the native layer indicate failure.
        logger.error(f"Failed to set CUDA device ID: {ret}")
def get_cuda_device_id() -> int:
    """Get the currently active CUDA device ID from the native library.

    Returns:
        int: The CUDA device ID. If the native call fails, the error is
            logged and the (unmodified) out-parameter value is returned.
    """
    # Renamed from `id` to avoid shadowing the builtin.
    device_id = HInt32()
    ret = HFGetCudaDeviceId(device_id)
    if ret != 0:
        logger.error(f"Failed to get CUDA device ID: {ret}")
    return int(device_id.value)
def print_cuda_device_info() -> None:
    """Print CUDA device information via the native library.

    Output goes to wherever the native layer writes it (typically stdout).
    """
    HFPrintCudaDeviceInfo()
def get_num_cuda_devices() -> int:
    """Get the number of available CUDA devices.

    Returns:
        int: The device count. If the native call fails, the error is
            logged and the (unmodified) out-parameter value is returned.
    """
    num = HInt32()
    ret = HFGetNumCudaDevices(num)
    if ret != 0:
        logger.error(f"Failed to get number of CUDA devices: {ret}")
    return int(num.value)
def check_cuda_device_support() -> bool:
    """Check whether CUDA devices are supported by the native library.

    Returns:
        bool: True if CUDA is supported; False if not, or if the native
            query fails (the error code is logged).
    """
    is_support = HInt32()
    ret = HFCheckCudaDeviceSupport(is_support)
    if ret != 0:
        logger.error(f"Failed to check CUDA device support: {ret}")
        # On failure the out-parameter is not populated; report unsupported.
        return False
    return bool(is_support.value)

View File

@@ -28,37 +28,38 @@ class ResourceManager:
"Pikachu": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu",
"filename": "Pikachu",
"md5": "f2983a2d884902229c1443fdc921b8e5f49cf2daba8a4f103cd127910dc9e7cd"
"md5": "a7ca2d8de26fb1adc1114b437971d841e14afc894fa9869618139da10e0d4357"
},
"Megatron": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron",
"filename": "Megatron",
"md5": "28f2284c5e7cf53b0e152ff524a416c966ab21e724002643b1304aedc4af6b06"
"md5": "709fddf024d9f34ec034d8ef79a4779e1543b867b05e428c1d4b766f69287050"
},
"Megatron_TRT": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron_TRT",
"filename": "Megatron_TRT",
"md5": "25fb4a585b73b0114ff0d64c2bc4071bd005a32a77149b66c474985077dc8f8a"
"md5": "bc9123bdc510954b28d703b8ffe6023f469fb81123fd0b0b27fd452dfa369bab"
},
"Gundam_RK356X": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK356X",
"filename": "Gundam_RK356X",
"md5": "69ea23b89851a38c729b32bb0ed33cf62ebd3c891ea5d596afeadeb1f1c79c69"
"md5": "0fa12a425337ed98bd82610768a50de71cf93ef42a0929ba06cc94c86f4bd415"
},
"Gundam_RK3588": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK3588",
"filename": "Gundam_RK3588",
"md5": "030965798c5257aef11640657f85b89d82e9d170c3798d0b4f2b62ee6aa245ea"
},
"md5": "66070e8d654408b666a2210bd498a976bbad8b33aef138c623e652f8d956641e"
}
}
def get_model(self, name: str, re_download: bool = False) -> str:
def get_model(self, name: str, re_download: bool = False, ignore_verification: bool = False) -> str:
"""
Get model path. Download if not exists or re_download is True.
Args:
name: Model name
re_download: Force re-download if True
ignore_verification: Skip model hash verification if True
Returns:
str: Full path to model file
@@ -72,6 +73,10 @@ class ResourceManager:
# Check if model exists and is complete
if model_file.exists() and not downloading_flag.exists() and not re_download:
if ignore_verification:
print(f"Warning: Model verification skipped for '{name}' as requested.")
return str(model_file)
current_hash = get_file_hash_sha256(model_file)
if current_hash == model_info["md5"]:
return str(model_file)

View File

@@ -2,7 +2,7 @@
# Session option
from inspireface.modules.core.native import HF_ENABLE_NONE, HF_ENABLE_FACE_RECOGNITION, HF_ENABLE_LIVENESS, HF_ENABLE_IR_LIVENESS, \
HF_ENABLE_MASK_DETECT, HF_ENABLE_FACE_ATTRIBUTE, HF_ENABLE_QUALITY, HF_ENABLE_INTERACTION, HF_ENABLE_FACE_POSE, HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, \
HF_ENABLE_MASK_DETECT, HF_ENABLE_FACE_ATTRIBUTE, HF_ENABLE_QUALITY, HF_ENABLE_INTERACTION, HF_ENABLE_FACE_POSE, HF_ENABLE_FACE_EMOTION, HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, \
HF_LANDMARK_HYPLMV2_0_25, HF_LANDMARK_HYPLMV2_0_50, HF_LANDMARK_INSIGHTFACE_2D106_TRACK
# Face track mode

View File

@@ -10,6 +10,7 @@ age_bracket_tags = [
"0-2 years old", "3-9 years old", "10-19 years old", "20-29 years old", "30-39 years old",
"40-49 years old", "50-59 years old", "60-69 years old", "more than 70 years old"
]
emotion_tags = ["Neutral", "Happy", "Sad", "Surprise", "Fear", "Disgust", "Anger"]
@click.command()
@click.argument('image_path')
@@ -20,10 +21,9 @@ def case_face_detection_image(image_path, show):
It also includes pipeline extensions such as RGB liveness, mask detection, and face quality evaluation.
"""
opt = isf.HF_ENABLE_FACE_RECOGNITION | isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | \
isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE | isf.HF_ENABLE_FACE_EMOTION
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)
session.set_detection_confidence_threshold(0.5)
# Load image
image = cv2.imread(image_path)
assert image is not None, "Please check that the image path is correct."
@@ -37,6 +37,7 @@ def case_face_detection_image(image_path, show):
# Detect faces
faces = session.face_detection(image)
print(faces)
print(f"face detection: {len(faces)} found")
draw = image.copy()
@@ -67,9 +68,9 @@ def case_face_detection_image(image_path, show):
# Execute extended functions (optional modules)
select_exec_func = isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | \
isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE | isf.HF_ENABLE_FACE_EMOTION
extends = session.face_pipeline(image, faces, select_exec_func)
print(extends)
for idx, ext in enumerate(extends):
print(f"{'==' * 20}")
print(f"idx: {idx}")
@@ -80,6 +81,7 @@ def case_face_detection_image(image_path, show):
print(f"gender: {gender_tags[ext.gender]}")
print(f"race: {race_tags[ext.race]}")
print(f"age: {age_bracket_tags[ext.age_bracket]}")
print(f"emotion: {emotion_tags[ext.emotion]}")
# Save the annotated image
save_path = os.path.join("tmp", "det.jpg")

View File

@@ -50,12 +50,11 @@ def case_face_tracker_from_video(source, show, out):
"""
# Optional features, loaded during session creation based on the modules specified.
opt = isf.HF_ENABLE_NONE | isf.HF_ENABLE_INTERACTION
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_LIGHT_TRACK, max_detect_num=25, detect_pixel_level=160) # Use video mode
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_LIGHT_TRACK, max_detect_num=25, detect_pixel_level=320) # Use video mode
session.set_track_mode_smooth_ratio(0.06)
session.set_track_mode_num_smooth_cache_frame(15)
session.set_filter_minimum_face_pixel_size(0)
session.set_track_model_detect_interval(0)
session.set_landmark_augmentation_num(1)
session.set_enable_track_cost_spend(True)
# Determine if the source is a digital webcam index or a video file path.
try:

View File

@@ -11,7 +11,7 @@ def case_feature_hub():
db_path = "test.db"
# Configure the feature management system.
feature_hub_config = isf.FeatureHubConfiguration(
primary_key_mode=isf.HF_PK_MANUAL_INPUT,
primary_key_mode=isf.HF_PK_AUTO_INCREMENT,
enable_persistence=True,
persistence_db_path=db_path,
search_threshold=0.48,
@@ -23,14 +23,15 @@ def case_feature_hub():
for i in range(10):
v = np.random.rand(512).astype(np.float32)
feature = isf.FaceIdentity(v, i)
ret, new_id = isf.feature_hub_face_insert(feature)
ret, _ = isf.feature_hub_face_insert(feature)
assert ret, "Failed to insert face feature data into FeatureHub."
assert new_id == i, "Failed to get the correct new id."
feature = isf.FaceIdentity(gen, -1)
isf.feature_hub_face_insert(feature)
result = isf.feature_hub_face_search(gen)
print(f"result: {result}")
assert os.path.exists(db_path), "FeatureHub database file not found."
ids = isf.feature_hub_get_face_id_list()
print(f"ids: {ids}")