Commit 1d22992

1. Use enqueueV2 instead of enqueue
2. Update README
1 parent 93eaff5 commit 1d22992

File tree

11 files changed: +84, -49 lines

FaceAlgorithm/face_detect_yolov5face/detector_yolov5face.cpp

+18-17
@@ -41,8 +41,7 @@ HZFLAG Detector_Yolov5Face::InitDetector_Yolov5Face(Config& config)
 return HZ_WITHOUTMODEL;
 }
 Onnx2Ttr onnx2trt;
-//IHostMemory* modelStream{ nullptr };
-onnx2trt.onnxToTRTModel(config.Yolov5FactDetectModelPath.c_str(),config.yolov5face_detect_bs,out_engine.c_str());
+onnx2trt.onnxToTRTModel(gLogger,config.Yolov5FactDetectModelPath.c_str(),config.yolov5face_detect_bs,out_engine.c_str());
 }
 size_t size{0};
 std::ifstream file(out_engine, std::ios::binary);
@@ -97,23 +96,25 @@ HZFLAG Detector_Yolov5Face::Detect_Yolov5Face(std::vector<cv::Mat>&ImgVec,std::v
 float* buffer_idx = (float*)buffers[inputIndex];
 for (int b = 0; b < detector_batchsize; b++)
 {
-if (ImgVec[b].empty()||ImgVec[b].data==NULL)
-{
-continue;
-}
-ImgVec[b] = ImgVec[b].clone();
-size_t size_image = ImgVec[b].cols * ImgVec[b].rows * 3*sizeof(uint8_t);
-size_t size_image_dst = INPUT_H * INPUT_W * 3*sizeof(uint8_t);
-//copy data to pinned memory
-memcpy(img_host,ImgVec[b].data,size_image);
-//copy data to device memory
-CHECK(cudaMemcpy(img_device,img_host,size_image,cudaMemcpyHostToDevice));
-preprocess_kernel_img_yolov5_face(img_device,ImgVec[b].cols,ImgVec[b].rows, buffer_idx, INPUT_W, INPUT_H, stream);
-buffer_idx += size_image_dst;
+if (ImgVec[b].empty()||ImgVec[b].data==NULL)
+{
+continue;
+}
+ImgVec[b] = ImgVec[b].clone();
+size_t size_image = ImgVec[b].cols * ImgVec[b].rows * 3*sizeof(uint8_t);
+size_t size_image_dst = INPUT_H * INPUT_W * 3*sizeof(uint8_t);
+//copy data to pinned memory
+memcpy(img_host,ImgVec[b].data,size_image);
+//copy data to device memory
+CHECK(cudaMemcpy(img_device,img_host,size_image,cudaMemcpyHostToDevice));
+preprocess_kernel_img_yolov5_face(img_device,ImgVec[b].cols,ImgVec[b].rows, buffer_idx, INPUT_W, INPUT_H, stream);
+buffer_idx += size_image_dst;
 }
 // Run inference
-
-doInference(*context,stream,(void**)buffers,prob,detector_batchsize);
+//(*context).enqueue(detector_batchsize, buffers, stream, nullptr);
+(*context).enqueueV2(buffers, stream, nullptr);
+CHECK(cudaMemcpyAsync(prob, buffers[1], detector_batchsize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
+cudaStreamSynchronize(stream);
 for (int b = 0; b < detector_batchsize; b++)
 {
 std::vector<decodeplugin_yolov5face::Detection> res;
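For reference, a minimal sketch of the inference pattern this commit moves to: enqueueV2 drops the explicit batch argument that enqueue required (the batch dimension comes from the engine's binding shapes), and the output copy plus stream synchronization now happen inline. The function and variable names below (runInference, hostOutput, outputSize) are illustrative, not the repo's exact ones.

```cpp
// Sketch only: assumes ctx is an initialized nvinfer1::IExecutionContext*,
// buffers[0]/buffers[1] are the device input/output bindings, and
// outputSize matches the engine's output binding size per batch element.
#include <NvInfer.h>
#include <cuda_runtime_api.h>

void runInference(nvinfer1::IExecutionContext* ctx, void** buffers,
                  float* hostOutput, int batch, size_t outputSize,
                  cudaStream_t stream)
{
    // enqueueV2 takes no batch argument; the batch dimension is defined
    // by the engine/binding shapes rather than passed at call time.
    ctx->enqueueV2(buffers, stream, nullptr);

    // Copy the output binding back to the host on the same stream.
    cudaMemcpyAsync(hostOutput, buffers[1],
                    batch * outputSize * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);

    // Wait for both the inference and the copy to finish.
    cudaStreamSynchronize(stream);
}
```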

FaceAlgorithm/face_detect_yolov7face/detector_yolov7face.cpp

+3-2
@@ -42,7 +42,7 @@ HZFLAG Detector_Yolov7Face::InitDetector_Yolov7Face(Config& config)
 }
 Onnx2Ttr onnx2trt;
 //IHostMemory* modelStream{ nullptr };
-onnx2trt.onnxToTRTModel(config.Yolov7FactDetectModelPath.c_str(),config.yolov7face_detect_bs,out_engine.c_str());
+onnx2trt.onnxToTRTModel(gLogger,config.Yolov7FactDetectModelPath.c_str(),config.yolov7face_detect_bs,out_engine.c_str());
 }
 size_t size{0};
 std::ifstream file(out_engine, std::ios::binary);//out_engine"/home/pcb/FaceRecognition_Linux_Release/yolov7face_test/yolov7-face-tensorrt/yolov7s-face_batch=1.engine"
@@ -129,7 +129,8 @@ HZFLAG Detector_Yolov7Face::Detect_Yolov7Face(std::vector<cv::Mat>&ImgVec,std::v
 buffer_idx += size_image_dst;
 }
 //inference
-(*context).enqueue(detector_batchsize,(void**)this->buffers, stream, nullptr);
+//(*context).enqueue(detector_batchsize,(void**)this->buffers, stream, nullptr);
+(*context).enqueueV2((void**)this->buffers, stream, nullptr);

 //postprocess
 float *predict = (float *)this->buffers[outputIndex];

FaceAlgorithm/gender_age_recognition/GenderAgeRecognition.cpp

+1-1
@@ -37,7 +37,7 @@ HZFLAG GenderAgeRecognition:: GenderAgeRecognitionInit(Config&config)
 }
 Onnx2Ttr onnx2trt;
 //IHostMemory* modelStream{ nullptr };
-onnx2trt.onnxToTRTModel(config.GenderAgeModelPath.c_str(),config.gender_age_bs,out_engine.c_str());//config.classs_path
+onnx2trt.onnxToTRTModel(gLogger,config.GenderAgeModelPath.c_str(),config.gender_age_bs,out_engine.c_str());//config.classs_path
 //assert(modelStream != nullptr);
 //modelStream->destroy();
 }

FaceAlgorithm/mask_recognition/MaskRecognition.cpp

+1-1
@@ -37,7 +37,7 @@ HZFLAG MaskRecognition:: MaskRecognitionInit(Config&config)
 }
 Onnx2Ttr onnx2trt;
 //IHostMemory* modelStream{ nullptr };
-onnx2trt.onnxToTRTModel(config.MaskReconitionModelPath.c_str(),1,out_engine.c_str());//config.classs_path
+onnx2trt.onnxToTRTModel(gLogger,config.MaskReconitionModelPath.c_str(),1,out_engine.c_str());//config.classs_path
 //assert(modelStream != nullptr);
 //modelStream->destroy();
 }

FaceAlgorithm/silent_face_anti_spoofing/SilentFaceAntiSpoofing.cpp

+1-1
@@ -36,7 +36,7 @@ HZFLAG SilentFaceAntiSpoofing:: SilentFaceAntiSpoofingInit(Config&config)
 return HZ_ERROR;
 }
 Onnx2Ttr onnx2trt;
-onnx2trt.onnxToTRTModel(config.FaceSilentModelPath.c_str(),config.silent_face_anti_spoofing_bs,out_engine.c_str());
+onnx2trt.onnxToTRTModel(gLogger,config.FaceSilentModelPath.c_str(),config.silent_face_anti_spoofing_bs,out_engine.c_str());

 }
 std::ifstream file(out_engine, std::ios::binary);

FaceAlgorithm_Test/main.cpp

+17-7
@@ -14,6 +14,7 @@
 #define mask_recognition 1 //mask recognition
 #define gender_age_recognition 1 //gender & age recognition
 #define silnet_face_anti_spoofing 1 //silent liveness detection
+#define show 1 //display

 /*-----------------------------------------
 Face recognition pipeline
@@ -157,10 +158,11 @@ int main(int argc, char** argv)
 cv::putText(RawImageVec[i], label3, cv::Point(dets[i][j].bbox.xmin, dets[i][j].bbox.ymin), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255, 255, 0), 2);
 //std::cout << "yaw angle:" << dets[i][j].YawAngle<< " pitch angle:" << dets[i][j].PitchAngle<<" inter-pupillary distance:"<<dets[i][j].InterDis<< std::endl;
 }
+#if show
 cv::imshow("show", RawImageVec[i]);
-cv::waitKey(1);
-}
-
+cv::waitKey(1);
+#endif
+}
 }
 std::cout<<"face_detect test finash!"<<std::endl;
 #endif
@@ -201,7 +203,7 @@ int main(int argc, char** argv)
 auto start = std::chrono::system_clock::now();
 Yolov5Face_Detect(RawImageVec,dets);
 auto end = std::chrono::system_clock::now();
-//std::cout<<"time:"<<(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count())<<"ms"<<std::endl;
+std::cout<<"yolov5 face detect average time:"<<(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count())/(config.yolov5face_detect_bs*1.0)<<"ms"<<std::endl;
 for (int i = 0; i < dets.size(); i++)
 {
 for (size_t j = 0; j < dets[i].size(); j++)
@@ -217,8 +219,10 @@ int main(int argc, char** argv)
 cv::putText(RawImageVec[i], label3, cv::Point(dets[i][j].bbox.xmin, dets[i][j].bbox.ymin), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255, 255, 0), 2);
 //std::cout << "yaw angle:" << dets[i][j].YawAngle<< " pitch angle:" << dets[i][j].PitchAngle<<" inter-pupillary distance:"<<dets[i][j].InterDis<< std::endl;
 }
+#if show
 cv::imshow("show", RawImageVec[i]);
-cv::waitKey(1);
+cv::waitKey(1);
+#endif
 }

 }
@@ -260,7 +264,7 @@ int main(int argc, char** argv)
 auto start = std::chrono::system_clock::now();
 Yolov7Face_Detect(RawImageVec,dets);
 auto end = std::chrono::system_clock::now();
-//std::cout<<"time:"<<(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count())<<"ms"<<std::endl;
+std::cout<<"yolov7face average time:"<<(std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count())/(config.yolov7face_detect_bs*1.0)<<"ms"<<std::endl;
 for (int k = 0; k < dets.size(); k++)
 {
 for (size_t f = 0; f < dets[k].size(); f++)
@@ -276,8 +280,10 @@ int main(int argc, char** argv)
 cv::putText(RawImageVec[k], label3, cv::Point(dets[k][f].bbox.xmin, dets[k][f].bbox.ymin), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 0), 1);
 //std::cout << "yaw angle:" << dets[k][f].YawAngle<< " pitch angle:" << dets[k][f].PitchAngle<<" inter-pupillary distance:"<<dets[k][f].InterDis<< std::endl;
 }
+#if show
 cv::imshow("show", RawImageVec[k]);
-cv::waitKey(1);
+cv::waitKey(1);
+#endif
 }

 }
@@ -324,8 +330,10 @@ int main(int argc, char** argv)
 //std::cout << "yaw angle:" << dets[i][j].YawAngle<< " pitch angle:" << dets[i][j].PitchAngle<<" inter-pupillary distance:"<<dets[i][j].InterDis<< std::endl;
 }
 }
+#if show
 cv::imshow("show", img);
 cv::waitKey(1);
+#endif
 }

 }
@@ -411,9 +419,11 @@ int main(int argc, char** argv)
 //step 6: compute face similarity
 float simi2=Cal_Score(Face_Grop[0].face_feature,Face_Grop[1].face_feature);
 std::cout<<"simi2:"<<simi2<<std::endl;
+#if show
 cv::imshow("face",MatVec[0]);
 cv::waitKey(0);
 #endif
+#endif


 #if mask_recognition
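The new show macro and the per-image timing printout follow the same pattern in each test block; below is a condensed sketch, with an assumed batchSize parameter standing in for config.yolov5face_detect_bs / config.yolov7face_detect_bs and the detector call elided.

```cpp
// Condensed sketch of the timing + optional-display pattern used in main.cpp;
// the window name and function name are illustrative.
#include <chrono>
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

#define show 1   // set to 0 to compile out all cv::imshow/cv::waitKey calls

void timeAndShow(std::vector<cv::Mat>& batch, int batchSize)
{
    auto start = std::chrono::system_clock::now();
    // ... run the face detector on the whole batch here ...
    auto end = std::chrono::system_clock::now();

    // Batch time divided by batch size gives the average time per image.
    double avgMs = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                   / (batchSize * 1.0);
    std::cout << "average time: " << avgMs << " ms" << std::endl;

#if show
    for (auto& img : batch)
    {
        cv::imshow("show", img);
        cv::waitKey(1);
    }
#endif
}
```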

README.md

+41-17
@@ -29,7 +29,7 @@
 5) Face feature comparison (face similarity calculation)

 8. Conditional-compilation test notes
-| Test type | enable | Description |
+| Test category | enable | Description |
 |:----------|:----------|:----------|
 |face_detect |1| face detection |
 |yolov5face_detect |1| yolov5face face detection |
@@ -42,28 +42,52 @@
 |silnet_face_anti_spoofing |1| silent liveness detection |

 ## Algorithm description
-### 1. Face detection
-1. retinaface (mobilenet0.25; R50 requires modifying the code yourself)
-2. yolov5face (yolov5sface; the n, m, l, x variants require converting the corresponding onnx yourself)
-3. yolov7face (yolov7sface; models of other sizes need to be converted yourself)
-4. yolov8facee (TO DO))
+### 1 Face detection
+#### 1) Face detection with retinaface (mobilenet0.25; R50 requires modifying the code yourself)
+![demoimg1](https://insightface.ai/assets/img/github/11513D05.jpg)
+#### 2) yolov5face (yolov5sface; the n, m, l, x variants require converting the corresponding onnx yourself)
+<img src="./resources/yolov5face_test.jpg" alt="drawing" width="800"/>
+
+#### 3) yolov7face (yolov7sface; models of other sizes need to be converted yourself)
+<img src="./resources/yolov7face_test.jpg" alt="drawing" width="800"/>
+
+#### 4) yolov8facee (TO DO)


 ### 2. Face recognition
-1. arcface (R50)
-2. arcface (R101; download the model and modify the code yourself)
+
+#### 1) arcface (R50)
+
+#### 2) arcface (R101; download the model and modify the code yourself)
+<div align="left">
+<img src="https://insightface.ai/assets/img/github/facerecognitionfromvideo.PNG" width="800"/>
+</div>
+

 ### 3. Masked-face recognition
-1. Classification model
+#### 1) Detect -> crop -> recognize (classification)
+![demoimg1](https://insightface.ai/assets/img/github/cov_test.jpg)

 ### 4. Age and gender
-1. Age and gender recognition from InsightFace
+#### 1) Face detection -> crop -> age and gender recognition
+<div align="left">
+<img src="https://insightface.ai/assets/img/github/t1_genderage.jpg" width="800"/>
+</div>

 ### 5. Silent liveness detection
-1. Silent-Face-Anti-Spoofing
+#### 1) Silent-Face-Anti-Spoofing
+
+| | sample| result |
+|:----------:|:----------:|:----------|
+0.jpg|<img src="./FaceAlgorithm_Test/antispoofing/0.jpg" width="300" height="300"/>|fake
+1.jpg|<img src="./FaceAlgorithm_Test/antispoofing/1.jpg" width="300" height="300"/>|fake
+2.jpg|<img src="./FaceAlgorithm_Test/antispoofing/2.jpg" width="300" height="300"/>|real
+3.jpg|<img src="./FaceAlgorithm_Test/antispoofing/3.jpg" width="300" height="300"/>|real
+4.jpg|<img src="./FaceAlgorithm_Test/antispoofing/4.jpg" width="300" height="300"/>|fake
+5.jpg|<img src="./FaceAlgorithm_Test/antispoofing/5.jpg" width="300" height="300"/>|fake

 ### 6. Tracking
-1. ByteTracker (takes the face bbox and face landmarks as tracking input; bug fixes)
+#### 1) ByteTracker (takes the face bbox and face landmarks as tracking input; bug fixes)

 ### 7. Algorithm interface
 ```
@@ -170,16 +194,16 @@ HZFLAG Release(Config& config);
 Models ([Baidu Drive](https://pan.baidu.com/s/1c8NQO2cZpAqwEMbfZxsJZg) code: 5xaa)

 Test data ([Baidu Drive](https://pan.baidu.com/s/1nNHUCFHza2JzAnMZhA_9gQ) code: bphn)
-| Model | Purpose | Notes |
-|:----------|:----------|:----------|
+| name | Function | Notes |
+|:----------:|:----------:|:----------:|
 |FaceDetect.wts |face detection|
 |FaceRecognition.wts |face recognition|
 |GenderAge.onnx |age & gender recognition|
 |MaskRecognition.onnx |mask recognition|
 |yolov5s-face_bs=1.onnx |yolov5s face detection|
-|yolov5s-face_bs=4.onnx |yolov5s face detection| batchsize=4
+|yolov5s-face_bs=4.onnx |yolov5s face detection| bs=4
 |yolov7s-face_bs=1.onnx |yolov7s face detection|
-|yolov7s-face_bs=4.onnx |yolov7s face detection| batchsize=4
+|yolov7s-face_bs=4.onnx |yolov7s face detection| bs=4
 |2.7_80x80_MiniFASNetV2.onnx |silent liveness detection|

 ## 2. Environment
@@ -219,4 +243,4 @@ set(TensorRT_LIB "/xxx/xxx/TensorRT-8.2.5.1/lib" CACHE INTERNAL "TensorRT Librar
 4. https://github.com/linghu8812/tensorrt_inference
 5. https://github.com/derronqi/yolov7-face/tree/main
 6. https://github.com/we0091234/yolov7-face-tensorrt
-
+7. https://github.com/deepinsight/insightface

common/ONNX2TRT.cpp

+1-1
@@ -40,7 +40,7 @@ int Onnx2Ttr::get_stream_from_file(const char* filename, unsigned char* buf, siz
 }
 }

-void Onnx2Ttr::onnxToTRTModel(const char* modelFile, // name of the onnx model
+void Onnx2Ttr::onnxToTRTModel(Logger gLogger,const char* modelFile, // name of the onnx model
 unsigned int maxBatchSize, // batch size - NB must be at least as large as the batch we want to run with
 const char* out_trtfile)
 {
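A rough sketch of the updated call site: the Logger is no longer a private member of Onnx2Ttr, so each caller supplies its own (typically shared) gLogger as the new first argument. The file names and batch size below are placeholders, and the include assumes the project's common header declares both Onnx2Ttr and Logger.

```cpp
// Sketch only: paths and batch size are placeholders, not repo defaults.
#include "ONNX2TRT.h"    // declares Onnx2Ttr; Logger wraps nvinfer1::ILogger

static Logger gLogger;   // caller-owned logger, shared across modules

void buildEngineExample()
{
    Onnx2Ttr onnx2trt;
    // The logger is now the first parameter; the remaining arguments
    // (ONNX path, maxBatchSize, output engine path) are unchanged.
    onnx2trt.onnxToTRTModel(gLogger,
                            "face_model.onnx",
                            1,
                            "face_model.engine");
}
```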

common/ONNX2TRT.h

+1-2
@@ -28,14 +28,13 @@ using namespace nvinfer1;
 class Onnx2Ttr
 {
 private:
-Logger gLogger;
 int gUseDLACore;
 public:
 Onnx2Ttr(/* args */);
 ~Onnx2Ttr();
 void enableDLA(IBuilderConfig* b, int useDLACore);
 int get_stream_from_file(const char* filename, unsigned char* buf, size_t* size);
-void onnxToTRTModel(const char* modelFile, // name of the onnx model
+void onnxToTRTModel(Logger gLogger,const char* modelFile, // name of the onnx model
 unsigned int maxBatchSize, // batch size - NB must be at least as large as the batch we want to run with
 const char* out_trtfile);

resources/yolov5face_test.jpg

1.27 MB

resources/yolov7face_test.jpg

1.12 MB
