diff --git a/models/face_detection_yunet/demo.cpp b/models/face_detection_yunet/demo.cpp index 396358ea..45ebb747 100644 --- a/models/face_detection_yunet/demo.cpp +++ b/models/face_detection_yunet/demo.cpp @@ -31,13 +31,6 @@ class YuNet model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_); } - void setBackendAndTarget(int backend_id, int target_id) - { - backend_id_ = backend_id; - target_id_ = target_id; - model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_); - } - /* Overwrite the input size when creating the model. Size format: [Width, Height]. */ void setInputSize(const cv::Size& input_size) diff --git a/models/image_classification_mobilenet/demo.cpp b/models/image_classification_mobilenet/demo.cpp index c106204a..22612877 100644 --- a/models/image_classification_mobilenet/demo.cpp +++ b/models/image_classification_mobilenet/demo.cpp @@ -69,7 +69,7 @@ int main(int argc, char** argv) else cap.open(0); if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); Mat frame, blob; static const std::string kWinName = model; int nbInference = 0; diff --git a/models/object_detection_nanodet/demo.cpp b/models/object_detection_nanodet/demo.cpp index 2e32276f..cce9165c 100644 --- a/models/object_detection_nanodet/demo.cpp +++ b/models/object_detection_nanodet/demo.cpp @@ -46,7 +46,8 @@ class NanoDet { this->strides = { 8, 16, 32, 64 }; this->net = readNet(modelPath); - setBackendAndTarget(bId, tId); + this->net.setPreferableBackend(bId); + this->net.setPreferableTarget(tId); this->project = Mat::zeros(1, this->regMax + 1, CV_32F); for (size_t i = 0; i <= this->regMax; ++i) { @@ -57,12 +58,6 @@ class NanoDet this->generateAnchors(); } - void setBackendAndTarget(Backend bId, Target tId) - { - this->net.setPreferableBackend(bId); - 
this->net.setPreferableTarget(tId); - } - Mat preProcess(const Mat& inputImage) { Image2BlobParams paramNanodet; diff --git a/models/object_detection_nanodet/demo.py b/models/object_detection_nanodet/demo.py index e849552b..b1971670 100644 --- a/models/object_detection_nanodet/demo.py +++ b/models/object_detection_nanodet/demo.py @@ -148,7 +148,7 @@ def vis(preds, res_img, letterbox_scale, fps=None): img = vis(preds, image, letterbox_scale) if args.save: - print('Resutls saved to result.jpg\n') + print('Results saved to result.jpg\n') cv.imwrite('result.jpg', img) if args.vis: diff --git a/models/object_detection_yolox/demo.cpp b/models/object_detection_yolox/demo.cpp index 074e02f4..3b84ae7e 100644 --- a/models/object_detection_yolox/demo.cpp +++ b/models/object_detection_yolox/demo.cpp @@ -61,14 +61,6 @@ class YoloX { this->generateAnchors(); } - void setBackendAndTarget(dnn::Backend bId, dnn::Target tId) - { - this->backendId = bId; - this->targetId = tId; - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - } - Mat preprocess(Mat img) { Mat blob; @@ -137,7 +129,7 @@ class YoloX { boxesXYXY[r].height = boxes_xyxy.at<float>(r, 3); } - vector< int > keep; + vector<int> keep; NMSBoxesBatched(boxesXYXY, maxScores, maxScoreIdx, this->confThreshold, this->nmsThreshold, keep); Mat candidates(int(keep.size()), 6, CV_32FC1); int row = 0; @@ -282,7 +274,7 @@ int main(int argc, char** argv) else cap.open(0); if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); Mat frame, inputBlob; double letterboxScale; diff --git a/models/object_detection_yolox/demo.py b/models/object_detection_yolox/demo.py index 6b4823a3..099b701b 100644 --- a/models/object_detection_yolox/demo.py +++ b/models/object_detection_yolox/demo.py @@ -120,7 +120,7 @@ def vis(dets, srcimg, letterbox_scale, fps=None): img = vis(preds, image, letterbox_scale) if args.save: - print('Resutls 
saved to result.jpg\n') + print('Results saved to result.jpg\n') cv.imwrite('result.jpg', img) if args.vis: diff --git a/models/person_detection_mediapipe/demo.cpp b/models/person_detection_mediapipe/demo.cpp index 0e4f0378..7fdbc493 100644 --- a/models/person_detection_mediapipe/demo.cpp +++ b/models/person_detection_mediapipe/demo.cpp @@ -43,14 +43,6 @@ class MPPersonDet { this->anchors = getMediapipeAnchor(); } - void setBackendAndTarget(dnn::Backend bId, dnn::Target tId) - { - this->backendId = bId; - this->targetId = tId; - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - } - pair preprocess(Mat img) { Mat blob; @@ -237,10 +229,9 @@ int main(int argc, char** argv) backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); //! [Open a video file or an image file or a camera stream] if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); static const std::string kWinName = "MPPersonDet Demo"; - int nbInference = 0; while (waitKey(1) < 0) { cap >> frame; diff --git a/models/pose_estimation_mediapipe/demo.cpp b/models/pose_estimation_mediapipe/demo.cpp index 51206719..4b3af9fb 100644 --- a/models/pose_estimation_mediapipe/demo.cpp +++ b/models/pose_estimation_mediapipe/demo.cpp @@ -45,14 +45,6 @@ class MPPersonDet { this->anchors = getMediapipeAnchor(); } - void setBackendAndTarget(dnn::Backend bId, dnn::Target tId) - { - this->backendId = bId; - this->targetId = tId; - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - } - pair preprocess(Mat img) { Mat blob; @@ -124,7 +116,7 @@ class MPPersonDet { { rotBoxes[i] = Rect2d(Point2d(boxes.at<float>(i, 0), boxes.at<float>(i, 1)), Point2d(boxes.at<float>(i, 2), boxes.at<float>(i, 3))); } - vector< int > keep; + vector<int> keep; NMSBoxes(rotBoxes, score, this->scoreThreshold, this->nmsThreshold, keep, 1.0f, this->topK); if (keep.size() == 0) 
return Mat(); @@ -179,14 +171,6 @@ class MPPose { this->personBoxEnlargeFactor = 1.25; } - void setBackendAndTarget(dnn::Backend bId, dnn::Target tId) - { - this->backendId = bId; - this->targetId = tId; - this->net.setPreferableBackend(this->backendId); - this->net.setPreferableTarget(this->targetId); - } - tuple preprocess(Mat image, Mat person) { /*** @@ -567,7 +551,7 @@ int main(int argc, char** argv) MPPose poseEstimator(model, confThreshold, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second); //! [Open a video file or an image file or a camera stream] if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); static const std::string kWinName = "MPPose Demo"; while (waitKey(1) < 0) diff --git a/models/text_detection_ppocr/demo.cpp b/models/text_detection_ppocr/demo.cpp index d14fe116..c1faa757 100644 --- a/models/text_detection_ppocr/demo.cpp +++ b/models/text_detection_ppocr/demo.cpp @@ -124,7 +124,7 @@ int main(int argc, char** argv) else cap.open(0); if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); Mat originalImage; static const std::string kWinName = modelName; while (waitKey(1) < 0) diff --git a/models/text_recognition_crnn/demo.cpp b/models/text_recognition_crnn/demo.cpp index 91a89e8f..0da944c9 100644 --- a/models/text_recognition_crnn/demo.cpp +++ b/models/text_recognition_crnn/demo.cpp @@ -224,7 +224,7 @@ int main(int argc, char** argv) else cap.open(0); if (!cap.isOpened()) - CV_Error(Error::StsError, "Cannot opend video or file"); + CV_Error(Error::StsError, "Cannot open video or file"); Mat originalImage; static const std::string kWinName = modelPath; while (waitKey(1) < 0) diff --git a/models/text_recognition_crnn/demo.py b/models/text_recognition_crnn/demo.py index 438c4d87..b409833b 100644 --- a/models/text_recognition_crnn/demo.py +++ 
b/models/text_recognition_crnn/demo.py @@ -106,7 +106,7 @@ def visualize(image, boxes, texts, color=(0, 255, 0), isClosed=True, thickness=2 # Save results if save is true if args.save: - print('Resutls saved to result.jpg\n') + print('Results saved to result.jpg\n') cv.imwrite('result.jpg', original_image) # Visualize results in a new window