**EDIT**: Added Berak's suggestion of downscaling/upscaling.
**EDIT**: Added additional changes inspired by Tetragram.
Inspired by the Mathworks [tutorial](https://de.mathworks.com/videos/object-recognition-and-tracking-for-augmented-reality-90546.html) on augmented reality, I wanted to create a similar application for Android that implements both recognition and tracking.
After some research I found that the mentioned [pointTracker](https://de.mathworks.com/help/vision/ref/vision.pointtracker-class.html) from Mathworks uses the KLT algorithm, which is also available in OpenCV as [`calcOpticalFlowPyrLK`](http://docs.opencv.org/2.4/modules/video/doc/motion_analysis_and_object_tracking.html). I implemented a method that takes the last frame, in which I recognized my points, and tries to estimate their new position in the current frame:
```cpp
int BruteForceMatcher::trackWithOpticalFlow(std::vector<cv::Mat> prevPyr, std::vector<cv::Mat> nextPyr,
                                            std::vector<cv::Point2f> &srcPoints, std::vector<cv::Point2f> &srcCorners)
{
    std::vector<cv::Point2f> estPoints;
    std::vector<cv::Point2f> estCorners;
    std::vector<cv::Point2f> goodPoints;   // tracked points, upscaled to full resolution
    std::vector<cv::Point2f> leftsrc;      // their source positions, upscaled to full resolution
    std::vector<cv::Point2f> trackedLow;   // tracked points kept at pyramid (quarter) resolution
    std::vector<uchar> status;
    std::vector<float> error;

    if (srcPoints.size() > 0) {
        // winSize and maxLevel must match the values used in buildOpticalFlowPyramid
        cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, srcPoints, estPoints, status, error,
                                 cv::Size(8, 8), 3);

        for (size_t i = 0; i < estPoints.size(); i++) {
            // the error value is only valid where the flow was actually found (status != 0)
            if (status[i] && error[i] < 20.f) {
                trackedLow.push_back(estPoints[i]);
                // upscale the points for the homography, since the frames were downscaled by 4
                goodPoints.push_back(estPoints[i] * 4.f);
                leftsrc.push_back(srcPoints[i] * 4.f);
            }
        }
        //LOGD("Left Points (est/src): %i, %i", goodPoints.size(), leftsrc.size());

        // findHomography needs at least 4 point pairs
        if (goodPoints.size() < 4) {
            //LOGD("Not enough good points calculated");
            return 0;
        }

        cv::Mat f = cv::findHomography(leftsrc, goodPoints);
        if (f.empty()) {
            //LOGD("Homography matrix is empty!");
            return 0;
        }

        cv::perspectiveTransform(srcCorners, estCorners, f);
        srcCorners.swap(estCorners);
        // keep the quarter-resolution positions, because the next call tracks on downscaled pyramids again
        srcPoints.swap(trackedLow);
        return (int) srcPoints.size();
    }
    return 0;
}
```
And here is the method that is called through a JNI call (a sketch of the JNI wrapper itself follows the listing):
```cpp
std::vector<cv::Point2f> findBruteForceMatches(cv::Mat img)
{
    int matches = 0;
    std::vector<cv::Point2f> ransacs;
    BruteForceMatcher *bruteForceMatcher = new BruteForceMatcher();
    double tf = cv::getTickFrequency();

    if (trackKLT) {
        LOGD("TRACK WITH KLT");
        std::vector<cv::Mat> nextPyr;
        // downscale the frame to a quarter of its size before building the pyramid
        cv::resize(img, img, cv::Size(img.cols / 4, img.rows / 4));
        cv::buildOpticalFlowPyramid(img, nextPyr, cv::Size(8, 8), 3);

        double kltTime = (double) cv::getTickCount();
        matches = bruteForceMatcher->trackWithOpticalFlow(prevPyr, nextPyr, srcPoints, scene_corners);
        kltTime = (double) cv::getTickCount() - kltTime;
        LOGD("KLT Track Time: %f\n", kltTime * 1000. / tf);

        if (matches > 10) {
            trackKLT = true;
            // the pyramid of this frame becomes the previous pyramid for the next frame
            prevPyr.swap(nextPyr);
            delete bruteForceMatcher;
            return scene_corners;
        } else {
            trackKLT = false;
            prevPyr.clear();
            srcPoints.clear();
            scene_corners.clear();
            delete bruteForceMatcher;
            return scene_corners;
        }
    } else {
        LOGD("RECOGNIZE OBJECT");
        double bfMatchTime = (double) cv::getTickCount();
        matches = bruteForceMatcher->findMatchesBF(img, features2d, descriptors, scene_corners, ransacs);
        bfMatchTime = (double) cv::getTickCount() - bfMatchTime;
        LOGD("BruteForceMatch Time: %f\n", bfMatchTime * 1000. / tf);

        if (matches > 3) {
            trackKLT = true;
            cv::resize(img, img, cv::Size(img.cols / 4, img.rows / 4));
            cv::buildOpticalFlowPyramid(img, prevPyr, cv::Size(8, 8), 3);
            // downscale the RANSAC inliers to the quarter resolution used for tracking
            for (size_t i = 0; i < ransacs.size(); i++) {
                ransacs[i] *= 0.25;
            }
            srcPoints.swap(ransacs);
            delete bruteForceMatcher;
            return scene_corners;
        } else {
            img.release();
            scene_corners.clear();
            ransacs.clear();
            delete bruteForceMatcher;
            return scene_corners;
        }
    }
}
```
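For completeness, this is roughly how I call it from the JNI side. The package, class, and method names below are only placeholders for my actual binding; the `jlong` address comes from `Mat.getNativeObjAddr()` on the Java side:

```cpp
#include <jni.h>
#include <opencv2/core/core.hpp>

// Sketch of the JNI entry point (names are placeholders, not the real binding)
extern "C" JNIEXPORT jint JNICALL
Java_com_example_ar_NativeTracker_processFrame(JNIEnv *env, jobject /*thiz*/, jlong matAddr)
{
    cv::Mat &frame = *(cv::Mat *) matAddr;                 // Mat shared from Java
    std::vector<cv::Point2f> corners = findBruteForceMatches(frame);
    return (jint) corners.size();                          // 4 corners when the object is located
}
```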
Unfortunately this method takes about 200 ms per frame (~5 FPS), which is too slow for my application.
Is there any other similar algorithm that could track a couple of points in an image? Or is there a way to speed up my algorithm?
In a paper I read that they use a cross-correlation tracking algorithm; is there something like that in OpenCV?
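While searching I found `cv::matchTemplate`, whose `TM_CCORR_NORMED` mode computes a normalized cross-correlation. Here is a minimal sketch of how I imagine tracking a single point with it; the patch and search-window sizes are just values I picked:

```cpp
#include <opencv2/imgproc/imgproc.hpp>

// Sketch: track one point by normalized cross-correlation (sizes are assumptions)
cv::Point2f trackByNCC(const cv::Mat &prevGray, const cv::Mat &currGray, cv::Point2f prevPos)
{
    const int patch = 16, search = 48;                     // illustrative sizes
    cv::Rect imgRect(0, 0, prevGray.cols, prevGray.rows);
    cv::Rect patchROI(cvRound(prevPos.x) - patch / 2, cvRound(prevPos.y) - patch / 2, patch, patch);
    cv::Rect searchROI(cvRound(prevPos.x) - search / 2, cvRound(prevPos.y) - search / 2, search, search);
    patchROI &= imgRect;                                   // clamp both ROIs to the image
    searchROI &= imgRect;

    cv::Mat response;
    cv::matchTemplate(currGray(searchROI), prevGray(patchROI), response, cv::TM_CCORR_NORMED);

    cv::Point maxLoc;
    cv::minMaxLoc(response, 0, 0, 0, &maxLoc);
    // convert the best match back to full-image coordinates (center of the patch)
    return cv::Point2f(searchROI.x + maxLoc.x + patchROI.width / 2.f,
                       searchROI.y + maxLoc.y + patchROI.height / 2.f);
}
```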
Some specs:
Phone: Nexus 5X (Android 6.0.1)