BitmapToolkit Scol plugin
ArFeaturedMarker.cpp
/*
-----------------------------------------------------------------------------
This source file is part of OpenSpace3D
For the latest info, see http://www.openspace3d.com

Copyright (c) 2012 I-maginer

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place - Suite 330, Boston, MA 02111-1307, USA, or go to
http://www.gnu.org/copyleft/lesser.txt

-----------------------------------------------------------------------------
*/

/*
 Toolkit based on the OpenCV library
 First version: December 2010
 Author: Bastien BOURINEAU
*/

#include <scolPlugin.h>

#include "ArFeaturedMarker.h"
#include <cstdio>
#include <set>

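// Parallel body for the symmetric match filter (cross-check): a match
// (i -> j) from image 1 to image 2 is kept only when the best match of
// feature j back in image 1 is feature i. Each worker appends its results
// to a per-thread bucket (indexed with cv::getThreadNum()) so no locking
// is needed; the buckets are merged in RobustMatcher::symmetryTest().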
class symTestParallel : public cv::ParallelLoopBody
{
public:
    symTestParallel(const std::vector<std::vector<cv::DMatch> >& _matches1, const std::vector<std::vector<cv::DMatch> >& _matches2, std::vector<std::vector<cv::DMatch> > &_omatchVec)
    : matches1(_matches1),
      matches2(_matches2),
      omatchVec(_omatchVec)
    {}

    void operator()(const cv::Range& range) const
    {
        const int start = range.start;
        const int end = range.end;

        // for all matches image 1 -> image 2
        for (std::vector<std::vector<cv::DMatch> >::const_iterator matchIterator1 = (matches1.begin() + start); matchIterator1 != (matches1.begin() + end); ++matchIterator1)
        {
            // ignore deleted matches
            if (matchIterator1->empty() || matchIterator1->size() < 2)
                continue;

            // for all matches image 2 -> image 1
            for (std::vector<std::vector<cv::DMatch> >::const_iterator matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2)
            {
                // ignore deleted matches
                if (matchIterator2->empty() || matchIterator2->size() < 2)
                    continue;

                // match symmetry test
                if ((*matchIterator1)[0].queryIdx == (*matchIterator2)[0].trainIdx &&
                    (*matchIterator2)[0].queryIdx == (*matchIterator1)[0].trainIdx)
                {
                    // add symmetrical match
                    omatchVec[cv::getThreadNum()].push_back(cv::DMatch((*matchIterator1)[0].queryIdx, (*matchIterator1)[0].trainIdx, (*matchIterator1)[0].distance));
                    break; // next match in image 1 -> image 2
                }
            }
        }
    }

private:
    symTestParallel& operator=(const symTestParallel&); // to quiet MSVC
    std::vector<std::vector<cv::DMatch> > matches1;
    std::vector<std::vector<cv::DMatch> > matches2;
    std::vector<std::vector<cv::DMatch> > &omatchVec;
};

RobustMatcher::RobustMatcher()
{
    // ORB is the default feature
    /*
    detector_ = cv::ORB::create();

    // BruteForce matcher with Norm Hamming is the default matcher
    matcher_ = cv::makePtr<cv::BFMatcher>((int)cv::NORM_HAMMING, false);
    */
}

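// Lowe's ratio test: a 2-NN match is kept only when the best candidate is
// clearly better than the second best, i.e. when
// distance(best) / distance(second best) <= ratio_. Ambiguous matches, whose
// two nearest neighbours are almost equally distant, are cleared.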
int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> > &matches)
{
    int removed = 0;
    // for all matches
    for (std::vector<std::vector<cv::DMatch> >::iterator matchIterator = matches.begin(); matchIterator != matches.end(); ++matchIterator)
    {
        // if 2 nearest neighbours have been identified
        if (matchIterator->size() > 1)
        {
            // check the distance ratio
            if ((*matchIterator)[0].distance / (*matchIterator)[1].distance > ratio_)
            {
                matchIterator->clear(); // remove match
                removed++;
            }
        }
        else
        { // does not have 2 neighbours
            matchIterator->clear(); // remove match
            removed++;
        }
    }
    return removed;
}

void RobustMatcher::symmetryTest(const std::vector<std::vector<cv::DMatch> >& matches1,
    const std::vector<std::vector<cv::DMatch> >& matches2,
    std::vector<cv::DMatch>& symMatches)
{
    // one output bucket per worker thread
    std::vector<std::vector<cv::DMatch> > symParallel(cv::getNumThreads());

    // run the symmetry test in parallel
    cv::parallel_for_(cv::Range(0, (int)matches1.size()), symTestParallel(matches1, matches2, symParallel));

    // merge the per-thread buckets
    for (unsigned int i = 0; i < symParallel.size(); i++)
    {
        symMatches.insert(symMatches.end(), symParallel[i].begin(), symParallel[i].end());
    }
}

void RobustMatcher::robustMatch(const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
    std::vector<cv::KeyPoint>& keypoints_frame, const cv::Mat& descriptors_model, const cv::Mat& mask)
{
    cv::Mat descriptors_frame;

    // 1. Detect the features and compute their descriptors
    detector_->detectAndCompute(frame, mask, keypoints_frame, descriptors_frame);

    // 2. Match the two image descriptors
    std::vector<std::vector<cv::DMatch> > matches12, matches21;

    // 2a. From image 1 to image 2
    matcher_->knnMatch(descriptors_frame, descriptors_model, matches12, 2); // return 2 nearest neighbours

    // 2b. From image 2 to image 1
    matcher_->knnMatch(descriptors_model, descriptors_frame, matches21, 2); // return 2 nearest neighbours

    // 3. Remove matches for which the NN ratio is greater than the threshold
    // clean image 1 -> image 2 matches
    ratioTest(matches12);
    // clean image 2 -> image 1 matches
    ratioTest(matches21);

    // 4. Remove non-symmetrical matches
    symmetryTest(matches12, matches21, good_matches);
}
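
// fastRobustMatch is the per-frame variant: it matches the frame descriptors
// only against the trained marker descriptors and applies the ratio test
// alone, skipping the more expensive symmetry test used by robustMatch().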

void RobustMatcher::fastRobustMatch(const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
    std::vector<cv::KeyPoint>& keypoints_frame, const cv::Mat& mask)
{
    cv::Mat descriptors_frame;
    good_matches.clear();

    // 1. Detect the features and compute their descriptors
    detector_->detectAndCompute(frame, mask, keypoints_frame, descriptors_frame);

    // 2. Match the frame descriptors against the trained marker descriptors
    std::vector<std::vector<cv::DMatch> > matches;
    matcher_->knnMatch(descriptors_frame, matches, 2);

    // 3. Remove matches for which the NN ratio is greater than the threshold
    ratioTest(matches);

    // 4. Fill the good matches container
    for (std::vector<std::vector<cv::DMatch> >::iterator matchIterator = matches.begin(); matchIterator != matches.end(); ++matchIterator)
    {
        if (!matchIterator->empty()) good_matches.push_back((*matchIterator)[0]);
    }
}
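
// addTrainData extends the matcher's training set with a new view of the
// marker: the already trained descriptors are fetched back, the new
// descriptors are appended, everything is concatenated into a single matrix
// with cv::vconcat, and the matcher index is rebuilt with train().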

bool RobustMatcher::addTrainData(const cv::Mat& frame, std::vector<cv::KeyPoint> &marker_keypoints)
{
    //if (marker_keypoints.size() > 2000)
    //    return false;

    cv::Mat descriptors_marker;
    std::vector<cv::KeyPoint> keypoints_marker;
    std::vector<cv::Mat> descMat;

    try
    {
        // 1. Detect the features and compute their descriptors
        traindetector_->detectAndCompute(frame, cv::Mat(), keypoints_marker, descriptors_marker);
        if (keypoints_marker.size() < 30)
            return false;

        // keep the already trained descriptors and reset the matcher
        descMat = matcher_->getTrainDescriptors();
        matcher_->clear();

        marker_keypoints.insert(marker_keypoints.end(), keypoints_marker.begin(), keypoints_marker.end());
        descMat.push_back(descriptors_marker);

        // concatenate all the descriptors into a single matrix
        if (descMat.size() > 1)
        {
            cv::vconcat(descMat, descriptors_marker);
            descMat.clear();
            descMat.push_back(descriptors_marker);
        }

        matcher_->add(descMat);
        matcher_->train();
        return true;
    }
    catch (std::exception &)
    {
        return false;
    }
}

ArFeaturedMarker::ArFeaturedMarker(std::string filePath, unsigned int nid, float size, unsigned int maxFeatures) : ArMarker(nid, size, ArMarker::AR_FFT_MARKER)
{
    m_file = filePath;
    m_maxFeatures = maxFeatures;

    if (!m_file.empty())
    {
#ifdef ANDROID
        long lSize = 0;
        char* buff = 0;

        FILE* pFile = fopen(m_file.c_str(), "rb");
        if (pFile)
        {
            // obtain the file size
            fseek(pFile, 0, SEEK_END);
            lSize = ftell(pFile);
            rewind(pFile);

            // allocate memory to contain the whole file
            buff = (char*)malloc(lSize);
            if (buff)
            {
                // copy the file into the buffer and decode it
                size_t readSize = fread(buff, 1, lSize, pFile);
                std::vector<char> data(buff, buff + readSize);

                m_image = cv::imdecode(data, cv::IMREAD_GRAYSCALE);
                free(buff);
            }
            fclose(pFile);

            if (m_image.empty())
                MMechostr(MSKDEBUG, ">>>>> Picture not loaded\n");
            else
                MMechostr(MSKDEBUG, ">>>>> Picture loaded\n");
        }
        else
        {
            MMechostr(MSKDEBUG, ">>>>> Failed to open picture: %s\n", filePath.c_str());
        }
#else
        m_image = cv::imread(m_file, cv::IMREAD_GRAYSCALE);
#endif
    }

    CommonConstructor();
}

ArFeaturedMarker::ArFeaturedMarker(cv::Mat tpl, unsigned int nid, float size, unsigned int maxFeatures) : ArMarker(nid, size, ArMarker::AR_FFT_MARKER)
{
    m_file = "";
    m_maxFeatures = maxFeatures;

    cv::cvtColor(tpl, m_image, cv::COLOR_RGB2GRAY);

    CommonConstructor();
}

//for face detection
ArFeaturedMarker::ArFeaturedMarker(unsigned int nid, float size) : ArMarker(nid, size, ArMarker::AR_FACE_MARKER)
{
    m_file = "";
    m_maxFeatures = 400;

    CommonConstructor();
}

{
    tpl.copyTo(m_image);

    if (!m_image.empty())
    {
        // resize the marker for more robust detection; this also avoids an
        // infinite loop in the pyramid detection on small pictures
        cv::Size nsize;

        nsize.width = m_image.cols * 2;
        nsize.height = m_image.rows * 2;

        cv::resize(m_image, m_image, nsize, 0, 0, cv::INTER_CUBIC);
        //cv::blur(m_image, m_image, cv::Size(2, 2));
        //imshow("marker", m_image);

        initTrackingPicture();
    }
}

void ArFeaturedMarker::CommonConstructor()
{
    m_detector.release();
    m_matcher.release();
    m_robustMatcher = 0;
    m_registerNextFrame = false;
    m_lastFound = false;
    m_KalmanInit = false;
    m_lastTick = 0;
    m_minMatches = 6;
    m_HomographyThreshold = 8;

    m_CornerSmoothers.Init(0.2f, 0.2f, 0.2f, 25.0f, 35.0f);

    //#ifdef ANDROID
    //    m_scale = cv::Point2f(0.5f, 0.5f);
    //#else
    //    m_scale = cv::Point2f(1.0f, 1.0f);
    //#endif

    int msize = 1024;

    if (!m_image.empty())
    {
        // resize the marker so that its longest side is msize pixels; this
        // gives more robust detection and avoids an infinite loop in the
        // pyramid detection on small pictures
        cv::Size nsize;
        if (m_image.rows >= m_image.cols)
        {
            nsize.width = (int)(((float)m_image.cols * (float)msize) / (float)m_image.rows);
            nsize.height = msize;
        }
        else
        {
            nsize.width = msize;
            nsize.height = (int)(((float)m_image.rows * (float)msize) / (float)m_image.cols);
        }

        cv::resize(m_image, m_image, nsize, 0, 0, cv::INTER_CUBIC);
        //cv::equalizeHist(m_image, m_image);
        //cv::blur(m_image, m_image, cv::Size(2, 2));

        //imshow("marker", m_image);

        initTrackingPicture();
    }
}

ArFeaturedMarker::~ArFeaturedMarker()
{
    std::vector<int> empty(2);
    empty[0] = 0; empty[1] = 0;
    m_image = cv::Mat(empty);
    m_pcorners.clear();

    if (m_robustMatcher)
    {
        delete(m_robustMatcher);
        m_robustMatcher = 0;
    }

    if (!m_detector.empty())
    {
        m_detector.release();
    }

    if (!m_matcher.empty())
    {
        m_matcher.release();
    }
}

bool ArFeaturedMarker::initTrackingPicture()
{
    m_pcorners.clear();
    m_lastTick = 0;
    if (m_robustMatcher)
    {
        delete(m_robustMatcher);
        m_robustMatcher = 0;
    }

    if (!m_detector.empty())
    {
        m_detector.release();
    }

    if (!m_matcher.empty())
    {
        m_matcher.release();
    }

    m_dmask.setTo(255);

    // reference corners of the marker picture, in marker image coordinates
    m_pcorners.clear();
    m_pcorners.push_back(cv::Point2f(0.0f, 0.0f));
    m_pcorners.push_back(cv::Point2f(static_cast<float>(m_image.cols), 0.0f));
    m_pcorners.push_back(cv::Point2f(static_cast<float>(m_image.cols), static_cast<float>(m_image.rows)));
    m_pcorners.push_back(cv::Point2f(0.0f, static_cast<float>(m_image.rows)));

    // FLANN with an LSH index works on the binary descriptors produced by AKAZE
    //m_matcher = cv::makePtr<cv::BFMatcher>(cv::NORM_HAMMING);
    cv::Ptr<cv::flann::IndexParams> indexParams = cv::makePtr<cv::flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
    cv::Ptr<cv::flann::SearchParams> searchParams = cv::makePtr<cv::flann::SearchParams>(50); // instantiate flann search parameters
    m_matcher = cv::makePtr<cv::FlannBasedMatcher>(indexParams, searchParams);

    m_robustMatcher = new RobustMatcher();
    m_robustMatcher->setDescriptorMatcher(m_matcher);
    return trainMatcher();
}

void ArFeaturedMarker::RegisterNextFrame(cv::Point point, cv::Size size)
{
    m_cropPos = point;
    m_cropSize = size;
    m_registerNextFrame = true;
}

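// trainMatcher rebuilds the AKAZE detectors and (re)trains the matcher on the
// marker picture. The detection threshold is scaled inversely with the
// requested feature budget, so a larger m_maxFeatures yields more keypoints.
// A finer detector (more octaves and octave layers) is used for the training
// picture, where detection time does not matter.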
bool ArFeaturedMarker::trainMatcher()
{
    m_matcher->clear();
    m_trainKeypoints.clear();
    if (!m_detector.empty())
    {
        m_detector.release();
    }

    //m_detector = cv::ORB::create(m_maxFeatures, 1.0f, 1, 8, 0, 2, 1, 8);
    //m_extractor = cv::ORB::create(m_maxFeatures, 1.35f, 24, 31, 0, 2, 1, 31);

    // scale the AKAZE detection threshold inversely with the feature budget
    float threshold = 0.003f / ((float)m_maxFeatures / 100.0f);
    m_detector = cv::AKAZE::create(cv::AKAZE::DESCRIPTOR_MLDB, 0, 3, threshold, 1, 1);

    m_robustMatcher->setFeatureDetector(m_detector);

    std::vector<cv::Mat> descMat;
    //cv::Ptr<cv::ORB> sdetector = cv::ORB::create(500000, 1.2f, 16, 31, 0, 2, 1, 16);

    cv::Ptr<cv::AKAZE> sdetector = cv::AKAZE::create(cv::AKAZE::DESCRIPTOR_MLDB, 0, 3, threshold, 32, 4, cv::KAZE::DIFF_PM_G2);
    m_robustMatcher->setTrainFeatureDetector(sdetector);
    m_robustMatcher->addTrainData(m_image, m_trainKeypoints);

    //cv::Mat eq;
    //cv::equalizeHist(m_image, eq);
    //m_robustMatcher->addTrainData(eq, m_trainKeypoints);

    /*
    cv::Mat warp;
    cv::Mat warped;
    cv::Mat hmat;

    rotateImage(m_image, warp, hmat, 45, 0, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, -45, 0, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, 0, 45, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, 0, -45, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, 85, 0, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, -85, 0, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, 0, 85, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);

    rotateImage(m_image, warp, hmat, 0, -85, 0, 0, 0, 2000, 500);
    cv::warpPerspective(warp, warped, hmat, cv::Size(warp.cols, warp.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    m_robustMatcher->addTrainData(warped, m_trainKeypoints);
    */

    if (m_trainKeypoints.size() < 50)
        return false;

    return true;
}

bool ArFeaturedMarker::GetPointsFromMatches(const std::vector<cv::KeyPoint> &querykeypoints, const std::vector<cv::KeyPoint> &trainKeypoints, std::vector<cv::DMatch> matches, std::vector<cv::Point2f> &trainPoints, std::vector<cv::Point2f> &framePoints)
{
    if (matches.size() >= m_minMatches)
    {
        for (unsigned int i = 0; i < matches.size(); i++)
        {
            cv::DMatch& match = matches[i];
            trainPoints.push_back(trainKeypoints[match.trainIdx].pt);
            framePoints.push_back(querykeypoints[match.queryIdx].pt);
        }
        return true;
    }
    else
        return false;
}
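
// Homography estimation with RANSAC: m_HomographyThreshold is the maximum
// reprojection error, in pixels, for a point pair to be counted as an inlier.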

bool ArFeaturedMarker::GetHomography(std::vector<cv::Point2f> &trainPoints, std::vector<cv::Point2f> &framePoints, cv::Mat &HMatrix)
{
    try
    {
        //std::vector<uchar> status;
        HMatrix = findHomography(trainPoints, framePoints, cv::RANSAC, m_HomographyThreshold);// , status);

        /*
        std::vector<cv::Point2f> nGoodFramePoints;
        std::vector<cv::Point2f> nTrainPoints;
        unsigned int nbMatches = 0;
        for (unsigned int i = 0; i < trainPoints.size(); i++)
        {
            if (status[i] > 0)
            {
                nbMatches++;
                nTrainPoints.push_back(trainPoints[i]);
                nGoodFramePoints.push_back(framePoints[i]);
            }
        }

        trainPoints = nTrainPoints;
        framePoints = nGoodFramePoints;
        */
    }
    catch (cv::Exception &)
    {
        //error
    }

    if (HMatrix.empty())
    {
        m_dmask.setTo(255);
        return false;
    }
    else
        return true;
}
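
// Corner extraction: project the four reference corners of the marker picture
// through the homography, validate the resulting quad (rectangular shape and
// homography conditioning), smooth the corners over time, and keep the
// covered zone as the detection mask for the next frame.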

bool ArFeaturedMarker::GetCorners(cv::Mat &HMatrix)
{
    unsigned int i;
    std::vector<cv::Point2f> corners;
    cv::Mat transformedPoints;
    bool isvalid = false;

    cv::perspectiveTransform(cv::Mat(m_pcorners), transformedPoints, HMatrix);

    for (i = 0; i < m_pcorners.size(); i++)
        corners.push_back(transformedPoints.at<cv::Point2f>(i, 0));

    // clear the current marker corners
    this->clear();

    if ((corners.size() == 4) && (checkRectShape(corners)))
    {
        isvalid = NiceHomography(HMatrix);
    }

    if (!isvalid)
    {
        m_dmask.setTo(255);
        return false;
    }

    m_CornerSmoothers.Update(corners);
    corners = m_CornerSmoothers.GetFilteredPoints();

    for (i = 0; i < corners.size(); i++)
    {
        this->push_back(cv::Point2f(corners[i].x, corners[i].y));
    }

    // keep the last marker zone as the detection mask for the next frame
    m_dmask = createMask(m_dmask.size(), corners);
    return true;
}
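
// Optical-flow tracking: instead of re-matching features on every frame, the
// points matched in the previous frame are followed with pyramidal
// Lucas-Kanade optical flow; points with a bad status or a too large tracking
// error are dropped.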

bool ArFeaturedMarker::detectMotionFlow(cv::Mat &frame, cv::Mat &color, const std::vector<cv::Point2f> &trainPoints, const std::vector<cv::Point2f> &framePoints, std::vector<cv::Point2f> &gtrainPoints, std::vector<cv::Point2f> &gframePoints)
{
    std::vector<cv::Point2f> newFramePoints;
    std::vector<uchar> status;
    std::vector<float> err;
    cv::TermCriteria termcrit(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 200, 0.02f);
    cv::Size winSize(41, 41);

    cv::buildOpticalFlowPyramid(frame, m_nextPyr, winSize, 8, true);

    if (framePoints.empty() || m_prevPyr.empty())
    {
        m_prevPyr.swap(m_nextPyr);
        return false;
    }

    try
    {
        // no flags: cv::OPTFLOW_FARNEBACK_GAUSSIAN in the original code is a
        // Farneback-only flag and is ignored by calcOpticalFlowPyrLK
        cv::calcOpticalFlowPyrLK(m_prevPyr, m_nextPyr, framePoints, newFramePoints, status, err, winSize, 8, termcrit, 0, 0.001f);
        m_prevPyr.swap(m_nextPyr);

        unsigned int nbMatches = 0;
        for (unsigned int i = 0; i < trainPoints.size(); i++)
        {
            //cv::circle(color, framePoints[i], 4, cv::Scalar(255, 0, 0), 1, 8, 0);
            //cv::circle(color, newFramePoints[i], 4, cv::Scalar(0, 255, 0), 1, 8, 0);
            //cv::line(color, framePoints[i], newFramePoints[i], cv::Scalar(255, 0, 0), 1, CV_AA);

            // keep only the points that were tracked with a low error
            if (status[i] == 1 && err[i] < 150.0f)
            {
                nbMatches++;
                gtrainPoints.push_back(trainPoints[i]);
                gframePoints.push_back(newFramePoints[i]);
            }
        }

        if (nbMatches >= m_minMatches)
            return true;
    }
    catch (std::exception &e)
    {
        std::string wt = e.what();
        return false;
    }

    return false;
}

{
    boost::mutex::scoped_lock l(m_mutex);
    m_warped.copyTo(image);

    if (image.empty())
        return false;
    else
        return true;
}

/**** feature detection *****/
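// Per-frame detection pipeline:
//  1. if the marker was found in the previous frame, track its points with
//     optical flow (cheap) and re-estimate the homography from them;
//  2. otherwise, run the full AKAZE + FLANN robust matching, restricted to
//     the detection mask (the zone where the marker was last seen, or the
//     whole frame);
//  3. from the homography, derive the marker corners and a warped view of
//     the detected marker.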
bool ArFeaturedMarker::detectFeatured(cv::Mat &frame, cv::Mat &color, cv::Size camsize)
{
    // reset the tracking state when the frame size changes or a new marker
    // template is about to be registered
    if (m_lastFrameSize.width != frame.cols || m_lastFrameSize.height != frame.rows || m_registerNextFrame)
    {
        m_prevPyr.clear();
        m_prevFramePoints.clear();
        m_prevTrainPoints.clear();
        m_dmask.setTo(255);
        m_lastTick = 0;

        m_CornerSmoothers.Reset();
    }

    m_lastFrameSize = cv::Size(frame.cols, frame.rows);
    cv::Mat HMatrix;

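    // RegisterNextFrame support: crop the current frame at the requested
    // position and size, rescale the crop so its longest side is 1024 px,
    // and train the matcher with it as the new marker template.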
    if (m_registerNextFrame && camsize.width != 0 && camsize.height != 0 && m_cropSize.width > 1 && m_cropSize.height > 1)
    {
        //crop the picture with the given coordinates
        cv::Point2f scale = cv::Point2f((float)frame.cols / (float)camsize.width, (float)frame.rows / (float)camsize.height);

        m_cropPos.x = (int)((float)m_cropPos.x * scale.x);
        m_cropPos.y = (int)((float)m_cropPos.y * scale.y);
        m_cropSize.width = (int)((float)m_cropSize.width * scale.x);
        m_cropSize.height = (int)((float)m_cropSize.height * scale.y);

        int msize = 1024;
        try
        {
            cv::Mat crop(frame, cv::Rect(m_cropPos.x, m_cropPos.y, m_cropSize.width, m_cropSize.height)); // NOTE: this only creates a reference to the ROI of the original data
            cv::Size nsize;
            if (crop.rows >= crop.cols)
            {
                nsize.width = (int)(((float)crop.cols * (float)msize) / (float)crop.rows);
                nsize.height = msize;
            }
            else
            {
                nsize.width = msize;
                nsize.height = (int)(((float)crop.rows * (float)msize) / (float)crop.cols);
            }
            cv::Mat marker;
            cv::resize(crop, marker, nsize, 0, 0, cv::INTER_CUBIC);
            //cv::blur(marker, marker, cv::Size(2, 2));

            m_registerNextFrame = (m_robustMatcher->addTrainData(marker, m_trainKeypoints)) ? false : true;
        }
        catch (std::exception &)
        {
            //could not set the current frame
            MMechostr(MSKDEBUG, "detectFeatured : failed to set the current frame as marker.\n");
        }
    }

    if (frame.empty() || m_image.empty())
        return false;

    if (m_lastFound)
    {
        // the marker was found in the previous frame: try optical flow first
        std::vector<cv::Point2f> gFramePoints;
        std::vector<cv::Point2f> gTrainPoints;

        bool motion = detectMotionFlow(frame, color, m_prevTrainPoints, m_prevFramePoints, gTrainPoints, gFramePoints);

        if (motion && !gFramePoints.empty() && !gTrainPoints.empty())
        {
            if ((m_lastFound = GetHomography(gTrainPoints, gFramePoints, HMatrix)))
            {
                m_lastFound = GetCorners(HMatrix);

                // project the reference points through the new homography to
                // update the expected point positions for the next frame
                cv::Mat prev = cv::Mat(m_prevTrainPoints, false);
                cv::Mat next;
                cv::perspectiveTransform(prev, next, HMatrix);
                next.copyTo(cv::Mat(m_prevFramePoints, false));
            }
        }
        else
        {
            m_lastFound = false;
        }
    }

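    // Full detection: match the current frame against the trained marker
    // features; m_dmask restricts the search to the zone where the marker was
    // last seen and is reset to the whole frame whenever the marker is lost.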
    if (!m_lastFound)
    {
        //init the detection mask
        if ((m_dmask.rows == 0) || (m_dmask.cols == 0) || m_dmask.empty() || (m_dmask.rows != frame.rows) || (m_dmask.cols != frame.cols))
        {
            m_dmask = cv::Mat(frame.rows, frame.cols, CV_8UC1);
            m_dmask.setTo(255);
        }

        std::vector<cv::KeyPoint> keypoints;
        std::vector<cv::DMatch> matches;
        try
        {
            m_robustMatcher->fastRobustMatch(frame, matches, keypoints, m_dmask);

            std::vector<cv::Point2f> trainPoints;
            std::vector<cv::Point2f> framePoints;

            if ((m_lastFound = GetPointsFromMatches(keypoints, m_trainKeypoints, matches, trainPoints, framePoints)))
            {
                m_prevFramePoints = framePoints;
                m_prevTrainPoints = trainPoints;

                /*
                cv::Size winSize = cv::Size(5, 5);
                cv::Size zeroZone = cv::Size(-1, -1);
                cv::TermCriteria criteria = cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001);
                cv::cornerSubPix(frame, m_prevFramePoints, winSize, zeroZone, criteria);
                cv::cornerSubPix(m_image, m_prevTrainPoints, winSize, zeroZone, criteria);
                */

                if ((m_lastFound = GetHomography(trainPoints, framePoints, HMatrix)) && (m_lastFound = GetCorners(HMatrix)))
                {
                    // reset the motion flow with the freshly matched points
                    std::vector<cv::Point2f> gFramePoints;
                    std::vector<cv::Point2f> gTrainPoints;
                    m_prevPyr.clear();
                    detectMotionFlow(frame, color, m_prevTrainPoints, m_prevFramePoints, gTrainPoints, gFramePoints);

                    // get a warped view of the detected marker
                    boost::mutex::scoped_lock l(m_mutex);
                    cv::warpPerspective(color, m_warped, HMatrix, cv::Size(m_image.cols, m_image.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
                }
            }
        }
        catch (std::exception&)
        {
            m_lastFound = false;
        }
    }

    //reset the state when the marker was not found
    if (!m_lastFound)
    {
        m_KalmanInit = false;
        m_prevPyr.clear();
        m_prevFramePoints.clear();
        m_prevTrainPoints.clear();
        m_dmask.setTo(255);
        m_lastTick = 0;
        m_CornerSmoothers.Reset();
    }
    /*
    // too slow on android
    else
    {
        // get warped detected marker
        boost::mutex::scoped_lock l(m_mutex);
        cv::warpPerspective(color, m_warped, HMatrix, cv::Size(m_image.cols, m_image.rows), cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
    }
    */

    return m_lastFound;
}

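// Pose estimation: build the marker's four 3D corner points (in meters, on
// the z = 0 plane, scaled to the picture aspect ratio), pair them with the
// detected 2D corners, and solve the perspective-n-point problem to obtain
// the marker rotation (Rvec) and translation (Tvec) in camera space.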
void ArFeaturedMarker::calculateFeaturedExtrinsics(float markerSizeMeters, aruco::CameraParameters& camParams, bool setYPerperdicular)
{
    if (!camParams.isValid())
        throw cv::Exception(9004, "!isValid(): invalid camera parameters. It is not possible to calculate extrinsics", "calculateExtrinsics", __FILE__, __LINE__);
    if (!isValid())
        throw cv::Exception(9004, "!isValid(): invalid marker. It is not possible to calculate extrinsics", "calculateExtrinsics", __FILE__, __LINE__);
    if (markerSizeMeters <= 0)
        throw cv::Exception(9004, "markerSize<=0: invalid markerSize", "calculateExtrinsics", __FILE__, __LINE__);
    if (camParams.CameraMatrix.rows == 0 || camParams.CameraMatrix.cols == 0)
        throw cv::Exception(9004, "CameraMatrix is empty", "calculateExtrinsics", __FILE__, __LINE__);

    // half extents of the marker in meters, preserving the picture aspect ratio
    float halfSizeX = 0.0f;
    float halfSizeY = 0.0f;
    if (m_image.rows >= m_image.cols)
    {
        halfSizeX = markerSizeMeters / 2.0f;
        halfSizeY = ((m_image.cols * markerSizeMeters) / m_image.rows) / 2.0f;
    }
    else
    {
        halfSizeX = ((m_image.rows * markerSizeMeters) / m_image.cols) / 2.0f;
        halfSizeY = markerSizeMeters / 2.0f;
    }

    // 3D corners of the marker in its own plane (z = 0)
    cv::Mat ObjPoints(4, 3, CV_32FC1);
    ObjPoints.at<float>(1, 0) = -halfSizeX;
    ObjPoints.at<float>(1, 1) = halfSizeY;
    ObjPoints.at<float>(1, 2) = 0;
    ObjPoints.at<float>(2, 0) = halfSizeX;
    ObjPoints.at<float>(2, 1) = halfSizeY;
    ObjPoints.at<float>(2, 2) = 0;
    ObjPoints.at<float>(3, 0) = halfSizeX;
    ObjPoints.at<float>(3, 1) = -halfSizeY;
    ObjPoints.at<float>(3, 2) = 0;
    ObjPoints.at<float>(0, 0) = -halfSizeX;
    ObjPoints.at<float>(0, 1) = -halfSizeY;
    ObjPoints.at<float>(0, 2) = 0;

    cv::Mat ImagePoints(4, 2, CV_32FC1);

    // scale from processed-frame coordinates back to camera coordinates
    cv::Point2f scale = cv::Point2f((float)m_lastFrameSize.width / (float)camParams.CamSize.width, (float)m_lastFrameSize.height / (float)camParams.CamSize.height);

    //Set image points from the marker corners
    for (int c = 0; c < 4; c++)
    {
        (*this)[c].x = ((*this)[c].x) * (1.0f / scale.x);
        (*this)[c].y = ((*this)[c].y) * (1.0f / scale.y);

        ImagePoints.at<float>(c, 0) = ((*this)[c].x);
        ImagePoints.at<float>(c, 1) = ((*this)[c].y);
    }

    cv::Mat raux, taux;
    cv::solvePnP(ObjPoints, ImagePoints, camParams.CameraMatrix, camParams.Distorsion, raux, taux);
    //cv::solvePnPRansac(ObjPoints, ImagePoints, camParams.CameraMatrix, camParams.Distorsion, raux, taux, false, 100, 9.0f, 0.95, cv::noArray(), CV_P3P);

    raux.convertTo(Rvec, CV_32F);
    taux.convertTo(Tvec, CV_32F);

    //rotate the X axis so that Y is perpendicular to the marker plane
    if (setYPerperdicular)
        rotateXAxis(Rvec);
}

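// Update runs the detection on the current frame and, when the marker is
// found, converts the estimated pose into the position and orientation stored
// on the marker, together with the corner centroid and the average side
// length of the detected quad (in pixels).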
void ArFeaturedMarker::Update(cv::Mat frame, cv::Mat color, aruco::CameraParameters& camparam, bool reverse)
{
    boost::mutex::scoped_lock l(killMutex);
    m_bUpdating = true;
    //bool found = (m_visible && m_fmarker->detectMotionFlow(m_lastFrame)) ? true : m_fmarker->detectFeatured(m_lastFrame);
    bool found = detectFeatured(frame, color, camparam.CamSize);
    if (found)
    {
        m_bUpdating = false;

        //Get Matrix
        double modelview_matrix[16];

        try
        {
            //recalculate Rvec and Tvec with the marker size
            calculateFeaturedExtrinsics(m_size, camparam, true);
            glGetModelViewMatrix(modelview_matrix);
        }
        catch (std::exception&)
        {
            return;
        }

        //determine the centroid of the four corners
        Vector3 pixelPosition(0.0, 0.0, 0.0);
        for (int j = 0; j < 4; j++)
        {
            pixelPosition.x += (*this).at(j).x;
            pixelPosition.y += (*this).at(j).y;
        }

        // z holds the average side length of the marker quad, in pixels
        pixelPosition.z = sqrt(pow(((*this).at(1).x - (*this).at(0).x), 2) + pow(((*this).at(1).y - (*this).at(0).y), 2));
        pixelPosition.z += sqrt(pow(((*this).at(2).x - (*this).at(1).x), 2) + pow(((*this).at(2).y - (*this).at(1).y), 2));
        pixelPosition.z += sqrt(pow(((*this).at(3).x - (*this).at(2).x), 2) + pow(((*this).at(3).y - (*this).at(2).y), 2));
        pixelPosition.z += sqrt(pow(((*this).at(0).x - (*this).at(3).x), 2) + pow(((*this).at(0).y - (*this).at(3).y), 2));

        pixelPosition.x /= 4.;
        pixelPosition.y /= 4.;
        pixelPosition.z /= 4.;

        //cv::circle(image, Point(pixelPosition.x, pixelPosition.y),pixelPosition.z/2,cv::Scalar(255,0,255));

        SetPixelPosition(pixelPosition);

        SetPosition(Vector3(static_cast<float>(reverse ? -modelview_matrix[12] : modelview_matrix[12]), static_cast<float>(modelview_matrix[13]), static_cast<float>(modelview_matrix[14])));
        SetOrientation(BtQuaternion::FromRotationMatrix(modelview_matrix, reverse));
        SetVisible(true);
    }
    else
    {
        SetVisible(false);
    }

    m_needUpdate = true;
    m_bUpdating = false;
}