Visual Servoing Platform version 3.6.0
tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>

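// This example tracks an object from its CAD model with the ViSP generic model-based
// tracker, combining moving edges, KLT keypoints and dense depth features acquired
// from an Intel RealSense RGB-D camera. It can also learn the object appearance
// (--learn) to later re-initialize the tracker automatically (--auto_init).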
int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

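  // Parse the command-line options; any option not provided keeps its default value.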
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    } else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    } else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--learn") {
      learn = true;
    } else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    } else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    } else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
                << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: "
                << proj_error_threshold
                << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]"
                << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
                << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
                << std::endl;

      return EXIT_SUCCESS;
    }
  }

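  // When not given on the command line, derive the config and init file names
  // from the location and name of the color model file.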
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn        : " << learn << std::endl;
  std::cout << "  Auto init    : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "One of the config, model or init file names is empty: cannot continue." << std::endl;
    return EXIT_FAILURE;
  }

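  // Configure the RealSense grabber to stream 640x480 color and depth images at 30 fps.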
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  } catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the RealSense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

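  // Retrieve the intrinsic parameters of the color and depth cameras from the device.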
  vpCameraParameters cam_color =
      realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
      realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

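  // Create one display for the grayscale color stream and one for the depth stream,
  // using the first GUI backend available.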
#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");

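  // Display the live streams and wait for a user click before going on.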
  while (true) {
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

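  // Select the tracker feature types according to the command-line options.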
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

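  // Load the tracker configuration and CAD model. With both color and depth enabled,
  // "Camera1" is the color camera and "Camera2" the depth camera, registered through
  // the depth_M_color extrinsic transformation.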
  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  } else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  } else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

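  // Configure the keypoint detector, extractor and matcher used for learning and
  // auto initialization: SIFT when the OpenCV build provides it, otherwise FAST + ORB.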
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
#endif
  }

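  // Either load a previously learned database for automatic initialization, or let
  // the user initialize the tracker by clicking the points listed in the .init file.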
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  } else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

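    // Main tracking loop: acquire images, (re)initialize the pose if requested,
    // track the object, display the results and measure the loop time.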
    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL, NULL);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      } else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      } else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          } else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          } else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        } else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

369
370 // Run the tracker
371 try {
372 if (run_auto_init) {
373 // Turn display features off just after auto init to not display wrong moving-edge if the tracker fails
374 tracker.setDisplayFeatures(false);
375
376 run_auto_init = false;
377 }
378 if ((use_edges || use_klt) && use_depth) {
379 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
380 } else if (use_edges || use_klt) {
381 tracker.track(I_gray);
382 } else if (use_depth) {
383 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
384 }
385 } catch (const vpException &e) {
386 std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
387 tracking_failed = true;
388 if (auto_init) {
389 std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
390 run_auto_init = true;
391 }
392 }
393
394 // Get object pose
395 cMo = tracker.getPose();
396
397 // Check tracking errors
398 double proj_error = 0;
399 if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
400 // Check tracking errors
401 proj_error = tracker.getProjectionError();
402 } else {
403 proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
404 }
405
406 if (auto_init && proj_error > proj_error_threshold) {
407 std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
408 run_auto_init = true;
409 tracking_failed = true;
410 }
411
      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        } else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        } else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          } else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          } else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

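      // Learning step, triggered by a left click in learn mode: keypoints detected on
      // the current frame are back-projected onto the visible model faces and added
      // to the reference database.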
      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  } catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
  }

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif