tutorial-mb-generic-tracker-rgbd-structure-core.cpp
#include <iostream>
#include <visp3/core/vpConfig.h>
#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpOccipitalStructure.h>
#include <visp3/vision/vpKeyPoint.h>
int main(int argc, char *argv[])
{
std::string config_color = "", config_depth = "";
std::string model_color = "", model_depth = "";
std::string init_file = "";
bool use_ogre = false;
bool use_scanline = false;
bool use_edges = true;
bool use_klt = true;
bool use_depth = true;
bool learn = false;
bool auto_init = false;
double proj_error_threshold = 25;
std::string learning_data = "learning/data-learned.bin";
bool display_projection_error = false;
for (int i = 1; i < argc; i++) {
if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
config_color = std::string(argv[i + 1]);
} else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
config_depth = std::string(argv[i + 1]);
} else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
model_color = std::string(argv[i + 1]);
} else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
model_depth = std::string(argv[i + 1]);
} else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
init_file = std::string(argv[i + 1]);
} else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
proj_error_threshold = std::atof(argv[i + 1]);
} else if (std::string(argv[i]) == "--use_ogre") {
use_ogre = true;
} else if (std::string(argv[i]) == "--use_scanline") {
use_scanline = true;
} else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
} else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
} else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
} else if (std::string(argv[i]) == "--learn") {
learn = true;
} else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
learning_data = argv[i + 1];
} else if (std::string(argv[i]) == "--auto_init") {
auto_init = true;
} else if (std::string(argv[i]) == "--display_proj_error") {
display_projection_error = true;
} else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout << "Usage: \n"
<< argv[0]
<< " [--model_color <object.cao>] [--model_depth <object.cao>]"
" [--config_color <object.xml>] [--config_depth <object.xml>]"
" [--init_file <object.init>] [--use_ogre] [--use_scanline]"
" [--proj_error_threshold <threshold between 0 and 90> (default: "
<< proj_error_threshold
<< ")]"
" [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
" [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
" [--display_proj_error]"
<< std::endl;
std::cout << "\n** How to track a 4.2 cm width cube with manual initialization:\n"
<< argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
std::cout << "\n** How to learn the cube and create a learning database:\n"
<< argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
<< std::endl;
std::cout << "\n** How to track the cube with initialization from learning database:\n"
<< argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
<< std::endl;
return EXIT_SUCCESS;
}
}
if (model_depth.empty()) {
model_depth = model_color;
}
std::string parentname = vpIoTools::getParent(model_color);
if (config_color.empty()) {
config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
}
if (config_depth.empty()) {
config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
}
if (init_file.empty()) {
init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
}
std::cout << "Tracked features: " << std::endl;
std::cout << " Use edges : " << use_edges << std::endl;
std::cout << " Use klt : " << use_klt << std::endl;
std::cout << " Use depth : " << use_depth << std::endl;
std::cout << "Tracker options: " << std::endl;
std::cout << " Use ogre : " << use_ogre << std::endl;
std::cout << " Use scanline: " << use_scanline << std::endl;
std::cout << " Proj. error : " << proj_error_threshold << std::endl;
std::cout << " Display proj. error: " << display_projection_error << std::endl;
std::cout << "Config files: " << std::endl;
std::cout << " Config color: "
<< "\"" << config_color << "\"" << std::endl;
std::cout << " Config depth: "
<< "\"" << config_depth << "\"" << std::endl;
std::cout << " Model color : "
<< "\"" << model_color << "\"" << std::endl;
std::cout << " Model depth : "
<< "\"" << model_depth << "\"" << std::endl;
std::cout << " Init file : "
<< "\"" << init_file << "\"" << std::endl;
std::cout << "Learning options : " << std::endl;
std::cout << " Learn : " << learn << std::endl;
std::cout << " Auto init : " << auto_init << std::endl;
std::cout << " Learning data: " << learning_data << std::endl;
if (!use_edges && !use_klt && !use_depth) {
std::cout << "You must choose at least one visual features between edge, KLT and depth." << std::endl;
return EXIT_FAILURE;
}
if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || "
"init_file.empty()"
<< std::endl;
return EXIT_FAILURE;
}
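// Configure and open the Occipital Structure Core capture session (color + depth streaming).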
vpOccipitalStructure sc;
ST::CaptureSessionSettings settings;
settings.source = ST::CaptureSessionSourceId::StructureCore;
settings.structureCore.visibleEnabled = true;
settings.applyExpensiveCorrection = true; // Apply a correction and clean filter to the depth before streaming.
try {
sc.open(settings);
} catch (const vpException &e) {
std::cout << "Catch an exception: " << e.what() << std::endl;
std::cout << "Check if the Structure Core camera is connected..." << std::endl;
return EXIT_SUCCESS;
}
std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;
vpImage<vpRGBa> I_color(height, width);
vpImage<unsigned char> I_gray(height, width), I_depth(height, width);
vpImage<float> I_depth_raw(height, width);
unsigned int _posx = 100, _posy = 50;
#ifdef VISP_HAVE_X11
vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
vpDisplayGDI d1, d2;
#elif defined(HAVE_OPENCV_HIGHGUI)
vpDisplayOpenCV d1, d2;
#endif
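// Create one window for the color stream and one for the depth stream.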
if (use_edges || use_klt)
d1.init(I_gray, _posx, _posy, "Color stream");
if (use_depth)
d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");
while (true) {
sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL, NULL);
if (use_edges || use_klt) {
vpImageConvert::convert(I_color, I_gray);
vpDisplay::display(I_gray);
vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
vpDisplay::flush(I_gray);
if (vpDisplay::getClick(I_gray, false)) {
break;
}
}
if (use_depth) {
vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
vpDisplay::display(I_depth);
vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
vpDisplay::flush(I_depth);
if (vpDisplay::getClick(I_depth, false)) {
break;
}
}
}
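// Build the list of tracker types from the requested features: edges and KLT can be
// combined in a single color-camera tracker; depth adds a dense depth-camera tracker.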
std::vector<int> trackerTypes;
if (use_edges && use_klt)
else if (use_edges)
trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
else if (use_klt)
trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);
if (use_depth)
trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
// Transformation between the depth and color camera frames, queried from the sensor
vpHomogeneousMatrix color_M_depth = sc.getTransform(vpOccipitalStructure::depth, vpOccipitalStructure::visible);
vpHomogeneousMatrix depth_M_color = color_M_depth.inverse();
std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
std::map<std::string, std::string> mapOfInitFiles;
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
std::vector<vpColVector> pointcloud;
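// Instantiate the generic tracker and load the configuration and CAD model for each camera.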
vpMbGenericTracker tracker(trackerTypes);
if ((use_edges || use_klt) && use_depth) {
tracker.loadConfigFile(config_color, config_depth);
tracker.loadModel(model_color, model_depth);
std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
mapOfCameraTransformations["Camera2"] = depth_M_color;
tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
mapOfImages["Camera1"] = &I_gray;
mapOfImages["Camera2"] = &I_depth;
mapOfInitFiles["Camera1"] = init_file;
tracker.setCameraParameters(cam_color, cam_depth);
} else if (use_edges || use_klt) {
tracker.loadConfigFile(config_color);
tracker.loadModel(model_color);
tracker.setCameraParameters(cam_color);
} else if (use_depth) {
tracker.loadConfigFile(config_depth);
tracker.loadModel(model_depth);
tracker.setCameraParameters(cam_depth);
}
tracker.setDisplayFeatures(true);
tracker.setOgreVisibilityTest(use_ogre);
tracker.setScanLineVisibilityTest(use_scanline);
tracker.setProjectionErrorDisplay(display_projection_error);
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
(VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
std::string detectorName = "SIFT";
std::string extractorName = "SIFT";
std::string matcherName = "BruteForce";
#else
std::string detectorName = "FAST";
std::string extractorName = "ORB";
std::string matcherName = "BruteForce-Hamming";
#endif
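// Keypoint machinery used to learn the object appearance and to relocalize it (auto init).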
vpKeyPoint keypoint;
if (learn || auto_init) {
keypoint.setDetector(detectorName);
keypoint.setExtractor(extractorName);
keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
if (orb_detector) {
orb_detector->setNLevels(1);
}
#endif
#endif
}
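// Initialization: either from the learned database (auto init) or from user clicks on the init file points.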
if (auto_init) {
if (!vpIoTools::checkFilename(learning_data)) {
std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
return EXIT_FAILURE;
}
keypoint.loadLearningData(learning_data, true);
} else {
if ((use_edges || use_klt) && use_depth)
tracker.initClick(mapOfImages, mapOfInitFiles, true);
else if (use_edges || use_klt)
tracker.initClick(I_gray, init_file, true);
else if (use_depth)
tracker.initClick(I_depth, init_file, true);
if (learn)
vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
}
vpHomogeneousMatrix cMo; // Pose of the object in the color camera frame
bool run_auto_init = false;
if (auto_init) {
run_auto_init = true;
}
std::vector<double> times_vec;
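// Main loop: acquire images, (re-)initialize if requested, track, display results, optionally learn keypoints.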
try {
// To be able to display keypoints matching with test-detection-rs2
int learn_id = 1;
bool quit = false;
bool learn_position = false;
double loop_t = 0;
while (!quit) {
double t = vpTime::measureTimeMs();
bool tracking_failed = false;
// Acquire images and update tracker input data
sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud);
if (use_edges || use_klt || run_auto_init) {
vpImageConvert::convert(I_color, I_gray);
vpDisplay::display(I_gray);
}
if (use_depth) {
vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
vpDisplay::display(I_depth);
}
if ((use_edges || use_klt) && use_depth) {
mapOfImages["Camera1"] = &I_gray;
mapOfPointclouds["Camera2"] = &pointcloud;
mapOfWidths["Camera2"] = width;
mapOfHeights["Camera2"] = height;
} else if (use_edges || use_klt) {
mapOfImages["Camera"] = &I_gray;
} else if (use_depth) {
mapOfPointclouds["Camera"] = &pointcloud;
mapOfWidths["Camera"] = width;
mapOfHeights["Camera"] = height;
}
// Run auto initialization from learned data
if (run_auto_init) {
if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
std::cout << "Auto init succeed" << std::endl;
if ((use_edges || use_klt) && use_depth) {
mapOfCameraPoses["Camera1"] = cMo;
mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
tracker.initFromPose(mapOfImages, mapOfCameraPoses);
} else if (use_edges || use_klt) {
tracker.initFromPose(I_gray, cMo);
} else if (use_depth) {
tracker.initFromPose(I_depth, depth_M_color * cMo);
}
} else {
if (use_edges || use_klt) {
vpDisplay::flush(I_gray);
}
if (use_depth) {
vpDisplay::flush(I_depth);
}
continue;
}
}
// Run the tracker
try {
if (run_auto_init) {
// Turn off feature display just after auto init, to avoid displaying wrong moving edges if the tracker fails
tracker.setDisplayFeatures(false);
run_auto_init = false;
}
if ((use_edges || use_klt) && use_depth) {
tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
} else if (use_edges || use_klt) {
tracker.track(I_gray);
} else if (use_depth) {
tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
}
} catch (const vpException &e) {
std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
tracking_failed = true;
if (auto_init) {
std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
run_auto_init = true;
}
}
// Get object pose
cMo = tracker.getPose();
// Check tracking errors
double proj_error = 0;
if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
// When the edge tracker is enabled, reuse the projection error computed during tracking
proj_error = tracker.getProjectionError();
} else {
proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
}
if (auto_init && proj_error > proj_error_threshold) {
std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
run_auto_init = true;
tracking_failed = true;
}
// Display tracking results
if (!tracking_failed) {
// Turn display features on
tracker.setDisplayFeatures(true);
if ((use_edges || use_klt) && use_depth) {
tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
} else if (use_edges || use_klt) {
tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
} else if (use_depth) {
tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
}
{
std::stringstream ss;
ss << "Nb features: " << tracker.getError().size();
vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
}
{
std::stringstream ss;
ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
<< ", depth " << tracker.getNbFeaturesDepthDense();
vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
}
}
std::stringstream ss;
ss << "Loop time: " << loop_t << " ms";
if (use_edges || use_klt) {
vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
if (learn)
vpDisplay::displayText(I_gray, 35, 20, "Left click: learn Right click: quit", vpColor::red);
else if (auto_init)
vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init Right click: quit", vpColor::red);
else
vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);
if (vpDisplay::getClick(I_gray, button, false)) {
if (button == vpMouseButton::button3) {
quit = true;
} else if (button == vpMouseButton::button1 && learn) {
learn_position = true;
} else if (button == vpMouseButton::button1 && auto_init && !learn) {
run_auto_init = true;
}
}
}
if (use_depth) {
vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
vpDisplay::flush(I_depth);
if (vpDisplay::getClick(I_depth, false)) {
quit = true;
}
}
if (learn_position) {
// Detect keypoints on the current image
std::vector<cv::KeyPoint> trainKeyPoints;
keypoint.detect(I_gray, trainKeyPoints);
// Keep only keypoints on the cube
std::vector<vpPolygon> polygons;
std::vector<std::vector<vpPoint> > roisPt;
std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
polygons = pair.first;
roisPt = pair.second;
// Compute the 3D coordinates
std::vector<cv::Point3f> points3f;
vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);
// Build the reference keypoints
keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);
// Display learned data
for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
}
learn_position = false;
std::cout << "Data learned" << std::endl;
}
loop_t = vpTime::measureTimeMs() - t;
times_vec.push_back(loop_t);
}
if (learn) {
std::cout << "Save learning file: " << learning_data << std::endl;
keypoint.saveLearningData(learning_data, true, true);
}
} catch (const vpException &e) {
std::cout << "Catch an exception: " << e.what() << std::endl;
}
if (!times_vec.empty()) {
std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
<< " ms ; Median: " << vpMath::getMedian(times_vec) << " ; Std: " << vpMath::getStdev(times_vec) << " ms"
<< std::endl;
}
return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
int main()
{
std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
return EXIT_SUCCESS;
}
#else
int main()
{
std::cout << "Install libStructure 3rd party, configure and build ViSP again to use this example" << std::endl;
return EXIT_SUCCESS;
}
#endif