#include <visp3/core/vpCPUFeatures.h>
#include <visp3/mbt/vpMbtFaceDepthDense.h>
// ...
#include <pcl/common/point_tests.h>
// ...
#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
// ...
#define VISP_HAVE_SSE2 1
// ...
#if !defined(__FMA__) && defined(__AVX2__)
// ...
#if defined _WIN32 && defined(_M_ARM64)
#define _ARM64_DISTINCT_NEON_TYPES
// ...
#define VISP_HAVE_NEON 1
#elif (defined(__ARM_NEON__) || defined(__ARM_NEON)) && defined(__aarch64__)
// ...
#define VISP_HAVE_NEON 1
// ...
#define USE_SIMD_CODE 1
// ...
#if VISP_HAVE_SSE2 && USE_SIMD_CODE
// ...
#if VISP_HAVE_NEON && USE_SIMD_CODE
// ...
#if (VISP_HAVE_OPENCV_VERSION >= 0x040101 || (VISP_HAVE_OPENCV_VERSION < 0x040000 && VISP_HAVE_OPENCV_VERSION >= 0x030407)) && USE_SIMD_CODE
// ...
#if (VISP_HAVE_OPENCV_VERSION >= 0x040B00) || (VISP_HAVE_OPENCV_VERSION < 0x040900) || \
    ((VISP_HAVE_OPENCV_VERSION >= 0x040900) && (VISP_HAVE_OPENCV_VERSION < 0x040B00) && (USE_SSE || USE_NEON))
#define USE_OPENCV_HAL 1
#include <opencv2/core/simd_intrinsics.hpp>
#include <opencv2/core/hal/intrin.hpp>
// ...
#if !USE_OPENCV_HAL && (USE_SSE || USE_NEON)
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
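// When OpenCV's universal intrinsics are available (USE_OPENCV_HAL), the SIMD
// code in computeInteractionMatrixAndResidu() uses the cv::v_float64x2 API
// directly. Otherwise, on SSE2 or NEON builds, the small helpers below emulate
// the needed subset of that API (v_load_deinterleave, v_combine_low/high,
// v_fma) on __m128d / float64x2_t. Without any SIMD backend the function falls
// back to its scalar branch.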
inline void v_load_deinterleave(const uint64_t *ptr, __m128i &a, __m128i &b, __m128i &c)
{
  __m128i t0 = _mm_loadu_si128((const __m128i *)ptr);
  __m128i t1 = _mm_loadu_si128((const __m128i *)(ptr + 2));
  __m128i t2 = _mm_loadu_si128((const __m128i *)(ptr + 4));

  t1 = _mm_shuffle_epi32(t1, 0x4e); // swap the two 64-bit lanes of t1
  a = _mm_unpacklo_epi64(t0, t1);
  b = _mm_unpacklo_epi64(_mm_unpackhi_epi64(t0, t0), t2);
  c = _mm_unpackhi_epi64(t1, t2);
}

inline void v_load_deinterleave(const double *ptr, __m128d &a0, __m128d &b0, __m128d &c0)
{
  __m128i a1, b1, c1;
  v_load_deinterleave((const uint64_t *)ptr, a1, b1, c1);
  a0 = _mm_castsi128_pd(a1);
  b0 = _mm_castsi128_pd(b1);
  c0 = _mm_castsi128_pd(c1);
}

inline __m128d v_combine_low(const __m128d &a, const __m128d &b)
{
  __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b);
  return _mm_castsi128_pd(_mm_unpacklo_epi64(a1, b1));
}

inline __m128d v_combine_high(const __m128d &a, const __m128d &b)
{
  __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b);
  return _mm_castsi128_pd(_mm_unpackhi_epi64(a1, b1));
}

inline __m128d v_fma(const __m128d &a, const __m128d &b, const __m128d &c)
{
#ifdef __FMA__
  return _mm_fmadd_pd(a, b, c); // fused a*b + c
#else
  return _mm_add_pd(_mm_mul_pd(a, b), c);
#endif
}
inline void v_load_deinterleave(const double *ptr, float64x2_t &a0, float64x2_t &b0, float64x2_t &c0)
{
  float64x2x3_t v = vld3q_f64(ptr);
  a0 = v.val[0];
  b0 = v.val[1];
  c0 = v.val[2];
}

inline float64x2_t v_combine_low(const float64x2_t &a, const float64x2_t &b)
{
  return vcombine_f64(vget_low_f64(a), vget_low_f64(b));
}

inline float64x2_t v_combine_high(const float64x2_t &a, const float64x2_t &b)
{
  return vcombine_f64(vget_high_f64(a), vget_high_f64(b));
}

inline float64x2_t v_fma(const float64x2_t &a, const float64x2_t &b, const float64x2_t &c)
{
  return vfmaq_f64(c, a, b); // c + a*b
}
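// Illustrative only (not part of the original file): a minimal check of the
// deinterleave helper above, assuming this translation unit is built with the
// SSE2 path enabled. A packed buffer {x0, y0, z0, x1, y1, z1} is split into
// vx = {x0, x1}, vy = {y0, y1}, vz = {z0, z1}, which is the layout the SIMD
// loop of computeInteractionMatrixAndResidu() relies on.
static void sketch_check_deinterleave()
{
  const double pts[6] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 }; // x0 y0 z0 x1 y1 z1
  __m128d vx, vy, vz;
  v_load_deinterleave(pts, vx, vy, vz);

  double x[2], y[2], z[2];
  _mm_storeu_pd(x, vx); // {1, 4}
  _mm_storeu_pd(y, vy); // {2, 5}
  _mm_storeu_pd(z, vz); // {3, 6}
  (void)x;
  (void)y;
  (void)z;
}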
vpMbtFaceDepthDense::vpMbtFaceDepthDense()
  : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL),
    m_planeObject(), m_polygon(NULL), m_useScanLine(false),
    m_depthDenseFilteringMethod(DEPTH_OCCUPANCY_RATIO_FILTERING), m_depthDenseFilteringMaxDist(3.0),
    m_depthDenseFilteringMinDist(0.8), m_depthDenseFilteringOccupancyRatio(0.3), m_isTrackedDepthDenseFace(true),
    m_isVisible(false), m_listOfFaceLines(), m_planeCamera(), m_pointCloudFace(), m_polygonLines()
{ }
// ...
void vpMbtFaceDepthDense::addLine(vpPoint &P1, vpPoint &P2, vpMbHiddenFaces<vpMbtPolygon> *const faces,
                                  vpUniRand &rand_gen, int polygon, std::string name)
{
  // Build a PolygonLine for this edge; it is stored in m_polygonLines and used
  // for the scan-line visibility test
  PolygonLine polygon_line;

  polygon_line.m_poly.setNbPoint(2);
  polygon_line.m_poly.addPoint(0, P1);
  polygon_line.m_poly.addPoint(1, P2);
  // ...
  polygon_line.m_p1 = &polygon_line.m_poly.p[0];
  polygon_line.m_p2 = &polygon_line.m_poly.p[1];
  // ...
  // Do not insert the line twice when it is shared with an already processed face
  bool already_here = false;
  // ...
bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo,
                                                 const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
                                                 unsigned int stepX, unsigned int stepY
#if DEBUG_DISPLAY_DEPTH_DENSE
                                                 // ...
                                                 std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                                 ,
                                                 const vpImage<bool> *mask)
{
  // ...
  unsigned int width = point_cloud->width, height = point_cloud->height;
  // ...
  if (point_cloud->width == 0 || point_cloud->height == 0)
    return false;

  std::vector<vpImagePoint> roiPts;
  double distanceToFace;
  // ... (computeROI fills roiPts and distanceToFace)
#if DEBUG_DISPLAY_DEPTH_DENSE
  // ...
#endif
  // ...
  if (roiPts.size() <= 2) {
    // ...
    std::cerr << "Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
    // ...
    return false;
  }
  // ...
  // bb: bounding box of the projected face (computed in the elided code above)
  unsigned int top = (unsigned int)std::max(0.0, bb.getTop());
  unsigned int bottom = (unsigned int)std::min((double)height, std::max(0.0, bb.getBottom()));
  unsigned int left = (unsigned int)std::max(0.0, bb.getLeft());
  unsigned int right = (unsigned int)std::min((double)width, std::max(0.0, bb.getRight()));
  // ...
  int totalTheoreticalPoints = 0, totalPoints = 0;
  for (unsigned int i = top; i < bottom; i += stepY) {
    for (unsigned int j = left; j < right; j += stepX) {
      if ((m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
                            j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
                            /* ... the primitive ID rendered at (i, j) belongs to this face ... */)
                         : /* ... 2D point-in-polygon test on the projected face ... */)) {
        totalTheoreticalPoints++;
        // ...
        if (vpMeTracker::inMask(mask, i, j) && pcl::isFinite((*point_cloud)(j, i)) && (*point_cloud)(j, i).z > 0) {
          // ... (the valid 3D point is stored into m_pointCloudFace)
#if DEBUG_DISPLAY_DEPTH_DENSE
          debugImage[i][j] = 255;
#endif
          // ...
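// The sampling loop above walks the bounding box of the projected face with
// strides of stepX and stepY pixels, counts how many samples could
// theoretically fall on the face (totalTheoreticalPoints) versus how many carry
// a valid, finite depth with z > 0 (totalPoints), and keeps the latter in
// m_pointCloudFace. The ratio of the two quantities is what the occupancy-ratio
// filtering (m_depthDenseFilteringOccupancyRatio) is based on.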
bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo, unsigned int width,
                                                 unsigned int height, const std::vector<vpColVector> &point_cloud,
                                                 unsigned int stepX, unsigned int stepY
#if DEBUG_DISPLAY_DEPTH_DENSE
                                                 // ...
                                                 std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                                 ,
                                                 const vpImage<bool> *mask)
{
  // ...
  if (width == 0 || height == 0)
    return false;

  std::vector<vpImagePoint> roiPts;
  double distanceToFace;
  // ... (computeROI fills roiPts and distanceToFace)
#if DEBUG_DISPLAY_DEPTH_DENSE
  // ...
#endif
  // ...
  if (roiPts.size() <= 2) {
    // ...
    std::cerr << "Error: roiPts.size() <= 2 in computeDesiredFeatures" << std::endl;
    // ...
    return false;
  }
  // ...
  // bb: bounding box of the projected face (computed in the elided code above)
  unsigned int top = (unsigned int)std::max(0.0, bb.getTop());
  unsigned int bottom = (unsigned int)std::min((double)height, std::max(0.0, bb.getBottom()));
  unsigned int left = (unsigned int)std::max(0.0, bb.getLeft());
  unsigned int right = (unsigned int)std::min((double)width, std::max(0.0, bb.getRight()));
  // ...
  int totalTheoreticalPoints = 0, totalPoints = 0;
  for (unsigned int i = top; i < bottom; i += stepY) {
    for (unsigned int j = left; j < right; j += stepX) {
      if ((m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
                            j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
                            /* ... the primitive ID rendered at (i, j) belongs to this face ... */)
                         : /* ... 2D point-in-polygon test on the projected face ... */)) {
        totalTheoreticalPoints++;
        // ...
#if DEBUG_DISPLAY_DEPTH_DENSE
        debugImage[i][j] = 255;
#endif
        // ...
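// This overload mirrors the PCL one above; the only difference is that each 3D
// point is read from a vpColVector entry of the input vector instead of from a
// pcl::PointXYZ cloud.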
  // ...
  bool isvisible = false;
  // ...
  int index = *itindex;
  // ...
  // nx, ny, nz, D: coefficients of the face plane nx*x + ny*y + nz*z + D = 0,
  // expressed in the camera frame (m_planeObject transformed by cMo) in the
  // elided code above.
#if !USE_SSE && !USE_NEON && !USE_OPENCV_HAL
  // ...
#endif
  // ...
#if USE_SSE || USE_NEON || USE_OPENCV_HAL
    // ...
    double *ptr_point_cloud = &m_pointCloudFace[0];
    double *ptr_L = L.data;
    double *ptr_error = error.data;

#if USE_OPENCV_HAL
    const cv::v_float64x2 vnx = cv::v_setall_f64(nx);
    const cv::v_float64x2 vny = cv::v_setall_f64(ny);
    const cv::v_float64x2 vnz = cv::v_setall_f64(nz);
    const cv::v_float64x2 vd = cv::v_setall_f64(D);
#elif USE_SSE
    const __m128d vnx = _mm_set1_pd(nx);
    const __m128d vny = _mm_set1_pd(ny);
    const __m128d vnz = _mm_set1_pd(nz);
    const __m128d vd = _mm_set1_pd(D);
#else
    const float64x2_t vnx = vdupq_n_f64(nx);
    const float64x2_t vny = vdupq_n_f64(ny);
    const float64x2_t vnz = vdupq_n_f64(nz);
    const float64x2_t vd = vdupq_n_f64(D);
#endif
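    // Each retained 3D point X = (x, y, z) of the face contributes one residual
    //   e = D + nx*x + ny*y + nz*z   (signed point-to-plane distance)
    // and one 6-column row of the interaction matrix L:
    //   [ nx  ny  nz  nz*y - ny*z  nx*z - nz*x  ny*x - nx*y ]
    // where the last three entries are the components of the cross product of X
    // with the plane normal. The loop below consumes two points (six doubles)
    // per iteration: the combine_low/high helpers assemble the two rows and a
    // fused multiply-add evaluates the two residuals.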
    for (; cpt <= m_pointCloudFace.size() - 6; cpt += 6, ptr_point_cloud += 6) {
#if USE_OPENCV_HAL
      cv::v_float64x2 vx, vy, vz;
      cv::v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

#if (VISP_HAVE_OPENCV_VERSION >= 0x040900)
      cv::v_float64x2 va1 = cv::v_sub(cv::v_mul(vnz, vy), cv::v_mul(vny, vz));
      cv::v_float64x2 va2 = cv::v_sub(cv::v_mul(vnx, vz), cv::v_mul(vnz, vx));
      cv::v_float64x2 va3 = cv::v_sub(cv::v_mul(vny, vx), cv::v_mul(vnx, vy));
#else
      cv::v_float64x2 va1 = vnz * vy - vny * vz;
      cv::v_float64x2 va2 = vnx * vz - vnz * vx;
      cv::v_float64x2 va3 = vny * vx - vnx * vy;
#endif

      cv::v_float64x2 vnxy = cv::v_combine_low(vnx, vny);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_low(vnz, va1);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_low(va2, va3);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = cv::v_combine_high(vnx, vny);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_high(vnz, va1);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = cv::v_combine_high(va2, va3);
      cv::v_store(ptr_L, vnxy);
      ptr_L += 2;

#if (VISP_HAVE_OPENCV_VERSION >= 0x040900)
      cv::v_float64x2 verr = cv::v_add(vd, cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, cv::v_mul(vnz, vz))));
#else
      cv::v_float64x2 verr = vd + cv::v_muladd(vnx, vx, cv::v_muladd(vny, vy, vnz * vz));
#endif
      cv::v_store(ptr_error, verr);
      ptr_error += 2;
#elif USE_SSE
      __m128d vx, vy, vz;
      v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

      __m128d va1 = _mm_sub_pd(_mm_mul_pd(vnz, vy), _mm_mul_pd(vny, vz));
      __m128d va2 = _mm_sub_pd(_mm_mul_pd(vnx, vz), _mm_mul_pd(vnz, vx));
      __m128d va3 = _mm_sub_pd(_mm_mul_pd(vny, vx), _mm_mul_pd(vnx, vy));

      __m128d vnxy = v_combine_low(vnx, vny);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(vnz, va1);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(va2, va3);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = v_combine_high(vnx, vny);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(vnz, va1);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(va2, va3);
      _mm_storeu_pd(ptr_L, vnxy);
      ptr_L += 2;

      const __m128d verror = _mm_add_pd(vd, v_fma(vnx, vx, v_fma(vny, vy, _mm_mul_pd(vnz, vz))));
      _mm_storeu_pd(ptr_error, verror);
      ptr_error += 2;
#else
      float64x2_t vx, vy, vz;
      v_load_deinterleave(ptr_point_cloud, vx, vy, vz);

      float64x2_t va1 = vsubq_f64(vmulq_f64(vnz, vy), vmulq_f64(vny, vz));
      float64x2_t va2 = vsubq_f64(vmulq_f64(vnx, vz), vmulq_f64(vnz, vx));
      float64x2_t va3 = vsubq_f64(vmulq_f64(vny, vx), vmulq_f64(vnx, vy));

      float64x2_t vnxy = v_combine_low(vnx, vny);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(vnz, va1);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_low(va2, va3);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;

      vnxy = v_combine_high(vnx, vny);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(vnz, va1);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;
      vnxy = v_combine_high(va2, va3);
      vst1q_f64(ptr_L, vnxy);
      ptr_L += 2;

      const float64x2_t verror = vaddq_f64(vd, v_fma(vnx, vx, v_fma(vny, vy, vmulq_f64(vnz, vz))));
      vst1q_f64(ptr_error, verror);
      ptr_error += 2;
#endif
    }
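    // The vectorized loop above handles points two at a time; the scalar code
    // below processes whatever remains when the number of stored doubles is not
    // a multiple of 6 (m_pointCloudFace stores x, y, z triplets, hence the row
    // index cpt / 3), using exactly the same row and residual expressions.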
    // ...
    double _a1 = (nz * y) - (ny * z);
    double _a2 = (nx * z) - (nz * x);
    double _a3 = (ny * x) - (nx * y);

    L[(unsigned int)(cpt / 3)][0] = nx;
    L[(unsigned int)(cpt / 3)][1] = ny;
    L[(unsigned int)(cpt / 3)][2] = nz;
    L[(unsigned int)(cpt / 3)][3] = _a1;
    L[(unsigned int)(cpt / 3)][4] = _a2;
    L[(unsigned int)(cpt / 3)][5] = _a3;
    // ... (normal = (nx, ny, nz), pt = (x, y, z))
    error[(unsigned int)(cpt / 3)] = D + (normal.t() * pt);
  // ...

  // Scalar branch, used when no SIMD backend is usable at run time:
  unsigned int idx = 0;
  // ...
    double _a1 = (nz * y) - (ny * z);
    double _a2 = (nx * z) - (nz * x);
    double _a3 = (ny * x) - (nx * y);
    // ... (the row L[idx] and pt are filled as above)
    error[idx] = D + (normal.t() * pt);
  // ...
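// Illustrative only (not part of the original file): a plain scalar cross-check
// of the row and residual built above. For a point lying exactly on the plane
// nx*x + ny*y + nz*z + D = 0 the residual is zero, and the last three row
// entries are the components of the cross product of the point with the plane
// normal.
static void sketch_check_plane_residual()
{
  const double nx = 0.0, ny = 0.0, nz = 1.0, D = -2.0; // plane z = 2
  const double x = 0.5, y = -1.0, z = 2.0;             // a point on that plane

  const double a1 = (nz * y) - (ny * z); // -1.0
  const double a2 = (nx * z) - (nz * x); // -0.5
  const double a3 = (ny * x) - (nx * y); //  0.0
  const double row[6] = { nx, ny, nz, a1, a2, a3 };     // one row of L
  const double residual = D + nx * x + ny * y + nz * z; // 0.0
  (void)row;
  (void)residual;
}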
void vpMbtFaceDepthDense::computeROI(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height,
                                     std::vector<vpImagePoint> &roiPts
#if DEBUG_DISPLAY_DEPTH_DENSE
                                     ,
                                     std::vector<std::vector<vpImagePoint> > &roiPts_vec
#endif
                                     ,
                                     double &distanceToFace)
{
  // ...
  // Scan-line case: clip every PolygonLine of the face (iterator it over
  // m_polygonLines) and query the scan-line renderer for the visible segments.
      it->m_p1->changeFrame(cMo);
      it->m_p2->changeFrame(cMo);
      // ...
      it->m_poly.changeFrame(cMo);
      it->m_poly.computePolygonClipped(m_cam);

      if (it->m_poly.polyClipped.size() == 2 &&
          /* ... the clipped segment is kept by the near/far/FOV clipping checks ... */) {

        std::vector<std::pair<vpPoint, vpPoint> > linesLst;
        // ... (linesLst: visible sub-segments returned by computeScanLineQuery)
        vpPoint faceCentroid;

        for (unsigned int i = 0; i < linesLst.size(); i++) {
          // ...
          linesLst[i].second.project();
          // ... (ip1, ip2: image projections of the two segment extremities)
          roiPts.push_back(ip1);
          roiPts.push_back(ip2);

          faceCentroid.set_X(faceCentroid.get_X() + linesLst[i].first.get_X() + linesLst[i].second.get_X());
          faceCentroid.set_Y(faceCentroid.get_Y() + linesLst[i].first.get_Y() + linesLst[i].second.get_Y());
          faceCentroid.set_Z(faceCentroid.get_Z() + linesLst[i].first.get_Z() + linesLst[i].second.get_Z());

#if DEBUG_DISPLAY_DEPTH_DENSE
          std::vector<vpImagePoint> roiPts_;
          roiPts_.push_back(ip1);
          roiPts_.push_back(ip2);
          roiPts_vec.push_back(roiPts_);
#endif
        }

        if (linesLst.empty()) {
          distanceToFace = std::numeric_limits<double>::max();
        }
        else {
          faceCentroid.set_X(faceCentroid.get_X() / (2 * linesLst.size()));
          faceCentroid.set_Y(faceCentroid.get_Y() / (2 * linesLst.size()));
          faceCentroid.set_Z(faceCentroid.get_Z() / (2 * linesLst.size()));

          distanceToFace =
              sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() +
                   faceCentroid.get_Z() * faceCentroid.get_Z());
        }
      }
  // ...

  // Non scan-line case: work directly on the clipped 3D polygon of the face.
  std::vector<vpPoint> polygonsClipped;
  // ... (filled with the clipped 3D polygon of the face)
  if (polygonsClipped.empty()) {
    distanceToFace = std::numeric_limits<double>::max();
  }
  else {
    vpPoint faceCentroid;

    for (size_t i = 0; i < polygonsClipped.size(); i++) {
      faceCentroid.set_X(faceCentroid.get_X() + polygonsClipped[i].get_X());
      faceCentroid.set_Y(faceCentroid.get_Y() + polygonsClipped[i].get_Y());
      faceCentroid.set_Z(faceCentroid.get_Z() + polygonsClipped[i].get_Z());
    }

    faceCentroid.set_X(faceCentroid.get_X() / polygonsClipped.size());
    faceCentroid.set_Y(faceCentroid.get_Y() / polygonsClipped.size());
    faceCentroid.set_Z(faceCentroid.get_Z() / polygonsClipped.size());

    distanceToFace = sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() +
                          faceCentroid.get_Z() * faceCentroid.get_Z());
  }
  // ... (the 2D outline is obtained with m_polygon->getRoiClipped(m_cam, roiPts))

#if DEBUG_DISPLAY_DEPTH_DENSE
  roiPts_vec.push_back(roiPts);
#endif
}
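// computeROI() fills roiPts with the 2D outline of the (possibly clipped) face
// and returns in distanceToFace the norm of the face centroid expressed in the
// camera frame; computeDesiredFeatures() uses the former to bound its sampling
// loop and the latter for the min/max distance filtering
// (m_depthDenseFilteringMinDist / m_depthDenseFilteringMaxDist).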
void vpMbtFaceDepthDense::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
                                  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
                                  bool displayFullModel)
{
  std::vector<std::vector<double> > models =
      getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);

  for (size_t i = 0; i < models.size(); i++) {
    // ... (each entry encodes one line segment, drawn with vpDisplay::displayLine)
  }
}

void vpMbtFaceDepthDense::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
                                  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
                                  bool displayFullModel)
{
  std::vector<std::vector<double> > models =
      getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);

  for (size_t i = 0; i < models.size(); i++) {
    // ...
  }
}
// ...
std::vector<std::vector<double> > vpMbtFaceDepthDense::getModelForDisplay(unsigned int width, unsigned int height,
                                                                          const vpHomogeneousMatrix &cMo,
                                                                          const vpCameraParameters &cam,
                                                                          bool displayFullModel)
{
  std::vector<std::vector<double> > models;
  // ...
    std::vector<std::vector<double> > lineModels = /* ... vpMbtDistanceLine::getModelForDisplay(...) ... */;
    models.insert(models.end(), lineModels.begin(), lineModels.end());
  // ...
  // samePoint(): two points are considered identical when every object-frame
  // coordinate difference is below epsilon
  if (dx <= std::numeric_limits<double>::epsilon() && dy <= std::numeric_limits<double>::epsilon() &&
      dz <= std::numeric_limits<double>::epsilon())
  // ...

  // setCameraParameters(): propagate the intrinsics to every face line
    (*it)->setCameraParameters(camera);
  // ...

  // setScanLineVisibilityTest(): propagate the flag to every face line
    (*it)->useScanLine = v;
  // ...
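// Illustrative only (not part of the original file): a minimal sketch of how a
// depth-dense face is driven, assuming a PCL build. In practice this is done
// internally by the ViSP model-based tracker rather than by user code, and the
// 2x2 sampling step is an arbitrary choice for the sketch.
void sketch_use_face(vpMbtFaceDepthDense &face, const vpHomogeneousMatrix &cMo,
                     const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
{
  // Sample the visible part of the face every 2 pixels in u and v and keep the
  // valid depth points (returns false if the face is not usable).
  if (face.computeDesiredFeatures(cMo, point_cloud, 2, 2)) {
    vpMatrix L;        // one [nx ny nz a1 a2 a3] row per retained 3D point
    vpColVector error; // one point-to-plane residual per retained 3D point
    face.computeInteractionMatrixAndResidu(cMo, L, error);
  }
}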