diff --git a/include/MORB_SLAM/System.h b/include/MORB_SLAM/System.h
index bfb40fde..bd10d28b 100644
--- a/include/MORB_SLAM/System.h
+++ b/include/MORB_SLAM/System.h
@@ -93,18 +93,18 @@ class System
   // Proccess the given stereo frame. Images must be synchronized and rectified.
   // Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
   // Returns the camera pose (empty if tracking fails).
-  Sophus::SE3f TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
+  Sophus::SE3f TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, double timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
 
   // Process the given rgbd frame. Depthmap must be registered to the RGB frame.
   // Input image: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
   // Input depthmap: Float (CV_32F).
   // Returns the camera pose (empty if tracking fails).
-  Sophus::SE3f TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const double &timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
+  Sophus::SE3f TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, double timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
 
   // Proccess the given monocular frame and optionally imu data
   // Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
   // Returns the camera pose (empty if tracking fails).
-  Sophus::SE3f TrackMonocular(const cv::Mat &im, const double &timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
+  Sophus::SE3f TrackMonocular(const cv::Mat &im, double timestamp, const std::vector<IMU::Point>& vImuMeas = std::vector<IMU::Point>(), std::string filename="");
 
   // This stops local mapping thread (map building) and performs only camera tracking.
diff --git a/include/MORB_SLAM/Tracking.h b/include/MORB_SLAM/Tracking.h
index a3aa4fea..a2604272 100644
--- a/include/MORB_SLAM/Tracking.h
+++ b/include/MORB_SLAM/Tracking.h
@@ -76,7 +76,7 @@ class Tracking {
   Sophus::SE3f GrabImageMonocular(const cv::Mat& im, const double& timestamp, const std::string &filename, const Camera_ptr &cam);
 
-  void GrabImuData(const IMU::Point& imuMeasurement);
+  void GrabImuData(const std::vector<IMU::Point>& imuMeasurements);
 
   void SetLocalMapper(LocalMapping* pLocalMapper);
   void SetLoopClosing(LoopClosing* pLoopClosing);
diff --git a/src/System.cc b/src/System.cc
index f64c935c..37618a27 100644
--- a/src/System.cc
+++ b/src/System.cc
@@ -253,7 +253,7 @@ System::System(const std::string& strVocFile, const std::string& strSettingsFile
 }
 
 Sophus::SE3f System::TrackStereo(const cv::Mat& imLeft, const cv::Mat& imRight,
-                                 const double& timestamp,
+                                 double timestamp,
                                  const std::vector<IMU::Point>& vImuMeas,
                                  std::string filename) {
   if (mSensor != CameraType::STEREO && mSensor != CameraType::IMU_STEREO) {
@@ -315,8 +315,7 @@ Sophus::SE3f System::TrackStereo(const cv::Mat& imLeft, const cv::Mat& imRight,
   }
 
   if (mSensor == CameraType::IMU_STEREO)
-    for (size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
-      mpTracker->GrabImuData(vImuMeas[i_imu]);
+    mpTracker->GrabImuData(vImuMeas);
 
   // std::cout << "start GrabImageStereo" << std::endl;
   Sophus::SE3f Tcw = mpTracker->GrabImageStereo(imLeftToFeed, imRightToFeed,
@@ -335,7 +334,7 @@ Sophus::SE3f System::TrackStereo(const cv::Mat& imLeft, const cv::Mat& imRight,
 }
 
 Sophus::SE3f System::TrackRGBD(const cv::Mat& im, const cv::Mat& depthmap,
-                               const double& timestamp,
+                               double timestamp,
                                const std::vector<IMU::Point>& vImuMeas,
                                std::string filename) {
   if (mSensor != CameraType::RGBD && mSensor != CameraType::IMU_RGBD) {
@@ -389,8 +388,7 @@ Sophus::SE3f System::TrackRGBD(const cv::Mat& im, const cv::Mat& depthmap,
   }
 
   if (mSensor == CameraType::IMU_RGBD)
-    for (size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
-      mpTracker->GrabImuData(vImuMeas[i_imu]);
+    mpTracker->GrabImuData(vImuMeas);
 
   Sophus::SE3f Tcw = mpTracker->GrabImageRGBD(imToFeed, imDepthToFeed, timestamp, filename, cameras[0]); // for now we know cameras[0] is providing the image
 
@@ -402,7 +400,7 @@ Sophus::SE3f System::TrackRGBD(const cv::Mat& im, const cv::Mat& depthmap,
   return Tcw;
 }
 
-Sophus::SE3f System::TrackMonocular(const cv::Mat& im, const double& timestamp,
+Sophus::SE3f System::TrackMonocular(const cv::Mat& im, double timestamp,
                                     const std::vector<IMU::Point>& vImuMeas,
                                     std::string filename) {
   // {
@@ -460,8 +458,7 @@ Sophus::SE3f System::TrackMonocular(const cv::Mat& im, const double& timestamp,
   }
 
   if (mSensor == CameraType::IMU_MONOCULAR)
-    for (size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
-      mpTracker->GrabImuData(vImuMeas[i_imu]);
+    mpTracker->GrabImuData(vImuMeas);
 
   Sophus::SE3f Tcw = mpTracker->GrabImageMonocular(imToFeed, timestamp, filename, cameras[0]); // for now we know cameras[0] is providing the image
 
diff --git a/src/Tracking.cc b/src/Tracking.cc
index fb35950a..6ec122f2 100644
--- a/src/Tracking.cc
+++ b/src/Tracking.cc
@@ -1519,9 +1519,10 @@ Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat& im,
   return mCurrentFrame.GetPose();
 }
 
-void Tracking::GrabImuData(const IMU::Point& imuMeasurement) {
+void Tracking::GrabImuData(const std::vector<IMU::Point>& imuMeasurements) {
   std::unique_lock<std::mutex> lock(mMutexImuQueue);
-  mlQueueImuData.push_back(imuMeasurement);
+  for(auto &point : imuMeasurements)
+    mlQueueImuData.emplace_back(point); // copy ctor
 }
 
 void Tracking::PreintegrateIMU() {
@@ -1540,32 +1541,28 @@ void Tracking::PreintegrateIMU() {
     return;
   }
 
-  while (true) {
-    std::unique_lock<std::mutex> lock(mMutexImuQueue);
-    bool bSleep = false;
+  // std::cout << "mImuPer " << mImuPer << std::endl;
   {
-    if (!mlQueueImuData.empty()) {
-      IMU::Point* m = &mlQueueImuData.front();
-      std::cout.precision(17);
-      if (m->t < mCurrentFrame.mpPrevFrame->mTimeStamp - mImuPer) {
-        mlQueueImuData.pop_front();
-      } else if (m->t < mCurrentFrame.mTimeStamp - mImuPer) {
-        mvImuFromLastFrame.push_back(*m);
-        mlQueueImuData.pop_front();
-      } else {
-        mvImuFromLastFrame.push_back(*m);
+    std::unique_lock<std::mutex> lock(mMutexImuQueue);
+    auto itr = mlQueueImuData.begin();
+    auto lastItr = itr;
+    for(; itr != mlQueueImuData.end(); ++itr){
+      IMU::Point &point = *itr;
+      if(point.t < mCurrentFrame.mpPrevFrame->mTimeStamp - mImuPer) {
+        // pass
+      }else if(point.t < mCurrentFrame.mTimeStamp - mImuPer){
+        mvImuFromLastFrame.emplace_back(point);
+      }else{
         break;
       }
-    } else {
-      break;
-      bSleep = true;
+      lastItr = itr;
     }
-  }
-  if (bSleep) usleep(500);
+    if(!mvImuFromLastFrame.empty())
+      mlQueueImuData.erase(mlQueueImuData.begin(), lastItr);
   }
 
   const int n = mvImuFromLastFrame.size() - 1;
-  if (n == 0) {
+  if (n <= 0) {
     std::cout << "Empty IMU measurements vector!!!\n";
     return;
   }
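
Caller-side note (not part of the patch): after this change, IMU samples are handed to the tracker as one batch per frame and the image timestamp is taken by value. Below is a minimal usage sketch of the updated call pattern; it assumes an ORB-SLAM3-style IMU::Point constructor (acc x/y/z, gyro x/y/z, timestamp) and hypothetical placeholders (SLAM, im, tframe, imuSamples) standing in for the caller's own objects, so the exact names in this repo may differ.

// Usage sketch for the updated System API (assumptions noted above).
#include <MORB_SLAM/System.h>      // assumed include path, matching this repo's layout
using namespace MORB_SLAM;         // assuming the types live in this namespace

std::vector<IMU::Point> vImuMeas;
vImuMeas.reserve(imuSamples.size());
for (const auto& s : imuSamples)                    // hypothetical raw-sample struct
  vImuMeas.emplace_back(s.ax, s.ay, s.az,           // accelerometer [m/s^2]
                        s.wx, s.wy, s.wz,           // gyroscope [rad/s]
                        s.t);                       // sample timestamp [s]

// timestamp is now passed by value; internally the whole vector reaches
// Tracking::GrabImuData() in a single call instead of one call per sample.
Sophus::SE3f Tcw = SLAM.TrackMonocular(im, tframe, vImuMeas);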