OpenCV PointCloud from depth map

I am trying to create a point cloud with my Minoru3D mini camera, but it doesn't work: the generated points are definitely wrong.
The cameras are correctly calibrated. These are my rectified images (rec1, rec2), the disparity map (disp) and the depth map (depth):

[screenshot: rec1, rec2, disp and depth windows]

And the resulting point cloud: [screenshot of the generated point cloud]

I create the depth image using reprojectImageTo3D and then read the 3D point at each pixel.
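
For reference, here is a minimal, standalone sketch of just this step (savePoints is only an illustrative helper; per the OpenCV documentation, the 16-bit fixed-point disparity produced by StereoBM should be converted to float and divided by 16 before it is reprojected):

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <fstream>

// Reproject a StereoBM disparity map with Q and write the points as "x,y,z" lines.
void savePoints(const cv::Mat& disparity, const cv::Mat& Q, const char* path) {
    // StereoBM outputs CV_16S disparity scaled by 16, so convert to float and rescale first
    cv::Mat disp32;
    disparity.convertTo(disp32, CV_32F, 1.0 / 16.0);

    // With handleMissingValues=true, pixels without a valid disparity get Z = 10000
    cv::Mat xyz;
    cv::reprojectImageTo3D(disp32, xyz, Q, true);

    std::ofstream out(path);
    for (int row = 0; row < xyz.rows; row++) {
        for (int col = 0; col < xyz.cols; col++) {
            cv::Point3f p = xyz.at<cv::Point3f>(row, col);
            if (p.z >= 10000) continue;  // skip missing points
            out << p.x << "," << p.y << "," << p.z << "\n";
        }
    }
}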

Complete code:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include "libcam.h"
#include <stdio.h>
#include <fstream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {

    // The camera properties
    int w = 640;
    int h = 480;
    int fps = 20;

    // The images which are processed
    Mat img1, img2;
    // The grayscale versions of the images
    Mat gray1, gray2;
    // The disparity and depth map
    Mat disparity;
    Mat depth(Size(w, h), CV_32FC3);  // Size takes (width, height)
    // The iplImage versions of the images used to get the images from the camera
    IplImage *iplImg1 = cvCreateImage(cvSize(w, h), 8, 3), *iplImg2 = cvCreateImage(cvSize(w, h), 8, 3);

    // The cameras
    Camera cam1("/dev/video0", w, h, fps), cam2("/dev/video1", w, h, fps);

    // Load the camera model
    Mat CM1, CM2, D1, D2, R, T, E, F, R1, R2, P1, P2, Q;
    FileStorage fs("data/stereocalib.yml", FileStorage::READ);
    fs["CM1"] >> CM1;
    fs["CM2"] >> CM2;
    fs["D1"] >> D1;
    fs["D2"] >> D2;
    fs["R"] >> R;
    fs["T"] >> T;
    fs["E"] >> E;
    fs["F"] >> F;
    fs["R1"] >> R1;
    fs["R2"] >> R2;
    fs["P1"] >> P1;
    fs["P2"] >> P2;
    fs["Q"] >> Q;
    fs.release();

    Mat map1x = Mat(h, w, CV_32F);
    Mat map1y = Mat(h, w, CV_32F);
    Mat map2x = Mat(h, w, CV_32F);
    Mat map2y = Mat(h, w, CV_32F);
    initUndistortRectifyMap(CM1, D1, R1, P1, cvSize(w, h), CV_32FC1, map1x, map1y);
    initUndistortRectifyMap(CM2, D2, R2, P2, cvSize(w, h), CV_32FC1, map2x, map2y);

    // The rectified images
    Mat imgU1(Size(w, h), CV_8UC3), imgU2(Size(w, h), CV_8UC3);  // img1 and img2 are still empty here, so allocate explicitly

    Ptr<StereoBM> sbm = createStereoBM(16 * 10, 5);

    while (true) {

        // Get the images from the cameras
        cam1.Update();
        cam2.Update();
        cam1.toIplImage(iplImg1);
        cam2.toIplImage(iplImg2);
        img1 = cvarrToMat(iplImg1);
        img2 = cvarrToMat(iplImg2);

        // Rectify
        remap(img1, imgU1, map1x, map1y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
        remap(img2, imgU2, map2x, map2y, INTER_LINEAR, BORDER_CONSTANT, Scalar());

        // Convert the rectified images to grayscale images
        cvtColor(imgU1, gray1, CV_BGR2GRAY);
        cvtColor(imgU2, gray2, CV_BGR2GRAY);

        // Create disparity map
        sbm->compute(gray1, gray2, disparity);

        // Create depth map
        reprojectImageTo3D(disparity, depth, Q, true, CV_32F);

        //imshow("img1", img1);
        //imshow("img2", img2);
        imshow("rec1", gray1);
        imshow("rec2", gray2);
        imshow("disp", disparity);
        imshow("depth", depth);

        int key = waitKey(10);
        if (key == 'q') {
            break;
        }
        else if (key == 's') {

            stringstream output;
            for (int row = 0; row < depth.rows; row++) {
                for (int col = 0; col < depth.cols; col++) {
                    Point3f p = depth.at<Point3f>(row, col);
                    if (p.z >= 10000) continue;  // Filter out missing points (Z = 10000)
                    output << p.x << "," << p.y << "," << p.z << endl;
                }
            }

            ofstream outputFile("points");
            outputFile << output.str();
            outputFile.close();

            cout << "saved" << endl;
        }
        else if (key == 'p') {
            waitKey(0);
        }
    }
    destroyAllWindows();

    return 0;
}

      
