
Perspective Transformation (Advanced)

1 Preface

I previously published an article on this official account, 圖像處理的仿射變換與透視變換 (Affine and Perspective Transformations in Image Processing). The present article is a further, more in-depth study of the perspective transformation.

2 Perspective Transformation

Perspective transformations are more general than affine transformations. They do not necessarily preserve the "parallelism" of lines, but precisely because they are more general they are also more practical: almost every transformation encountered in everyday images is a perspective transformation. Have you ever wondered why two railway tracks seem to meet in the distance?

Figure 1.1 Railway tracks

This is because your eyes effectively apply a perspective transformation to the image, and a perspective transformation does not necessarily keep parallel lines parallel. If you looked at the tracks in Figure 1.1 from directly above, they would not appear to meet at all.

Given a 3×3 perspective transformation matrix M, warpPerspective() applies the following transformation:
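For reference, the mapping documented for OpenCV's warpPerspective() can be written as:

\[
\mathrm{dst}(x,\,y) = \mathrm{src}\!\left(\frac{M_{11}x + M_{12}y + M_{13}}{M_{31}x + M_{32}y + M_{33}},\ \frac{M_{21}x + M_{22}y + M_{23}}{M_{31}x + M_{32}y + M_{33}}\right)
\]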

Note that the determinant of the upper-left 2×2 part of a perspective transformation matrix does not need to be +1. Moreover, because of the division in the transformation shown above, multiplying every element of the perspective transformation matrix by a constant makes no difference to the transformation it represents. It is therefore common to set M33 = 1 when computing the matrix. This leaves eight free parameters in M, so four pairs of corresponding points are enough to recover the perspective transformation between two images. The OpenCV function findHomography() does exactly this for you. Interestingly, if you pass the flag CV_RANSAC when calling it, the function can take more than four points and use the RANSAC algorithm to estimate the transformation robustly from all of them; RANSAC makes the estimation immune to noisy "wrong" correspondences.

The code below reads two images (related by a perspective transformation), asks the user to click on eight pairs of matching points, estimates the perspective transformation robustly with RANSAC, and displays the difference between the transformed image and the newly perspective-transformed original so that the estimate can be verified. The complete project and test code have been uploaded to GitHub; follow the WeChat official account 視覺IMAX to obtain the link.

#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Mouse callback: record the position of a left-button click
void on_mouse(int event, int x, int y, int, void* _p) {
    Point2f* p = (Point2f*)_p;
    if (event == CV_EVENT_LBUTTONUP) {
        p->x = x;
        p->y = y;
    }
}

class perspective_transformer {
private:
    Mat im, im_transformed, im_perspective_transformed, im_show, im_transformed_show;
    vector<Point2f> points, points_transformed;
    Mat M;
    Point2f get_click(string, Mat);
public:
    perspective_transformer();
    void estimate_perspective();
    void show_diff();
};

perspective_transformer::perspective_transformer() {
    im = imread("./DataFiles/image.bmp");
    im_transformed = imread("./DataFiles/transformed.bmp");
}

// Block until the user clicks in the given window, then return the click position
Point2f perspective_transformer::get_click(string window_name, Mat im) {
    Point2f p(-1, -1);
    setMouseCallback(window_name, on_mouse, (void*)&p);
    while (p.x == -1 && p.y == -1) {
        imshow(window_name, im);
        waitKey(20);
    }
    return p;
}

void perspective_transformer::estimate_perspective() {
    namedWindow("Original", 1);
    namedWindow("Transformed", 1);
    imshow("Original", im);
    imshow("Transformed", im_transformed);
    cout << "To estimate the Perspective transform between the original and transformed images you will have to click on 8 matching pairs of points" << endl;
    im_show = im.clone();
    im_transformed_show = im_transformed.clone();
    Point2f p;
    for (int i = 0; i < 8; i++) {
        cout << "POINT " << i << endl;
        cout << "Click on a distinguished point in the ORIGINAL image" << endl;
        p = get_click("Original", im_show);
        cout << p << endl;
        points.push_back(p);
        circle(im_show, p, 2, Scalar(0, 0, 255), -1);
        imshow("Original", im_show);
        cout << "Click on a distinguished point in the TRANSFORMED image" << endl;
        p = get_click("Transformed", im_transformed_show);
        cout << p << endl;
        points_transformed.push_back(p);
        circle(im_transformed_show, p, 2, Scalar(0, 0, 255), -1);
        imshow("Transformed", im_transformed_show);
    }
    // Estimate perspective transform robustly with RANSAC (reprojection threshold: 2 pixels)
    M = findHomography(points, points_transformed, CV_RANSAC, 2);
    cout << "Estimated Perspective transform = " << M << endl;
    // Apply estimated perspective transform
    warpPerspective(im, im_perspective_transformed, M, im.size());
    namedWindow("Estimated Perspective transform", 1);
    imshow("Estimated Perspective transform", im_perspective_transformed);
    imwrite("./DataFiles/im_perspective_transformed.bmp", im_perspective_transformed);
}

void perspective_transformer::show_diff() {
    imshow("Difference", im_transformed - im_perspective_transformed);
}

int main() {
    perspective_transformer a;
    a.estimate_perspective();
    cout << "Press d to show difference, q to end" << endl;
    if (char(waitKey(-1)) == 'd') {
        a.show_diff();
        cout << "Press q to end" << endl;
        if (char(waitKey(-1)) == 'q')
            return 0;
    }
    return 0;
}

From the analysis above, when more than four point pairs are available (here at least eight), the homography matrix M estimated with findHomography() fits the correspondences better than the one obtained with getPerspectiveTransform(), which by default uses only the first four of the supplied points.
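As a minimal sketch of this comparison (assuming two vectors of clicked correspondences, pts_src and pts_dst, which are hypothetical names standing for the points collected in the program above):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace cv;

// Sketch only: pts_src / pts_dst stand for the >= 8 clicked point pairs.
Mat compare_estimates(const std::vector<Point2f>& pts_src,
                      const std::vector<Point2f>& pts_dst)
{
    // getPerspectiveTransform() takes exactly four point pairs,
    // so only the first four correspondences are used here.
    std::vector<Point2f> src4(pts_src.begin(), pts_src.begin() + 4);
    std::vector<Point2f> dst4(pts_dst.begin(), pts_dst.begin() + 4);
    Mat M_exact = getPerspectiveTransform(src4, dst4);

    // findHomography() uses all correspondences and, with CV_RANSAC,
    // rejects noisy "wrong" pairs (reprojection threshold: 2 pixels).
    Mat M_robust = findHomography(pts_src, pts_dst, CV_RANSAC, 2);

    std::cout << "4-point estimate:\n" << M_exact << std::endl;
    std::cout << "RANSAC estimate:\n" << M_robust << std::endl;
    return M_robust;
}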

3 Practice

We want to apply a perspective transformation to the coded marker in the image below so that the ellipse is mapped back to a circle and the surrounding ring band is rectified as well. To do this, we need at least eight pairs of corresponding points between the image below and the rectified image.

Figure 3.1 Original image

After the ellipse has been perspective-transformed into a circle, the relationship between the coordinates of points on the ellipse and the coordinates of the corresponding points on the circle is shown in Figure 3.2:

Figure 3.2 Perspective projection transformation

From this relationship, nine pairs of corresponding points are easy to find: the four points where the major and minor axes intersect the coordinate axes, the four diagonal points, and the centre point.
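The nine correspondences could also be generated from fitted ellipse parameters instead of being clicked by hand. The sketch below is only an illustration of assembling the point pairs from Figure 3.2 and passing them to findHomography(); the ellipse parameters (centre c, semi-axes a and b, rotation theta), the target circle (centre and radius r), and the function names are all assumptions, not part of the original project.

#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>
using namespace cv;

// Point on an ellipse (centre c, semi-axes a/b, rotation theta) at parametric angle t
static Point2f ellipse_pt(Point2f c, float a, float b, float theta, float t)
{
    float x = a * std::cos(t), y = b * std::sin(t);
    return Point2f(c.x + x * std::cos(theta) - y * std::sin(theta),
                   c.y + x * std::sin(theta) + y * std::cos(theta));
}

// Build the nine correspondences described above and estimate the homography.
// In practice the ellipse parameters would come from an ellipse fit
// (e.g. fitEllipse) on the detected marker; here they are assumed as inputs.
Mat rectify_marker(const Mat& im, Point2f c, float a, float b, float theta,
                   Point2f circle_centre, float r, Mat& rectified)
{
    std::vector<Point2f> ellipse_pts, circle_pts;
    // Axis endpoints and "diagonal" points at 45-degree steps, plus the centre
    for (int k = 0; k < 8; k++) {
        float t = k * (float)CV_PI / 4.0f;   // 0, 45, 90, ... degrees
        ellipse_pts.push_back(ellipse_pt(c, a, b, theta, t));
        circle_pts.push_back(Point2f(circle_centre.x + r * std::cos(t),
                                     circle_centre.y + r * std::sin(t)));
    }
    ellipse_pts.push_back(c);
    circle_pts.push_back(circle_centre);

    Mat M = findHomography(ellipse_pts, circle_pts, CV_RANSAC, 2);
    warpPerspective(im, rectified, M, im.size());
    return M;
}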

The result of the perspective transformation estimated from these nine pairs of corresponding points is shown in Figure 3.3:

Figure 3.3 Perspective transformation from multiple pairs of corresponding points

4 Summary

This article both complements the previous one and presents a different method; you can choose between them according to your needs.

Finally, thanks to 朱禹軻 for his help with the ellipse correspondence problem, and to 張老師 for his great care and guidance during my exploration of new methods and ideas.

WeChat official account: 視覺IMAX

