1. 概述
我在之前的文章《基于均值坐标(Mean-Value Coordinates)的图像融合算法的具体实现》中,根据《Coordinates for Instant Image Cloning》这篇论文,详细论述了图像融合中泊松融合算法的优化算法——均值坐标(Mean-Value Coordinates)融合算法的具体实现。其实在这篇论文中,还提出了两种优化实现,能够进一步提升效率,这里就论述一下其优化算法的具体实现。
2. 实现
2.1. 原理
均值坐标融合算法的核心思想是算出ROI中每个点的MVC(Mean-Value Coordinates),如果ROI中存在n个点,ROI边界像素为m个点,那么该算法的时间复杂度至少为O(nm)。根据《Coordinates for Instant Image Cloning》的描述,MVC融合算法修正值其实是一个线性区间,只要得到其中一些关键点的融合修正值,其他点的融合修正值就可以根据周围的融合修正值线性插值出来。
因此,可以通过CGAL来对ROI多边形边界构建一个自适应三角网,以边界上每个栅格点作为约束构网,为了满足Delaunay特性,就会在ROI内部新添加一些点,这样就会形成靠近边界处三角形小而密集、远离边界处三角形大而稀疏的自适应三角网(可参看这篇文章《通过CGAL将一个多边形剖分成Delaunay三角网》):
这样,n个点就会降为常数级别的个数c,也就是时间复杂度至少为O(cm)。当然从三角面上插值也会有时间消耗,但其时间复杂度会明显小于计算均值坐标的复杂度。
2.2. 核心代码
具体核心代码实现如下(完整的代码实现地址见文章末尾):
//三角网优化 void QImageShowWidget::MVCBlendOptimize(int posX, int posY) { QTime startTime = QTime::currentTime(); //Step1:找到边界上所有的像素点 vector<Vector2d> ROIBoundPointList; CalBoundPoint(ROIBoundPointList); // CDT cdt; vector<Vertex_handle> vertexList; for(int i = 0; i<ROIBoundPointList.size(); i++) { //cout<<ROIBoundPointList[i].x<<','<<ROIBoundPointList[i].y<<'\t'; //vertexList.push_back(cdt.insert(Point(pointList[i].x(), pointList[i].y() ))); vertexList.push_back(cdt.insert(CDTPoint(ROIBoundPointList[i].x, ROIBoundPointList[i].y ))); } for(unsigned int i =0;i<vertexList.size()-1;i++) { cdt.insert_constraint(vertexList[i],vertexList[i+1]); } std::cout << "Number of vertices: " << cdt.number_of_vertices() <<std::endl; std::cout << "Meshing the triangulation..." << std::endl; CGAL::refine_Delaunay_mesh_2(cdt, Criteria()); std::cout << "Number of vertices: " << cdt.number_of_vertices() <<std::endl; vector<Vector2d> vertex_list; map<Vector2d, size_t> vertex_map; for(CDT::Vertex_iterator vit = cdt.vertices_begin(); vit!= cdt.vertices_end(); ++vit) { vertex_map.insert(make_pair(Vector2d(vit->point().x(), vit->point().y()), vertex_list.size())); vertex_list.push_back(Vector2d(vit->point().x(), vit->point().y())); } //计算边界的像素差值 vector<int> diff; for(size_t i = 0; i < ROIBoundPointList.size()-1; i++) { //size_t l = (size_t) srcImg.cols * ROIBoundPointList[i].y + ROIBoundPointList[i].x; for(int bi = 0; bi < winBandNum; bi++) { size_t m = (size_t) dstImg.cols * winBandNum * (ROIBoundPointList[i].y + posY)+ winBandNum * (ROIBoundPointList[i].x + posX) + bi; size_t n = (size_t) srcImg.cols * winBandNum * ROIBoundPointList[i].y + winBandNum * ROIBoundPointList[i].x + bi; int d = (int)(dstImg.data[m]) - (int)(srcImg.data[n]); diff.push_back(d); //rMat.data[n] = d; } //clipMap[l] = false; //在多边形边上的点没法计算MVC } cout<<"开始计算 mean-value coordinates..." 
<< endl; vector<Vec3d> tri_mesh_vertex_R(vertex_list.size()); #pragma omp parallel for //开启OpenMP并行加速 for (int vi = 0; vi < vertex_list.size(); ++vi) { //逐点计算MVC vector<double> alphaAngle(ROIBoundPointList.size()); for(size_t pi = 1; pi < ROIBoundPointList.size(); pi++) { alphaAngle[pi] = threePointCalAngle(ROIBoundPointList[pi-1], vertex_list[vi], ROIBoundPointList[pi]); } alphaAngle[0] = alphaAngle[ROIBoundPointList.size()-1]; vector<double> MVC(ROIBoundPointList.size()-1, 0); for(size_t pi = 1; pi < ROIBoundPointList.size(); pi++) { double w_a = tan(alphaAngle[pi-1]/2) + tan(alphaAngle[pi]/2); double w_b = (ROIBoundPointList[pi-1] - vertex_list[vi]).Mod(); MVC[pi-1] = w_a / w_b; if(_isnan(MVC[pi-1])==1) { MVC[pi-1] = 0; } } double sum = 0; for(size_t pi = 0; pi < MVC.size(); pi++) { sum = sum + MVC[pi]; } for(size_t pi = 0; pi < MVC.size(); pi++) { MVC[pi] = MVC[pi] / sum; } Vec3d r(0.0,0.0,0.0); for(size_t pi = 0; pi < MVC.size(); pi++) { for(int bi = 0; bi < winBandNum; bi++) { r[bi] = r[bi] + MVC[pi] * diff[pi * winBandNum + bi]; } } tri_mesh_vertex_R[vi] = r; } cout<<"计算完成!" 
<< endl; //遍历每一个三角面 vector<vector<size_t>> face_vertex_index; CDT::Face_iterator fit; for (fit = cdt.faces_begin(); fit!= cdt.faces_end(); ++fit) { vector<size_t> index(3); for(int i = 0; i<3; i++) { auto iter = vertex_map.find(Vector2d(fit->vertex(i)->point().x(), fit->vertex(i)->point().y())); if(iter == vertex_map.end()) { continue; } index[i] = iter->second; } face_vertex_index.push_back(index); } size_t srcImgBufNum = static_cast<size_t>(srcImg.cols) * static_cast<size_t>(srcImg.rows); vector<size_t> clipMap(srcImgBufNum, 0); //标识范围内的点: 0标识初始不能写入,1以上标识在那个三角形 #pragma omp parallel for //开启OpenMP并行加速 for(int fi = 0; fi < face_vertex_index.size(); fi++) { Vector2d v0 = vertex_list[face_vertex_index[fi][0]]; Vector2d v1 = vertex_list[face_vertex_index[fi][1]]; Vector2d v2 = vertex_list[face_vertex_index[fi][2]]; double minX = std::min(std::min(v0.x, v1.x), v2.x); double minY = std::min(std::min(v0.y, v1.y), v2.y); double maxX = std::max(std::max(v0.x, v1.x), v2.x); double maxY = std::max(std::max(v0.y, v1.y), v2.y); int sX = std::max(int(floor(minX)), 0); int sY = std::max(int(floor(minY)), 0); int eX = std::max(int(ceil(maxX)), srcImg.cols - 1); int eY = std::max(int(ceil(maxY)), srcImg.rows - 1); for(int yi = sY; yi <= eY; yi++) { for(int xi = sX; xi <= eX; xi++) { if(PointinTriangle(Vector3d(v0), Vector3d(v1), Vector3d(v2), Vector3d(xi, yi, 0))) { size_t m = static_cast<size_t>(srcImg.cols) * static_cast<size_t>(yi) + xi; clipMap[m] = fi+1; } } } } cout<<"开始插值计算..." 
<< endl; //Mat result(srcImg.rows, srcImg.cols, CV_8UC1); #pragma omp parallel for for (int ri = 0; ri < srcImg.rows; ++ri) { for (int ci = 0; ci < srcImg.cols; ++ci) { size_t l = (size_t) srcImg.cols * ri + ci; if(clipMap[l] == 0) { continue; } if(!Point_In_Polygon_2D(ci, ri, ROIBoundPointList)) { continue; } size_t fi = clipMap[l]-1; size_t index0 = face_vertex_index[fi][0]; size_t index1 = face_vertex_index[fi][1]; size_t index2 = face_vertex_index[fi][2]; vector<double> r(winBandNum, 0); for(int bi = 0; bi < winBandNum; bi++) { Vector3d p0(vertex_list[index0].x, vertex_list[index0].y, tri_mesh_vertex_R[index0][bi]); Vector3d p1(vertex_list[index1].x, vertex_list[index1].y, tri_mesh_vertex_R[index1][bi]); Vector3d p2(vertex_list[index2].x, vertex_list[index2].y, tri_mesh_vertex_R[index2][bi]); Vector3d vp(ci, ri, 0); CalPlanePointZ(p0, p1, p2, vp); r[bi] = vp.z; } for(int bi = 0; bi < winBandNum; bi++) { size_t n = (size_t) srcImg.cols * winBandNum * ri + winBandNum * ci + bi; size_t m = (size_t) dstImg.cols * winBandNum * (ri + posY)+ winBandNum * (ci + posX) + bi; dstImg.data[m] = min(max(srcImg.data[n] + r[bi], 0.0), 255.0); } } } //imwrite("D:/result.tif", result); cout<<"插值完成!" << endl; QTime stopTime = QTime::currentTime(); int elapsed = startTime.msecsTo(stopTime); cout<<"总结完成用时"<<elapsed<<"毫秒"; }
主要思路还是通过ROI多边形栅格建立三角网,计算网格点的MVC,继而计算融合修正值;而其他点的融合修正值则通过所在三角形顶点的融合修正值插值得到。
注意这里麻烦的地方是还得计算每个点位于哪个三角形内,我这里是采取索引数组的办法。如果直接采取遍历每个点与每个三角形的办法,那么时间复杂度可能会超过计算MVC的复杂度。而插值的算法可以参考这篇文章《已知空间三点组成的面求该面上某点的Z值》。
2.3. 第二种优化
在《Coordinates for Instant Image Cloning》这篇论文中,还介绍了第二种优化算法。算法思想是除了减少ROI内的点,还可以减少ROI边界上的点:每个点的MVC值其实可以不用到边界上所有的点,可以通过一种规则算法来指定需要的点。可惜这个规则算法我也没看明白,有机会再进一步研究。
3. 结果
融合的源影像:
融合的目标影像:
融合的结果:
运行的时间:
这里可以看到,优化后的融合耗时为501毫秒,而优化之前的耗时约为1秒,运行时间减少了约50%。
来源:https://www.cnblogs.com/charlee44/p/12528218.html