Introduction
Convert a dense optical flow field into an RGB image according to the Munsell Color System. This isn't of much practical use by itself; it simply provides a way to visualize results when developing an optical flow estimation algorithm or an optical flow application.
The implementation follows the code provided with the Python version of flownet2.
Note: do not store intermediate numeric values in unsigned char variables; it took me a whole day to fix that bug.
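The converter below takes the flow as a CV_32FC2 cv::Mat. If your flow is stored in a Middlebury-style .flo file, a minimal reader sketch might look like the following (read_flo is a hypothetical helper, not part of flownet2; it assumes the usual .flo layout of a 202021.25 float magic number, an int32 width and height, then interleaved u/v floats):

#include <cstdio>
#include <opencv2/core/core.hpp>

// Hypothetical helper: read a Middlebury .flo file into a CV_32FC2 Mat.
cv::Mat read_flo(const char* path) {
    FILE* fp = std::fopen(path, "rb");
    if (!fp) return cv::Mat();
    float magic = 0.f;
    int width = 0, height = 0;
    std::fread(&magic, sizeof(float), 1, fp);   // should equal 202021.25 ("PIEH")
    std::fread(&width, sizeof(int), 1, fp);
    std::fread(&height, sizeof(int), 1, fp);
    if (magic != 202021.25f || width <= 0 || height <= 0) {
        std::fclose(fp);
        return cv::Mat();
    }
    cv::Mat flo(height, width, CV_32FC2);
    // A freshly allocated Mat is continuous, so one read fills the whole field.
    std::fread(flo.ptr<float>(), sizeof(float), 2 * size_t(width) * height, fp);
    std::fclose(fp);
    return flo;
}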
flo2img_tmp.hpp
#ifndef FLO2IMGTMP_HPP_
#define FLO2IMGTMP_HPP_
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif // USE_OPENCV
//typedef unsigned short UINT;
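// Class wrapper used by the caffe-integrated version (flo2img.hpp/.cpp below);
// the standalone test in flo2img_tmp.cpp only calls the free function flo2img().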
class Flo2Img {
public:
Flo2Img();
private:
unsigned short colorwheel[55][3];
unsigned short ncols;
};
void flo2img(cv::Mat& flo, cv::Mat &img);
#endif
flo2img_tmp.cpp
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif // USE_OPENCV
#include "flo2img_tmp.hpp"
#include <iostream>
#include <math.h>
#define UNKNOWN_THRESH 1e5
#define EPS 1e-10
#define pi 3.1415926
void flo2img(cv::Mat& flo, cv::Mat& img)
{
int ncols; // heed the warning above: do not use unsigned char here
unsigned short RY = 15;
unsigned short YG = 6;
unsigned short GC = 4;
unsigned short CB = 11;
unsigned short BM = 13;
unsigned short MR = 6;
ncols = RY + YG + GC + CB + BM + MR;
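// 15 + 6 + 4 + 11 + 13 + 6 = 55 entries in total, matching colorwheel[55][3]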
float colorwheel[55][3];
unsigned short nchans = 3;
unsigned short col = 0;
//RY
for (int i = 0; i<RY; i++)
{
colorwheel[col + i][0] = 255;
colorwheel[col + i][1] = 255 * i / RY;
colorwheel[col + i][2] = 0;
//std::cout << colorwheel[i][1] << '\n';
}
col += RY;
//YG
for (int i = 0; i<YG; i++)
{
colorwheel[col + i][0] = 255 - 255 * i / YG;
colorwheel[col + i][1] = 255;
colorwheel[col + i][2] = 0;
}
col += YG;
//GC
for (int i = 0; i < GC; i++)
{
colorwheel[col + i][1] = 255;
colorwheel[col + i][2] = 255 * i / GC;
colorwheel[col + i][0] = 0;
}
col += GC;
//CB
for (int i = 0; i < CB; i++)
{
colorwheel[col + i][1] = 255 - 255 * i / CB;
colorwheel[col + i][2] = 255;
colorwheel[col + i][0] = 0;
}
col += CB;
//BM
for (int i = 0; i < BM; i++)
{
colorwheel[col + i][2] = 255;
colorwheel[col + i][0] = 255 * i / BM;
colorwheel[col + i][1] = 0;
}
col += BM;
//MR
for (int i = 0; i < MR; i++)
{
colorwheel[col + i][2] = 255 - 255 * i / MR;
colorwheel[col + i][0] = 255;
colorwheel[col + i][1] = 0;
}
//std::cout << '\n';
//for (int i = 0; i < 90; i++)
//{
// for (int j = 0; j < 3; j++)
// {
// std::cout << colorwheel[i][j] << " | ";
// }
// std::cout << '\n';
//}
int row = flo.rows;
int cols = flo.cols;
float max_norm = EPS; // EPS guards against division by zero below
//compute the max norm
for (int i = 0; i < row; i++)
{
for (int j = 0; j < cols; j++)
{
float* data = flo.ptr<float>(i, j);
float u = data[0];
float v = data[1];
float norm = sqrt(u*u + v*v);
if (norm > UNKNOWN_THRESH)
{
data[0] = 0;
data[1] = 0;
}
else if (norm > max_norm)
{
max_norm = norm;
}
}
}
//calculate the rgb value
for (int i = 0; i < row; i++)
{
for (int j = 0; j < cols; j++)
{
float* data = flo.ptr<float>(i, j);
unsigned char* img_data = img.ptr<unsigned char>(i, j);
float u = data[0];
float v = data[1];
float norm = sqrt(u*u + v*v) / max_norm;
float angle = atan2(-v,-u) / pi;
float fk = (angle + 1) / 2 * (float(ncols) - 1);
int k0 = (int)floor(fk);
int k1 = k0 + 1;
if (k1 == ncols) {
k1 = 0;
}
float f = fk - k0;
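// Note: cv::Mat pixels are stored as BGR, so writing img_data[k] in RGB order
// swaps red and blue relative to the Middlebury palette; use img_data[2 - k]
// if you want that exact convention.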
for (int k = 0; k < 3; k++) {
float col0 = (colorwheel[k0][k] / 255);
float col1 = (colorwheel[k1][k] / 255);
float col3 = (1 - f)*col0 + f*col1;
if (norm <= 1) {
col3 = 1 - norm*(1 - col3);
}
else {
col3 *= 0.75;
}
img_data[k] = (unsigned char)(255 * col3);
}
}
}
}
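// Quick self-test: build a synthetic flow field whose vectors point away from
// the image center; the rendered result should look like the color wheel itself.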
void test() {
cv::Mat opticalflow = cv::Mat::zeros(cv::Size(100, 100), CV_32FC2);
int nr = opticalflow.rows; // number of rows
int nc = opticalflow.cols * opticalflow.channels(); // total number of elements per line
for (int j = 0; j<nr; j++) {
float* data = opticalflow.ptr<float>(j);
for (int i = 0; i < nc; i += 2) {
data[i] = i / 2 - 50;
data[i + 1] = j - 50;
}
}
cv::Mat florgb(opticalflow.size(), CV_8UC3);
flo2img(opticalflow, florgb);
cv::imshow("rgb", florgb);
cv::waitKey(0);
}
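With the read_flo sketch from the introduction, a minimal standalone driver might look like this (the file name is only an example):

int main() {
    cv::Mat flo = read_flo("frame_0005.flo"); // hypothetical helper from the intro
    if (flo.empty()) return 1;
    cv::Mat florgb(flo.size(), CV_8UC3);
    flo2img(flo, florgb);
    cv::imshow("rgb", florgb);
    cv::waitKey(0);
    return 0;
}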
Testing
- Compute the optical flow as in the earlier post
- Then build on that to display the result, with the following modifications
Add to \caffe\include\caffe\util:
flo2img.hpp
Add to \caffe\src\caffe\util:
flo2img.cpp
Modify classification.cpp in \caffe\examples\cpp_classification:
#include <caffe/caffe.hpp>
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif // USE_OPENCV
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <iostream>
#include <caffe/util/flo2img.hpp>
#ifdef USE_OPENCV
using namespace caffe; // NOLINT(build/namespaces)
//using namespace std;
using std::string;
//A class for optical flow estimation
class Estimator {
public:
//Constructor
Estimator(const string& model_file,
const string& trained_file);
//Estimate function: takes two cv::Mat images and returns a 2-channel
//float cv::Mat whose values are the estimated optical flow
cv::Mat Estimate(const cv::Mat& img0, const cv::Mat& img1);
private:
//Wrap the blob of an input layer as a vector of cv::Mat channels
void WrapInputLayer(std::vector<cv::Mat>* input_channels, int input_blob = 0);
//Wrap the blob of an output layer as a vector of cv::Mat channels
void WrapOutputLayer(std::vector<cv::Mat>* output_channels, int output_blob = 0);
//Resize the input image, adjust its channel count, and copy it into the input layer
void Preprocess_input(const cv::Mat& img, std::vector<cv::Mat>* input_channels);
//Copy the output data into a cv::Mat object
void Preprocess_output(cv::Mat* img, std::vector<cv::Mat>* output_channels);
private:
//The FlowNet2 network instance
shared_ptr<Net<float> > net_;
//Size of the network input layer
cv::Size input_geometry_;
//Number of channels of the network input layer
int num_channels_;
};
//Constructor
/*
param model_file path to the network definition file -> **.prototxt
param trained_file path to the pre-trained model file -> **.caffemodel
*/
Estimator::Estimator(const string& model_file,
const string& trained_file) {
#ifdef CPU_ONLY
Caffe::set_mode(Caffe::CPU);
#else
Caffe::set_mode(Caffe::GPU);
#endif
/* Load the network. */
net_.reset(new Net<float>(model_file, TEST));
/* Load the pre-trained parameters. */
net_->CopyTrainedLayersFrom(trained_file);
/* obtain the num_channels_ and input_geometry_ */
Blob<float> * input_layer = net_->input_blobs()[0];
num_channels_ = input_layer->channels();
input_geometry_ = cv::Size(input_layer->width(), input_layer->height());
}
//Print a subsample of a Mat's data;
//used to inspect the result produced by flownet2
void printMat(cv::Mat &image) {
int nr = image.rows; // number of rows
int nc = image.cols * image.channels(); // total number of elements per line
for (int j = 0; j<nr; j+=10) {
float* data = image.ptr<float>(j);
for (int i = 0; i<nc; i+=10) {
std::cout << data[i] << " ";
}
std::cout << '\n';
}
}
cv::Mat Estimator::Estimate(const cv::Mat& img0, const cv::Mat& img1)
{
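// Reshape both input blobs to the network geometry, wrap them as cv::Mat
// channel views, copy the two frames in, run a forward pass, then merge the
// 2-channel output into a single Mat and visualize it.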
Blob<float>* input_layer0 = net_->input_blobs()[0];
Blob<float>* input_layer1 = net_->input_blobs()[1];
input_layer0->Reshape(1, num_channels_,
input_geometry_.height, input_geometry_.width);
input_layer1->Reshape(1, num_channels_,
input_geometry_.height, input_geometry_.width);
net_->Reshape();
std::vector<cv::Mat> input_channels0;
std::vector<cv::Mat> input_channels1;
WrapInputLayer(&input_channels0, 0);
WrapInputLayer(&input_channels1, 1);
Preprocess_input(img0, &input_channels0);
Preprocess_input(img1, &input_channels1);
net_->Forward();
std::vector<cv::Mat> output_channels;
WrapOutputLayer(&output_channels, 0);
cv::Mat output;
Preprocess_output(&output, &output_channels);
printMat(output);
Flo2Img flo2img;
cv::Mat florgb(output.size(),CV_8UC3);
std::cout<<'\n'<<florgb.type()<<'\n'<<florgb.size()<<'\n';
flo2img.flo2img(output, florgb);
cv::namedWindow("rgb");
cv::imshow("rgb", florgb);
cv::waitKey(0);
return output;
}
void Estimator::Preprocess_input(const cv::Mat& img,
std::vector<cv::Mat>* input_channels) {
/* Convert the input image to the input image format of the network. */
cv::Mat sample;
if (img.channels() == 3 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
else if (img.channels() == 4 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
else if (img.channels() == 4 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
else if (img.channels() == 1 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
else
sample = img;
cv::Mat sample_resized;
if (sample.size() != input_geometry_)
cv::resize(sample, sample_resized, input_geometry_);
else
sample_resized = sample;
cv::Mat sample_float;
if (num_channels_ == 3)
sample_resized.convertTo(sample_float, CV_32FC3);
else
sample_resized.convertTo(sample_float, CV_32FC1);
/*cv::Mat sample_normalized;
cv::subtract(sample_float, mean_, sample_normalized);*/
/* This operation will write the separate BGR planes directly to the
* input layer of the network because it is wrapped by the cv::Mat
* objects in input_channels. */
cv::split(sample_float, *input_channels);
/*CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
== net_->input_blobs()[0]->cpu_data())
<< "Input channels are not wrapping the input layer of the network.";*/
}
void Estimator::Preprocess_output(cv::Mat* img,
std::vector<cv::Mat>* output_channels) {
//cv::Mat result;
cv::merge(*output_channels, *img);
/*CHECK(reinterpret_cast<float*>(output_channels->at(0).data)
== net_->output_blobs()[0]->cpu_data())
<< "output channels are not wrapping the input layer of the network.";*/
}
void Estimator::WrapInputLayer(std::vector<cv::Mat>* input_channels, int input_blob) {
Blob<float>* input_layer = net_->input_blobs()[input_blob];
int width = input_layer->width();
int height = input_layer->height();
float* input_data = input_layer->mutable_cpu_data();
for (int i = 0; i < input_layer->channels(); ++i) {
cv::Mat channel(height, width, CV_32FC1, input_data);
input_channels->push_back(channel);
input_data += width * height;
}
}
void Estimator::WrapOutputLayer(std::vector<cv::Mat>* output_channels, int output_blob) {
Blob<float>* output_layer = net_->output_blobs()[output_blob];
int width = output_layer->width();
int height = output_layer->height();
float* output_data = output_layer->mutable_cpu_data();
for (int i = 0; i < output_layer->channels(); ++i) {
cv::Mat channel(height, width, CV_32FC1, output_data);
output_channels->push_back(channel);
output_data += width * height;
}
}
int main(int argc, char** argv) {
::google::InitGoogleLogging(argv[0]);
/*string model_file = argv[1];
string trained_file = argv[2];
string mean_file = argv[3];
string label_file = argv[4];*/
string model_file = "D:\\nettest\\flownet2.prototxt";
string trained_file = "D:\\nettest\\flownet2.caffemodel";
//string model_file = "D:\\nettest\\liteflownet.prototxt";
//string trained_file = "D:\\nettest\\liteflownet.caffemodel";
Estimator estimator(model_file, trained_file);
string file1 = "D:\\nettest\\0005.png";
string file2 = "D:\\nettest\\0006.png";
cv::Mat img1 = cv::imread(file1, -1);
cv::Mat img2 = cv::imread(file2, -1);
CHECK(!img1.empty()) << "Unable to decode image " << file1;
CHECK(!img2.empty()) << "Unable to decode image " << file2;
cv::Mat opticalflow = estimator.Estimate(img1, img2);
}
#else
int main(int argc, char** argv) {
LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV.";
}
#endif // USE_OPENCV
Source: CSDN
Author: 梦之泪殇
Link: https://blog.csdn.net/qq_25379821/article/details/81233767