用c++怎麼去調用你訓練好的caffe模型啊?我一點頭緒都沒有,求大神指教
剛好寫過一個Caffe教程,你可以看看:shicai/Caffe_Manual · GitHub
-----------------------------------------20161229----------------------------
先別著急上c++代碼。if 懂python,看 BVLC/caffe 的 classification.ipynb;
if 懂matlab,看 BVLC/caffe 的 classification_demo.m。看懂其中任何一個之後,再看 BVLC/caffe 的 cpp_classification,之後會發現這三個教程的思路是完全一樣一樣的。
簡單說一下,使用caffe基本上可以分為三步(以python為例)1. 載入網路net = caffe.Net(model_def, model_weights, caffe.TEST)
2. 填充輸入數據:net.blobs["data"].data[...] = transformed_image # c++ 裡就是用了caffe_copy,本質是memcpy
這裡的 "data" 要和網路的第零層的name一致
3. 前向,取出結果output = net.forward()
output_prob = output["prob"][0]
這裡的 "prob" 要和網路的最後一層的name一致。
這時output_prob(值的範圍是[0, 1])就是結果矩陣, 取出top_k即可。----------------------------------------詳細用法-----------------------------
caffe 提供了三個介面python,matlab,c++。前兩個更多是演算法驗證的時候用到,真正的把caffe應用到工程上必然需要使用c++介面,即使用從c++代碼的層次去調用caffe。其實caffe裡邊提供了一個classification的demo(caffe/examples/cpp_classification/classification.cpp),如果把這部分代碼看懂了,直接使用c++介面就沒有什麼難度了。
提供一種研究caffe代碼的方法,下載windows版本caffe,完成vs 上的調試和代碼研讀,很方便的。提供一個針對我自己業務在classification.cpp基礎上修改的代碼。CNNClassifier.h// CNNClassifier
// CNNClassifier.h
// Uses the caffe C++ interface to recognise clouds in images
// (modified from caffe's examples/cpp_classification/classification.cpp).
// author: visint::zhangzhen
// date:   2016/9/26
//
// NOTE(review): the original #include targets and every template argument
// list were destroyed by HTML extraction ("#include &", "vector&", ...).
// The includes and declarations below are reconstructed from the member
// usage visible in the .cpp fragments — confirm against the original source.
#pragma once
#include <caffe/caffe.hpp>
#include <boost/shared_ptr.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <string>
#include <vector>
#include "params.hpp" // parameter class; all runtime parameters live here (singleton)

using namespace caffe; // kept from the original; avoid in new headers

// Free helper: returns the indices of the N largest values of v,
// in descending value order (mirrors classification.cpp's Argmax).
std::vector<int> argmax(const std::vector<float>& v, int N);

class CNNClassifier
{
public:
	// Builds the classifier from a parameter bundle (model/weights/mean
	// paths, batch size, crop centre/radius).
	CNNClassifier(const parameter params);
	~CNNClassifier();
	// Copies one image (already resized to the input geometry) into batch
	// slot `index` of the network input blob.
	void put(int index, const cv::Mat src);
	// Resizes the input blob's batch dimension to n (no-op when unchanged).
	void reshape(int n);
	// Batch classification from image file paths / in-memory images.
	// (The "pridict" spelling is kept: it is the interface callers use.)
	void pridict(const std::vector<std::string>& imgpath, std::vector<int>& labels, std::vector<float>& confidences);
	void pridict(const std::vector<cv::Mat>& imgs, std::vector<int>& labels, std::vector<float>& confidences);
private:
	void initCNN();                            // load net + weights, cache input geometry
	void setMean(const std::string mean_file); // load the mean image (optional)
	void predict(std::vector<int>& labels, std::vector<float>& confidences); // forward pass on the filled batch
private:
	boost::shared_ptr<Net<float> > net_; // the caffe network
	cv::Mat mean_;                       // mean image, resized to input geometry
	int num_channels_;                   // channels of the input layer (1 or 3)
	bool hasmean_;                       // true once a mean file was loaded
	cv::Size input_geometry_;            // (width, height) of the input layer
	int stdHeight_;
	int stdWidth_;
	const parameter params_;             // all configuration
};
#include "CNNClassifier.h"

using namespace std;
using namespace cv;

// Constructor: keeps the parameter bundle and brings up the caffe network.
// NOTE(review): the original carried a large commented-out older constructor
// (taking model/trained/mean paths directly) that duplicated initCNN();
// it has been removed as dead code.
CNNClassifier::CNNClassifier(const parameter params) : params_(params){
	initCNN();
}

// Destructor — nothing to release explicitly: net_ is a shared_ptr.
CNNClassifier::~CNNClassifier(){
}
void CNNClassifier::initCNN(){
LOG(INFO) &<&< "init cnn";
#ifdef CPU_ONLY
LOG(INFO) &<&< "CPU mode";
Caffe::set_mode(Caffe::CPU);
#else
LOG(INFO) &<&< "GPU mode";
Caffe::set_mode(Caffe::GPU);
#endif
net_.reset(new Net&
net_-&>CopyTrainedLayersFrom(params_.trained_file_);
Blob&
num_channels_ = input_layer-&>channels();
stdHeight_ = input_layer-&>height();
stdWidth_ = input_layer-&>width();
input_geometry_ = cv::Size(input_layer-&>width(), input_layer-&>height());
hasmean_ = false;
setMean(params_.mean_file_);
}
}
if (mean_file != ""){
LOG(INFO) &<&< "set Mean";
BlobProto blob_proto;
ReadProtoFromBinaryFileOrDie(mean_file.c_str(), blob_proto);
/* Convert from BlobProto to Blob&
Blob&
mean_blob.FromProto(blob_proto);
CHECK_EQ(mean_blob.channels(), num_channels_)
&<&< "Number of channels of mean file doesn"t match input layer.";
/* The format of the mean file is planar 32-bit float BGR or grayscale. */
std::vector&
float* data = mean_blob.mutable_cpu_data();
for (int i = 0; i &< num_channels_; ++i) {
/* Extract an individual channel. */
cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
channels.push_back(channel);
data += mean_blob.height() * mean_blob.width();
}
/* Merge the separate channels into a single image. */
cv::Mat mean;
cv::merge(channels, mean);
/* Compute the global mean pixel value and create a mean image
* filled with this value. */
//cv::Scalar channel_mean = cv::mean(mean);
//mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
cv::resize(mean, mean_, input_geometry_, 0.0, 0.0, INTER_CUBIC);
hasmean_ = true;
}
}
// Copies `src` (already resized to input_geometry_) into batch slot `index`
// of the network input blob, converting to float and normalising:
// mean-image subtraction when a mean was loaded, otherwise division by 255.
// NOTE(review): the per-channel copy loop was scattered by extraction; it is
// reconstructed from the surviving caffe_copy fragment — confirm.
void CNNClassifier::put(int index, const cv::Mat src){
	CHECK_EQ(src.channels(), num_channels_);
	Blob<float>* input_layer = net_->input_blobs()[0];
	cv::Mat wrapimg;
	if (hasmean_){
		if (num_channels_ == 3){
			src.convertTo(wrapimg, CV_32FC3);
		}
		else{
			src.convertTo(wrapimg, CV_32FC1);
		}
		wrapimg = wrapimg - mean_;
	}
	else{
		if (num_channels_ == 3){
			src.convertTo(wrapimg, CV_32FC3);
		}
		else{
			src.convertTo(wrapimg, CV_32FC1);
		}
		wrapimg /= 255.0;
	}
	float* input_data = input_layer->mutable_cpu_data();
	std::vector<cv::Mat> wrapimginchannel;
	cv::split(wrapimg, wrapimginchannel);
	// Planar blob layout: channel i of image `index` starts at
	// offset(index) + i * H * W, where H*W == count(2).
	int offset = input_layer->offset(index);
	for (size_t i = 0; i < wrapimginchannel.size(); ++i){
		caffe::caffe_copy(input_layer->count(2), wrapimginchannel[i].ptr<float>(0),
			input_data + offset + i * input_layer->count(2));
	}
}
inline void CNNClassifier::reshape(int n){
Blob&
if (input-&>num() != n){
LOG(INFO) &<&< "reshape :" &<&< n;
input-&>Reshape(n, num_channels_, input-&>height(), input-&>width());
net_-&>Reshape();
}
}
void CNNClassifier::pridict(const std::vector&
const int BATCHNUM = params_.batchnum_;
size_t N = imgpath.size();
size_t cnt = (int)ceil(1.0 * N / BATCHNUM);
for (size_t i = 0; i &< cnt; ++i){
size_t start = i * BATCHNUM;
size_t end = std::min(N, (i + 1) * BATCHNUM);
reshape(end - start);
for (size_t j = start; j &< end; ++j){
cv::Mat src = cv::imread(imgpath[j]);
cv::Mat roiCloud = src(cv::Rect(params_.center_.first - params_.radius_, params_.center_.second - params_.radius_, params_.radius_ * 2, params_.radius_*2));
cv::Mat stdRoiCloud;
cv::resize(roiCloud, stdRoiCloud, input_geometry_);
put(j % BATCHNUM, stdRoiCloud);
}
vector&
vector&
const int BATCHNUM = params_.batchnum_;
size_t N = imgs.size();
size_t cnt = (int)ceil(1.0 * N / BATCHNUM);
for (size_t i = 0; i &< cnt; ++i){
size_t start = i * BATCHNUM;
size_t end = std::min(N, (i + 1) * BATCHNUM);
reshape(end - start);
for (size_t j = start; j &< end; ++j){
cv::Mat src = imgs[j];
cv::Mat roiCloud = src(cv::Rect(params_.center_.first - params_.radius_, params_.center_.second - params_.radius_, params_.radius_ * 2, params_.radius_ * 2));
// 掩碼
//std::vector&
//cv::split(roiCloud, roiCloudChannels);
//for (size_t i = 0; i &< roiCloudChannels.size(); ++i){
// //roiCloudChannels[i] = roiCloudChannels[i] * params_.mask_;
// cv::multiply(roiCloudChannels[i], params_.mask_, roiCloudChannels[i]);
//}
cv::Mat stdRoiCloud;
//cv::merge(roiCloudChannels, stdRoiCloud);
//cv::resize(stdRoiCloud, stdRoiCloud, input_geometry_);
cv::resize(roiCloud, stdRoiCloud, input_geometry_);
put(j % BATCHNUM, stdRoiCloud);
}
vector&
vector&
std::vector&
Blob&
int label;
float confidence;
for (int i = 0; i &< output_layer-&>shape(0); i++) {
int start = i* output_layer-&>channels();
const float* begin = output_layer-&>cpu_data() + start;
const float* end = begin + output_layer-&>channels();
vector&
const int topN = 1;
vector&
label = label_result[0];
confidence = confidence_result[label];
confidences.emplace_back(confidence);
labels.emplace_back(label);
}
}
// Returns the indices of the N largest values of v, in descending value
// order (reconstruction of classification.cpp's Argmax helper).
// Precondition: 0 <= N <= v.size().
std::vector<int> argmax(const std::vector<float>& v, int N){
	std::vector<std::pair<float, int> > pairs;
	pairs.reserve(v.size());
	for (size_t i = 0; i < v.size(); ++i)
		pairs.push_back(std::make_pair(v[i], static_cast<int>(i)));
	// Only the first N positions need to be ordered.
	std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(),
		[](const std::pair<float, int>& a, const std::pair<float, int>& b){ return a.first > b.first; });
	std::vector<int> result;
	result.reserve(N);
	for (int i = 0; i < N; ++i)
		result.push_back(pairs[i].second);
	return result;
}
其實都是一個方式:linux下,寫一個.sh文件
前段時間用過一點caffe,用手機憑記憶瞎說兩句,如有錯誤請指正。大概流程是,首先創建一個Net對象,然後載入模型和參數,然後從Net裡面抽出輸入層和輸出層的layer的指針,然後把數據複製到輸入層的blob裡面,Net->ForwardPrefilled(),最後從輸出層的blob里取出數據就好了。詳情請閱讀caffe.cpp。
caffe 的example裡面有個實例的~
caffe_root/examples/cpp_classification/classification.cpp 就是一個實例啊
推薦閱讀:
※Mac需要装cuda吗?
※cuDNN怎麼無法下載呢?
※打算用caffe做人臉識別,用的是vgg,如何使用model?
※有沒有雲端的深度學習計算服務?
TAG:Caffe深度學習框架 |