by Shicai Yang (@星空下的巫師) on 2015/08/06
#include "caffe/caffe.hpp"
#include <string>
#include <vector>
using namespace caffe;
char *proto = "h:\\models\\caffe\\deploy.prototxt"; /* load the CaffeNet config */
Phase phase = TEST; /* or TRAIN */
Caffe::set_mode(Caffe::CPU);
// Caffe::set_mode(Caffe::GPU);
// Caffe::SetDevice(0);
//! Note: every "net" mentioned below refers to this one
boost::shared_ptr< Net<float> > net(new caffe::Net<float>(proto, phase));
char *model = "h:\\models\\caffe\\bvlc_reference_caffenet.caffemodel";
net->CopyTrainedLayersFrom(model);
char *mean_file = "h:\\models\\caffe\\imagenet_mean.binaryproto";
Blob<float> image_mean;
BlobProto blob_proto;
const float *mean_ptr;
unsigned int num_pixel;
bool succeed = ReadProtoFromBinaryFile(mean_file, &blob_proto);
if (succeed)
{
    image_mean.FromProto(blob_proto);                 /* parse the proto into a Blob */
    num_pixel = image_mean.count();                   /* NCHW = 1x3x256x256 */
    mean_ptr = (const float *) image_mean.cpu_data(); /* raw mean values */
}
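//! Note: a minimal sketch of the mean subtraction that the next note assumes; input_data
// is a hypothetical float buffer holding one image in the same NCHW layout as the mean blob
for (unsigned int i = 0; i < num_pixel; ++i)
    input_data[i] -= mean_ptr[i];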
//! Note: data_ptr must point to data that is already preprocessed (mean-subtracted,
// matching the net's input image width/height and batch size)
void caffe_forward(boost::shared_ptr< Net<float> > & net, float *data_ptr)
{
    Blob<float>* input_blob = net->input_blobs()[0];
    /* mutable_cpu_data() syncs to the GPU automatically when the net runs in GPU mode */
    memcpy(input_blob->mutable_cpu_data(), data_ptr, sizeof(float) * input_blob->count());
    net->ForwardPrefilled(); /* forward pass over the prefilled input */
}
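//! Note: a sketch of calling caffe_forward; the zero fill stands in for real
// mean-subtracted pixels (NCHW = 10x3x227x227 for the CaffeNet deploy net)
unsigned int input_count = net->input_blobs()[0]->count();
std::vector<float> input(input_count, 0.0f); /* fill with preprocessed data in practice */
caffe_forward(net, input.data());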
//! Note: a Net blob is each layer's output data, i.e. the feature maps
// char *query_blob_name = "conv1";
unsigned int get_blob_index(boost::shared_ptr< Net<float> > & net, char *query_blob_name)
{
    std::string str_query(query_blob_name);
    const std::vector<std::string> & blob_names = net->blob_names();
    for (unsigned int i = 0; i != blob_names.size(); ++i)
        if (str_query == blob_names[i]) return i;
    LOG(FATAL) << "Unknown blob name: " << str_query;
}
//! Note: according to CaffeNet's deploy.prototxt, this net has 15 blobs in total, from data all the way to prob
char *query_blob_name = "conv1"; /* data, conv1, pool1, norm1, fc6, prob, etc */
unsigned int blob_id = get_blob_index(net, query_blob_name);
boost::shared_ptr< Blob<float> > blob = net->blobs()[blob_id];
unsigned int num_data = blob->count(); /* NCHW = 10x96x55x55 */
const float *blob_ptr = (const float *) blob->cpu_data();
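//! Note: blob_ptr is a flat array; Blob::offset() maps (n, c, h, w) coordinates to a
// linear index, e.g. to read a single conv1 activation:
float v = blob_ptr[blob->offset(0, 5, 10, 10)]; /* image 0, channel 5, position (10, 10) */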
//! Note: layers cover every layer of the network; CaffeNet, for example, has 23 layers
// char *query_layer_name = "conv1";
unsigned int get_layer_index(boost::shared_ptr< Net<float> > & net, char *query_layer_name)
{
    std::string str_query(query_layer_name);
    const std::vector<std::string> & layer_names = net->layer_names();
    for (unsigned int i = 0; i != layer_names.size(); ++i)
        if (str_query == layer_names[i]) return i;
    LOG(FATAL) << "Unknown layer name: " << str_query;
}
//! Note: unlike Net blobs, which are feature maps, a Layer's blobs are the weights and biases of layers such as conv and fc
char *query_layer_name = "conv1";
const float *weight_ptr, *bias_ptr;
unsigned int layer_id = get_layer_index(net, query_layer_name);
boost::shared_ptr< Layer<float> > layer = net->layers()[layer_id];
std::vector<boost::shared_ptr< Blob<float> > > blobs = layer->blobs();
if (blobs.size() > 0)
{
    weight_ptr = (const float *) blobs[0]->cpu_data(); /* conv1 weights: 96x3x11x11 */
    bias_ptr   = (const float *) blobs[1]->cpu_data(); /* conv1 biases: 96 */
}
//! Note: in training mode, reading a given layer's gradient data works the same way; the only difference is changing cpu_data to cpu_diff
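//! Note: for example, the gradient read described above (meaningful only after a backward pass):
const float *weight_diff_ptr = (const float *) blobs[0]->cpu_diff(); /* d(loss)/d(weights) */
const float *bias_diff_ptr   = (const float *) blobs[1]->cpu_diff(); /* d(loss)/d(biases)  */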
const float* data_ptr;      /* pointer to the data to be written (source) */
float* weight_ptr = NULL;   /* pointer to a layer's weights inside the net (destination) */
unsigned int data_size;     /* number of values to write */
char *layer_name = "conv1"; /* name of the layer to modify */
unsigned int layer_id = get_layer_index(net, layer_name);
boost::shared_ptr< Blob<float> > blob = net->layers()[layer_id]->blobs()[0];
CHECK(data_size == blob->count());
switch (Caffe::mode())
{
case Caffe::CPU: weight_ptr = blob->mutable_cpu_data(); break;
case Caffe::GPU: weight_ptr = blob->mutable_gpu_data(); break;
default: LOG(FATAL) << "Unknown Caffe mode";
}
caffe_copy(blob->count(), data_ptr, weight_ptr); /* copies on CPU or GPU as appropriate */
//! Note: in training mode, manually modifying a given layer's gradient data is similar:
// change mutable_cpu_data to mutable_cpu_diff, and mutable_gpu_data to mutable_gpu_diff
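//! Note: a sketch of that gradient write, reusing blob and data_ptr from above:
float* diff_ptr = blob->mutable_cpu_diff(); /* or mutable_gpu_diff() in GPU mode */
caffe_copy(blob->count(), data_ptr, diff_ptr);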
char* weights_file = "bvlc_reference_caffenet_new.caffemodel";
NetParameter net_param;
net->ToProto(&net_param, false); /* false: do not serialize the gradient diffs */
WriteProtoToBinaryFile(net_param, weights_file);
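//! Note: as a quick sanity check, the file just written can be loaded back the same way
// as the original model:
net->CopyTrainedLayersFrom(weights_file);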