#include <Python.h>  // NOLINT(build/include_alpha)
// Produce deprecation warnings (needs to come before arrayobject.h inclusion).
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <boost/make_shared.hpp>
#include <boost/python.hpp>
#include <boost/python/raw_function.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
// these need to be included after boost on OS X
#include <string>  // NOLINT(build/include_order)
#include <vector>  // NOLINT(build/include_order)
#include <fstream>  // NOLINT
#include "caffe/caffe.hpp"
#include "caffe/python_layer.hpp"
// Temporary solution for numpy < 1.7 versions: old macro, no promises.
// You're strongly advised to upgrade to >= 1.7.
#ifndef NPY_ARRAY_C_CONTIGUOUS
#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS
#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x))
#endif
namespace bp = boost::python;
namespace caffe {
// For Python, for now, we'll just always use float as the type.
typedef float Dtype;
// NumPy type code matching Dtype above (float -> NPY_FLOAT32).
const int NPY_DTYPE = NPY_FLOAT32;
// Selecting mode.
// Thin wrappers exposing the global Caffe compute-mode switch to Python
// (bp::def cannot bind the static member call directly with an argument).
void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); }
void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); }
// For convenience, check that input files can be opened, and raise an
// exception that boost will send to Python if not (caffe could still crash
// later if the input files are disturbed before they are actually used, but
// this saves frustration in most cases).
static void CheckFile(const string& filename) {
  // ifstream is closed automatically on scope exit (RAII), so the manual
  // close() calls — including the one before the throw — are unnecessary.
  std::ifstream f(filename.c_str());
  if (!f.good()) {
    throw std::runtime_error("Could not open file " + filename);
  }
}
void CheckContiguousArray(PyArrayObject* arr, string name,
int channels, int height, int width) {
if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) {
throw std::runtime_error(name + " must be C contiguous");
}
if (PyArray_NDIM(arr) != 4) {
throw std::runtime_error(name + " must be 4-d");
}
if (PyArray_TYPE(arr) != NPY_FLOAT32) {
throw std::runtime_error(name + " must be float32");
}
if (PyArray_DIMS(arr)[1] != channels) {
throw std::runtime_error(name + " has wrong number of channels");
}
if (PyArray_DIMS(arr)[2] != height) {
throw std::runtime_error(name + " has wrong height");
}
if (PyArray_DIMS(arr)[3] != width) {
throw std::runtime_error(name + " has wrong width");
}
}
// Net constructor for passing phase as int.
// Reconstructed: the template arguments were stripped from this block
// (shared_ptr<Net<Dtype> >, static_cast<Phase>).
shared_ptr<Net<Dtype> > Net_Init(
    string param_file, int phase) {
  CheckFile(param_file);
  shared_ptr<Net<Dtype> > net(new Net<Dtype>(param_file,
      static_cast<Phase>(phase)));
  return net;
}
// Net construct-and-load convenience constructor: builds the net from
// param_file, then copies learned weights from pretrained_param_file.
// Reconstructed: the template arguments were stripped from this block.
shared_ptr<Net<Dtype> > Net_Init_Load(
    string param_file, string pretrained_param_file, int phase) {
  CheckFile(param_file);
  CheckFile(pretrained_param_file);
  shared_ptr<Net<Dtype> > net(new Net<Dtype>(param_file,
      static_cast<Phase>(phase)));
  net->CopyTrainedLayersFrom(pretrained_param_file);
  return net;
}
void Net_Save(const Net& net, string filename) {
NetParameter net_param;
net.ToProto(&net_param, false);
WriteProtoToBinaryFile(net_param, filename.c_str());
}
void Net_SetInputArrays(Net* net, bp::object data_obj,
bp::object labels_obj) {
// check that this network has an input MemoryDataLayer
shared_ptr > md_layer =
boost::dynamic_pointer_cast >(net->layers()[0]);
if (!md_layer) {
throw std::runtime_error("set_input_arrays may only be called if the"
" first layer is a MemoryDataLayer");
}
// check that we were passed appropriately-sized contiguous memory
PyArrayObject* data_arr =
reinterpret_cast(data_obj.ptr());
PyArrayObject* labels_arr =
reinterpret_cast(labels_obj.ptr());
CheckContiguousArray(data_arr, "data array", md_layer->channels(),
md_layer->height(), md_layer->width());
CheckContiguousArray(labels_arr, "labels array", 1, 1, 1);
if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) {
throw std::runtime_error("data and labels must have the same first"
" dimension");
}
if (PyArray_DIMS(data_arr)[0] % md_layer->batch_size() != 0) {
throw std::runtime_error("first dimensions of input arrays must be a"
" multiple of batch size");
}
md_layer->Reset(static_cast(PyArray_DATA(data_arr)),
static_cast(PyArray_DATA(labels_arr)),
PyArray_DIMS(data_arr)[0]);
}
// Read a text-format SolverParameter prototxt and construct the matching
// Solver subclass. The caller owns the returned pointer (the binding below
// uses bp::manage_new_object). Reconstructed: template arguments were
// stripped and "&param" was mangled into "¶m" by an encoding error.
Solver<Dtype>* GetSolverFromFile(const string& filename) {
  SolverParameter param;
  ReadProtoFromTextFileOrDie(filename, &param);
  return GetSolver<Dtype>(param);
}
struct NdarrayConverterGenerator {
template struct apply;
};
template <>
struct NdarrayConverterGenerator::apply {
struct type {
PyObject* operator() (Dtype* data) const {
// Just store the data pointer, and add the shape information in postcall.
return PyArray_SimpleNewFromData(0, NULL, NPY_DTYPE, data);
}
const PyTypeObject* get_pytype() {
return &PyArray_Type;
}
};
};
// Call policy pairing with NdarrayConverterGenerator: after the wrapped
// method returns, rebuild the result ndarray with the blob's shape and make
// it keep the owning pyblob alive via the array base object.
// Reconstructed: extractor/cast template arguments were stripped from this
// block (bp::tuple, shared_ptr<Blob<Dtype> >, vector<npy_intp>,
// PyArrayObject*).
struct NdarrayCallPolicies : public bp::default_call_policies {
  typedef NdarrayConverterGenerator result_converter;
  PyObject* postcall(PyObject* pyargs, PyObject* result) {
    // Argument 0 is the Python-side Blob object the method was called on.
    bp::object pyblob = bp::extract<bp::tuple>(pyargs)()[0];
    shared_ptr<Blob<Dtype> > blob =
      bp::extract<shared_ptr<Blob<Dtype> > >(pyblob);
    // Free the temporary pointer-holding array, and construct a new one with
    // the shape information from the blob.
    void* data = PyArray_DATA(reinterpret_cast<PyArrayObject*>(result));
    Py_DECREF(result);
    const int num_axes = blob->num_axes();
    vector<npy_intp> dims(blob->shape().begin(), blob->shape().end());
    PyObject *arr_obj = PyArray_SimpleNewFromData(num_axes, dims.data(),
                                                  NPY_FLOAT32, data);
    // SetBaseObject steals a ref, so we need to INCREF.
    Py_INCREF(pyblob.ptr());
    PyArray_SetBaseObject(reinterpret_cast<PyArrayObject*>(arr_obj),
        pyblob.ptr());
    return arr_obj;
  }
};
// Variadic Blob.reshape(*dims) entry point, bound with bp::raw_function.
// args[0] is the Blob itself; the remaining positional args are the new
// shape. Reconstructed: extractor/container template arguments were
// stripped from this block (Blob<Dtype>*, vector<int>, bp::extract<int>).
bp::object Blob_Reshape(bp::tuple args, bp::dict kwargs) {
  if (bp::len(kwargs) > 0) {
    throw std::runtime_error("Blob.reshape takes no kwargs");
  }
  Blob<Dtype>* self = bp::extract<Blob<Dtype>*>(args[0]);
  vector<int> shape(bp::len(args) - 1);
  for (int i = 1; i < bp::len(args); ++i) {
    shape[i - 1] = bp::extract<int>(args[i]);
  }
  self->Reshape(shape);
  // We need to explicitly return None to use bp::raw_function.
  return bp::object();
}
// Solver::Solve has an optional resume-file argument (0 or 1 args).
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1);

// Module definition. Reconstructed: every template-argument list in this
// block had been stripped (Net<Dtype>, Blob<Dtype>, bp::init<...>,
// bp::copy_const_reference, vector wrapper instantiations, ...).
BOOST_PYTHON_MODULE(_caffe) {
  // below, we prepend an underscore to methods that will be replaced
  // in Python

  // Caffe utility functions
  bp::def("set_mode_cpu", &set_mode_cpu);
  bp::def("set_mode_gpu", &set_mode_gpu);
  bp::def("set_device", &Caffe::SetDevice);

  bp::def("layer_type_list", &LayerRegistry<Dtype>::LayerTypeList);

  bp::class_<Net<Dtype>, shared_ptr<Net<Dtype> >, boost::noncopyable >("Net",
    bp::no_init)
    .def("__init__", bp::make_constructor(&Net_Init))
    .def("__init__", bp::make_constructor(&Net_Init_Load))
    .def("_forward", &Net<Dtype>::ForwardFromTo)
    .def("_backward", &Net<Dtype>::BackwardFromTo)
    .def("reshape", &Net<Dtype>::Reshape)
    // The cast is to select a particular overload.
    .def("copy_from", static_cast<void (Net<Dtype>::*)(const string)>(
        &Net<Dtype>::CopyTrainedLayersFrom))
    .def("share_with", &Net<Dtype>::ShareTrainedLayersWith)
    .add_property("_blob_loss_weights", bp::make_function(
        &Net<Dtype>::blob_loss_weights, bp::return_internal_reference<>()))
    .add_property("_blobs", bp::make_function(&Net<Dtype>::blobs,
        bp::return_internal_reference<>()))
    .add_property("layers", bp::make_function(&Net<Dtype>::layers,
        bp::return_internal_reference<>()))
    .add_property("_blob_names", bp::make_function(&Net<Dtype>::blob_names,
        bp::return_value_policy<bp::copy_const_reference>()))
    .add_property("_layer_names", bp::make_function(&Net<Dtype>::layer_names,
        bp::return_value_policy<bp::copy_const_reference>()))
    .add_property("_inputs", bp::make_function(&Net<Dtype>::input_blob_indices,
        bp::return_value_policy<bp::copy_const_reference>()))
    .add_property("_outputs",
        bp::make_function(&Net<Dtype>::output_blob_indices,
        bp::return_value_policy<bp::copy_const_reference>()))
    // Custodian-and-ward ties the numpy arrays' lifetime to the net object.
    .def("_set_input_arrays", &Net_SetInputArrays,
        bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >())
    .def("save", &Net_Save);

  bp::class_<Blob<Dtype>, shared_ptr<Blob<Dtype> >, boost::noncopyable>(
    "Blob", bp::no_init)
    .add_property("shape",
        bp::make_function(
            static_cast<const vector<int>& (Blob<Dtype>::*)() const>(
                &Blob<Dtype>::shape),
            bp::return_value_policy<bp::copy_const_reference>()))
    .add_property("num",      &Blob<Dtype>::num)
    .add_property("channels", &Blob<Dtype>::channels)
    .add_property("height",   &Blob<Dtype>::height)
    .add_property("width",    &Blob<Dtype>::width)
    .add_property("count",    static_cast<int (Blob<Dtype>::*)() const>(
        &Blob<Dtype>::count))
    .def("reshape",           bp::raw_function(&Blob_Reshape))
    // data/diff return live numpy views; NdarrayCallPolicies attaches shape
    // and keeps the blob alive via the array base object.
    .add_property("data",     bp::make_function(&Blob<Dtype>::mutable_cpu_data,
          NdarrayCallPolicies()))
    .add_property("diff",     bp::make_function(&Blob<Dtype>::mutable_cpu_diff,
          NdarrayCallPolicies()));

  // Held as PythonLayer so Python subclasses can override virtuals.
  bp::class_<Layer<Dtype>, shared_ptr<PythonLayer<Dtype> >,
    boost::noncopyable>("Layer", bp::init<const LayerParameter&>())
    .add_property("blobs", bp::make_function(&Layer<Dtype>::blobs,
        bp::return_internal_reference<>()))
    .def("setup", &Layer<Dtype>::LayerSetUp)
    .def("reshape", &Layer<Dtype>::Reshape)
    .add_property("type", bp::make_function(&Layer<Dtype>::type));
  bp::register_ptr_to_python<shared_ptr<Layer<Dtype> > >();

  bp::class_<LayerParameter>("LayerParameter", bp::no_init);

  bp::class_<Solver<Dtype>, shared_ptr<Solver<Dtype> >, boost::noncopyable>(
    "Solver", bp::no_init)
    .add_property("net", &Solver<Dtype>::net)
    .add_property("test_nets", bp::make_function(&Solver<Dtype>::test_nets,
          bp::return_internal_reference<>()))
    .add_property("iter", &Solver<Dtype>::iter)
    .def("solve", static_cast<void (Solver<Dtype>::*)(const char*)>(
          &Solver<Dtype>::Solve), SolveOverloads())
    .def("step", &Solver<Dtype>::Step)
    .def("restore", &Solver<Dtype>::Restore);

  bp::class_<SGDSolver<Dtype>, bp::bases<Solver<Dtype> >,
    shared_ptr<SGDSolver<Dtype> >, boost::noncopyable>(
        "SGDSolver", bp::init<string>());
  bp::class_<NesterovSolver<Dtype>, bp::bases<Solver<Dtype> >,
    shared_ptr<NesterovSolver<Dtype> >, boost::noncopyable>(
        "NesterovSolver", bp::init<string>());
  bp::class_<AdaGradSolver<Dtype>, bp::bases<Solver<Dtype> >,
    shared_ptr<AdaGradSolver<Dtype> >, boost::noncopyable>(
        "AdaGradSolver", bp::init<string>());

  bp::def("get_solver", &GetSolverFromFile,
      bp::return_value_policy<bp::manage_new_object>());

  // vector wrappers for all the vector types we use
  bp::class_<vector<shared_ptr<Blob<Dtype> > > >("BlobVec")
    .def(bp::vector_indexing_suite<vector<shared_ptr<Blob<Dtype> > >, true>());
  bp::class_<vector<Blob<Dtype>*> >("RawBlobVec")
    .def(bp::vector_indexing_suite<vector<Blob<Dtype>*>, true>());
  bp::class_<vector<shared_ptr<Layer<Dtype> > > >("LayerVec")
    .def(bp::vector_indexing_suite<vector<shared_ptr<Layer<Dtype> > >, true>());
  bp::class_<vector<string> >("StringVec")
    .def(bp::vector_indexing_suite<vector<string> >());
  bp::class_<vector<int> >("IntVec")
    .def(bp::vector_indexing_suite<vector<int> >());
  bp::class_<vector<Dtype> >("DtypeVec")
    .def(bp::vector_indexing_suite<vector<Dtype> >());
  bp::class_<vector<shared_ptr<Net<Dtype> > > >("NetVec")
    .def(bp::vector_indexing_suite<vector<shared_ptr<Net<Dtype> > >, true>());
  bp::class_<vector<bool> >("BoolVec")
    .def(bp::vector_indexing_suite<vector<bool> >());

  // boost python expects a void (missing) return value, while import_array
  // returns NULL for python3. import_array1() forces a void return value.
  import_array1();
}
} // namespace caffe