Move OperatorSchema default inference function implementations to .cc file (#40845)
Summary:
This prevents the implementations of those functions (defined as lambdas) from being embedded as weak symbols in every shared library that includes this header.
In combination with https://github.com/pytorch/pytorch/pull/40844, this reduces the size of `libcaffe2_module_test_dynamic.so` from 500 KB to 50 KB.
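As a minimal sketch of the mechanism (hypothetical `Widget` names, not from this PR): a lambda used as a default member initializer in a header has vague linkage, so every translation unit, and hence every shared library, that includes the header emits its own weak copy of the closure and of the `std::function` wrapper instantiated around it; moving the body into an out-of-line constructor leaves a single definition in one object file.

```cpp
#include <functional>

// widget.h -- before: the lambda body lives in the header, so each
// translation unit instantiates std::function's wrapper for it and emits
// the implicit constructor as weak (vague-linkage) symbols.
struct Widget {
  std::function<int(int)> op_ = [](int x) { return x + 1; };
};

// widget.h -- after: only a declaration; no inline initializer.
struct WidgetFixed {
  WidgetFixed();                    // defined once, in widget.cc
  std::function<int(int)> op_;
};

// widget.cc -- the lambda and its std::function wrapper are now emitted
// in exactly one object file.
WidgetFixed::WidgetFixed() : op_([](int x) { return x + 1; }) {}
```

This mirrors the change below: the default tensor- and device-inference lambdas move from in-class initializers in operator_schema.h into the new out-of-line `OpSchema` constructor in operator_schema.cc.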
Pull Request resolved: https://github.com/pytorch/pytorch/pull/40845
Differential Revision: D22334779
Pulled By: malfet
fbshipit-source-id: 64706918fc2947350a58c0877f294b1b8b085455
diff --git a/caffe2/core/operator_schema.cc b/caffe2/core/operator_schema.cc
index 63c5670..3009ba4 100644
--- a/caffe2/core/operator_schema.cc
+++ b/caffe2/core/operator_schema.cc
@@ -3,6 +3,25 @@
namespace caffe2 {
+OpSchema::OpSchema(const string& type, const string& file, const int line)
+ : type_(type), file_(file), line_(line), tensor_inference_function_(
+ [](const OperatorDef& def, const vector<TensorShape>&) {
+ vector<TensorShape> out;
+ for (int i = 0; i < def.output_size(); i++) {
+ TensorShape ts;
+ ts.set_unknown_shape(true);
+ out.push_back(ts);
+ }
+ return out;
+ }), device_inference_function_(
+ [](const OperatorDef& def) {
+ auto op_device =
+ def.has_device_option() ? def.device_option() : DeviceOption();
+ vector<DeviceOption> in_dev(def.input_size(), op_device);
+ vector<DeviceOption> out_dev(def.output_size(), op_device);
+ return std::make_pair(in_dev, out_dev);
+ }) {}
+
bool OpSchema::Verify(const OperatorDef& def) const {
// Check the number of inputs.
if (def.input_size() < min_input_ || def.input_size() > max_input_) {
diff --git a/caffe2/core/operator_schema.h b/caffe2/core/operator_schema.h
index dceaffc..deca56a 100644
--- a/caffe2/core/operator_schema.h
+++ b/caffe2/core/operator_schema.h
@@ -39,9 +39,8 @@
*/
class CAFFE2_API OpSchema {
public:
- OpSchema() : type_("unknown"), file_("unknown"), line_(0) {}
- OpSchema(const string& type, const string& file, const int line)
- : type_(type), file_(file), line_(line) {}
+ OpSchema() : OpSchema("unknown", "unknown", 0) {}
+ OpSchema(const string& type, const string& file, const int line);
/**
* @brief Returns the file that the op schema is registered from.
@@ -443,25 +442,9 @@
std::function<bool(int, int)> inplace_enforced_ = [](int, int) {
return false;
};
- TensorInferenceFunctionType tensor_inference_function_ =
- [](const OperatorDef& def, const vector<TensorShape>&) {
- vector<TensorShape> out;
- for (int i = 0; i < def.output_size(); i++) {
- TensorShape ts;
- ts.set_unknown_shape(true);
- out.push_back(ts);
- }
- return out;
- };
+ TensorInferenceFunctionType tensor_inference_function_;
std::unique_ptr<CostInferenceFunctionType> cost_inference_function_ = nullptr;
- DeviceInferenceFunctionType device_inference_function_ =
- [](const OperatorDef& def) {
- auto op_device =
- def.has_device_option() ? def.device_option() : DeviceOption();
- vector<DeviceOption> in_dev(def.input_size(), op_device);
- vector<DeviceOption> out_dev(def.output_size(), op_device);
- return std::make_pair(in_dev, out_dev);
- };
+ DeviceInferenceFunctionType device_inference_function_;
std::function<std::vector<TensorFiller>(
const std::vector<std::vector<int64_t>>&)>